From ab88a75c70ceaa780530e6248e29b182e9d2da09 Mon Sep 17 00:00:00 2001
From: rafaelsideguide <150964962+rafaelsideguide@users.noreply.github.com>
Date: Thu, 22 Aug 2024 13:38:34 -0300
Subject: [PATCH] fixes sdks

---
 .../__tests__/v1/e2e_withAuth/index.test.ts   | 103 +++---
 apps/js-sdk/firecrawl/src/index.ts            |  10 +-
 .../__tests__/v1/e2e_withAuth/test.py         | 308 ++++++++++++++----
 apps/python-sdk/firecrawl/firecrawl.py        |  18 +-
 4 files changed, 317 insertions(+), 122 deletions(-)

diff --git a/apps/js-sdk/firecrawl/src/__tests__/v1/e2e_withAuth/index.test.ts b/apps/js-sdk/firecrawl/src/__tests__/v1/e2e_withAuth/index.test.ts
index 724996bc..81c870f5 100644
--- a/apps/js-sdk/firecrawl/src/__tests__/v1/e2e_withAuth/index.test.ts
+++ b/apps/js-sdk/firecrawl/src/__tests__/v1/e2e_withAuth/index.test.ts
@@ -30,24 +30,24 @@ describe('FirecrawlApp E2E Tests', () => {
     const app = new FirecrawlApp({ apiKey: "this_is_just_a_preview_token", apiUrl: API_URL });
     const response = await app.scrapeUrl('https://roastmywebsite.ai') as ScrapeResponse;
     expect(response).not.toBeNull();
-    expect(response.data?.markdown).toContain("_Roast_");
+    expect(response?.markdown).toContain("_Roast_");
   }, 30000); // 30 seconds timeout
 
   test.concurrent('should return successful response for valid scrape', async () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl('https://roastmywebsite.ai') as ScrapeResponse;
     expect(response).not.toBeNull();
-    expect(response.data).not.toHaveProperty('content'); // v0
-    expect(response.data).not.toHaveProperty('html');
-    expect(response.data).not.toHaveProperty('rawHtml');
-    expect(response.data).not.toHaveProperty('screenshot');
-    expect(response.data).not.toHaveProperty('links');
+    expect(response).not.toHaveProperty('content'); // v0
+    expect(response).not.toHaveProperty('html');
+    expect(response).not.toHaveProperty('rawHtml');
+    expect(response).not.toHaveProperty('screenshot');
+    expect(response).not.toHaveProperty('links');
 
-    expect(response.data).toHaveProperty('markdown');
-    expect(response.data).toHaveProperty('metadata');
+    expect(response).toHaveProperty('markdown');
+    expect(response).toHaveProperty('metadata');
   }, 30000); // 30 seconds timeout
 
-  test.concurrent('should return successful response with valid API key and include HTML', async () => {
+  test.concurrent('should return successful response with valid API key and options', async () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl(
       'https://roastmywebsite.ai', {
@@ -60,58 +60,58 @@ describe('FirecrawlApp E2E Tests', () => {
       waitFor: 1000
     }) as ScrapeResponse;
     expect(response).not.toBeNull();
-    expect(response.data).not.toHaveProperty('content'); // v0
-    expect(response.data?.markdown).toContain("_Roast_");
-    expect(response.data?.html).toContain(" {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl('https://arxiv.org/pdf/astro-ph/9301001.pdf') as ScrapeResponse;
     expect(response).not.toBeNull();
-    expect(response.data?.markdown).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
+    expect(response?.markdown).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
   }, 30000); // 30 seconds timeout
 
   test.concurrent('should return successful response for valid scrape with PDF file without explicit extension', async () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl('https://arxiv.org/pdf/astro-ph/9301001') as ScrapeResponse;
     expect(response).not.toBeNull();
-    expect(response.data?.markdown).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
+    expect(response?.markdown).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
   }, 30000); // 30 seconds timeout
 
   test.concurrent('should throw error for invalid API key on crawl', async () => {
@@ -304,4 +304,9 @@ describe('FirecrawlApp E2E Tests', () => {
     const filteredLinks = response.links?.filter((link: string) => link.includes("roastmywebsite.ai"));
     expect(filteredLinks?.length).toBeGreaterThan(0);
   }, 30000); // 30 seconds timeout
+
+  test('should throw NotImplementedError for search on v1', async () => {
+    const app = new FirecrawlApp({ apiUrl: API_URL, apiKey: TEST_API_KEY });
+    await expect(app.search("test query")).rejects.toThrow("Search is not supported in v1");
+  });
 });
diff --git a/apps/js-sdk/firecrawl/src/index.ts b/apps/js-sdk/firecrawl/src/index.ts
index 90c86a2a..90617de1 100644
--- a/apps/js-sdk/firecrawl/src/index.ts
+++ b/apps/js-sdk/firecrawl/src/index.ts
@@ -144,10 +144,9 @@ export interface ScrapeParamsV0 {
  * Response interface for scraping operations.
  * Defines the structure of the response received after a scraping operation.
  */
-export interface ScrapeResponse {
+export interface ScrapeResponse extends FirecrawlDocument {
   success: boolean;
   warning?: string;
-  data?: FirecrawlDocument;
   error?: string;
 }
 
@@ -375,7 +374,12 @@ export default class FirecrawlApp {
       if (this.version == 'v0') {
         return responseData as ScrapeResponseV0;
       } else {
-        return responseData as ScrapeResponse;
+        return {
+          success: true,
+          warning: responseData.warning,
+          error: responseData.error,
+          ...responseData.data
+        } as ScrapeResponse;
       }
     } else {
       throw new Error(`Failed to scrape URL. Error: ${responseData.error}`);
diff --git a/apps/python-sdk/firecrawl/__tests__/v1/e2e_withAuth/test.py b/apps/python-sdk/firecrawl/__tests__/v1/e2e_withAuth/test.py
index 517d8cf9..5fb2c674 100644
--- a/apps/python-sdk/firecrawl/__tests__/v1/e2e_withAuth/test.py
+++ b/apps/python-sdk/firecrawl/__tests__/v1/e2e_withAuth/test.py
@@ -4,6 +4,7 @@ import time
 import os
 from uuid import uuid4
 from dotenv import load_dotenv
+from datetime import datetime
 
 load_dotenv()
 
@@ -27,42 +28,92 @@ def test_scrape_url_invalid_api_key():
     invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key")
     with pytest.raises(Exception) as excinfo:
         invalid_app.scrape_url('https://firecrawl.dev')
-    assert "Unexpected error during scrape URL: Status code 401. Unauthorized: Invalid token" in str(excinfo.value)
+    assert "Unauthorized: Invalid token" in str(excinfo.value)
 
 def test_blocklisted_url():
     blocklisted_url = "https://facebook.com/fake-test"
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
     with pytest.raises(Exception) as excinfo:
         app.scrape_url(blocklisted_url)
-    assert "Unexpected error during scrape URL: Status code 403. Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it." in str(excinfo.value)
+    assert "URL is blocked. Firecrawl currently does not support social media scraping due to policy restrictions." in str(excinfo.value)
 
 def test_successful_response_with_valid_preview_token():
     app = FirecrawlApp(api_url=API_URL, api_key="this_is_just_a_preview_token")
     response = app.scrape_url('https://roastmywebsite.ai')
     assert response is not None
-    assert 'content' in response
-    assert "_Roast_" in response['content']
+    assert "_Roast_" in response['markdown']
+    assert "content" not in response
+    assert "html" not in response
+    assert "metadata" in response
+    assert "links" not in response
+    assert "rawHtml" not in response
 
-def test_scrape_url_e2e():
+def test_successful_response_for_valid_scrape():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
     response = app.scrape_url('https://roastmywebsite.ai')
     assert response is not None
-    assert 'content' not in response
     assert 'markdown' in response
-    assert 'metadata' in response
-    assert 'html' not in response
     assert "_Roast_" in response['markdown']
+    assert 'metadata' in response
+    assert 'content' not in response
+    assert 'html' not in response
+    assert 'rawHtml' not in response
+    assert 'screenshot' not in response
+    assert 'links' not in response
 
-def test_successful_response_with_valid_api_key_and_include_html():
+def test_successful_response_with_valid_api_key_and_options():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-    response = app.scrape_url('https://roastmywebsite.ai', { 'formats': [ 'markdown', 'html' ]})
+    params = {
+        'formats': ['markdown', 'html', 'rawHtml', 'screenshot', 'links'],
+        'headers': {'x-key': 'test'},
+        'includeTags': ['h1'],
+        'excludeTags': ['h2'],
+        'onlyMainContent': True,
+        'timeout': 30000,
+        'waitFor': 1000
+    }
+    response = app.scrape_url('https://roastmywebsite.ai', params)
     assert response is not None
     assert 'content' not in response
     assert 'markdown' in response
     assert 'html' in response
-    assert 'metadata' in response
+    assert 'rawHtml' in response
+    assert 'screenshot' in response
+    assert 'links' in response
     assert "_Roast_" in response['markdown']
     assert " 0
+    assert "https://" in response['links'][0]
+    assert 'metadata' in response
+    assert 'title' in response['metadata']
+    assert 'description' in response['metadata']
+    assert 'keywords' in response['metadata']
+    assert 'robots' in response['metadata']
+    assert 'ogTitle' in response['metadata']
+    assert 'ogDescription' in response['metadata']
+    assert 'ogUrl' in response['metadata']
+    assert 'ogImage' in response['metadata']
+    assert 'ogLocaleAlternate' in response['metadata']
+    assert 'ogSiteName' in response['metadata']
+    assert 'sourceURL' in response['metadata']
+    assert 'statusCode' in response['metadata']
+    assert 'pageStatusCode' not in response['metadata']
+    assert 'pageError' not in response['metadata']
+    assert 'error' not in response['metadata']
+    assert response['metadata']['title'] == "Roast My Website"
+    assert response['metadata']['description'] == "Welcome to Roast My Website, the ultimate tool for putting your website through the wringer! This repository harnesses the power of Firecrawl to scrape and capture screenshots of websites, and then unleashes the latest LLM vision models to mercilessly roast them. 🌶️"
+    assert response['metadata']['keywords'] == "Roast My Website,Roast,Website,GitHub,Firecrawl"
+    assert response['metadata']['robots'] == "follow, index"
+    assert response['metadata']['ogTitle'] == "Roast My Website"
+    assert response['metadata']['ogDescription'] == "Welcome to Roast My Website, the ultimate tool for putting your website through the wringer! This repository harnesses the power of Firecrawl to scrape and capture screenshots of websites, and then unleashes the latest LLM vision models to mercilessly roast them. 🌶️"
+    assert response['metadata']['ogUrl'] == "https://www.roastmywebsite.ai"
+    assert response['metadata']['ogImage'] == "https://www.roastmywebsite.ai/og.png"
+    assert response['metadata']['ogLocaleAlternate'] == []
+    assert response['metadata']['ogSiteName'] == "Roast My Website"
+    assert response['metadata']['sourceURL'] == "https://roastmywebsite.ai"
+    assert response['metadata']['statusCode'] == 200
 
 def test_successful_response_for_valid_scrape_with_pdf_file():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
@@ -70,65 +121,202 @@ def test_successful_response_for_valid_scrape_with_pdf_file():
     assert response is not None
     assert 'content' not in response
     assert 'metadata' in response
-    assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['content']
+    assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['markdown']
 
 def test_successful_response_for_valid_scrape_with_pdf_file_without_explicit_extension():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
     response = app.scrape_url('https://arxiv.org/pdf/astro-ph/9301001')
-    time.sleep(6) # wait for 6 seconds
+    time.sleep(1) # wait for 1 second
     assert response is not None
-    assert 'content' not in response
-    assert 'metadata' in response
-    assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['content']
+    assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['markdown']
 
 def test_crawl_url_invalid_api_key():
     invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key")
     with pytest.raises(Exception) as excinfo:
         invalid_app.crawl_url('https://firecrawl.dev')
-    assert "Unexpected error during start crawl job: Status code 401. Unauthorized: Invalid token" in str(excinfo.value)
+    assert "Unauthorized: Invalid token" in str(excinfo.value)
 
 def test_should_return_error_for_blocklisted_url():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
     blocklisted_url = "https://twitter.com/fake-test"
     with pytest.raises(Exception) as excinfo:
         app.crawl_url(blocklisted_url)
-    assert "Unexpected error during start crawl job: Status code 403. Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it." in str(excinfo.value)
+    assert "URL is blocked. Firecrawl currently does not support social media scraping due to policy restrictions." in str(excinfo.value)
 
 def test_crawl_url_wait_for_completion_e2e():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-    response = app.crawl_url('https://roastmywebsite.ai', {'crawlerOptions': {'excludes': ['blog/*']}}, True)
+    response = app.crawl_url('https://roastmywebsite.ai', {'excludePaths': ['blog/*']}, True, 30)
     assert response is not None
-    assert len(response) > 0
-    assert 'content' not in response[0]
-    assert 'markdown' in response[0]
-    assert "_Roast_" in response[0]['markdown']
+    assert 'totalCount' in response
+    assert response['totalCount'] > 0
+    assert 'creditsUsed' in response
+    assert response['creditsUsed'] > 0
+    assert 'expiresAt' in response
+    assert datetime.strptime(response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
+    assert 'status' in response
+    assert response['status'] == 'completed'
+    assert 'next' not in response
+    assert len(response['data']) > 0
+    assert 'markdown' in response['data'][0]
+    assert "_Roast_" in response['data'][0]['markdown']
+    assert 'content' not in response['data'][0]
+    assert 'html' not in response['data'][0]
+    assert 'rawHtml' not in response['data'][0]
+    assert 'screenshot' not in response['data'][0]
+    assert 'links' not in response['data'][0]
+    assert 'metadata' in response['data'][0]
+    assert 'title' in response['data'][0]['metadata']
+    assert 'description' in response['data'][0]['metadata']
+    assert 'language' in response['data'][0]['metadata']
+    assert 'sourceURL' in response['data'][0]['metadata']
+    assert 'statusCode' in response['data'][0]['metadata']
+    assert 'error' not in response['data'][0]['metadata']
+
+def test_crawl_url_with_options_and_wait_for_completion():
+    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
+    response = app.crawl_url('https://roastmywebsite.ai', {
+        'excludePaths': ['blog/*'],
+        'includePaths': ['/'],
+        'maxDepth': 2,
+        'ignoreSitemap': True,
+        'limit': 10,
+        'allowBackwardLinks': True,
+        'allowExternalLinks': True,
+        'scrapeOptions': {
+            'formats': ['markdown', 'html', 'rawHtml', 'screenshot', 'links'],
+            'headers': {"x-key": "test"},
+            'includeTags': ['h1'],
+            'excludeTags': ['h2'],
+            'onlyMainContent': True,
+            'waitFor': 1000
+        }
+    }, True, 30)
+    assert response is not None
+    assert 'totalCount' in response
+    assert response['totalCount'] > 0
+    assert 'creditsUsed' in response
+    assert response['creditsUsed'] > 0
+    assert 'expiresAt' in response
+    assert datetime.strptime(response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
+    assert 'status' in response
+    assert response['status'] == 'completed'
+    assert 'next' not in response
+    assert len(response['data']) > 0
+    assert 'markdown' in response['data'][0]
+    assert "_Roast_" in response['data'][0]['markdown']
+    assert 'content' not in response['data'][0]
+    assert 'html' in response['data'][0]
+    assert " 0
+    assert 'metadata' in response['data'][0]
+    assert 'title' in response['data'][0]['metadata']
+    assert 'description' in response['data'][0]['metadata']
+    assert 'language' in response['data'][0]['metadata']
+    assert 'sourceURL' in response['data'][0]['metadata']
+    assert 'statusCode' in response['data'][0]['metadata']
+    assert 'error' not in response['data'][0]['metadata']
 
 def test_crawl_url_with_idempotency_key_e2e():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
     uniqueIdempotencyKey = str(uuid4())
-    response = app.crawl_url('https://roastmywebsite.ai', {'crawlerOptions': {'excludes': ['blog/*']}}, True, 2, uniqueIdempotencyKey)
+    response = app.crawl_url('https://roastmywebsite.ai', {'excludePaths': ['blog/*']}, False, 2, uniqueIdempotencyKey)
     assert response is not None
-    assert len(response) > 0
-    assert 'content' in response[0]
-    assert "_Roast_" in response[0]['content']
+    assert 'id' in response
 
     with pytest.raises(Exception) as excinfo:
-        app.crawl_url('https://firecrawl.dev', {'crawlerOptions': {'excludes': ['blog/*']}}, True, 2, uniqueIdempotencyKey)
-    assert "Conflict: Failed to start crawl job due to a conflict. Idempotency key already used" in str(excinfo.value)
+        app.crawl_url('https://firecrawl.dev', {'excludePaths': ['blog/*']}, True, 2, uniqueIdempotencyKey)
+    assert "Idempotency key already used" in str(excinfo.value)
 
 def test_check_crawl_status_e2e():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-    response = app.crawl_url('https://firecrawl.dev', {'crawlerOptions': {'excludes': ['blog/*']}}, False)
+    response = app.crawl_url('https://firecrawl.dev', {'scrapeOptions': {'formats': ['markdown', 'html', 'rawHtml', 'screenshot', 'links']}}, False)
     assert response is not None
-    assert 'jobId' in response
+    assert 'id' in response
 
-    time.sleep(30) # wait for 30 seconds
-    status_response = app.check_crawl_status(response['jobId'])
+    max_checks = 15
+    checks = 0
+    status_response = app.check_crawl_status(response['id'])
+
+    while status_response['status'] == 'scraping' and checks < max_checks:
+        time.sleep(1) # wait for 1 second
+        assert 'partial_data' not in status_response
+        assert 'current' not in status_response
+        assert 'data' in status_response
+        assert 'totalCount' in status_response
+        assert 'creditsUsed' in status_response
+        assert 'expiresAt' in status_response
+        assert 'status' in status_response
+        assert 'next' in status_response
+        assert status_response['totalCount'] > 0
+        assert status_response['creditsUsed'] > 0
+        assert datetime.strptime(status_response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
+        assert status_response['status'] == 'scraping'
+        assert '/v1/crawl/' in status_response['next']
+        status_response = app.check_crawl_status(response['id'])
+        checks += 1
+
     assert status_response is not None
+    assert 'totalCount' in status_response
+    assert status_response['totalCount'] > 0
+    assert 'creditsUsed' in status_response
+    assert status_response['creditsUsed'] > 0
+    assert 'expiresAt' in status_response
+    assert datetime.strptime(status_response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
     assert 'status' in status_response
     assert status_response['status'] == 'completed'
-    assert 'data' in status_response
     assert len(status_response['data']) > 0
+    assert 'markdown' in status_response['data'][0]
+    assert len(status_response['data'][0]['markdown']) > 10
+    assert 'content' not in status_response['data'][0]
+    assert 'html' in status_response['data'][0]
+    assert " 0
+    assert 'metadata' in status_response['data'][0]
+    assert 'title' in status_response['data'][0]['metadata']
+    assert 'description' in status_response['data'][0]['metadata']
+    assert 'language' in status_response['data'][0]['metadata']
+    assert 'sourceURL' in status_response['data'][0]['metadata']
+    assert 'statusCode' in status_response['data'][0]['metadata']
+    assert 'error' not in status_response['data'][0]['metadata']
+
+def test_invalid_api_key_on_map():
+    invalid_app = FirecrawlApp(api_key="invalid_api_key", api_url=API_URL)
+    with pytest.raises(Exception) as excinfo:
+        invalid_app.map_url('https://roastmywebsite.ai')
+    assert "Unauthorized: Invalid token" in str(excinfo.value)
+
+def test_blocklisted_url_on_map():
+    app = FirecrawlApp(api_key=TEST_API_KEY, api_url=API_URL)
+    blocklisted_url = "https://facebook.com/fake-test"
+    with pytest.raises(Exception) as excinfo:
+        app.map_url(blocklisted_url)
+    assert "URL is blocked. Firecrawl currently does not support social media scraping due to policy restrictions." in str(excinfo.value)
+
+def test_successful_response_with_valid_preview_token_on_map():
+    app = FirecrawlApp(api_key="this_is_just_a_preview_token", api_url=API_URL)
+    response = app.map_url('https://roastmywebsite.ai')
+    assert response is not None
+    assert len(response) > 0
+
+def test_successful_response_for_valid_map():
+    app = FirecrawlApp(api_key=TEST_API_KEY, api_url=API_URL)
+    response = app.map_url('https://roastmywebsite.ai')
+    assert response is not None
+    assert len(response) > 0
+    assert any("https://" in link for link in response)
+    filtered_links = [link for link in response if "roastmywebsite.ai" in link]
+    assert len(filtered_links) > 0
 
 def test_search_e2e():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
@@ -136,33 +324,29 @@ def test_search_e2e():
         app.search("test query")
     assert "Search is not supported in v1" in str(excinfo.value)
 
-def test_llm_extraction():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-    response = app.scrape_url("https://mendable.ai", {
-        'extractorOptions': {
-            'mode': 'llm-extraction',
-            'extractionPrompt': "Based on the information on the page, find what the company's mission is and whether it supports SSO, and whether it is open source",
-            'extractionSchema': {
-                'type': 'object',
-                'properties': {
-                    'company_mission': {'type': 'string'},
-                    'supports_sso': {'type': 'boolean'},
-                    'is_open_source': {'type': 'boolean'}
-                },
-                'required': ['company_mission', 'supports_sso', 'is_open_source']
-            }
-        }
-    })
-    assert response is not None
-    assert 'llm_extraction' in response
-    llm_extraction = response['llm_extraction']
-    assert 'company_mission' in llm_extraction
-    assert isinstance(llm_extraction['supports_sso'], bool)
-    assert isinstance(llm_extraction['is_open_source'], bool)
+# def test_llm_extraction():
+#     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
+#     response = app.scrape_url("https://mendable.ai", {
+#         'extractorOptions': {
+#             'mode': 'llm-extraction',
+#             'extractionPrompt': "Based on the information on the page, find what the company's mission is and whether it supports SSO, and whether it is open source",
+#             'extractionSchema': {
+#                 'type': 'object',
+#                 'properties': {
+#                     'company_mission': {'type': 'string'},
+#                     'supports_sso': {'type': 'boolean'},
+#                     'is_open_source': {'type': 'boolean'}
+#                 },
+#                 'required': ['company_mission', 'supports_sso', 'is_open_source']
+#             }
+#         }
+#     })
+#     assert response is not None
+#     assert 'llm_extraction' in response
+#     llm_extraction = response['llm_extraction']
+#     assert 'company_mission' in llm_extraction
+#     assert isinstance(llm_extraction['supports_sso'], bool)
+#     assert isinstance(llm_extraction['is_open_source'], bool)
+
 
-def test_map_e2e():
-    app = FirecrawlApp(api_url=API_URL, api_key="this_is_just_a_preview_token")
-    response = app.map_url('https://roastmywebsite.ai')
-    assert response is not None
-    assert isinstance(response, list)
\ No newline at end of file
diff --git a/apps/python-sdk/firecrawl/firecrawl.py b/apps/python-sdk/firecrawl/firecrawl.py
index f67afbdb..4f71cc78 100644
--- a/apps/python-sdk/firecrawl/firecrawl.py
+++ b/apps/python-sdk/firecrawl/firecrawl.py
@@ -244,8 +244,9 @@ class FirecrawlApp:
         )
         if response.status_code == 200:
             response = response.json()
-            if response['success'] and 'data' in response:
-                return response['data']
+            print(response)
+            if response['success'] and 'links' in response:
+                return response['links']
             else:
                 raise Exception(f'Failed to map URL. Error: {response["error"]}')
         else:
@@ -387,18 +388,19 @@ class FirecrawlApp:
         Raises:
             Exception: An exception with a message containing the status code and error details from the response.
         """
-        error_message = response.json().get('error', 'No additional error details provided.')
+        error_message = response.json().get('error', 'No error message provided.')
+        error_details = response.json().get('details', 'No additional error details provided.')
 
         if response.status_code == 402:
-            message = f"Payment Required: Failed to {action}. {error_message}"
+            message = f"Payment Required: Failed to {action}. {error_message} - {error_details}"
         elif response.status_code == 408:
-            message = f"Request Timeout: Failed to {action} as the request timed out. {error_message}"
+            message = f"Request Timeout: Failed to {action} as the request timed out. {error_message} - {error_details}"
         elif response.status_code == 409:
-            message = f"Conflict: Failed to {action} due to a conflict. {error_message}"
+            message = f"Conflict: Failed to {action} due to a conflict. {error_message} - {error_details}"
         elif response.status_code == 500:
-            message = f"Internal Server Error: Failed to {action}. {error_message}"
+            message = f"Internal Server Error: Failed to {action}. {error_message} - {error_details}"
         else:
-            message = f"Unexpected error during {action}: Status code {response.status_code}. {error_message}"
+            message = f"Unexpected error during {action}: Status code {response.status_code}. {error_message} - {error_details}"
 
         # Raise an HTTPError with the custom message and attach the response
         raise requests.exceptions.HTTPError(message, response=response)