mirror of https://github.com/mendableai/firecrawl.git
synced 2024-11-16 11:42:24 +08:00

fixes sdks

This commit is contained in:
parent 0b37cbce4a
commit ab88a75c70
@@ -30,24 +30,24 @@ describe('FirecrawlApp E2E Tests', () => {
     const app = new FirecrawlApp({ apiKey: "this_is_just_a_preview_token", apiUrl: API_URL });
     const response = await app.scrapeUrl('https://roastmywebsite.ai') as ScrapeResponse;
     expect(response).not.toBeNull();
-    expect(response.data?.markdown).toContain("_Roast_");
+    expect(response?.markdown).toContain("_Roast_");
   }, 30000); // 30 seconds timeout
 
   test.concurrent('should return successful response for valid scrape', async () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl('https://roastmywebsite.ai') as ScrapeResponse;
     expect(response).not.toBeNull();
-    expect(response.data).not.toHaveProperty('content'); // v0
-    expect(response.data).not.toHaveProperty('html');
-    expect(response.data).not.toHaveProperty('rawHtml');
-    expect(response.data).not.toHaveProperty('screenshot');
-    expect(response.data).not.toHaveProperty('links');
+    expect(response).not.toHaveProperty('content'); // v0
+    expect(response).not.toHaveProperty('html');
+    expect(response).not.toHaveProperty('rawHtml');
+    expect(response).not.toHaveProperty('screenshot');
+    expect(response).not.toHaveProperty('links');
 
-    expect(response.data).toHaveProperty('markdown');
-    expect(response.data).toHaveProperty('metadata');
+    expect(response).toHaveProperty('markdown');
+    expect(response).toHaveProperty('metadata');
   }, 30000); // 30 seconds timeout
 
-  test.concurrent('should return successful response with valid API key and include HTML', async () => {
+  test.concurrent('should return successful response with valid API key and options', async () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl(
       'https://roastmywebsite.ai', {
@@ -60,58 +60,58 @@ describe('FirecrawlApp E2E Tests', () => {
       waitFor: 1000
     }) as ScrapeResponse;
     expect(response).not.toBeNull();
-    expect(response.data).not.toHaveProperty('content'); // v0
-    expect(response.data?.markdown).toContain("_Roast_");
-    expect(response.data?.html).toContain("<h1");
-    expect(response.data?.rawHtml).toContain("<h1");
-    expect(response.data?.screenshot).not.toBeUndefined();
-    expect(response.data?.screenshot).not.toBeNull();
-    expect(response.data?.screenshot).toContain("https://");
-    expect(response.data?.links).not.toBeNull();
-    expect(response.data?.links?.length).toBeGreaterThan(0);
-    expect(response.data?.links?.[0]).toContain("https://");
-    expect(response.data?.metadata).not.toBeNull();
-    expect(response.data?.metadata).toHaveProperty("title");
-    expect(response.data?.metadata).toHaveProperty("description");
-    expect(response.data?.metadata).toHaveProperty("keywords");
-    expect(response.data?.metadata).toHaveProperty("robots");
-    expect(response.data?.metadata).toHaveProperty("ogTitle");
-    expect(response.data?.metadata).toHaveProperty("ogDescription");
-    expect(response.data?.metadata).toHaveProperty("ogUrl");
-    expect(response.data?.metadata).toHaveProperty("ogImage");
-    expect(response.data?.metadata).toHaveProperty("ogLocaleAlternate");
-    expect(response.data?.metadata).toHaveProperty("ogSiteName");
-    expect(response.data?.metadata).toHaveProperty("sourceURL");
-    expect(response.data?.metadata).not.toHaveProperty("pageStatusCode");
-    expect(response.data?.metadata).toHaveProperty("statusCode");
-    expect(response.data?.metadata).not.toHaveProperty("pageError");
-    expect(response.data?.metadata.error).toBeUndefined();
-    expect(response.data?.metadata.title).toBe("Roast My Website");
-    expect(response.data?.metadata.description).toBe("Welcome to Roast My Website, the ultimate tool for putting your website through the wringer! This repository harnesses the power of Firecrawl to scrape and capture screenshots of websites, and then unleashes the latest LLM vision models to mercilessly roast them. 🌶️");
-    expect(response.data?.metadata.keywords).toBe("Roast My Website,Roast,Website,GitHub,Firecrawl");
-    expect(response.data?.metadata.robots).toBe("follow, index");
-    expect(response.data?.metadata.ogTitle).toBe("Roast My Website");
-    expect(response.data?.metadata.ogDescription).toBe("Welcome to Roast My Website, the ultimate tool for putting your website through the wringer! This repository harnesses the power of Firecrawl to scrape and capture screenshots of websites, and then unleashes the latest LLM vision models to mercilessly roast them. 🌶️");
-    expect(response.data?.metadata.ogUrl).toBe("https://www.roastmywebsite.ai");
-    expect(response.data?.metadata.ogImage).toBe("https://www.roastmywebsite.ai/og.png");
-    expect(response.data?.metadata.ogLocaleAlternate).toStrictEqual([]);
-    expect(response.data?.metadata.ogSiteName).toBe("Roast My Website");
-    expect(response.data?.metadata.sourceURL).toBe("https://roastmywebsite.ai");
-    expect(response.data?.metadata.statusCode).toBe(200);
+    expect(response).not.toHaveProperty('content'); // v0
+    expect(response.markdown).toContain("_Roast_");
+    expect(response.html).toContain("<h1");
+    expect(response.rawHtml).toContain("<h1");
+    expect(response.screenshot).not.toBeUndefined();
+    expect(response.screenshot).not.toBeNull();
+    expect(response.screenshot).toContain("https://");
+    expect(response.links).not.toBeNull();
+    expect(response.links?.length).toBeGreaterThan(0);
+    expect(response.links?.[0]).toContain("https://");
+    expect(response.metadata).not.toBeNull();
+    expect(response.metadata).toHaveProperty("title");
+    expect(response.metadata).toHaveProperty("description");
+    expect(response.metadata).toHaveProperty("keywords");
+    expect(response.metadata).toHaveProperty("robots");
+    expect(response.metadata).toHaveProperty("ogTitle");
+    expect(response.metadata).toHaveProperty("ogDescription");
+    expect(response.metadata).toHaveProperty("ogUrl");
+    expect(response.metadata).toHaveProperty("ogImage");
+    expect(response.metadata).toHaveProperty("ogLocaleAlternate");
+    expect(response.metadata).toHaveProperty("ogSiteName");
+    expect(response.metadata).toHaveProperty("sourceURL");
+    expect(response.metadata).not.toHaveProperty("pageStatusCode");
+    expect(response.metadata).toHaveProperty("statusCode");
+    expect(response.metadata).not.toHaveProperty("pageError");
+    expect(response.metadata.error).toBeUndefined();
+    expect(response.metadata.title).toBe("Roast My Website");
+    expect(response.metadata.description).toBe("Welcome to Roast My Website, the ultimate tool for putting your website through the wringer! This repository harnesses the power of Firecrawl to scrape and capture screenshots of websites, and then unleashes the latest LLM vision models to mercilessly roast them. 🌶️");
+    expect(response.metadata.keywords).toBe("Roast My Website,Roast,Website,GitHub,Firecrawl");
+    expect(response.metadata.robots).toBe("follow, index");
+    expect(response.metadata.ogTitle).toBe("Roast My Website");
+    expect(response.metadata.ogDescription).toBe("Welcome to Roast My Website, the ultimate tool for putting your website through the wringer! This repository harnesses the power of Firecrawl to scrape and capture screenshots of websites, and then unleashes the latest LLM vision models to mercilessly roast them. 🌶️");
+    expect(response.metadata.ogUrl).toBe("https://www.roastmywebsite.ai");
+    expect(response.metadata.ogImage).toBe("https://www.roastmywebsite.ai/og.png");
+    expect(response.metadata.ogLocaleAlternate).toStrictEqual([]);
+    expect(response.metadata.ogSiteName).toBe("Roast My Website");
+    expect(response.metadata.sourceURL).toBe("https://roastmywebsite.ai");
+    expect(response.metadata.statusCode).toBe(200);
   }, 30000); // 30 seconds timeout
 
   test.concurrent('should return successful response for valid scrape with PDF file', async () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl('https://arxiv.org/pdf/astro-ph/9301001.pdf') as ScrapeResponse;
     expect(response).not.toBeNull();
-    expect(response.data?.markdown).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
+    expect(response?.markdown).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
   }, 30000); // 30 seconds timeout
 
   test.concurrent('should return successful response for valid scrape with PDF file without explicit extension', async () => {
     const app = new FirecrawlApp({ apiKey: TEST_API_KEY, apiUrl: API_URL });
     const response = await app.scrapeUrl('https://arxiv.org/pdf/astro-ph/9301001') as ScrapeResponse;
     expect(response).not.toBeNull();
-    expect(response.data?.markdown).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
+    expect(response?.markdown).toContain('We present spectrophotometric observations of the Broad Line Radio Galaxy');
   }, 30000); // 30 seconds timeout
 
   test.concurrent('should throw error for invalid API key on crawl', async () => {
@@ -304,4 +304,9 @@ describe('FirecrawlApp E2E Tests', () => {
     const filteredLinks = response.links?.filter((link: string) => link.includes("roastmywebsite.ai"));
     expect(filteredLinks?.length).toBeGreaterThan(0);
   }, 30000); // 30 seconds timeout
+
+  test('should throw NotImplementedError for search on v1', async () => {
+    const app = new FirecrawlApp({ apiUrl: API_URL, apiKey: TEST_API_KEY });
+    await expect(app.search("test query")).rejects.toThrow("Search is not supported in v1");
+  });
 });

@@ -144,10 +144,9 @@ export interface ScrapeParamsV0 {
  * Response interface for scraping operations.
  * Defines the structure of the response received after a scraping operation.
  */
-export interface ScrapeResponse {
+export interface ScrapeResponse extends FirecrawlDocument {
   success: boolean;
   warning?: string;
-  data?: FirecrawlDocument;
   error?: string;
 }
 
@@ -375,7 +374,12 @@ export default class FirecrawlApp {
       if (this.version == 'v0') {
         return responseData as ScrapeResponseV0;
       } else {
-        return responseData as ScrapeResponse;
+        return {
+          success: true,
+          warning: responseData.warning,
+          error: responseData.error,
+          ...responseData.data
+        } as ScrapeResponse;
       }
     } else {
       throw new Error(`Failed to scrape URL. Error: ${responseData.error}`);
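
Note: the net effect of the two hunks above is that v1 responses are flattened. ScrapeResponse now extends FirecrawlDocument, and scrapeUrl spreads responseData.data onto the object it returns, which is why the tests switch from response.data?.markdown to response.markdown. A minimal call-site sketch of the migration this implies (the package import path and env-based key are assumptions, not part of this diff):

import FirecrawlApp, { ScrapeResponse } from '@mendable/firecrawl-js';

async function demo() {
  const app = new FirecrawlApp({ apiKey: process.env.TEST_API_KEY });
  const response = await app.scrapeUrl('https://roastmywebsite.ai') as ScrapeResponse;
  // v0 nested the document under `data`: response.data?.markdown
  // v1 (this commit) puts document fields directly on the response:
  console.log(response.markdown);
  console.log(response.metadata);
}
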
@@ -4,6 +4,7 @@ import time
 import os
 from uuid import uuid4
 from dotenv import load_dotenv
+from datetime import datetime
 
 load_dotenv()
 
@@ -27,42 +28,92 @@ def test_scrape_url_invalid_api_key():
     invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key")
     with pytest.raises(Exception) as excinfo:
         invalid_app.scrape_url('https://firecrawl.dev')
-    assert "Unexpected error during scrape URL: Status code 401. Unauthorized: Invalid token" in str(excinfo.value)
+    assert "Unauthorized: Invalid token" in str(excinfo.value)
 
 def test_blocklisted_url():
     blocklisted_url = "https://facebook.com/fake-test"
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
     with pytest.raises(Exception) as excinfo:
         app.scrape_url(blocklisted_url)
-    assert "Unexpected error during scrape URL: Status code 403. Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it." in str(excinfo.value)
+    assert "URL is blocked. Firecrawl currently does not support social media scraping due to policy restrictions." in str(excinfo.value)
 
 def test_successful_response_with_valid_preview_token():
     app = FirecrawlApp(api_url=API_URL, api_key="this_is_just_a_preview_token")
     response = app.scrape_url('https://roastmywebsite.ai')
     assert response is not None
-    assert 'content' in response
-    assert "_Roast_" in response['content']
+    assert "_Roast_" in response['markdown']
+    assert "content" not in response
+    assert "html" not in response
+    assert "metadata" in response
+    assert "links" not in response
+    assert "rawHtml" not in response
 
-def test_scrape_url_e2e():
+def test_successful_response_for_valid_scrape():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
     response = app.scrape_url('https://roastmywebsite.ai')
     assert response is not None
-    assert 'content' not in response
-    assert 'markdown' in response
-    assert 'metadata' in response
-    assert 'html' not in response
     assert "_Roast_" in response['markdown']
+    assert 'metadata' in response
+    assert 'content' not in response
+    assert 'html' not in response
+    assert 'rawHtml' not in response
+    assert 'screenshot' not in response
+    assert 'links' not in response
 
-def test_successful_response_with_valid_api_key_and_include_html():
+def test_successful_response_with_valid_api_key_and_options():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-    response = app.scrape_url('https://roastmywebsite.ai', { 'formats': [ 'markdown', 'html' ]})
+    params = {
+        'formats': ['markdown', 'html', 'rawHtml', 'screenshot', 'links'],
+        'headers': {'x-key': 'test'},
+        'includeTags': ['h1'],
+        'excludeTags': ['h2'],
+        'onlyMainContent': True,
+        'timeout': 30000,
+        'waitFor': 1000
+    }
+    response = app.scrape_url('https://roastmywebsite.ai', params)
     assert response is not None
     assert 'content' not in response
     assert 'markdown' in response
     assert 'html' in response
     assert 'metadata' in response
+    assert 'rawHtml' in response
+    assert 'screenshot' in response
+    assert 'links' in response
     assert "_Roast_" in response['markdown']
     assert "<h1" in response['html']
+    assert "<h1" in response['rawHtml']
+    assert "https://" in response['screenshot']
+    assert len(response['links']) > 0
+    assert "https://" in response['links'][0]
+    assert 'metadata' in response
+    assert 'title' in response['metadata']
+    assert 'description' in response['metadata']
+    assert 'keywords' in response['metadata']
+    assert 'robots' in response['metadata']
+    assert 'ogTitle' in response['metadata']
+    assert 'ogDescription' in response['metadata']
+    assert 'ogUrl' in response['metadata']
+    assert 'ogImage' in response['metadata']
+    assert 'ogLocaleAlternate' in response['metadata']
+    assert 'ogSiteName' in response['metadata']
+    assert 'sourceURL' in response['metadata']
+    assert 'statusCode' in response['metadata']
+    assert 'pageStatusCode' not in response['metadata']
+    assert 'pageError' not in response['metadata']
+    assert 'error' not in response['metadata']
+    assert response['metadata']['title'] == "Roast My Website"
+    assert response['metadata']['description'] == "Welcome to Roast My Website, the ultimate tool for putting your website through the wringer! This repository harnesses the power of Firecrawl to scrape and capture screenshots of websites, and then unleashes the latest LLM vision models to mercilessly roast them. 🌶️"
+    assert response['metadata']['keywords'] == "Roast My Website,Roast,Website,GitHub,Firecrawl"
+    assert response['metadata']['robots'] == "follow, index"
+    assert response['metadata']['ogTitle'] == "Roast My Website"
+    assert response['metadata']['ogDescription'] == "Welcome to Roast My Website, the ultimate tool for putting your website through the wringer! This repository harnesses the power of Firecrawl to scrape and capture screenshots of websites, and then unleashes the latest LLM vision models to mercilessly roast them. 🌶️"
+    assert response['metadata']['ogUrl'] == "https://www.roastmywebsite.ai"
+    assert response['metadata']['ogImage'] == "https://www.roastmywebsite.ai/og.png"
+    assert response['metadata']['ogLocaleAlternate'] == []
+    assert response['metadata']['ogSiteName'] == "Roast My Website"
+    assert response['metadata']['sourceURL'] == "https://roastmywebsite.ai"
+    assert response['metadata']['statusCode'] == 200
 
 def test_successful_response_for_valid_scrape_with_pdf_file():
     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
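
Note: the renamed Python test above exercises the full v1 scrape option set. For reference, the same request from the JS SDK, mirroring the new params dict (the import path, env-based key, and exact option typing are assumptions; treat this as a sketch):

import FirecrawlApp, { ScrapeResponse } from '@mendable/firecrawl-js';

async function scrapeWithOptions() {
  const app = new FirecrawlApp({ apiKey: process.env.TEST_API_KEY });
  // Same option set as the new Python params dict.
  const response = await app.scrapeUrl('https://roastmywebsite.ai', {
    formats: ['markdown', 'html', 'rawHtml', 'screenshot', 'links'],
    headers: { 'x-key': 'test' },
    includeTags: ['h1'],
    excludeTags: ['h2'],
    onlyMainContent: true,
    timeout: 30000,
    waitFor: 1000,
  }) as ScrapeResponse;
  return response;
}
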
|
@ -70,65 +121,202 @@ def test_successful_response_for_valid_scrape_with_pdf_file():
|
|||
assert response is not None
|
||||
assert 'content' not in response
|
||||
assert 'metadata' in response
|
||||
assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['content']
|
||||
assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['markdown']
|
||||
|
||||
def test_successful_response_for_valid_scrape_with_pdf_file_without_explicit_extension():
|
||||
app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
|
||||
response = app.scrape_url('https://arxiv.org/pdf/astro-ph/9301001')
|
||||
time.sleep(6) # wait for 6 seconds
|
||||
time.sleep(1) # wait for 1 second
|
||||
assert response is not None
|
||||
assert 'content' not in response
|
||||
assert 'metadata' in response
|
||||
assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['content']
|
||||
assert 'We present spectrophotometric observations of the Broad Line Radio Galaxy' in response['markdown']
|
||||
|
||||
def test_crawl_url_invalid_api_key():
|
||||
invalid_app = FirecrawlApp(api_url=API_URL, api_key="invalid_api_key")
|
||||
with pytest.raises(Exception) as excinfo:
|
||||
invalid_app.crawl_url('https://firecrawl.dev')
|
||||
assert "Unexpected error during start crawl job: Status code 401. Unauthorized: Invalid token" in str(excinfo.value)
|
||||
assert "Unauthorized: Invalid token" in str(excinfo.value)
|
||||
|
||||
def test_should_return_error_for_blocklisted_url():
|
||||
app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
|
||||
blocklisted_url = "https://twitter.com/fake-test"
|
||||
with pytest.raises(Exception) as excinfo:
|
||||
app.crawl_url(blocklisted_url)
|
||||
assert "Unexpected error during start crawl job: Status code 403. Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it." in str(excinfo.value)
|
||||
assert "URL is blocked. Firecrawl currently does not support social media scraping due to policy restrictions." in str(excinfo.value)
|
||||
|
||||
def test_crawl_url_wait_for_completion_e2e():
|
||||
app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
|
||||
response = app.crawl_url('https://roastmywebsite.ai', {'crawlerOptions': {'excludes': ['blog/*']}}, True)
|
||||
response = app.crawl_url('https://roastmywebsite.ai', {'excludePaths': ['blog/*']}, True, 30)
|
||||
assert response is not None
|
||||
assert len(response) > 0
|
||||
assert 'content' not in response[0]
|
||||
assert 'markdown' in response[0]
|
||||
assert "_Roast_" in response[0]['markdown']
|
||||
assert 'totalCount' in response
|
||||
assert response['totalCount'] > 0
|
||||
assert 'creditsUsed' in response
|
||||
assert response['creditsUsed'] > 0
|
||||
assert 'expiresAt' in response
|
||||
assert datetime.strptime(response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
|
||||
assert 'status' in response
|
||||
assert response['status'] == 'completed'
|
||||
assert 'next' not in response
|
||||
assert len(response['data']) > 0
|
||||
assert 'markdown' in response['data'][0]
|
||||
assert "_Roast_" in response['data'][0]['markdown']
|
||||
assert 'content' not in response['data'][0]
|
||||
assert 'html' not in response['data'][0]
|
||||
assert 'rawHtml' not in response['data'][0]
|
||||
assert 'screenshot' not in response['data'][0]
|
||||
assert 'links' not in response['data'][0]
|
||||
assert 'metadata' in response['data'][0]
|
||||
assert 'title' in response['data'][0]['metadata']
|
||||
assert 'description' in response['data'][0]['metadata']
|
||||
assert 'language' in response['data'][0]['metadata']
|
||||
assert 'sourceURL' in response['data'][0]['metadata']
|
||||
assert 'statusCode' in response['data'][0]['metadata']
|
||||
assert 'error' not in response['data'][0]['metadata']
|
||||
|
||||
def test_crawl_url_with_options_and_wait_for_completion():
|
||||
app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
|
||||
response = app.crawl_url('https://roastmywebsite.ai', {
|
||||
'excludePaths': ['blog/*'],
|
||||
'includePaths': ['/'],
|
||||
'maxDepth': 2,
|
||||
'ignoreSitemap': True,
|
||||
'limit': 10,
|
||||
'allowBackwardLinks': True,
|
||||
'allowExternalLinks': True,
|
||||
'scrapeOptions': {
|
||||
'formats': ['markdown', 'html', 'rawHtml', 'screenshot', 'links'],
|
||||
'headers': {"x-key": "test"},
|
||||
'includeTags': ['h1'],
|
||||
'excludeTags': ['h2'],
|
||||
'onlyMainContent': True,
|
||||
'waitFor': 1000
|
||||
}
|
||||
}, True, 30)
|
||||
assert response is not None
|
||||
assert 'totalCount' in response
|
||||
assert response['totalCount'] > 0
|
||||
assert 'creditsUsed' in response
|
||||
assert response['creditsUsed'] > 0
|
||||
assert 'expiresAt' in response
|
||||
assert datetime.strptime(response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
|
||||
assert 'status' in response
|
||||
assert response['status'] == 'completed'
|
||||
assert 'next' not in response
|
||||
assert len(response['data']) > 0
|
||||
assert 'markdown' in response['data'][0]
|
||||
assert "_Roast_" in response['data'][0]['markdown']
|
||||
assert 'content' not in response['data'][0]
|
||||
assert 'html' in response['data'][0]
|
||||
assert "<h1" in response['data'][0]['html']
|
||||
assert 'rawHtml' in response['data'][0]
|
||||
assert "<h1" in response['data'][0]['rawHtml']
|
||||
assert 'screenshot' in response['data'][0]
|
||||
assert "https://" in response['data'][0]['screenshot']
|
||||
assert 'links' in response['data'][0]
|
||||
assert len(response['data'][0]['links']) > 0
|
||||
assert 'metadata' in response['data'][0]
|
||||
assert 'title' in response['data'][0]['metadata']
|
||||
assert 'description' in response['data'][0]['metadata']
|
||||
assert 'language' in response['data'][0]['metadata']
|
||||
assert 'sourceURL' in response['data'][0]['metadata']
|
||||
assert 'statusCode' in response['data'][0]['metadata']
|
||||
assert 'error' not in response['data'][0]['metadata']
|
||||
|
||||
def test_crawl_url_with_idempotency_key_e2e():
|
||||
app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
|
||||
uniqueIdempotencyKey = str(uuid4())
|
||||
response = app.crawl_url('https://roastmywebsite.ai', {'crawlerOptions': {'excludes': ['blog/*']}}, True, 2, uniqueIdempotencyKey)
|
||||
response = app.crawl_url('https://roastmywebsite.ai', {'excludePaths': ['blog/*']}, False, 2, uniqueIdempotencyKey)
|
||||
assert response is not None
|
||||
assert len(response) > 0
|
||||
assert 'content' in response[0]
|
||||
assert "_Roast_" in response[0]['content']
|
||||
assert 'id' in response
|
||||
|
||||
with pytest.raises(Exception) as excinfo:
|
||||
app.crawl_url('https://firecrawl.dev', {'crawlerOptions': {'excludes': ['blog/*']}}, True, 2, uniqueIdempotencyKey)
|
||||
assert "Conflict: Failed to start crawl job due to a conflict. Idempotency key already used" in str(excinfo.value)
|
||||
app.crawl_url('https://firecrawl.dev', {'excludePaths': ['blog/*']}, True, 2, uniqueIdempotencyKey)
|
||||
assert "Idempotency key already used" in str(excinfo.value)
|
||||
|
||||
def test_check_crawl_status_e2e():
|
||||
app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
|
||||
response = app.crawl_url('https://firecrawl.dev', {'crawlerOptions': {'excludes': ['blog/*']}}, False)
|
||||
response = app.crawl_url('https://firecrawl.dev', {'scrapeOptions': {'formats': ['markdown', 'html', 'rawHtml', 'screenshot', 'links']}}, False)
|
||||
assert response is not None
|
||||
assert 'jobId' in response
|
||||
assert 'id' in response
|
||||
|
||||
time.sleep(30) # wait for 30 seconds
|
||||
status_response = app.check_crawl_status(response['jobId'])
|
||||
max_checks = 15
|
||||
checks = 0
|
||||
status_response = app.check_crawl_status(response['id'])
|
||||
|
||||
while status_response['status'] == 'scraping' and checks < max_checks:
|
||||
time.sleep(1) # wait for 1 second
|
||||
assert 'partial_data' not in status_response
|
||||
assert 'current' not in status_response
|
||||
assert 'data' in status_response
|
||||
assert 'totalCount' in status_response
|
||||
assert 'creditsUsed' in status_response
|
||||
assert 'expiresAt' in status_response
|
||||
assert 'status' in status_response
|
||||
assert 'next' in status_response
|
||||
assert status_response['totalCount'] > 0
|
||||
assert status_response['creditsUsed'] > 0
|
||||
assert datetime.strptime(status_response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
|
||||
assert status_response['status'] == 'scraping'
|
||||
assert '/v1/crawl/' in status_response['next']
|
||||
status_response = app.check_crawl_status(response['id'])
|
||||
checks += 1
|
||||
|
||||
assert status_response is not None
|
||||
assert 'totalCount' in status_response
|
||||
assert status_response['totalCount'] > 0
|
||||
assert 'creditsUsed' in status_response
|
||||
assert status_response['creditsUsed'] > 0
|
||||
assert 'expiresAt' in status_response
|
||||
assert datetime.strptime(status_response['expiresAt'], '%Y-%m-%dT%H:%M:%S.%fZ') > datetime.now()
|
||||
assert 'status' in status_response
|
||||
assert status_response['status'] == 'completed'
|
||||
assert 'data' in status_response
|
||||
assert len(status_response['data']) > 0
|
||||
assert 'markdown' in status_response['data'][0]
|
||||
assert len(status_response['data'][0]['markdown']) > 10
|
||||
assert 'content' not in status_response['data'][0]
|
||||
assert 'html' in status_response['data'][0]
|
||||
assert "<div" in status_response['data'][0]['html']
|
||||
assert 'rawHtml' in status_response['data'][0]
|
||||
assert "<div" in status_response['data'][0]['rawHtml']
|
||||
assert 'screenshot' in status_response['data'][0]
|
||||
assert "https://" in status_response['data'][0]['screenshot']
|
||||
assert 'links' in status_response['data'][0]
|
||||
assert status_response['data'][0]['links'] is not None
|
||||
assert len(status_response['data'][0]['links']) > 0
|
||||
assert 'metadata' in status_response['data'][0]
|
||||
assert 'title' in status_response['data'][0]['metadata']
|
||||
assert 'description' in status_response['data'][0]['metadata']
|
||||
assert 'language' in status_response['data'][0]['metadata']
|
||||
assert 'sourceURL' in status_response['data'][0]['metadata']
|
||||
assert 'statusCode' in status_response['data'][0]['metadata']
|
||||
assert 'error' not in status_response['data'][0]['metadata']
|
||||
|
||||
def test_invalid_api_key_on_map():
|
||||
invalid_app = FirecrawlApp(api_key="invalid_api_key", api_url=API_URL)
|
||||
with pytest.raises(Exception) as excinfo:
|
||||
invalid_app.map_url('https://roastmywebsite.ai')
|
||||
assert "Unauthorized: Invalid token" in str(excinfo.value)
|
||||
|
||||
def test_blocklisted_url_on_map():
|
||||
app = FirecrawlApp(api_key=TEST_API_KEY, api_url=API_URL)
|
||||
blocklisted_url = "https://facebook.com/fake-test"
|
||||
with pytest.raises(Exception) as excinfo:
|
||||
app.map_url(blocklisted_url)
|
||||
assert "URL is blocked. Firecrawl currently does not support social media scraping due to policy restrictions." in str(excinfo.value)
|
||||
|
||||
def test_successful_response_with_valid_preview_token_on_map():
|
||||
app = FirecrawlApp(api_key="this_is_just_a_preview_token", api_url=API_URL)
|
||||
response = app.map_url('https://roastmywebsite.ai')
|
||||
assert response is not None
|
||||
assert len(response) > 0
|
||||
|
||||
def test_successful_response_for_valid_map():
|
||||
app = FirecrawlApp(api_key=TEST_API_KEY, api_url=API_URL)
|
||||
response = app.map_url('https://roastmywebsite.ai')
|
||||
assert response is not None
|
||||
assert len(response) > 0
|
||||
assert any("https://" in link for link in response)
|
||||
filtered_links = [link for link in response if "roastmywebsite.ai" in link]
|
||||
assert len(filtered_links) > 0
|
||||
|
||||
def test_search_e2e():
|
||||
app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
|
||||
|
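
Note: the rewritten status test polls instead of sleeping a flat 30 seconds, re-checking while the job reports 'scraping' and capping the number of checks. The same bounded-polling pattern in TypeScript (the checkCrawlStatus method name and response shape are assumed to mirror the Python SDK's check_crawl_status; a sketch, not the SDK's documented API):

import FirecrawlApp from '@mendable/firecrawl-js';

const app = new FirecrawlApp({ apiKey: process.env.TEST_API_KEY });

async function waitForCrawl(id: string, maxChecks = 15): Promise<any> {
  let statusResponse: any = await app.checkCrawlStatus(id);
  let checks = 0;
  // Poll while the job is still scraping, at most maxChecks times.
  while (statusResponse.status === 'scraping' && checks < maxChecks) {
    await new Promise((resolve) => setTimeout(resolve, 1000)); // wait 1 second
    statusResponse = await app.checkCrawlStatus(id);
    checks++;
  }
  return statusResponse; // expected to reach status === 'completed'
}
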
@@ -136,33 +324,29 @@ def test_search_e2e():
         app.search("test query")
     assert "Search is not supported in v1" in str(excinfo.value)
 
-def test_llm_extraction():
-    app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
-    response = app.scrape_url("https://mendable.ai", {
-        'extractorOptions': {
-            'mode': 'llm-extraction',
-            'extractionPrompt': "Based on the information on the page, find what the company's mission is and whether it supports SSO, and whether it is open source",
-            'extractionSchema': {
-                'type': 'object',
-                'properties': {
-                    'company_mission': {'type': 'string'},
-                    'supports_sso': {'type': 'boolean'},
-                    'is_open_source': {'type': 'boolean'}
-                },
-                'required': ['company_mission', 'supports_sso', 'is_open_source']
-            }
-        }
-    })
-    assert response is not None
-    assert 'llm_extraction' in response
-    llm_extraction = response['llm_extraction']
-    assert 'company_mission' in llm_extraction
-    assert isinstance(llm_extraction['supports_sso'], bool)
-    assert isinstance(llm_extraction['is_open_source'], bool)
+# def test_llm_extraction():
+#     app = FirecrawlApp(api_url=API_URL, api_key=TEST_API_KEY)
+#     response = app.scrape_url("https://mendable.ai", {
+#         'extractorOptions': {
+#             'mode': 'llm-extraction',
+#             'extractionPrompt': "Based on the information on the page, find what the company's mission is and whether it supports SSO, and whether it is open source",
+#             'extractionSchema': {
+#                 'type': 'object',
+#                 'properties': {
+#                     'company_mission': {'type': 'string'},
+#                     'supports_sso': {'type': 'boolean'},
+#                     'is_open_source': {'type': 'boolean'}
+#                 },
+#                 'required': ['company_mission', 'supports_sso', 'is_open_source']
+#             }
+#         }
+#     })
+#     assert response is not None
+#     assert 'llm_extraction' in response
+#     llm_extraction = response['llm_extraction']
+#     assert 'company_mission' in llm_extraction
+#     assert isinstance(llm_extraction['supports_sso'], bool)
+#     assert isinstance(llm_extraction['is_open_source'], bool)
 
 
 def test_map_e2e():
     app = FirecrawlApp(api_url=API_URL, api_key="this_is_just_a_preview_token")
     response = app.map_url('https://roastmywebsite.ai')
     assert response is not None
     assert isinstance(response, list)

@@ -244,8 +244,9 @@ class FirecrawlApp:
         )
         if response.status_code == 200:
             response = response.json()
-            if response['success'] and 'data' in response:
-                return response['data']
+            print(response)
+            if response['success'] and 'links' in response:
+                return response['links']
             else:
                 raise Exception(f'Failed to map URL. Error: {response["error"]}')
         else:
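
Note: the v1 map endpoint responds with success plus a links array, so map_url now unwraps 'links' instead of 'data' (the stray print(response) is debug output left in the commit). Equivalent usage from the JS SDK, assuming a mapUrl method that, like Python's map_url, yields the discovered links on success:

import FirecrawlApp from '@mendable/firecrawl-js';

const app = new FirecrawlApp({ apiKey: process.env.TEST_API_KEY });

async function mapSite() {
  // Mirrors the Python SDK: resolves to the links on success, throws on error.
  const links = await app.mapUrl('https://roastmywebsite.ai');
  console.log(links);
}
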
@@ -387,18 +388,19 @@ class FirecrawlApp:
         Raises:
             Exception: An exception with a message containing the status code and error details from the response.
         """
-        error_message = response.json().get('error', 'No additional error details provided.')
+        error_message = response.json().get('error', 'No error message provided.')
+        error_details = response.json().get('details', 'No additional error details provided.')
 
         if response.status_code == 402:
-            message = f"Payment Required: Failed to {action}. {error_message}"
+            message = f"Payment Required: Failed to {action}. {error_message} - {error_details}"
         elif response.status_code == 408:
-            message = f"Request Timeout: Failed to {action} as the request timed out. {error_message}"
+            message = f"Request Timeout: Failed to {action} as the request timed out. {error_message} - {error_details}"
         elif response.status_code == 409:
-            message = f"Conflict: Failed to {action} due to a conflict. {error_message}"
+            message = f"Conflict: Failed to {action} due to a conflict. {error_message} - {error_details}"
         elif response.status_code == 500:
-            message = f"Internal Server Error: Failed to {action}. {error_message}"
+            message = f"Internal Server Error: Failed to {action}. {error_message} - {error_details}"
         else:
-            message = f"Unexpected error during {action}: Status code {response.status_code}. {error_message}"
+            message = f"Unexpected error during {action}: Status code {response.status_code}. {error_message} - {error_details}"
 
         # Raise an HTTPError with the custom message and attach the response
         raise requests.exceptions.HTTPError(message, response=response)
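
Note: the error handler now appends the response body's 'details' field to every message it builds. The same message-composition pattern in TypeScript (names are illustrative, not an existing API):

function buildErrorMessage(
  action: string,
  statusCode: number,
  body: { error?: string; details?: unknown },
): string {
  const errorMessage = body.error ?? 'No error message provided.';
  const errorDetails = body.details ?? 'No additional error details provided.';
  switch (statusCode) {
    case 402:
      return `Payment Required: Failed to ${action}. ${errorMessage} - ${errorDetails}`;
    case 408:
      return `Request Timeout: Failed to ${action} as the request timed out. ${errorMessage} - ${errorDetails}`;
    case 409:
      return `Conflict: Failed to ${action} due to a conflict. ${errorMessage} - ${errorDetails}`;
    case 500:
      return `Internal Server Error: Failed to ${action}. ${errorMessage} - ${errorDetails}`;
    default:
      return `Unexpected error during ${action}: Status code ${statusCode}. ${errorMessage} - ${errorDetails}`;
  }
}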