Merge branch 'main' into nsc/cancel-job

Nicolas 2024-05-07 10:13:43 -07:00
commit bdbee963f7
10 changed files with 97 additions and 35 deletions

View File

@@ -79,8 +79,26 @@ describe("E2E Tests for API Routes", () => {
expect(response.body.data).toHaveProperty("content");
expect(response.body.data).toHaveProperty("markdown");
expect(response.body.data).toHaveProperty("metadata");
expect(response.body.data).not.toHaveProperty("html");
expect(response.body.data.content).toContain("🔥 FireCrawl");
}, 30000); // 30 seconds timeout
it("should return a successful response with a valid API key and includeHtml set to true", async () => {
const response = await request(TEST_URL)
.post("/v0/scrape")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev", pageOptions: { includeHtml: true }});
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("data");
expect(response.body.data).toHaveProperty("content");
expect(response.body.data).toHaveProperty("markdown");
expect(response.body.data).toHaveProperty("html");
expect(response.body.data).toHaveProperty("metadata");
expect(response.body.data.content).toContain("🔥 FireCrawl");
expect(response.body.data.markdown).toContain("🔥 FireCrawl");
expect(response.body.data.html).toContain("<h1");
}, 30000); // 30 seconds timeout
});
describe("POST /v0/crawl", () => {
@@ -143,16 +161,17 @@ describe("E2E Tests for API Routes", () => {
expect(response.statusCode).toBe(401);
});
it("should return an error for a blocklisted URL", async () => {
const blocklistedUrl = "https://instagram.com/fake-test";
const response = await request(TEST_URL)
.post("/v0/crawlWebsitePreview")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({ url: blocklistedUrl });
expect(response.statusCode).toBe(403);
expect(response.body.error).toContain("Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it.");
});
// it("should return an error for a blocklisted URL", async () => {
// const blocklistedUrl = "https://instagram.com/fake-test";
// const response = await request(TEST_URL)
// .post("/v0/crawlWebsitePreview")
// .set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
// .set("Content-Type", "application/json")
// .send({ url: blocklistedUrl });
// // is returning 429 instead of 403
// expect(response.statusCode).toBe(403);
// expect(response.body.error).toContain("Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it.");
// });
it("should return a successful response with a valid API key", async () => {
const response = await request(TEST_URL)
@@ -250,6 +269,46 @@ describe("E2E Tests for API Routes", () => {
"🔥 FireCrawl"
);
}, 60000); // 60 seconds
it("should return a successful response for a valid crawl job with includeHtml set to true option", async () => {
const crawlResponse = await request(TEST_URL)
.post("/v0/crawl")
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`)
.set("Content-Type", "application/json")
.send({ url: "https://firecrawl.dev", pageOptions: { includeHtml: true } });
expect(crawlResponse.statusCode).toBe(200);
const response = await request(TEST_URL)
.get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
expect(response.statusCode).toBe(200);
expect(response.body).toHaveProperty("status");
expect(response.body.status).toBe("active");
// wait for 30 seconds
await new Promise((r) => setTimeout(r, 30000));
const completedResponse = await request(TEST_URL)
.get(`/v0/crawl/status/${crawlResponse.body.jobId}`)
.set("Authorization", `Bearer ${process.env.TEST_API_KEY}`);
expect(completedResponse.statusCode).toBe(200);
expect(completedResponse.body).toHaveProperty("status");
expect(completedResponse.body.status).toBe("completed");
expect(completedResponse.body).toHaveProperty("data");
expect(completedResponse.body.data[0]).toHaveProperty("content");
expect(completedResponse.body.data[0]).toHaveProperty("markdown");
expect(completedResponse.body.data[0]).toHaveProperty("html");
expect(completedResponse.body.data[0]).toHaveProperty("metadata");
expect(completedResponse.body.data[0].content).toContain(
"🔥 FireCrawl"
);
expect(completedResponse.body.data[0].markdown).toContain(
"FireCrawl"
);
expect(completedResponse.body.data[0].html).toContain(
"<h1"
);
}, 60000); // 60 seconds
});
it("If someone cancels a crawl job, it should turn into failed status", async () => {

View File

@@ -41,7 +41,7 @@ export async function crawlController(req: Request, res: Response) {
const mode = req.body.mode ?? "crawl";
const crawlerOptions = req.body.crawlerOptions ?? {};
- const pageOptions = req.body.pageOptions ?? { onlyMainContent: false };
+ const pageOptions = req.body.pageOptions ?? { onlyMainContent: false, includeHtml: false };
if (mode === "single_urls" && !url.includes(",")) {
try {
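One behavioral note on this default: `??` fires only when pageOptions is absent from the body, so a caller-supplied object replaces the defaults wholesale rather than merging with them. A small illustration (standalone sketch, not code from this commit):

type PageOptions = { onlyMainContent?: boolean; includeHtml?: boolean };

function resolvePageOptions(fromBody?: PageOptions): PageOptions {
  // Whole-object fallback: used only when fromBody is null or undefined.
  return fromBody ?? { onlyMainContent: false, includeHtml: false };
}

resolvePageOptions();                          // => { onlyMainContent: false, includeHtml: false }
resolvePageOptions({ onlyMainContent: true }); // => { onlyMainContent: true } (includeHtml stays undefined)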

View File

@@ -26,7 +26,7 @@ export async function crawlPreviewController(req: Request, res: Response) {
const mode = req.body.mode ?? "crawl";
const crawlerOptions = req.body.crawlerOptions ?? {};
- const pageOptions = req.body.pageOptions ?? { onlyMainContent: false };
+ const pageOptions = req.body.pageOptions ?? { onlyMainContent: false, includeHtml: false };
const job = await addWebScraperJob({
url: url,

View File

@@ -1,4 +1,4 @@
- import { ExtractorOptions } from './../lib/entities';
+ import { ExtractorOptions, PageOptions } from './../lib/entities';
import { Request, Response } from "express";
import { WebScraperDataProvider } from "../scraper/WebScraper";
import { billTeam, checkTeamCredits } from "../services/billing/credit_billing";
@@ -13,8 +13,8 @@ export async function scrapeHelper(
req: Request,
team_id: string,
crawlerOptions: any,
- pageOptions: any,
- extractorOptions: ExtractorOptions
+ pageOptions: PageOptions,
+ extractorOptions: ExtractorOptions,
): Promise<{
success: boolean;
error?: string;
@@ -39,7 +39,7 @@ export async function scrapeHelper(
...crawlerOptions,
},
pageOptions: pageOptions,
- extractorOptions: extractorOptions
+ extractorOptions: extractorOptions,
});
const docs = await a.getDocuments(false);
@@ -91,7 +91,7 @@ export async function scrapeController(req: Request, res: Response) {
return res.status(status).json({ error });
}
const crawlerOptions = req.body.crawlerOptions ?? {};
- const pageOptions = req.body.pageOptions ?? { onlyMainContent: false };
+ const pageOptions = req.body.pageOptions ?? { onlyMainContent: false, includeHtml: false };
const extractorOptions = req.body.extractorOptions ?? {
mode: "markdown"
}
@@ -113,7 +113,7 @@ export async function scrapeController(req: Request, res: Response) {
team_id,
crawlerOptions,
pageOptions,
- extractorOptions
+ extractorOptions,
);
const endTime = new Date().getTime();
const timeTakenInSeconds = (endTime - startTime) / 1000;
@@ -132,7 +132,7 @@ export async function scrapeController(req: Request, res: Response) {
pageOptions: pageOptions,
origin: origin,
extractor_options: extractorOptions,
- num_tokens: numTokens
+ num_tokens: numTokens,
});
return res.status(result.returnCode).json(result);
} catch (error) {

View File

@@ -13,7 +13,7 @@ export async function searchHelper(
team_id: string,
crawlerOptions: any,
pageOptions: PageOptions,
- searchOptions: SearchOptions
+ searchOptions: SearchOptions,
): Promise<{
success: boolean;
error?: string;
@@ -66,6 +66,7 @@ export async function searchHelper(
...pageOptions,
onlyMainContent: pageOptions?.onlyMainContent ?? true,
fetchPageContent: pageOptions?.fetchPageContent ?? true,
+ includeHtml: pageOptions?.includeHtml ?? false,
fallback: false,
},
});
@@ -117,6 +118,7 @@ export async function searchController(req: Request, res: Response) {
}
const crawlerOptions = req.body.crawlerOptions ?? {};
const pageOptions = req.body.pageOptions ?? {
+ includeHtml: false,
onlyMainContent: true,
fetchPageContent: true,
fallback: false,
@@ -141,7 +143,7 @@ export async function searchController(req: Request, res: Response) {
team_id,
crawlerOptions,
pageOptions,
- searchOptions
+ searchOptions,
);
const endTime = new Date().getTime();
const timeTakenInSeconds = (endTime - startTime) / 1000;
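Unlike the crawl controllers, which replace the whole pageOptions object, searchHelper merges per field: the spread keeps whatever the caller sent and each `??` fills in only a missing flag. The pattern, as a standalone sketch:

type PageOptions = {
  onlyMainContent?: boolean;
  includeHtml?: boolean;
  fetchPageContent?: boolean;
  fallback?: boolean;
};

function mergeSearchPageOptions(pageOptions?: PageOptions): PageOptions {
  return {
    ...pageOptions,
    // Per-field fallbacks: caller-supplied flags win, missing ones get defaults.
    onlyMainContent: pageOptions?.onlyMainContent ?? true,
    fetchPageContent: pageOptions?.fetchPageContent ?? true,
    includeHtml: pageOptions?.includeHtml ?? false,
    fallback: false,
  };
}

// A caller that sets only includeHtml still gets the other defaults:
mergeSearchPageOptions({ includeHtml: true });
// => { includeHtml: true, onlyMainContent: true, fetchPageContent: true, fallback: false }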

View File

@@ -12,9 +12,9 @@ export interface Progress {
export type PageOptions = {
onlyMainContent?: boolean;
+ includeHtml?: boolean;
fallback?: boolean;
fetchPageContent?: boolean;
};
export type ExtractorOptions = {
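All four flags are optional, so each call site in this diff picks its own defaults. A fully specified literal for reference; the comments are interpretive, based on how the flags are used elsewhere in this commit:

const pageOptions: PageOptions = {
  onlyMainContent: false,  // false = return the whole page, not just the main content block
  includeHtml: true,       // attach the raw HTML to each returned document
  fetchPageContent: true,  // actually download the page (the search path can skip this)
  fallback: false,         // search-path flag; its semantics are not shown in this diff
};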

View File

@@ -27,7 +27,7 @@ export async function startWebScraperPipeline({
job.moveToFailed(error);
},
team_id: job.data.team_id,
- bull_job_id: job.id.toString(),
+ bull_job_id: job.id.toString()
})) as { success: boolean; message: string; docs: Document[] };
}
export async function runWebScraper({
@@ -63,14 +63,14 @@ export async function runWebScraper({
urls: [url],
crawlerOptions: crawlerOptions,
pageOptions: pageOptions,
- bullJobId: bull_job_id,
+ bullJobId: bull_job_id
});
} else {
await provider.setOptions({
mode: mode,
urls: url.split(","),
crawlerOptions: crawlerOptions,
- pageOptions: pageOptions,
+ pageOptions: pageOptions
});
}
const docs = (await provider.getDocuments(false, (progress: Progress) => {

View File

@@ -55,7 +55,7 @@ export class WebScraperDataProvider {
const batchUrls = urls.slice(i, i + this.concurrentRequests);
await Promise.all(
batchUrls.map(async (url, index) => {
- const result = await scrapSingleUrl(url, true, this.pageOptions);
+ const result = await scrapSingleUrl(url, this.pageOptions);
processedUrls++;
if (inProgress) {
inProgress({
@@ -177,6 +177,7 @@ export class WebScraperDataProvider {
});
return links.map((url) => ({
content: "",
+ html: this.pageOptions?.includeHtml ? "" : undefined,
markdown: "",
metadata: { sourceURL: url },
}));
@@ -387,11 +388,9 @@ export class WebScraperDataProvider {
this.limit = options.crawlerOptions?.limit ?? 10000;
this.generateImgAltText =
options.crawlerOptions?.generateImgAltText ?? false;
- this.pageOptions = options.pageOptions ?? { onlyMainContent: false };
- this.extractorOptions = options.extractorOptions ?? { mode: "markdown" };
- this.replaceAllPathsWithAbsolutePaths =
- options.crawlerOptions?.replaceAllPathsWithAbsolutePaths ?? false;
+ this.pageOptions = options.pageOptions ?? { onlyMainContent: false, includeHtml: false };
+ this.extractorOptions = options.extractorOptions ?? {mode: "markdown"}
+ this.replaceAllPathsWithAbsolutePaths = options.crawlerOptions?.replaceAllPathsWithAbsolutePaths ?? false;
//! @nicolas, for some reason this was being injected and breaking everything. Don't have time to find source of the issue so adding this check
this.excludes = this.excludes.filter((item) => item !== "");
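The `html: this.pageOptions?.includeHtml ? "" : undefined` line above keeps link-only placeholder documents shape-compatible with scraped ones: the field exists (empty) when HTML was requested and is omitted otherwise. Condensed to a sketch, with the Document type reduced to the fields shown in this hunk:

type Doc = { content: string; markdown: string; html?: string; metadata: { sourceURL: string } };

function linksToPlaceholders(links: string[], includeHtml?: boolean): Doc[] {
  return links.map((url) => ({
    content: "",
    markdown: "",
    html: includeHtml ? "" : undefined,  // present-but-empty only when HTML was requested
    metadata: { sourceURL: url },
  }));
}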

View File

@@ -103,8 +103,7 @@ export async function scrapWithPlaywright(url: string): Promise<string> {
export async function scrapSingleUrl(
urlToScrap: string,
- toMarkdown: boolean = true,
- pageOptions: PageOptions = { onlyMainContent: true }
+ pageOptions: PageOptions = { onlyMainContent: true, includeHtml: false },
): Promise<Document> {
urlToScrap = urlToScrap.trim();
@@ -193,6 +192,7 @@ export async function scrapSingleUrl(
url: urlToScrap,
content: text,
markdown: text,
+ html: pageOptions.includeHtml ? html : undefined,
metadata: { ...metadata, sourceURL: urlToScrap },
} as Document;
}
@@ -216,6 +216,7 @@ export async function scrapSingleUrl(
return {
content: text,
markdown: text,
+ html: pageOptions.includeHtml ? html : undefined,
metadata: { ...metadata, sourceURL: urlToScrap },
} as Document;
} catch (error) {
@@ -223,6 +224,7 @@ export async function scrapSingleUrl(
return {
content: "",
markdown: "",
html: "",
metadata: { sourceURL: urlToScrap },
} as Document;
}
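All three return sites above follow the same pattern: the raw HTML is carried alongside the markdown conversion and attached only on request. Reduced to a sketch (`text` and `html` stand for the variables inside scrapSingleUrl):

function toDocument(
  text: string,
  html: string,
  pageOptions: { includeHtml?: boolean },
  sourceURL: string,
) {
  return {
    content: text,
    markdown: text,
    // undefined keeps the key out of serialized JSON when HTML was not requested
    html: pageOptions.includeHtml ? html : undefined,
    metadata: { sourceURL },
  };
}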

View File

@@ -40,7 +40,7 @@ export interface FirecrawlJob {
pageOptions?: any;
origin: string;
extractor_options?: ExtractorOptions,
- num_tokens?: number
+ num_tokens?: number,
}
export enum RateLimiterMode {