Compare commits

...

14 Commits

Author SHA1 Message Date
Gergő Móricz
3f2914b97f
Merge 9298a05045 into 3a342bfbf0 2024-11-15 22:54:35 +05:30
Móricz Gergő
3a342bfbf0 fix(scrapeURL/playwright): JSON body fix 2024-11-15 15:18:40 +01:00
Nicolas
3c1b1909f8 Update map.ts
2024-11-14 17:52:15 -05:00
Nicolas
9519897102 Merge branch 'nsc/sitemap-only' 2024-11-14 17:44:39 -05:00
Nicolas
7f084c6c43 Nick: 2024-11-14 17:44:32 -05:00
Nicolas
e8bd089c8a
Merge pull request #901 from mendableai/nsc/sitemap-only
Allows `/map` to only return links present in the sitemap
2024-11-14 17:32:37 -05:00
Nicolas
3fcdf57d2f Update fireEngine.ts 2024-11-14 17:31:30 -05:00
Nicolas
d62f12c9d9 Nick: moved away from axios 2024-11-14 17:31:23 -05:00
Nicolas
f155449458 Nick: sitemap only 2024-11-14 17:29:53 -05:00
Móricz Gergő
431e64e752 fix(batch/scrape/webhook): add batch_scrape.started 2024-11-14 22:40:03 +01:00
Nicolas
7bca4486b4 Update package.json 2024-11-14 16:37:53 -05:00
Móricz Gergő
df05124ef5 feat(v1/batch/scrape): webhooks 2024-11-14 22:36:28 +01:00
Móricz Gergő
9298a05045 feat: turn into API 2024-11-14 12:24:53 +01:00
Móricz Gergő
faf11acf82 doctor first iteration 2024-11-14 10:12:49 +01:00
18 changed files with 393 additions and 109 deletions

apps/api/.gitignore vendored
View File

@ -9,3 +9,5 @@ dump.rdb
.rdb
.sentryclirc
doctor-*.html

View File

@ -47,3 +47,12 @@ content-type: application/json
# @name batchScrapeStatus
GET {{baseUrl}}/v1/crawl/{{batchScrapeId}} HTTP/1.1
Authorization: Bearer {{$dotenv TEST_API_KEY}}
### URL Doctor
# @name urlDoctor
POST {{baseUrl}}/admin/{{$dotenv BULL_AUTH_KEY}}/doctor HTTP/1.1
Content-Type: application/json
{
"url": "https://firecrawl.dev"
}

View File

@ -0,0 +1,104 @@
import { Request, Response } from "express";
import { logger as _logger } from "../../../lib/logger";
import { ScrapeUrlResponse } from "../../../scraper/scrapeURL";
import { getScrapeQueue, redisConnection } from "../../../services/queue-service";
import type { Permutation } from "./doctor";
import { Job } from "bullmq";
const logger = _logger.child({ module: "doctorStatusController" });
const errorReplacer = (_, value) => {
if (value instanceof Error) {
return {
...value,
name: value.name,
message: value.message,
stack: value.stack,
cause: value.cause,
}
} else {
return value;
}
};
type PermutationResult = ({
state: "done",
result: ScrapeUrlResponse & {
success: true
},
} | {
state: "thrownError",
error: string | Error | null | undefined,
} | {
state: "error",
result: ScrapeUrlResponse & {
success: false
},
} | {
state: "pending",
}) & {
permutation: Permutation,
};
export async function doctorStatusController(req: Request, res: Response) {
try {
const doctorId = req.params.id;
const meta: { url: string } | null = JSON.parse(await redisConnection.get("doctor:" + doctorId) ?? "null");
const permutations: Permutation[] | null = JSON.parse(await redisConnection.get("doctor:" + doctorId + ":permutations") ?? "null");
if (permutations === null || meta === null) {
return res.status(404).json({ error: "Doctor entry not found" });
}
const jobs = (await Promise.all(permutations.map(x => getScrapeQueue().getJob(x.jobId)))).filter(x => x) as Job<unknown, ScrapeUrlResponse>[];
const results: PermutationResult[] = await Promise.all(jobs.map(async job => {
const permutation = permutations.find(x => x.jobId === job.id)!;
const state = await job.getState();
if (state === "completed" && job.data) {
if (job.returnvalue.success) {
return {
state: "done",
result: job.returnvalue,
permutation,
}
} else {
return {
state: "error",
result: job.returnvalue,
permutation,
}
}
} else if (state === "failed") {
return {
state: "thrownError",
error: job.failedReason,
permutation,
}
} else {
return {
state: "pending",
permutation,
}
}
}));
const html = "<head><meta charset=\"utf8\"></head><body style=\"font-family: sans-serif; padding: 1rem;\"><h1>Doctor</h1><p>URL: <code>" + meta.url + "</code></p>"
+ results.map(x => "<h2>" + (x.state === "pending" ? "⏳" : x.state === "done" ? "✅" : "❌") + " " + x.permutation.name + "</h2><p>Scrape options: <code>" + JSON.stringify(x.permutation.options) + "</code></p>"
+ "<p>Internal options: <code>" + JSON.stringify(x.permutation.internal) + "</code></p>"
+ (x.state !== "pending" ? ("<code><pre>" + ((x.state === "done"
? JSON.stringify(x.result, errorReplacer, 4)
: x.state === "thrownError"
? (x.error instanceof Error
? (x.error.message + "\n" + (x.error.stack ?? ""))
: (x.error ?? "<unknown error>"))
: (JSON.stringify(x.result, errorReplacer, 4))))
.replaceAll("<", "&lt;").replaceAll(">", "&gt;") + "</pre></code>"): "")).join("")
+ "</body>"
res.header("Content-Type", "text/html").send(html);
} catch (error) {
logger.error("Doctor status error", { error });
res.status(500).json({ error: "Internal server error" });
}
}

View File

@ -0,0 +1,84 @@
import { Request, Response } from "express";
import { logger as _logger } from "../../../lib/logger";
import { ScrapeUrlResponse, InternalOptions } from "../../../scraper/scrapeURL";
import { z } from "zod";
import { scrapeOptions } from "../types";
import { Engine, engineOptions, engines } from "../../../scraper/scrapeURL/engines";
import { addScrapeJob, addScrapeJobs } from "../../../services/queue-jobs";
import { redisConnection } from "../../../services/queue-service";
const logger = _logger.child({ module: "doctorController" });
export type Permutation = {
options: z.input<typeof scrapeOptions>,
internal: InternalOptions,
name: string,
jobId: string,
};
export async function doctorController(req: Request, res: Response) {
try {
const doctorId = crypto.randomUUID();
const permutations: Permutation[] = [
{ options: {}, internal: { verbose: true }, name: "bare", jobId: crypto.randomUUID() },
...Object.entries(engineOptions).filter(([name, options]) => options.quality > 0 && engines.includes(name as Engine)).map(([name, _options]) => ({
options: {}, internal: { forceEngine: name as Engine, verbose: true }, name, jobId: crypto.randomUUID(),
})),
];
await addScrapeJobs(permutations.map(perm => ({
data: {
url: req.body.url,
mode: "single_urls",
team_id: null,
scrapeOptions: scrapeOptions.parse(perm.options),
internalOptions: perm.internal,
plan: null,
origin: "doctor",
is_scrape: true,
doctor: true,
},
opts: {
jobId: perm.jobId,
priority: 10,
},
})));
await redisConnection.set("doctor:" + doctorId, JSON.stringify({ url: req.body.url }), "EX", 86400);
await redisConnection.set("doctor:" + doctorId + ":permutations", JSON.stringify(permutations), "EX", 86400);
const protocol = process.env.ENV === "local" ? req.protocol : "https";
res.json({ ok: true, id: doctorId, url: `${protocol}://${req.get("host")}/admin/${process.env.BULL_AUTH_KEY}/doctor/${doctorId}` });
// await Promise.all(permutations.map(async perm => {
// try {
// const result = await scrapeURL(doctorId + ":bare", url, scrapeOptions.parse(perm.options), perm.internal);
// if (result.success) {
// results.push({
// state: "done",
// result,
// permutation: perm,
// });
// } else {
// results.push({
// state: "error",
// result,
// permutation: perm,
// });
// }
// } catch (error) {
// console.error("Permutation " + perm.name + " failed with error", { error });
// results.push({
// state: "thrownError",
// error,
// permutation: perm,
// });
// }
// }));
} catch (error) {
logger.error("Doctor error", { error });
res.status(500).json({ error: "Internal server error" });
}
}
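
For reference, a minimal sketch of how the new doctor endpoints could be driven end to end, matching the admin routes registered later in this diff. The base URL, port, and environment variable names are assumptions for illustration; only the request and response shapes come from the controllers above.

// Hypothetical driver for the doctor endpoints (TypeScript, Node 18+ fetch).
const BASE_URL = process.env.FIRECRAWL_API_URL ?? "http://localhost:3002"; // assumed default
const BULL_AUTH_KEY = process.env.BULL_AUTH_KEY ?? "";

async function runDoctor(url: string): Promise<string> {
  // POST /admin/:BULL_AUTH_KEY/doctor queues one scrape job per engine permutation
  // and responds with { ok, id, url }, where url points at the status page.
  const res = await fetch(`${BASE_URL}/admin/${BULL_AUTH_KEY}/doctor`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ url }),
  });
  const { id } = (await res.json()) as { ok: boolean; id: string; url: string };

  // GET /admin/:BULL_AUTH_KEY/doctor/:id renders the HTML report built by doctorStatusController.
  const report = await fetch(`${BASE_URL}/admin/${BULL_AUTH_KEY}/doctor/${id}`);
  return await report.text();
}

The report marks still-running permutations as pending, so in practice the status URL would be re-fetched until every job has settled.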

View File

@ -16,6 +16,7 @@ import { logCrawl } from "../../services/logging/crawl_log";
import { getScrapeQueue } from "../../services/queue-service";
import { getJobPriority } from "../../lib/job-priority";
import { addScrapeJobs } from "../../services/queue-jobs";
import { callWebhook } from "../../services/webhook";
export async function batchScrapeController(
req: RequestWithAuth<{}, CrawlResponse, BatchScrapeRequest>,
@ -66,6 +67,7 @@ export async function batchScrapeController(
crawl_id: id,
sitemapped: true,
v1: true,
webhook: req.body.webhook,
},
opts: {
jobId: uuidv4(),
@ -85,6 +87,10 @@ export async function batchScrapeController(
);
await addScrapeJobs(jobs);
if(req.body.webhook) {
await callWebhook(req.auth.team_id, id, null, req.body.webhook, true, "batch_scrape.started");
}
const protocol = process.env.ENV === "local" ? req.protocol : "https";
return res.status(200).json({

View File

@ -1,10 +1,6 @@
import { Response } from "express";
import { v4 as uuidv4 } from "uuid";
import {
mapRequestSchema,
RequestWithAuth,
scrapeOptions,
} from "./types";
import { mapRequestSchema, RequestWithAuth, scrapeOptions } from "./types";
import { crawlToCrawler, StoredCrawl } from "../../lib/crawl-redis";
import { MapResponse, MapRequest } from "./types";
import { configDotenv } from "dotenv";
@ -46,6 +42,7 @@ export async function mapController(
originUrl: req.body.url,
crawlerOptions: {
...req.body,
limit: req.body.sitemapOnly ? 10000000 : limit,
scrapeOptions: undefined,
},
scrapeOptions: scrapeOptions.parse({}),
@ -57,77 +54,93 @@ export async function mapController(
const crawler = crawlToCrawler(id, sc);
let urlWithoutWww = req.body.url.replace("www.", "");
let mapUrl = req.body.search
? `"${req.body.search}" site:${urlWithoutWww}`
: `site:${req.body.url}`;
const resultsPerPage = 100;
const maxPages = Math.ceil(Math.min(MAX_FIRE_ENGINE_RESULTS, limit) / resultsPerPage);
const cacheKey = `fireEngineMap:${mapUrl}`;
const cachedResult = null;
let allResults: any[] = [];
let pagePromises: Promise<any>[] = [];
if (cachedResult) {
allResults = JSON.parse(cachedResult);
} else {
const fetchPage = async (page: number) => {
return fireEngineMap(mapUrl, {
numResults: resultsPerPage,
page: page,
// If sitemapOnly is true, only get links from sitemap
if (req.body.sitemapOnly) {
const sitemap = await crawler.tryGetSitemap(true, true);
if (sitemap !== null) {
sitemap.forEach((x) => {
links.push(x.url);
});
};
links = links.slice(1, limit);
}
} else {
let urlWithoutWww = req.body.url.replace("www.", "");
pagePromises = Array.from({ length: maxPages }, (_, i) => fetchPage(i + 1));
allResults = await Promise.all(pagePromises);
let mapUrl = req.body.search
? `"${req.body.search}" site:${urlWithoutWww}`
: `site:${req.body.url}`;
await redis.set(cacheKey, JSON.stringify(allResults), "EX", 24 * 60 * 60); // Cache for 24 hours
}
const resultsPerPage = 100;
const maxPages = Math.ceil(
Math.min(MAX_FIRE_ENGINE_RESULTS, limit) / resultsPerPage
);
// Parallelize sitemap fetch with serper search
const [sitemap, ...searchResults] = await Promise.all([
req.body.ignoreSitemap ? null : crawler.tryGetSitemap(),
...(cachedResult ? [] : pagePromises),
]);
const cacheKey = `fireEngineMap:${mapUrl}`;
const cachedResult = null;
if (!cachedResult) {
allResults = searchResults;
}
let allResults: any[] = [];
let pagePromises: Promise<any>[] = [];
if (sitemap !== null) {
sitemap.forEach((x) => {
links.push(x.url);
});
}
let mapResults = allResults
.flat()
.filter((result) => result !== null && result !== undefined);
const minumumCutoff = Math.min(MAX_MAP_LIMIT, limit);
if (mapResults.length > minumumCutoff) {
mapResults = mapResults.slice(0, minumumCutoff);
}
if (mapResults.length > 0) {
if (req.body.search) {
// Ensure all map results are first, maintaining their order
links = [
mapResults[0].url,
...mapResults.slice(1).map((x) => x.url),
...links,
];
if (cachedResult) {
allResults = JSON.parse(cachedResult);
} else {
mapResults.map((x) => {
const fetchPage = async (page: number) => {
return fireEngineMap(mapUrl, {
numResults: resultsPerPage,
page: page,
});
};
pagePromises = Array.from({ length: maxPages }, (_, i) =>
fetchPage(i + 1)
);
allResults = await Promise.all(pagePromises);
await redis.set(cacheKey, JSON.stringify(allResults), "EX", 24 * 60 * 60); // Cache for 24 hours
}
// Parallelize sitemap fetch with serper search
const [sitemap, ...searchResults] = await Promise.all([
req.body.ignoreSitemap ? null : crawler.tryGetSitemap(true),
...(cachedResult ? [] : pagePromises),
]);
if (!cachedResult) {
allResults = searchResults;
}
if (sitemap !== null) {
sitemap.forEach((x) => {
links.push(x.url);
});
}
}
let mapResults = allResults
.flat()
.filter((result) => result !== null && result !== undefined);
const minumumCutoff = Math.min(MAX_MAP_LIMIT, limit);
if (mapResults.length > minumumCutoff) {
mapResults = mapResults.slice(0, minumumCutoff);
}
if (mapResults.length > 0) {
if (req.body.search) {
// Ensure all map results are first, maintaining their order
links = [
mapResults[0].url,
...mapResults.slice(1).map((x) => x.url),
...links,
];
} else {
mapResults.map((x) => {
links.push(x.url);
});
}
}
}
// Perform cosine similarity between the search query and the list of links
if (req.body.search) {
const searchQuery = req.body.search.toLowerCase();
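
For context, a hedged example of calling the new sitemap-only mode. The /v1/map path and Authorization header follow the existing v1 API conventions, and the response field name is assumed; only the sitemapOnly flag itself is introduced by this changeset.

// Hypothetical request exercising sitemapOnly (TypeScript, Node 18+ fetch).
// With sitemapOnly: true the controller skips the fire-engine search path and
// returns only links discovered via the site's sitemap.
const mapRes = await fetch("https://api.firecrawl.dev/v1/map", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${process.env.FIRECRAWL_API_KEY}`,
  },
  body: JSON.stringify({ url: "https://firecrawl.dev", sitemapOnly: true }),
});
const { links } = (await mapRes.json()) as { links?: string[] };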

View File

@ -175,9 +175,21 @@ export const scrapeRequestSchema = scrapeOptions.extend({
export type ScrapeRequest = z.infer<typeof scrapeRequestSchema>;
export type ScrapeRequestInput = z.input<typeof scrapeRequestSchema>;
export const webhookSchema = z.preprocess(x => {
if (typeof x === "string") {
return { url: x };
} else {
return x;
}
}, z.object({
url: z.string().url(),
headers: z.record(z.string(), z.string()).default({}),
}).strict(strictMessage))
export const batchScrapeRequestSchema = scrapeOptions.extend({
urls: url.array(),
origin: z.string().optional().default("api"),
webhook: webhookSchema.optional(),
}).strict(strictMessage).refine(
(obj) => {
const hasExtractFormat = obj.formats?.includes("extract");
@ -220,17 +232,6 @@ const crawlerOptions = z.object({
export type CrawlerOptions = z.infer<typeof crawlerOptions>;
export const webhookSchema = z.preprocess(x => {
if (typeof x === "string") {
return { url: x };
} else {
return x;
}
}, z.object({
url: z.string().url(),
headers: z.record(z.string(), z.string()).default({}),
}).strict(strictMessage))
export const crawlRequestSchema = crawlerOptions.extend({
url,
origin: z.string().optional().default("api"),
@ -260,6 +261,7 @@ export const mapRequestSchema = crawlerOptions.extend({
includeSubdomains: z.boolean().default(true),
search: z.string().optional(),
ignoreSitemap: z.boolean().default(false),
sitemapOnly: z.boolean().default(false),
limit: z.number().min(1).max(5000).default(5000),
}).strict(strictMessage);
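
Because webhookSchema is wrapped in z.preprocess, a bare URL string and a { url, headers } object validate to the same shape. A small sketch (the example URLs and header name are placeholders; the normalization itself is exactly what the schema above encodes):

// Both calls normalize to { url, headers }: a bare string is wrapped into
// { url } by the preprocess step, and headers defaults to {}.
const fromString = webhookSchema.parse("https://example.com/hooks/firecrawl");
// => { url: "https://example.com/hooks/firecrawl", headers: {} }

const fromObject = webhookSchema.parse({
  url: "https://example.com/hooks/firecrawl",
  headers: { "X-Signature": "shared-secret" },
});
// => { url: "https://example.com/hooks/firecrawl", headers: { "X-Signature": "shared-secret" } }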

View File

@ -1,4 +1,5 @@
import * as winston from "winston";
import Transport from "winston-transport";
import { configDotenv } from "dotenv";
configDotenv();
@ -49,3 +50,33 @@ export const logger = winston.createLogger({
}),
],
});
export type ArrayTransportOptions = Transport.TransportStreamOptions & {
array: any[];
scrapeId?: string;
};
export class ArrayTransport extends Transport {
private array: any[];
private scrapeId?: string;
constructor(opts: ArrayTransportOptions) {
super(opts);
this.array = opts.array;
this.scrapeId = opts.scrapeId;
}
log(info, next) {
setImmediate(() => {
this.emit("logged", info);
});
if (this.scrapeId !== undefined && info.scrapeId !== this.scrapeId) {
return next();
}
this.array.push(info);
next();
}
}
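
A short sketch of how ArrayTransport is intended to be used, mirroring the scrapeURL change later in this diff where a child logger gets an ArrayTransport when internalOptions.verbose is set. The scrape id here is a placeholder.

// Capture log entries for one scrape into an array. Winston child loggers share
// the parent's transport pipeline, so the scrapeId filter inside ArrayTransport
// is what keeps entries from other scrapes out of this array.
const logs: any[] = [];
const scrapeLogger = logger.child({ module: "ScrapeURL", scrapeId: "example-scrape-id" });
scrapeLogger.add(new ArrayTransport({ array: logs, scrapeId: "example-scrape-id" }));

scrapeLogger.info("engine selected"); // the child's defaultMeta supplies scrapeId
// logs now holds that entry and can be attached to verbose scrape output.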

View File

@ -70,7 +70,7 @@ export async function runWebScraper({
}
}
if(is_scrape === false) {
if(is_scrape === false && team_id) {
let creditsToBeBilled = 1; // Assuming 1 credit per document
if (scrapeOptions.extract) {
creditsToBeBilled = 5;

View File

@ -8,6 +8,8 @@ import {
} from "../controllers/v0/admin/queue";
import { wrap } from "./v1";
import { acucCacheClearController } from "../controllers/v0/admin/acuc-cache-clear";
import { doctorController } from "../controllers/v1/admin/doctor";
import { doctorStatusController } from "../controllers/v1/admin/doctor-status";
export const adminRouter = express.Router();
@ -40,3 +42,13 @@ adminRouter.post(
`/admin/${process.env.BULL_AUTH_KEY}/acuc-cache-clear`,
wrap(acucCacheClearController),
);
adminRouter.post(
`/admin/${process.env.BULL_AUTH_KEY}/doctor`,
wrap(doctorController),
);
adminRouter.get(
`/admin/${process.env.BULL_AUTH_KEY}/doctor/:id`,
wrap(doctorStatusController),
);

View File

@ -65,7 +65,12 @@ export class WebCrawler {
this.allowExternalContentLinks = allowExternalContentLinks ?? false;
}
public filterLinks(sitemapLinks: string[], limit: number, maxDepth: number): string[] {
public filterLinks(sitemapLinks: string[], limit: number, maxDepth: number, fromMap: boolean = false): string[] {
// If the initial URL is a sitemap.xml, skip filtering
if (this.initialUrl.endsWith('sitemap.xml') && fromMap) {
return sitemapLinks.slice(0, limit);
}
return sitemapLinks
.filter((link) => {
let url: URL;
@ -159,11 +164,14 @@ export class WebCrawler {
this.robots = robotsParser(this.robotsTxtUrl, txt);
}
public async tryGetSitemap(): Promise<{ url: string; html: string; }[] | null> {
public async tryGetSitemap(fromMap: boolean = false, onlySitemap: boolean = false): Promise<{ url: string; html: string; }[] | null> {
logger.debug(`Fetching sitemap links from ${this.initialUrl}`);
const sitemapLinks = await this.tryFetchSitemapLinks(this.initialUrl);
if(fromMap && onlySitemap) {
return sitemapLinks.map(link => ({ url: link, html: "" }));
}
if (sitemapLinks.length > 0) {
let filteredLinks = this.filterLinks(sitemapLinks, this.limit, this.maxCrawledDepth);
let filteredLinks = this.filterLinks(sitemapLinks, this.limit, this.maxCrawledDepth, fromMap);
return filteredLinks.map(link => ({ url: link, html: "" }));
}
return null;
@ -353,6 +361,7 @@ export class WebCrawler {
return url;
};
const sitemapUrl = url.endsWith("/sitemap.xml")
? url
: `${url}/sitemap.xml`;
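
Taken together with the map controller changes earlier in this diff, the two new flags combine roughly as follows. This is a sketch; crawler is the WebCrawler produced by crawlToCrawler, as in map.ts.

// Crawl path: unchanged behaviour.
const crawlLinks = await crawler.tryGetSitemap();

// /map with sitemapOnly: return raw sitemap links without running filterLinks at all.
const sitemapOnlyLinks = await crawler.tryGetSitemap(true, true);

// /map without sitemapOnly: still filtered, but filterLinks now skips filtering
// when the initial URL itself ends in sitemap.xml and the call came from map.
const mapLinks = await crawler.tryGetSitemap(true);

const urls = (sitemapOnlyLinks ?? []).map((entry) => entry.url);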

View File

@ -13,12 +13,12 @@ export async function scrapeURLWithPlaywright(meta: Meta): Promise<EngineScrapeR
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
body: {
url: meta.url,
wait_after_load: meta.options.waitFor,
timeout,
headers: meta.options.headers,
}),
},
method: "POST",
logger: meta.logger.child("scrapeURLWithPlaywright/robustFetch"),
schema: z.object({

View File

@ -2,7 +2,7 @@ import { Logger } from "winston";
import * as Sentry from "@sentry/node";
import { Document, ScrapeOptions } from "../../controllers/v1/types";
import { logger } from "../../lib/logger";
import { ArrayTransport, logger } from "../../lib/logger";
import { buildFallbackList, Engine, EngineScrapeResult, FeatureFlag, scrapeURLWithEngine } from "./engines";
import { parseMarkdown } from "../../lib/html-to-markdown";
import { AddFeatureError, EngineError, NoEnginesLeftError, TimeoutError } from "./error";
@ -97,6 +97,9 @@ function buildMetaObject(id: string, url: string, options: ScrapeOptions, intern
const _logger = logger.child({ module: "ScrapeURL", scrapeId: id, scrapeURL: url });
const logs: any[] = [];
if (internalOptions.verbose) {
_logger.add(new ArrayTransport({ array: logs, scrapeId: id }));
}
return {
id, url, options, internalOptions,
@ -114,6 +117,8 @@ export type InternalOptions = {
v0CrawlOnlyUrls?: boolean;
v0UseFastMode?: boolean;
v0DisableJsDom?: boolean;
verbose?: boolean; // stores logs. will cause high memory usage. use with caution
};
export type EngineResultsTracker = { [E in Engine]?: ({
@ -229,7 +234,7 @@ async function scrapeURLLoop(
throw error;
} else {
Sentry.captureException(error);
meta.logger.info("An unexpected error happened while scraping with " + engine + ".", { error });
meta.logger.warn("An unexpected error happened while scraping with " + engine + ".", { error });
results[engine] = {
state: "error",
error: safeguardCircularError(error),

View File

@ -1,4 +1,3 @@
import axios from "axios";
import dotenv from "dotenv";
import { SearchResult } from "../../src/lib/entities";
import * as Sentry from "@sentry/node";
@ -6,7 +5,6 @@ import { logger } from "../lib/logger";
dotenv.config();
export async function fireEngineMap(
q: string,
options: {
@ -37,18 +35,18 @@ export async function fireEngineMap(
return [];
}
let config = {
const response = await fetch(`${process.env.FIRE_ENGINE_BETA_URL}/search`, {
method: "POST",
url: `${process.env.FIRE_ENGINE_BETA_URL}/search`,
headers: {
"Content-Type": "application/json",
"X-Disable-Cache": "true"
"X-Disable-Cache": "true",
},
data: data,
};
const response = await axios(config);
if (response && response.data) {
return response.data;
body: data,
});
if (response.ok) {
const responseData = await response.json();
return responseData;
} else {
return [];
}

View File

@ -38,6 +38,7 @@ import { configDotenv } from "dotenv";
import { scrapeOptions } from "../controllers/v1/types";
import { getRateLimiterPoints } from "./rate-limiter";
import { cleanOldConcurrencyLimitEntries, pushConcurrencyLimitActiveJob, removeConcurrencyLimitActiveJob, takeConcurrencyLimitedJob } from "../lib/concurrency-limit";
import { ScrapeUrlResponse } from "../scraper/scrapeURL";
configDotenv();
const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
@ -289,17 +290,12 @@ async function processJob(job: Job & { id: string }, token: string) {
] : [])
]);
if (!pipeline.success) {
// TODO: let's Not do this
throw pipeline.error;
}
const end = Date.now();
const timeTakenInSeconds = (end - start) / 1000;
const doc = pipeline.document;
const doc = (pipeline as ScrapeUrlResponse & { success: true }).document;
const rawHtml = doc.rawHtml ?? "";
const rawHtml = doc?.rawHtml ?? "";
const data = {
success: true,
@ -313,6 +309,16 @@ async function processJob(job: Job & { id: string }, token: string) {
document: doc,
};
if (job.data.doctor) {
(data.document as any) = pipeline as unknown as Document; // force it in there
return data;
}
if (!pipeline.success) {
// TODO: let's Not do this
throw pipeline.error;
}
if (job.data.webhook && job.data.mode !== "crawl" && job.data.v1) {
await callWebhook(
job.data.team_id,

View File

@ -29,8 +29,8 @@ export interface WebScraperOptions {
crawlerOptions?: any;
scrapeOptions: ScrapeOptions;
internalOptions?: InternalOptions;
team_id: string;
plan: string;
team_id: string | null;
plan: string | null;
origin?: string;
crawl_id?: string;
sitemapped?: boolean;
@ -46,7 +46,7 @@ export interface RunWebScraperParams {
internalOptions?: InternalOptions;
// onSuccess: (result: V1Document, mode: string) => void;
// onError: (error: Error) => void;
team_id: string;
team_id: string | null;
bull_job_id: string;
priority?: number;
is_scrape?: boolean;
@ -166,4 +166,4 @@ export type PlanType =
| "";
export type WebhookEventType = "crawl.page" | "batch_scrape.page" | "crawl.started" | "crawl.completed" | "batch_scrape.completed" | "crawl.failed";
export type WebhookEventType = "crawl.page" | "batch_scrape.page" | "crawl.started" | "batch_scrape.started" | "crawl.completed" | "batch_scrape.completed" | "crawl.failed";
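
The expanded WebhookEventType union now includes batch_scrape.started, which the batch scrape controller emits right after its jobs are enqueued. A hedged consumer-side sketch over the event names only, since the webhook payload shape is not shown in this diff:

// Illustrative handling of the event-type union; only the type strings are
// taken from this change, everything else is an assumption.
function describeWebhookEvent(type: WebhookEventType): string {
  switch (type) {
    case "batch_scrape.started": return "batch scrape accepted and jobs enqueued";
    case "batch_scrape.page": return "a single page in the batch finished";
    case "batch_scrape.completed": return "all pages in the batch finished";
    default: return type;
  }
}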

View File

@ -1,6 +1,6 @@
{
"name": "@mendable/firecrawl-js",
"version": "1.8.2",
"version": "1.8.4",
"description": "JavaScript SDK for Firecrawl API",
"main": "dist/index.js",
"types": "dist/index.d.ts",

View File

@ -221,6 +221,7 @@ export interface MapParams {
search?: string;
ignoreSitemap?: boolean;
includeSubdomains?: boolean;
sitemapOnly?: boolean;
limit?: number;
}
@ -543,16 +544,18 @@ export default class FirecrawlApp {
* @param params - Additional parameters for the scrape request.
* @param pollInterval - Time in seconds for job status checks.
* @param idempotencyKey - Optional idempotency key for the request.
* @param webhook - Optional webhook for the batch scrape.
* @returns The response from the crawl operation.
*/
async batchScrapeUrls(
urls: string[],
params?: ScrapeParams,
pollInterval: number = 2,
idempotencyKey?: string
idempotencyKey?: string,
webhook?: CrawlParams["webhook"],
): Promise<BatchScrapeStatusResponse | ErrorResponse> {
const headers = this.prepareHeaders(idempotencyKey);
let jsonData: any = { urls, ...(params ?? {}) };
let jsonData: any = { urls, ...(params ?? {}), webhook };
try {
const response: AxiosResponse = await this.postRequest(
this.apiUrl + `/v1/batch/scrape`,
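
On the SDK side, the new optional fifth argument threads a webhook through batchScrapeUrls. A hedged usage sketch: the API key and webhook URL are placeholders, and passing a bare string relies on the webhookSchema preprocessing shown earlier.

import FirecrawlApp from "@mendable/firecrawl-js";

const app = new FirecrawlApp({ apiKey: process.env.FIRECRAWL_API_KEY });

// pollInterval and idempotencyKey keep their defaults; the last argument is
// the new webhook parameter ({ url, headers } would also be accepted).
const result = await app.batchScrapeUrls(
  ["https://firecrawl.dev", "https://docs.firecrawl.dev"],
  { formats: ["markdown"] },
  2,
  undefined,
  "https://example.com/hooks/batch-scrape",
);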