chore: apply ruff E501 line-too-long linter rule (#8275)

Co-authored-by: -LAN- <laipz8200@outlook.com>
Bowen Liang 2024-09-12 14:00:36 +08:00 committed by GitHub
parent 56c90e212a
commit c69f5b07ba
85 changed files with 459 additions and 324 deletions
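
Most of the changes below follow one of two patterns. Long messages are wrapped using Python's implicit concatenation of adjacent string literals, so the runtime string is unchanged while every physical line stays under the limit; lines that are intentionally long, such as prompt templates, instead get a per-line `# noqa: E501` suppression. A minimal sketch of both patterns (the variable names and values here are illustrative, not taken from the diff):

    # Implicit concatenation: adjacent (f-)string literals are joined into one string,
    # so the message is unchanged while each physical line stays short.
    status_code = 500  # hypothetical value for illustration
    detail = (
        f"Failed to execute code, got status code {status_code},"
        f" please check if the sandbox service is running"
    )
    print(detail)

    # Intentionally long lines (e.g. prompt templates) are suppressed per line instead:
    PROMPT = """Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate."""  # noqa: E501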

View File

@@ -411,7 +411,8 @@ def migrate_knowledge_vector_database():
             try:
                 click.echo(
                     click.style(
-                        f"Start to created vector index with {len(documents)} documents of {segments_count} segments for dataset {dataset.id}.",
+                        f"Start to created vector index with {len(documents)} documents of {segments_count}"
+                        f" segments for dataset {dataset.id}.",
                         fg="green",
                     )
                 )

View File

@@ -29,10 +29,13 @@ class DailyMessageStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """
-        SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(*) AS message_count
-            FROM messages where app_id = :app_id
-        """
+        sql_query = """SELECT
+    DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+    COUNT(*) AS message_count
+FROM
+    messages
+WHERE
+    app_id = :app_id"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id}

         timezone = pytz.timezone(account.timezone)
@@ -45,7 +48,7 @@ class DailyMessageStatistic(Resource):

             start_datetime_timezone = timezone.localize(start_datetime)
             start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at >= :start"
+            sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc

         if args["end"]:
@@ -55,10 +58,10 @@ class DailyMessageStatistic(Resource):

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at < :end"
+            sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc

-        sql_query += " GROUP BY date order by date"
+        sql_query += " GROUP BY date ORDER BY date"

         response_data = []
@@ -83,10 +86,13 @@ class DailyConversationStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """
-        SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct messages.conversation_id) AS conversation_count
-            FROM messages where app_id = :app_id
-        """
+        sql_query = """SELECT
+    DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+    COUNT(DISTINCT messages.conversation_id) AS conversation_count
+FROM
+    messages
+WHERE
+    app_id = :app_id"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id}

         timezone = pytz.timezone(account.timezone)
@@ -99,7 +105,7 @@ class DailyConversationStatistic(Resource):

             start_datetime_timezone = timezone.localize(start_datetime)
             start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at >= :start"
+            sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc

         if args["end"]:
@@ -109,10 +115,10 @@ class DailyConversationStatistic(Resource):

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at < :end"
+            sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc

-        sql_query += " GROUP BY date order by date"
+        sql_query += " GROUP BY date ORDER BY date"

         response_data = []
@@ -137,10 +143,13 @@ class DailyTerminalsStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """
-        SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct messages.from_end_user_id) AS terminal_count
-            FROM messages where app_id = :app_id
-        """
+        sql_query = """SELECT
+    DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+    COUNT(DISTINCT messages.from_end_user_id) AS terminal_count
+FROM
+    messages
+WHERE
+    app_id = :app_id"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id}

         timezone = pytz.timezone(account.timezone)
@@ -153,7 +162,7 @@ class DailyTerminalsStatistic(Resource):

             start_datetime_timezone = timezone.localize(start_datetime)
             start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at >= :start"
+            sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc

         if args["end"]:
@@ -163,10 +172,10 @@ class DailyTerminalsStatistic(Resource):

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at < :end"
+            sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc

-        sql_query += " GROUP BY date order by date"
+        sql_query += " GROUP BY date ORDER BY date"

         response_data = []
@@ -191,12 +200,14 @@ class DailyTokenCostStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """
-        SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
-            (sum(messages.message_tokens) + sum(messages.answer_tokens)) as token_count,
-            sum(total_price) as total_price
-            FROM messages where app_id = :app_id
-        """
+        sql_query = """SELECT
+    DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+    (SUM(messages.message_tokens) + SUM(messages.answer_tokens)) AS token_count,
+    SUM(total_price) AS total_price
+FROM
+    messages
+WHERE
+    app_id = :app_id"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id}

         timezone = pytz.timezone(account.timezone)
@@ -209,7 +220,7 @@ class DailyTokenCostStatistic(Resource):

             start_datetime_timezone = timezone.localize(start_datetime)
             start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at >= :start"
+            sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc

         if args["end"]:
@@ -219,10 +230,10 @@ class DailyTokenCostStatistic(Resource):

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at < :end"
+            sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc

-        sql_query += " GROUP BY date order by date"
+        sql_query += " GROUP BY date ORDER BY date"

         response_data = []
@@ -249,12 +260,22 @@ class AverageSessionInteractionStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """SELECT date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
-AVG(subquery.message_count) AS interactions
-FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count
-    FROM conversations c
-    JOIN messages m ON c.id = m.conversation_id
-    WHERE c.override_model_configs IS NULL AND c.app_id = :app_id"""
+        sql_query = """SELECT
+    DATE(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+    AVG(subquery.message_count) AS interactions
+FROM
+    (
+        SELECT
+            m.conversation_id,
+            COUNT(m.id) AS message_count
+        FROM
+            conversations c
+        JOIN
+            messages m
+            ON c.id = m.conversation_id
+        WHERE
+            c.override_model_configs IS NULL
+            AND c.app_id = :app_id"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id}

         timezone = pytz.timezone(account.timezone)
@@ -267,7 +288,7 @@ FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count

             start_datetime_timezone = timezone.localize(start_datetime)
             start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and c.created_at >= :start"
+            sql_query += " AND c.created_at >= :start"
             arg_dict["start"] = start_datetime_utc

         if args["end"]:
@@ -277,14 +298,19 @@ FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and c.created_at < :end"
+            sql_query += " AND c.created_at < :end"
             arg_dict["end"] = end_datetime_utc

         sql_query += """
-            GROUP BY m.conversation_id) subquery
-        LEFT JOIN conversations c on c.id=subquery.conversation_id
-        GROUP BY date
-        ORDER BY date"""
+        GROUP BY m.conversation_id
+    ) subquery
+LEFT JOIN
+    conversations c
+    ON c.id = subquery.conversation_id
+GROUP BY
+    date
+ORDER BY
+    date"""

         response_data = []
@@ -311,13 +337,17 @@ class UserSatisfactionRateStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """
-        SELECT date(DATE_TRUNC('day', m.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
-            COUNT(m.id) as message_count, COUNT(mf.id) as feedback_count
-            FROM messages m
-            LEFT JOIN message_feedbacks mf on mf.message_id=m.id and mf.rating='like'
-            WHERE m.app_id = :app_id
-        """
+        sql_query = """SELECT
+    DATE(DATE_TRUNC('day', m.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+    COUNT(m.id) AS message_count,
+    COUNT(mf.id) AS feedback_count
+FROM
+    messages m
+LEFT JOIN
+    message_feedbacks mf
+    ON mf.message_id=m.id AND mf.rating='like'
+WHERE
+    m.app_id = :app_id"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id}

         timezone = pytz.timezone(account.timezone)
@@ -330,7 +360,7 @@ class UserSatisfactionRateStatistic(Resource):

             start_datetime_timezone = timezone.localize(start_datetime)
             start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and m.created_at >= :start"
+            sql_query += " AND m.created_at >= :start"
             arg_dict["start"] = start_datetime_utc

         if args["end"]:
@@ -340,10 +370,10 @@ class UserSatisfactionRateStatistic(Resource):

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and m.created_at < :end"
+            sql_query += " AND m.created_at < :end"
             arg_dict["end"] = end_datetime_utc

-        sql_query += " GROUP BY date order by date"
+        sql_query += " GROUP BY date ORDER BY date"

         response_data = []
@@ -373,12 +403,13 @@ class AverageResponseTimeStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """
-        SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
-            AVG(provider_response_latency) as latency
-            FROM messages
-            WHERE app_id = :app_id
-        """
+        sql_query = """SELECT
+    DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+    AVG(provider_response_latency) AS latency
+FROM
+    messages
+WHERE
+    app_id = :app_id"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id}

         timezone = pytz.timezone(account.timezone)
@@ -391,7 +422,7 @@ class AverageResponseTimeStatistic(Resource):

             start_datetime_timezone = timezone.localize(start_datetime)
             start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at >= :start"
+            sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc

         if args["end"]:
@@ -401,10 +432,10 @@ class AverageResponseTimeStatistic(Resource):

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at < :end"
+            sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc

-        sql_query += " GROUP BY date order by date"
+        sql_query += " GROUP BY date ORDER BY date"

         response_data = []
@@ -429,13 +460,16 @@ class TokensPerSecondStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+        sql_query = """SELECT
+    DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
     CASE
         WHEN SUM(provider_response_latency) = 0 THEN 0
         ELSE (SUM(answer_tokens) / SUM(provider_response_latency))
     END as tokens_per_second
-FROM messages
-WHERE app_id = :app_id"""
+FROM
+    messages
+WHERE
+    app_id = :app_id"""
         arg_dict = {"tz": account.timezone, "app_id": app_model.id}

         timezone = pytz.timezone(account.timezone)
@@ -448,7 +482,7 @@ WHERE app_id = :app_id"""

             start_datetime_timezone = timezone.localize(start_datetime)
             start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at >= :start"
+            sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc

         if args["end"]:
@@ -458,10 +492,10 @@ WHERE app_id = :app_id"""

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at < :end"
+            sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc

-        sql_query += " GROUP BY date order by date"
+        sql_query += " GROUP BY date ORDER BY date"

         response_data = []
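
All of the statistics endpoints above assemble their SQL the same way: a multi-line base query with named bind parameters, optional time-range filters appended as short " AND ..." fragments, and a trailing GROUP BY/ORDER BY. A hedged sketch of that flow with illustrative values (the real handlers bind account.timezone, app_model.id and the parsed request arguments, and execute the statement against the application database, which is not shown in this diff):

    from sqlalchemy import text

    sql_query = """SELECT
        DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
        COUNT(*) AS message_count
    FROM
        messages
    WHERE
        app_id = :app_id"""
    arg_dict = {"tz": "UTC", "app_id": "example-app-id"}  # hypothetical values

    start_datetime_utc = None  # would come from the parsed "start" argument
    if start_datetime_utc is not None:
        sql_query += " AND created_at >= :start"
        arg_dict["start"] = start_datetime_utc

    sql_query += " GROUP BY date ORDER BY date"
    stmt = text(sql_query)  # the real handlers execute this with the bound arg_dict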

View File

@@ -30,12 +30,14 @@ class WorkflowDailyRunsStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """
-        SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(id) AS runs
-            FROM workflow_runs
-            WHERE app_id = :app_id
-            AND triggered_from = :triggered_from
-        """
+        sql_query = """SELECT
+    DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+    COUNT(id) AS runs
+FROM
+    workflow_runs
+WHERE
+    app_id = :app_id
+    AND triggered_from = :triggered_from"""
         arg_dict = {
             "tz": account.timezone,
             "app_id": app_model.id,
@@ -52,7 +54,7 @@ class WorkflowDailyRunsStatistic(Resource):

             start_datetime_timezone = timezone.localize(start_datetime)
             start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at >= :start"
+            sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc

         if args["end"]:
@@ -62,10 +64,10 @@ class WorkflowDailyRunsStatistic(Resource):

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at < :end"
+            sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc

-        sql_query += " GROUP BY date order by date"
+        sql_query += " GROUP BY date ORDER BY date"

         response_data = []
@@ -90,12 +92,14 @@ class WorkflowDailyTerminalsStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """
-        SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct workflow_runs.created_by) AS terminal_count
-            FROM workflow_runs
-            WHERE app_id = :app_id
-            AND triggered_from = :triggered_from
-        """
+        sql_query = """SELECT
+    DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+    COUNT(DISTINCT workflow_runs.created_by) AS terminal_count
+FROM
+    workflow_runs
+WHERE
+    app_id = :app_id
+    AND triggered_from = :triggered_from"""
         arg_dict = {
             "tz": account.timezone,
             "app_id": app_model.id,
@@ -112,7 +116,7 @@ class WorkflowDailyTerminalsStatistic(Resource):

             start_datetime_timezone = timezone.localize(start_datetime)
             start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at >= :start"
+            sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc

         if args["end"]:
@@ -122,10 +126,10 @@ class WorkflowDailyTerminalsStatistic(Resource):

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at < :end"
+            sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc

-        sql_query += " GROUP BY date order by date"
+        sql_query += " GROUP BY date ORDER BY date"

         response_data = []
@@ -150,14 +154,14 @@ class WorkflowDailyTokenCostStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """
-        SELECT
-            date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
-            SUM(workflow_runs.total_tokens) as token_count
-        FROM workflow_runs
-        WHERE app_id = :app_id
-            AND triggered_from = :triggered_from
-        """
+        sql_query = """SELECT
+    DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+    SUM(workflow_runs.total_tokens) AS token_count
+FROM
+    workflow_runs
+WHERE
+    app_id = :app_id
+    AND triggered_from = :triggered_from"""
         arg_dict = {
             "tz": account.timezone,
             "app_id": app_model.id,
@@ -174,7 +178,7 @@ class WorkflowDailyTokenCostStatistic(Resource):

             start_datetime_timezone = timezone.localize(start_datetime)
             start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at >= :start"
+            sql_query += " AND created_at >= :start"
             arg_dict["start"] = start_datetime_utc

         if args["end"]:
@@ -184,10 +188,10 @@ class WorkflowDailyTokenCostStatistic(Resource):

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query += " and created_at < :end"
+            sql_query += " AND created_at < :end"
             arg_dict["end"] = end_datetime_utc

-        sql_query += " GROUP BY date order by date"
+        sql_query += " GROUP BY date ORDER BY date"

         response_data = []
@@ -217,23 +221,27 @@ class WorkflowAverageAppInteractionStatistic(Resource):
         parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args")
         args = parser.parse_args()

-        sql_query = """
-        SELECT
-            AVG(sub.interactions) as interactions,
-            sub.date
-        FROM
-            (SELECT
-                date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
+        sql_query = """SELECT
+    AVG(sub.interactions) AS interactions,
+    sub.date
+FROM
+    (
+        SELECT
+            DATE(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date,
             c.created_by,
             COUNT(c.id) AS interactions
-            FROM workflow_runs c
-            WHERE c.app_id = :app_id
-                AND c.triggered_from = :triggered_from
-                {{start}}
-                {{end}}
-            GROUP BY date, c.created_by) sub
-        GROUP BY sub.date
-        """
+        FROM
+            workflow_runs c
+        WHERE
+            c.app_id = :app_id
+            AND c.triggered_from = :triggered_from
+            {{start}}
+            {{end}}
+        GROUP BY
+            date, c.created_by
+    ) sub
+GROUP BY
+    sub.date"""
         arg_dict = {
             "tz": account.timezone,
             "app_id": app_model.id,
@@ -262,7 +270,7 @@ class WorkflowAverageAppInteractionStatistic(Resource):

             end_datetime_timezone = timezone.localize(end_datetime)
             end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone)
-            sql_query = sql_query.replace("{{end}}", " and c.created_at < :end")
+            sql_query = sql_query.replace("{{end}}", " AND c.created_at < :end")
             arg_dict["end"] = end_datetime_utc
         else:
             sql_query = sql_query.replace("{{end}}", "")

View File

@@ -64,7 +64,8 @@ def cloud_edition_billing_resource_check(resource: str):
                 elif resource == "vector_space" and 0 < vector_space.limit <= vector_space.size:
                     abort(403, "The capacity of the vector space has reached the limit of your subscription.")
                 elif resource == "documents" and 0 < documents_upload_quota.limit <= documents_upload_quota.size:
-                    # The api of file upload is used in the multiple places, so we need to check the source of the request from datasets
+                    # The api of file upload is used in the multiple places,
+                    # so we need to check the source of the request from datasets
                     source = request.args.get("source")
                     if source == "datasets":
                         abort(403, "The number of documents has reached the limit of your subscription.")

View File

@@ -80,7 +80,8 @@ def _validate_web_sso_token(decoded, system_features, app_code):
         if not source or source != "sso":
             raise WebSSOAuthRequiredError()

-    # Check if SSO is not enforced for web, and if the token source is SSO, raise an error and redirect to normal passport login
+    # Check if SSO is not enforced for web, and if the token source is SSO,
+    # raise an error and redirect to normal passport login
     if not system_features.sso_enforced_for_web or not app_web_sso_enabled:
         source = decoded.get("token_source")
         if source and source == "sso":

View File

@@ -41,7 +41,8 @@ Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use
{{historic_messages}}
Question: {{query}}
{{agent_scratchpad}}
-Thought:"""
+Thought:"""  # noqa: E501

ENGLISH_REACT_COMPLETION_AGENT_SCRATCHPAD_TEMPLATES = """Observation: {{observation}}
Thought:"""
@@ -86,7 +87,8 @@ Action:
```

Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
-"""
+"""  # noqa: E501

ENGLISH_REACT_CHAT_AGENT_SCRATCHPAD_TEMPLATES = ""

View File

@@ -84,10 +84,12 @@ class WorkflowLoggingCallback(WorkflowCallback):
         if route_node_state.node_run_result:
             node_run_result = route_node_state.node_run_result
             self.print_text(
-                f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", color="green"
+                f"Inputs: " f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
+                color="green",
             )
             self.print_text(
-                f"Process Data: {jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}",
+                f"Process Data: "
+                f"{jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}",
                 color="green",
             )
             self.print_text(
@@ -114,14 +116,17 @@ class WorkflowLoggingCallback(WorkflowCallback):
                 node_run_result = route_node_state.node_run_result
                 self.print_text(f"Error: {node_run_result.error}", color="red")
                 self.print_text(
-                    f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", color="red"
+                    f"Inputs: " f"" f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}",
+                    color="red",
                 )
                 self.print_text(
-                    f"Process Data: {jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}",
+                    f"Process Data: "
+                    f"{jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}",
                     color="red",
                 )
                 self.print_text(
-                    f"Outputs: {jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}", color="red"
+                    f"Outputs: " f"{jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}",
+                    color="red",
                 )

     def on_node_text_chunk(self, event: NodeRunStreamChunkEvent) -> None:

View File

@@ -188,7 +188,8 @@ class MessageFileParser:
     def _check_image_remote_url(self, url):
         try:
             headers = {
-                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
+                " Chrome/91.0.4472.124 Safari/537.36"
             }

             def is_s3_presigned_url(url):

View File

@@ -89,7 +89,8 @@ class CodeExecutor:
                 raise CodeExecutionError("Code execution service is unavailable")
             elif response.status_code != 200:
                 raise Exception(
-                    f"Failed to execute code, got status code {response.status_code}, please check if the sandbox service is running"
+                    f"Failed to execute code, got status code {response.status_code},"
+                    f" please check if the sandbox service is running"
                 )
         except CodeExecutionError as e:
             raise e

View File

@@ -14,7 +14,10 @@ class ToolParameterCache:
     def __init__(
         self, tenant_id: str, provider: str, tool_name: str, cache_type: ToolParameterCacheType, identity_id: str
     ):
-        self.cache_key = f"{cache_type.value}_secret:tenant_id:{tenant_id}:provider:{provider}:tool_name:{tool_name}:identity_id:{identity_id}"
+        self.cache_key = (
+            f"{cache_type.value}_secret:tenant_id:{tenant_id}:provider:{provider}:tool_name:{tool_name}"
+            f":identity_id:{identity_id}"
+        )

     def get(self) -> Optional[dict]:
         """

View File

@@ -59,24 +59,27 @@ User Input: yo, 你今天咋样?
}

User Input:
-"""
+"""  # noqa: E501

SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
    "Please help me predict the three most likely questions that human would ask, "
    "and keeping each question under 20 characters.\n"
-    "MAKE SURE your output is the SAME language as the Assistant's latest response(if the main response is written in Chinese, then the language of your output must be using Chinese.)!\n"
+    "MAKE SURE your output is the SAME language as the Assistant's latest response"
+    "(if the main response is written in Chinese, then the language of your output must be using Chinese.)!\n"
    "The output must be an array in JSON format following the specified schema:\n"
    '["question1","question2","question3"]\n'
)

GENERATOR_QA_PROMPT = (
-    "<Task> The user will send a long text. Generate a Question and Answer pairs only using the knowledge in the long text. Please think step by step."
+    "<Task> The user will send a long text. Generate a Question and Answer pairs only using the knowledge"
+    " in the long text. Please think step by step."
    "Step 1: Understand and summarize the main content of this text.\n"
    "Step 2: What key information or concepts are mentioned in this text?\n"
    "Step 3: Decompose or combine multiple pieces of information and concepts.\n"
    "Step 4: Generate questions and answers based on these key information and concepts.\n"
    "<Constraints> The questions should be clear and detailed, and the answers should be detailed and complete. "
-    "You must answer in {language}, in a style that is clear and detailed in {language}. No language other than {language} should be used. \n"
+    "You must answer in {language}, in a style that is clear and detailed in {language}."
+    " No language other than {language} should be used. \n"
    "<Format> Use the following format: Q1:\nA1:\nQ2:\nA2:...\n"
    "<QA Pairs>"
)
@@ -94,7 +97,7 @@ Based on task description, please create a well-structured prompt template that
- Use the same language as task description.
- Output in ``` xml ``` and start with <instruction>
Please generate the full prompt template with at least 300 words and output only the prompt template.
-"""
+"""  # noqa: E501

RULE_CONFIG_PROMPT_GENERATE_TEMPLATE = """
Here is a task description for which I would like you to create a high-quality prompt template for:
@@ -109,7 +112,7 @@ Based on task description, please create a well-structured prompt template that
- Use the same language as task description.
- Output in ``` xml ``` and start with <instruction>
Please generate the full prompt template and output only the prompt template.
-"""
+"""  # noqa: E501

RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE = """
I need to extract the following information from the input text. The <information to be extracted> tag specifies the 'type', 'description' and 'required' of the information to be extracted.
@@ -134,7 +137,7 @@ Inside <text></text> XML tags, there is a text that I should extract parameters
### Answer
I should always output a valid list. Output nothing other than the list of variable_name. Output an empty list if there is no variable name in input text.
-"""
+"""  # noqa: E501

RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE = """
<instruction>
@@ -150,4 +153,4 @@ Welcome! I'm here to assist you with any questions or issues you might have with
Here is the task description: {{INPUT_TEXT}}

You just need to generate the output
-"""
+"""  # noqa: E501

View File

@@ -8,8 +8,11 @@ PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = {
        },
        "type": "float",
        "help": {
-            "en_US": "Controls randomness. Lower temperature results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. Higher temperature results in more random completions.",
-            "zh_Hans": "温度控制随机性。较低的温度会导致较少的随机完成。随着温度接近零,模型将变得确定性和重复性。较高的温度会导致更多的随机完成。",
+            "en_US": "Controls randomness. Lower temperature results in less random completions."
+            " As the temperature approaches zero, the model will become deterministic and repetitive."
+            " Higher temperature results in more random completions.",
+            "zh_Hans": "温度控制随机性。较低的温度会导致较少的随机完成。随着温度接近零,模型将变得确定性和重复性。"
+            "较高的温度会导致更多的随机完成。",
        },
        "required": False,
        "default": 0.0,
@@ -24,7 +27,8 @@ PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = {
        },
        "type": "float",
        "help": {
-            "en_US": "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered.",
+            "en_US": "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options"
+            " are considered.",
            "zh_Hans": "通过核心采样控制多样性0.5表示考虑了一半的所有可能性加权选项。",
        },
        "required": False,
@@ -88,7 +92,8 @@ PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = {
        },
        "type": "int",
        "help": {
-            "en_US": "Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.",
+            "en_US": "Specifies the upper limit on the length of generated results."
+            " If the generated results are truncated, you can increase this parameter.",
            "zh_Hans": "指定生成结果长度的上限。如果生成结果截断,可以调大该参数。",
        },
        "required": False,
@@ -104,7 +109,8 @@ PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = {
        },
        "type": "string",
        "help": {
-            "en_US": "Set a response format, ensure the output from llm is a valid code block as possible, such as JSON, XML, etc.",
+            "en_US": "Set a response format, ensure the output from llm is a valid code block as possible,"
+            " such as JSON, XML, etc.",
            "zh_Hans": "设置一个返回格式确保llm的输出尽可能是有效的代码块如JSON、XML等",
        },
        "required": False,

View File

@@ -72,7 +72,9 @@ class AIModel(ABC):
            if isinstance(error, tuple(model_errors)):
                if invoke_error == InvokeAuthorizationError:
                    return invoke_error(
-                        description=f"[{provider_name}] Incorrect model credentials provided, please check and try again. "
+                        description=(
+                            f"[{provider_name}] Incorrect model credentials provided, please check and try again."
+                        )
                    )
                return invoke_error(description=f"[{provider_name}] {invoke_error.description}, {str(error)}")

View File

@@ -187,7 +187,7 @@ if you are not sure about the structure.
<instructions>
{{instructions}}
</instructions>
-"""
+"""  # noqa: E501

        code_block = model_parameters.get("response_format", "")
        if not code_block:
@@ -830,7 +830,8 @@ if you are not sure about the structure.
                else:
                    if parameter_value != round(parameter_value, parameter_rule.precision):
                        raise ValueError(
-                            f"Model Parameter {parameter_name} should be round to {parameter_rule.precision} decimal places."
+                            f"Model Parameter {parameter_name} should be round to {parameter_rule.precision}"
+                            f" decimal places."
                        )

        # validate parameter value range

View File

@@ -51,7 +51,7 @@ if you are not sure about the structure.
<instructions>
{{instructions}}
</instructions>
-"""
+"""  # noqa: E501


class AnthropicLargeLanguageModel(LargeLanguageModel):

View File

@@ -16,6 +16,15 @@ from core.model_runtime.entities.model_entities import (

AZURE_OPENAI_API_VERSION = "2024-02-15-preview"

+AZURE_DEFAULT_PARAM_SEED_HELP = I18nObject(
+    zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,"
+    "您应该参考 system_fingerprint 响应参数来监视变化。",
+    en_US="If specified, model will make a best effort to sample deterministically,"
+    " such that repeated requests with the same seed and parameters should return the same result."
+    " Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter"
+    " to monitor changes in the backend.",
+)
+

def _get_max_tokens(default: int, min_val: int, max_val: int) -> ParameterRule:
    rule = ParameterRule(
@@ -229,10 +238,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,
@@ -297,10 +303,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,
@@ -365,10 +368,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,
@@ -433,10 +433,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,
@@ -502,10 +499,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,
@@ -571,10 +565,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,
@@ -650,10 +641,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,
@@ -719,10 +707,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,
@@ -788,10 +773,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,
@@ -867,10 +849,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,
@@ -936,10 +915,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,
@@ -1000,10 +976,7 @@ LLM_BASE_MODELS = [
                    name="seed",
                    label=I18nObject(zh_Hans="种子", en_US="Seed"),
                    type="int",
-                    help=I18nObject(
-                        zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。",
-                        en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.",
-                    ),
+                    help=AZURE_DEFAULT_PARAM_SEED_HELP,
                    required=False,
                    precision=2,
                    min=0,

View File

@@ -15,6 +15,7 @@ class BaichuanTokenizer:
    @classmethod
    def _get_num_tokens(cls, text: str) -> int:
-        # tokens = number of Chinese characters + number of English words * 1.3 (for estimation only, subject to actual return)
+        # tokens = number of Chinese characters + number of English words * 1.3
+        # (for estimation only, subject to actual return)
        # https://platform.baichuan-ai.com/docs/text-Embedding
        return int(cls.count_chinese_characters(text) + cls.count_english_vocabularies(text) * 1.3)
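
The comment above states the estimation rule in words. A self-contained sketch of that arithmetic (the regular expressions for counting Chinese characters and English words are assumptions for illustration, not Baichuan's actual implementation):

    import re

    def estimate_tokens(text: str) -> int:
        # tokens ~= number of Chinese characters + number of English words * 1.3 (estimation only)
        chinese_characters = len(re.findall(r"[\u4e00-\u9fff]", text))
        english_words = len(re.findall(r"[A-Za-z]+", text))
        return int(chinese_characters + english_words * 1.3)

    print(estimate_tokens("Dify is an open source LLM app development platform"))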

View File

@@ -52,7 +52,7 @@ if you are not sure about the structure.
<instructions>
{{instructions}}
</instructions>
-"""
+"""  # noqa: E501


class BedrockLargeLanguageModel(LargeLanguageModel):
@@ -541,7 +541,9 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
                "max_tokens": 32,
            }
        elif "ai21" in model:
-            # ValidationException: Malformed input request: #/temperature: expected type: Number, found: Null#/maxTokens: expected type: Integer, found: Null#/topP: expected type: Number, found: Null, please reformat your input and try again.
+            # ValidationException: Malformed input request: #/temperature: expected type: Number,
+            # found: Null#/maxTokens: expected type: Integer, found: Null#/topP: expected type: Number, found: Null,
+            # please reformat your input and try again.
            required_params = {
                "temperature": 0.7,
                "topP": 0.9,

View File

@@ -45,7 +45,7 @@ if you are not sure about the structure.
<instructions>
{{instructions}}
</instructions>
-"""
+"""  # noqa: E501


class GoogleLargeLanguageModel(LargeLanguageModel):

View File

@@ -54,7 +54,8 @@ class TeiHelper:
        url = str(URL(server_url) / "info")

-        # this method is surrounded by a lock, and default requests may hang forever, so we just set a Adapter with max_retries=3
+        # this method is surrounded by a lock, and default requests may hang forever,
+        # so we just set a Adapter with max_retries=3
        session = Session()
        session.mount("http://", HTTPAdapter(max_retries=3))
        session.mount("https://", HTTPAdapter(max_retries=3))

View File

@@ -131,7 +131,8 @@ class HunyuanLargeLanguageModel(LargeLanguageModel):
                {
                    "Role": message.role.value,
                    # fix set content = "" while tool_call request
-                    # fix [hunyuan] None, [TencentCloudSDKException] code:InvalidParameter message:Messages Content and Contents not allowed empty at the same time.
+                    # fix [hunyuan] None, [TencentCloudSDKException] code:InvalidParameter
+                    # message:Messages Content and Contents not allowed empty at the same time.
                    "Content": " ",  # message.content if (message.content is not None) else "",
                    "ToolCalls": dict_tool_calls,
                }

View File

@@ -93,7 +93,8 @@ class NVIDIALargeLanguageModel(OAIAPICompatLargeLanguageModel):
    def _validate_credentials(self, model: str, credentials: dict) -> None:
        """
-        Validate model credentials using requests to ensure compatibility with all providers following OpenAI's API standard.
+        Validate model credentials using requests to ensure compatibility with all providers following
+        OpenAI's API standard.

        :param model: model name
        :param credentials: model credentials

View File

@@ -239,7 +239,8 @@ class OCILargeLanguageModel(LargeLanguageModel):
            config_items = oci_config_content.split("/")
            if len(config_items) != 5:
                raise CredentialsValidateFailedError(
-                    "oci_config_content should be base64.b64encode('user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))"
+                    "oci_config_content should be base64.b64encode("
+                    "'user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))"
                )
            oci_config["user"] = config_items[0]
            oci_config["fingerprint"] = config_items[1]

View File

@@ -146,7 +146,8 @@ class OCITextEmbeddingModel(TextEmbeddingModel):
            config_items = oci_config_content.split("/")
            if len(config_items) != 5:
                raise CredentialsValidateFailedError(
-                    "oci_config_content should be base64.b64encode('user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))"
+                    "oci_config_content should be base64.b64encode("
+                    "'user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))"
                )
            oci_config["user"] = config_items[0]
            oci_config["fingerprint"] = config_items[1]

View File

@@ -639,9 +639,10 @@ class OllamaLargeLanguageModel(LargeLanguageModel):
                type=ParameterType.STRING,
                help=I18nObject(
                    en_US="Sets how long the model is kept in memory after generating a response. "
-                    "This must be a duration string with a unit (e.g., '10m' for 10 minutes or '24h' for 24 hours). "
-                    "A negative number keeps the model loaded indefinitely, and '0' unloads the model immediately after generating a response. "
-                    "Valid time units are 's','m','h'. (Default: 5m)"
+                    "This must be a duration string with a unit (e.g., '10m' for 10 minutes or '24h' for 24 hours)."
+                    " A negative number keeps the model loaded indefinitely, and '0' unloads the model"
+                    " immediately after generating a response."
+                    " Valid time units are 's','m','h'. (Default: 5m)"
                ),
            ),
            ParameterRule(

View File

@ -37,7 +37,7 @@ if you are not sure about the structure.
<instructions> <instructions>
{{instructions}} {{instructions}}
</instructions> </instructions>
""" """ # noqa: E501
class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
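
Where a long literal cannot be wrapped without altering its content (prompt templates, SVG path data, localized strings), the commit instead appends a per-line `# noqa: E501` suppression, as in the template above. A standalone sketch with a made-up constant:

# ruff still checks everything else on this line; the trailing comment only
# silences the line-too-long rule (E501) for this one line.
PROMPT_TEMPLATE = "You are a helpful assistant. Answer strictly in JSON and never include any explanation outside of the JSON object you return."  # noqa: E501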

View File

@ -103,7 +103,8 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
def validate_credentials(self, model: str, credentials: dict) -> None: def validate_credentials(self, model: str, credentials: dict) -> None:
""" """
Validate model credentials using requests to ensure compatibility with all providers following OpenAI's API standard. Validate model credentials using requests to ensure compatibility with all providers following
OpenAI's API standard.
:param model: model name :param model: model name
:param credentials: model credentials :param credentials: model credentials
@ -262,7 +263,8 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel):
return entity return entity
# validate_credentials method has been rewritten to use the requests library for compatibility with all providers following OpenAI's API standard. # validate_credentials method has been rewritten to use the requests library for compatibility with all providers
# following OpenAI's API standard.
def _generate( def _generate(
self, self,
model: str, model: str,

View File

@ -61,7 +61,10 @@ class SparkLLMClient:
signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding="utf-8") signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding="utf-8")
authorization_origin = f'api_key="{api_key}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"' authorization_origin = (
f'api_key="{api_key}", algorithm="hmac-sha256", headers="host date request-line",'
f' signature="{signature_sha_base64}"'
)
authorization = base64.b64encode(authorization_origin.encode("utf-8")).decode(encoding="utf-8") authorization = base64.b64encode(authorization_origin.encode("utf-8")).decode(encoding="utf-8")
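
The Spark change illustrates the third recurring pattern: wrapping an f-string assignment in parentheses so adjacent f-string fragments continue on the next line and are joined into one value. A sketch with placeholder credentials, not real signing inputs:

api_key = "demo-key"
signature_sha_base64 = "ZmFrZS1zaWduYXR1cmU="

# The parentheses let the literal continue on the next line; the two
# f-string pieces are concatenated into a single header value.
authorization_origin = (
    f'api_key="{api_key}", algorithm="hmac-sha256", headers="host date request-line",'
    f' signature="{signature_sha_base64}"'
)
print(authorization_origin)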

View File

@ -34,7 +34,7 @@ if you are not sure about the structure.
<instructions> <instructions>
{{instructions}} {{instructions}}
</instructions> </instructions>
""" """ # noqa: E501
class UpstageLargeLanguageModel(_CommonUpstage, LargeLanguageModel): class UpstageLargeLanguageModel(_CommonUpstage, LargeLanguageModel):

View File

@ -114,7 +114,8 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
credentials.refresh(request) credentials.refresh(request)
token = credentials.token token = credentials.token
# Vertex AI Anthropic Claude3 Opus model available in us-east5 region, Sonnet and Haiku available in us-central1 region # Vertex AI Anthropic Claude3 Opus model available in us-east5 region, Sonnet and Haiku available
# in us-central1 region
if "opus" in model or "claude-3-5-sonnet" in model: if "opus" in model or "claude-3-5-sonnet" in model:
location = "us-east5" location = "us-east5"
else: else:
@ -123,7 +124,8 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
# use access token to authenticate # use access token to authenticate
if token: if token:
client = AnthropicVertex(region=location, project_id=project_id, access_token=token) client = AnthropicVertex(region=location, project_id=project_id, access_token=token)
# When access token is empty, try to use the Google Cloud VM's built-in service account or the GOOGLE_APPLICATION_CREDENTIALS environment variable # When access token is empty, try to use the Google Cloud VM's built-in service account
# or the GOOGLE_APPLICATION_CREDENTIALS environment variable
else: else:
client = AnthropicVertex( client = AnthropicVertex(
region=location, region=location,

View File

@ -28,7 +28,7 @@ if you are not sure about the structure.
</instructions> </instructions>
You should also complete the text started with ``` but not tell ``` directly. You should also complete the text started with ``` but not tell ``` directly.
""" """ # noqa: E501
class ErnieBotLargeLanguageModel(LargeLanguageModel): class ErnieBotLargeLanguageModel(LargeLanguageModel):

View File

@ -130,7 +130,8 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
credentials["completion_type"] = "completion" credentials["completion_type"] = "completion"
else: else:
raise ValueError( raise ValueError(
f"xinference model ability {extra_param.model_ability} is not supported, check if you have the right model type" f"xinference model ability {extra_param.model_ability} is not supported,"
f" check if you have the right model type"
) )
if extra_param.support_function_call: if extra_param.support_function_call:
@ -358,7 +359,8 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
help=I18nObject( help=I18nObject(
en_US="Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they " en_US="Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they "
"appear in the text so far, increasing the model's likelihood to talk about new topics.", "appear in the text so far, increasing the model's likelihood to talk about new topics.",
zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词是否已出现在文本中对其进行惩罚,从而增加模型谈论新话题的可能性。", zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词是否已出现在文本中对其进行惩罚,"
"从而增加模型谈论新话题的可能性。",
), ),
default=0.0, default=0.0,
min=-2.0, min=-2.0,
@ -378,7 +380,8 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel):
en_US="Number between -2.0 and 2.0. Positive values penalize new tokens based on their " en_US="Number between -2.0 and 2.0. Positive values penalize new tokens based on their "
"existing frequency in the text so far, decreasing the model's likelihood to repeat the " "existing frequency in the text so far, decreasing the model's likelihood to repeat the "
"same line verbatim.", "same line verbatim.",
zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词在文本中的现有频率对其进行惩罚,从而降低模型逐字重复相同内容的可能性。", zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词在文本中的现有频率对其进行惩罚,"
"从而降低模型逐字重复相同内容的可能性。",
), ),
default=0.0, default=0.0,
min=-2.0, min=-2.0,

View File

@ -101,12 +101,16 @@ class XinferenceSpeech2TextModel(Speech2TextModel):
:param model: model name :param model: model name
:param credentials: model credentials :param credentials: model credentials
:param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg,mpga, m4a, ogg, wav, or webm. :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg,
mpga, m4a, ogg, wav, or webm.
:param language: The language of the input audio. Supplying the input language in ISO-639-1 :param language: The language of the input audio. Supplying the input language in ISO-639-1
:param prompt: An optional text to guide the model's style or continue a previous audio segment. :param prompt: An optional text to guide the model's style or continue a previous audio segment.
The prompt should match the audio language. The prompt should match the audio language.
:param response_format: The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt. :param response_format: The format of the transcript output, in one of these options: json, text, srt,
:param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random,while lower values like 0.2 will make it more focused and deterministic.If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit. verbose_json, or vtt.
:param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more
random,while lower values like 0.2 will make it more focused and deterministic.If set to 0, the model will use
log probability to automatically increase the temperature until certain thresholds are hit.
:return: text for given audio file :return: text for given audio file
""" """
server_url = credentials["server_url"] server_url = credentials["server_url"]

View File

@ -76,7 +76,8 @@ class XinferenceHelper:
url = str(URL(server_url) / "v1" / "models" / model_uid) url = str(URL(server_url) / "v1" / "models" / model_uid)
# this method is surrounded by a lock, and default requests may hang forever, so we just set a Adapter with max_retries=3 # this method is surrounded by a lock, and default requests may hang forever,
# so we just set a Adapter with max_retries=3
session = Session() session = Session()
session.mount("http://", HTTPAdapter(max_retries=3)) session.mount("http://", HTTPAdapter(max_retries=3))
session.mount("https://", HTTPAdapter(max_retries=3)) session.mount("https://", HTTPAdapter(max_retries=3))
@ -88,7 +89,8 @@ class XinferenceHelper:
raise RuntimeError(f"get xinference model extra parameter failed, url: {url}, error: {e}") raise RuntimeError(f"get xinference model extra parameter failed, url: {url}, error: {e}")
if response.status_code != 200: if response.status_code != 200:
raise RuntimeError( raise RuntimeError(
f"get xinference model extra parameter failed, status code: {response.status_code}, response: {response.text}" f"get xinference model extra parameter failed, status code: {response.status_code},"
f" response: {response.text}"
) )
response_json = response.json() response_json = response.json()
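
The XinferenceHelper context also shows the retry setup the reflowed comment refers to: mounting an HTTPAdapter with max_retries on both schemes so a request made while the lock is held retries failed connection attempts rather than relying on requests' default of zero retries. A minimal sketch; the commented-out URL is a placeholder:

from requests import Session
from requests.adapters import HTTPAdapter

session = Session()
# Retry failed connection attempts up to 3 times on either scheme
# (requests performs no retries by default).
session.mount("http://", HTTPAdapter(max_retries=3))
session.mount("https://", HTTPAdapter(max_retries=3))

# response = session.get("http://localhost:9997/v1/models/some-model-uid", timeout=10)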

View File

@ -31,7 +31,7 @@ And you should always end the block with a "```" to indicate the end of the JSON
{{instructions}} {{instructions}}
</instructions> </instructions>
```JSON""" ```JSON""" # noqa: E501
class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel): class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel):

View File

@ -75,7 +75,8 @@ Headers = Mapping[str, Union[str, Omit]]
ResponseT = TypeVar( ResponseT = TypeVar(
"ResponseT", "ResponseT",
bound="Union[str, None, BaseModel, list[Any], Dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol, BinaryResponseContent]", bound="Union[str, None, BaseModel, list[Any], Dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol,"
" BinaryResponseContent]",
) )
# for user input files # for user input files
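
The typing hunk splits a forward-reference string used as a TypeVar bound; because adjacent literals are merged before the annotation is stored, the bound remains a single string. The sketch below substitutes simplified types for the repository's response classes:

from typing import TypeVar, Union

# The bound is one forward-reference string assembled from two adjacent
# literals; splitting it only changes source layout, not the annotation.
ResponseT = TypeVar(
    "ResponseT",
    bound="Union[str, None, list[int],"
    " dict[str, int]]",
)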

View File

@ -67,7 +67,8 @@ class CommonValidator:
if credential_form_schema.max_length: if credential_form_schema.max_length:
if len(value) > credential_form_schema.max_length: if len(value) > credential_form_schema.max_length:
raise ValueError( raise ValueError(
f"Variable {credential_form_schema.variable} length should not greater than {credential_form_schema.max_length}" f"Variable {credential_form_schema.variable} length should not"
f" greater than {credential_form_schema.max_length}"
) )
# check the type of value # check the type of value

View File

@ -1,11 +1,11 @@
CONTEXT = "Use the following context as your learned knowledge, inside <context></context> XML tags.\n\n<context>\n{{#context#}}\n</context>\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n" CONTEXT = "Use the following context as your learned knowledge, inside <context></context> XML tags.\n\n<context>\n{{#context#}}\n</context>\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n" # noqa: E501
BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{#context#}}\n```\n\n" BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{#context#}}\n```\n\n" # noqa: E501
CHAT_APP_COMPLETION_PROMPT_CONFIG = { CHAT_APP_COMPLETION_PROMPT_CONFIG = {
"completion_prompt_config": { "completion_prompt_config": {
"prompt": { "prompt": {
"text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: " "text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: " # noqa: E501
}, },
"conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"}, "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"},
}, },
@ -24,7 +24,7 @@ COMPLETION_APP_COMPLETION_PROMPT_CONFIG = {
BAICHUAN_CHAT_APP_COMPLETION_PROMPT_CONFIG = { BAICHUAN_CHAT_APP_COMPLETION_PROMPT_CONFIG = {
"completion_prompt_config": { "completion_prompt_config": {
"prompt": { "prompt": {
"text": "{{#pre_prompt#}}\n\n用户和助手的历史对话内容如下:\n```\n{{#histories#}}\n```\n\n\n\n用户:{{#query#}}" "text": "{{#pre_prompt#}}\n\n用户和助手的历史对话内容如下:\n```\n{{#histories#}}\n```\n\n\n\n用户:{{#query#}}" # noqa: E501
}, },
"conversation_histories_role": {"user_prefix": "用户", "assistant_prefix": "助手"}, "conversation_histories_role": {"user_prefix": "用户", "assistant_prefix": "助手"},
}, },

View File

@ -195,7 +195,8 @@ class OracleVector(BaseVector):
top_k = kwargs.get("top_k", 5) top_k = kwargs.get("top_k", 5)
with self._get_cursor() as cur: with self._get_cursor() as cur:
cur.execute( cur.execute(
f"SELECT meta, text, vector_distance(embedding,:1) AS distance FROM {self.table_name} ORDER BY distance fetch first {top_k} rows only", f"SELECT meta, text, vector_distance(embedding,:1) AS distance FROM {self.table_name}"
f" ORDER BY distance fetch first {top_k} rows only",
[numpy.array(query_vector)], [numpy.array(query_vector)],
) )
docs = [] docs = []
@ -254,7 +255,8 @@ class OracleVector(BaseVector):
entities.append(token) entities.append(token)
with self._get_cursor() as cur: with self._get_cursor() as cur:
cur.execute( cur.execute(
f"select meta, text, embedding FROM {self.table_name} WHERE CONTAINS(text, :1, 1) > 0 order by score(1) desc fetch first {top_k} rows only", f"select meta, text, embedding FROM {self.table_name}"
f" WHERE CONTAINS(text, :1, 1) > 0 order by score(1) desc fetch first {top_k} rows only",
[" ACCUM ".join(entities)], [" ACCUM ".join(entities)],
) )
docs = [] docs = []

View File

@ -139,7 +139,8 @@ class PGVector(BaseVector):
with self._get_cursor() as cur: with self._get_cursor() as cur:
cur.execute( cur.execute(
f"SELECT meta, text, embedding <=> %s AS distance FROM {self.table_name} ORDER BY distance LIMIT {top_k}", f"SELECT meta, text, embedding <=> %s AS distance FROM {self.table_name}"
f" ORDER BY distance LIMIT {top_k}",
(json.dumps(query_vector),), (json.dumps(query_vector),),
) )
docs = [] docs = []
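
The vector-store hunks split long SQL f-strings the same way; note the convention, used throughout the commit, of starting each continuation fragment with a space so keywords do not run together. A small sketch that only builds the statement and touches no database; the table name and limit are placeholders:

table_name = "embeddings"
top_k = 5

# The two f-string pieces form one SQL statement; the leading space on the
# second fragment keeps "embeddings" and "ORDER BY" separated.
query = (
    f"SELECT meta, text, embedding <=> %s AS distance FROM {table_name}"
    f" ORDER BY distance LIMIT {top_k}"
)
assert "embeddings ORDER BY" in query
print(query)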

View File

@ -30,7 +30,10 @@ from extensions.ext_storage import storage
from models.model import UploadFile from models.model import UploadFile
SUPPORT_URL_CONTENT_TYPES = ["application/pdf", "text/plain", "application/json"] SUPPORT_URL_CONTENT_TYPES = ["application/pdf", "text/plain", "application/json"]
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" USER_AGENT = (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124"
" Safari/537.36"
)
class ExtractProcessor: class ExtractProcessor:

View File

@ -14,7 +14,7 @@ from core.workflow.nodes.llm.llm_node import LLMNode
PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:""" PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:"""
SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
Thought:""" Thought:""" # noqa: E501
FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input).
The nouns in the format of "Thought", "Action", "Action Input", "Final Answer" must be expressed in English. The nouns in the format of "Thought", "Action", "Action Input", "Final Answer" must be expressed in English.
@ -46,7 +46,7 @@ Action:
"action": "Final Answer", "action": "Final Answer",
"action_input": "Final response to human" "action_input": "Final response to human"
}} }}
```""" ```""" # noqa: E501
class ReactMultiDatasetRouter: class ReactMultiDatasetRouter:
@ -204,7 +204,8 @@ class ReactMultiDatasetRouter:
tool_strings = [] tool_strings = []
for tool in tools: for tool in tools:
tool_strings.append( tool_strings.append(
f"{tool.name}: {tool.description}, args: {{'query': {{'title': 'Query', 'description': 'Query for the dataset to be used to retrieve the dataset.', 'type': 'string'}}}}" f"{tool.name}: {tool.description}, args: {{'query': {{'title': 'Query',"
f" 'description': 'Query for the dataset to be used to retrieve the dataset.', 'type': 'string'}}}}"
) )
formatted_tools = "\n".join(tool_strings) formatted_tools = "\n".join(tool_strings)
unique_tool_names = {tool.name for tool in tools} unique_tool_names = {tool.name for tool in tools}
@ -236,7 +237,7 @@ class ReactMultiDatasetRouter:
suffix = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. suffix = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:.
Question: {input} Question: {input}
Thought: {agent_scratchpad} Thought: {agent_scratchpad}
""" """ # noqa: E501
tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools]) tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools])
tool_names = ", ".join([tool.name for tool in tools]) tool_names = ", ".join([tool.name for tool in tools])

View File

@ -7,7 +7,8 @@ from core.tools.entities.tool_entities import ToolParameter
class ApiToolBundle(BaseModel): class ApiToolBundle(BaseModel):
""" """
This class is used to store the schema information of an api based tool. such as the url, the method, the parameters, etc. This class is used to store the schema information of an api based tool.
such as the url, the method, the parameters, etc.
""" """
# server_url # server_url

View File

@ -4,52 +4,52 @@ from core.tools.entities.tool_entities import ToolLabel, ToolLabelEnum
ICONS = { ICONS = {
ToolLabelEnum.SEARCH: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.SEARCH: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M7.33398 1.3335C10.646 1.3335 13.334 4.0215 13.334 7.3335C13.334 10.6455 10.646 13.3335 7.33398 13.3335C4.02198 13.3335 1.33398 10.6455 1.33398 7.3335C1.33398 4.0215 4.02198 1.3335 7.33398 1.3335ZM7.33398 12.0002C9.91232 12.0002 12.0007 9.91183 12.0007 7.3335C12.0007 4.75516 9.91232 2.66683 7.33398 2.66683C4.75565 2.66683 2.66732 4.75516 2.66732 7.3335C2.66732 9.91183 4.75565 12.0002 7.33398 12.0002ZM12.9909 12.0476L14.8764 13.9332L13.9337 14.876L12.0481 12.9904L12.9909 12.0476Z" fill="#344054"/> <path d="M7.33398 1.3335C10.646 1.3335 13.334 4.0215 13.334 7.3335C13.334 10.6455 10.646 13.3335 7.33398 13.3335C4.02198 13.3335 1.33398 10.6455 1.33398 7.3335C1.33398 4.0215 4.02198 1.3335 7.33398 1.3335ZM7.33398 12.0002C9.91232 12.0002 12.0007 9.91183 12.0007 7.3335C12.0007 4.75516 9.91232 2.66683 7.33398 2.66683C4.75565 2.66683 2.66732 4.75516 2.66732 7.3335C2.66732 9.91183 4.75565 12.0002 7.33398 12.0002ZM12.9909 12.0476L14.8764 13.9332L13.9337 14.876L12.0481 12.9904L12.9909 12.0476Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.IMAGE: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.IMAGE: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M13.0514 9.71752L10.4718 7.13792C10.2115 6.87752 9.78932 6.87752 9.52898 7.13792L4.57721 12.0897C3.4097 11.1113 2.66732 9.64232 2.66732 7.99992C2.66732 5.0544 5.05513 2.66659 8.00065 2.66659C10.9462 2.66659 13.334 5.0544 13.334 7.99992C13.334 8.60085 13.2346 9.17852 13.0514 9.71752ZM5.72683 12.8257L10.0004 8.55212L12.4259 10.9777C11.4668 12.4001 9.84152 13.3331 8.00038 13.3331C7.18632 13.3331 6.41628 13.1511 5.72683 12.8257ZM8.00065 14.6666C11.6825 14.6666 14.6673 11.6818 14.6673 7.99992C14.6673 4.31802 11.6825 1.33325 8.00065 1.33325C4.31875 1.33325 1.33398 4.31802 1.33398 7.99992C1.33398 11.6818 4.31875 14.6666 8.00065 14.6666ZM7.33398 6.66658C7.33398 7.40299 6.73705 7.99992 6.00065 7.99992C5.26427 7.99992 4.66732 7.40299 4.66732 6.66658C4.66732 5.9302 5.26427 5.33325 6.00065 5.33325C6.73705 5.33325 7.33398 5.9302 7.33398 6.66658Z" fill="#344054"/> <path d="M13.0514 9.71752L10.4718 7.13792C10.2115 6.87752 9.78932 6.87752 9.52898 7.13792L4.57721 12.0897C3.4097 11.1113 2.66732 9.64232 2.66732 7.99992C2.66732 5.0544 5.05513 2.66659 8.00065 2.66659C10.9462 2.66659 13.334 5.0544 13.334 7.99992C13.334 8.60085 13.2346 9.17852 13.0514 9.71752ZM5.72683 12.8257L10.0004 8.55212L12.4259 10.9777C11.4668 12.4001 9.84152 13.3331 8.00038 13.3331C7.18632 13.3331 6.41628 13.1511 5.72683 12.8257ZM8.00065 14.6666C11.6825 14.6666 14.6673 11.6818 14.6673 7.99992C14.6673 4.31802 11.6825 1.33325 8.00065 1.33325C4.31875 1.33325 1.33398 4.31802 1.33398 7.99992C1.33398 11.6818 4.31875 14.6666 8.00065 14.6666ZM7.33398 6.66658C7.33398 7.40299 6.73705 7.99992 6.00065 7.99992C5.26427 7.99992 4.66732 7.40299 4.66732 6.66658C4.66732 5.9302 5.26427 5.33325 6.00065 5.33325C6.73705 5.33325 7.33398 5.9302 7.33398 6.66658Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.VIDEOS: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.VIDEOS: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M8.00065 13.3333H13.334V14.6666H8.00065C4.31875 14.6666 1.33398 11.6818 1.33398 7.99992C1.33398 4.31802 4.31875 1.33325 8.00065 1.33325C11.6825 1.33325 14.6673 4.31802 14.6673 7.99992C14.6673 9.50072 14.1714 10.8857 13.3345 11.9999H11.5284C12.6356 11.0227 13.334 9.59285 13.334 7.99992C13.334 5.0544 10.9462 2.66659 8.00065 2.66659C5.05513 2.66659 2.66732 5.0544 2.66732 7.99992C2.66732 10.9455 5.05513 13.3333 8.00065 13.3333ZM8.00065 6.66658C7.26425 6.66658 6.66732 6.06963 6.66732 5.33325C6.66732 4.59687 7.26425 3.99992 8.00065 3.99992C8.73705 3.99992 9.33398 4.59687 9.33398 5.33325C9.33398 6.06963 8.73705 6.66658 8.00065 6.66658ZM5.33398 9.33325C4.5976 9.33325 4.00065 8.73632 4.00065 7.99992C4.00065 7.26352 4.5976 6.66658 5.33398 6.66658C6.07036 6.66658 6.66732 7.26352 6.66732 7.99992C6.66732 8.73632 6.07036 9.33325 5.33398 9.33325ZM10.6673 9.33325C9.93092 9.33325 9.33398 8.73632 9.33398 7.99992C9.33398 7.26352 9.93092 6.66658 10.6673 6.66658C11.4037 6.66658 12.0007 7.26352 12.0007 7.99992C12.0007 8.73632 11.4037 9.33325 10.6673 9.33325ZM8.00065 11.9999C7.26425 11.9999 6.66732 11.403 6.66732 10.6666C6.66732 9.93018 7.26425 9.33325 8.00065 9.33325C8.73705 9.33325 9.33398 9.93018 9.33398 10.6666C9.33398 11.403 8.73705 11.9999 8.00065 11.9999Z" fill="#344054"/> <path d="M8.00065 13.3333H13.334V14.6666H8.00065C4.31875 14.6666 1.33398 11.6818 1.33398 7.99992C1.33398 4.31802 4.31875 1.33325 8.00065 1.33325C11.6825 1.33325 14.6673 4.31802 14.6673 7.99992C14.6673 9.50072 14.1714 10.8857 13.3345 11.9999H11.5284C12.6356 11.0227 13.334 9.59285 13.334 7.99992C13.334 5.0544 10.9462 2.66659 8.00065 2.66659C5.05513 2.66659 2.66732 5.0544 2.66732 7.99992C2.66732 10.9455 5.05513 13.3333 8.00065 13.3333ZM8.00065 6.66658C7.26425 6.66658 6.66732 6.06963 6.66732 5.33325C6.66732 4.59687 7.26425 3.99992 8.00065 3.99992C8.73705 3.99992 9.33398 4.59687 9.33398 5.33325C9.33398 6.06963 8.73705 6.66658 8.00065 6.66658ZM5.33398 9.33325C4.5976 9.33325 4.00065 8.73632 4.00065 7.99992C4.00065 7.26352 4.5976 6.66658 5.33398 6.66658C6.07036 6.66658 6.66732 7.26352 6.66732 7.99992C6.66732 8.73632 6.07036 9.33325 5.33398 9.33325ZM10.6673 9.33325C9.93092 9.33325 9.33398 8.73632 9.33398 7.99992C9.33398 7.26352 9.93092 6.66658 10.6673 6.66658C11.4037 6.66658 12.0007 7.26352 12.0007 7.99992C12.0007 8.73632 11.4037 9.33325 10.6673 9.33325ZM8.00065 11.9999C7.26425 11.9999 6.66732 11.403 6.66732 10.6666C6.66732 9.93018 7.26425 9.33325 8.00065 9.33325C8.73705 9.33325 9.33398 9.93018 9.33398 10.6666C9.33398 11.403 8.73705 11.9999 8.00065 11.9999Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.WEATHER: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.WEATHER: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M6.6553 3.37344C7.42088 2.1484 8.78162 1.3335 10.3327 1.3335C12.7259 1.3335 14.666 3.2736 14.666 5.66683C14.666 6.38704 14.4903 7.06623 14.1794 7.66383C14.8894 8.3325 15.3327 9.28123 15.3327 10.3335C15.3327 12.3586 13.6911 14.0002 11.666 14.0002H5.99935C3.05383 14.0002 0.666016 11.6124 0.666016 8.66683C0.666016 5.72131 3.05383 3.3335 5.99935 3.3335C6.22143 3.3335 6.44034 3.34707 6.6553 3.37344ZM8.03628 3.73629C9.37768 4.29108 10.4435 5.37735 10.9711 6.73256C11.1961 6.68943 11.4284 6.66683 11.666 6.66683C12.1561 6.66683 12.6237 6.76296 13.0511 6.93743C13.2317 6.55162 13.3327 6.12102 13.3327 5.66683C13.3327 4.00998 11.9895 2.66683 10.3327 2.66683C9.41115 2.66683 8.58662 3.08236 8.03628 3.73629ZM11.666 12.6668C12.9547 12.6668 13.9993 11.6222 13.9993 10.3335C13.9993 9.04483 12.9547 8.00016 11.666 8.00016C11.013 8.00016 10.4227 8.26836 9.99922 8.70063C9.99928 8.68936 9.99935 8.6781 9.99935 8.66683C9.99935 6.45769 8.20848 4.66683 5.99935 4.66683C3.79021 4.66683 1.99935 6.45769 1.99935 8.66683C1.99935 10.876 3.79021 12.6668 5.99935 12.6668H11.666Z" fill="#344054"/> <path d="M6.6553 3.37344C7.42088 2.1484 8.78162 1.3335 10.3327 1.3335C12.7259 1.3335 14.666 3.2736 14.666 5.66683C14.666 6.38704 14.4903 7.06623 14.1794 7.66383C14.8894 8.3325 15.3327 9.28123 15.3327 10.3335C15.3327 12.3586 13.6911 14.0002 11.666 14.0002H5.99935C3.05383 14.0002 0.666016 11.6124 0.666016 8.66683C0.666016 5.72131 3.05383 3.3335 5.99935 3.3335C6.22143 3.3335 6.44034 3.34707 6.6553 3.37344ZM8.03628 3.73629C9.37768 4.29108 10.4435 5.37735 10.9711 6.73256C11.1961 6.68943 11.4284 6.66683 11.666 6.66683C12.1561 6.66683 12.6237 6.76296 13.0511 6.93743C13.2317 6.55162 13.3327 6.12102 13.3327 5.66683C13.3327 4.00998 11.9895 2.66683 10.3327 2.66683C9.41115 2.66683 8.58662 3.08236 8.03628 3.73629ZM11.666 12.6668C12.9547 12.6668 13.9993 11.6222 13.9993 10.3335C13.9993 9.04483 12.9547 8.00016 11.666 8.00016C11.013 8.00016 10.4227 8.26836 9.99922 8.70063C9.99928 8.68936 9.99935 8.6781 9.99935 8.66683C9.99935 6.45769 8.20848 4.66683 5.99935 4.66683C3.79021 4.66683 1.99935 6.45769 1.99935 8.66683C1.99935 10.876 3.79021 12.6668 5.99935 12.6668H11.666Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.FINANCE: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.FINANCE: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M8.00262 14.6685C4.32071 14.6685 1.33594 11.6838 1.33594 8.00184C1.33594 4.31997 4.32071 1.33521 8.00262 1.33521C11.6845 1.33521 14.6693 4.31997 14.6693 8.00184C14.6693 11.6838 11.6845 14.6685 8.00262 14.6685ZM8.00262 13.3352C10.9482 13.3352 13.336 10.9474 13.336 8.00184C13.336 5.05635 10.9482 2.66854 8.00262 2.66854C5.05708 2.66854 2.66927 5.05635 2.66927 8.00184C2.66927 10.9474 5.05708 13.3352 8.00262 13.3352ZM5.66927 9.33517H9.33595C9.52002 9.33517 9.66928 9.18597 9.66928 9.00184C9.66928 8.81777 9.52002 8.66851 9.33595 8.66851H6.66928C5.7488 8.66851 5.0026 7.92237 5.0026 7.00184C5.0026 6.08139 5.7488 5.33521 6.66928 5.33521H7.33595V4.00187H8.66928V5.33521H10.336V6.66851H6.66928C6.48518 6.66851 6.33594 6.81777 6.33594 7.00184C6.33594 7.18597 6.48518 7.33517 6.66928 7.33517H9.33595C10.2564 7.33517 11.0026 8.08137 11.0026 9.00184C11.0026 9.92237 10.2564 10.6685 9.33595 10.6685H8.66928V12.0018H7.33595V10.6685H5.66927V9.33517Z" fill="#344054"/> <path d="M8.00262 14.6685C4.32071 14.6685 1.33594 11.6838 1.33594 8.00184C1.33594 4.31997 4.32071 1.33521 8.00262 1.33521C11.6845 1.33521 14.6693 4.31997 14.6693 8.00184C14.6693 11.6838 11.6845 14.6685 8.00262 14.6685ZM8.00262 13.3352C10.9482 13.3352 13.336 10.9474 13.336 8.00184C13.336 5.05635 10.9482 2.66854 8.00262 2.66854C5.05708 2.66854 2.66927 5.05635 2.66927 8.00184C2.66927 10.9474 5.05708 13.3352 8.00262 13.3352ZM5.66927 9.33517H9.33595C9.52002 9.33517 9.66928 9.18597 9.66928 9.00184C9.66928 8.81777 9.52002 8.66851 9.33595 8.66851H6.66928C5.7488 8.66851 5.0026 7.92237 5.0026 7.00184C5.0026 6.08139 5.7488 5.33521 6.66928 5.33521H7.33595V4.00187H8.66928V5.33521H10.336V6.66851H6.66928C6.48518 6.66851 6.33594 6.81777 6.33594 7.00184C6.33594 7.18597 6.48518 7.33517 6.66928 7.33517H9.33595C10.2564 7.33517 11.0026 8.08137 11.0026 9.00184C11.0026 9.92237 10.2564 10.6685 9.33595 10.6685H8.66928V12.0018H7.33595V10.6685H5.66927V9.33517Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.DESIGN: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.DESIGN: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M4.70152 9.41416L3.2873 10.8284L5.17292 12.714L12.7154 5.17154L10.8298 3.28592L9.41557 4.70013L10.3584 5.64295L9.41557 6.58575L8.47277 5.64295L7.52997 6.58575L8.47277 7.52856L7.52997 8.47136L6.58713 7.52856L5.64433 8.47136L6.58713 9.41416L5.64433 10.357L4.70152 9.41416ZM11.3012 1.87171L14.1296 4.70013C14.39 4.96049 14.39 5.38259 14.1296 5.64295L5.64433 14.1282C5.38397 14.3886 4.96187 14.3886 4.70152 14.1282L1.87309 11.2998C1.61274 11.0394 1.61274 10.6174 1.87309 10.357L10.3584 1.87171C10.6187 1.61136 11.0408 1.61136 11.3012 1.87171ZM9.41557 12.2423L10.3584 11.2995L11.8534 12.7945H12.7962V11.8517L11.3012 10.3567L12.244 9.41383L14.0011 11.171V13.9999H11.1732L9.41557 12.2423ZM3.75861 6.58533L1.87299 4.69971C1.61265 4.43937 1.61265 4.01725 1.87299 3.75691L3.75861 1.87129C4.01896 1.61094 4.44107 1.61094 4.70142 1.87129L6.58704 3.75691L5.64423 4.69971L4.23002 3.2855L3.28721 4.22831L4.70142 5.64253L3.75861 6.58533Z" fill="#344054"/> <path d="M4.70152 9.41416L3.2873 10.8284L5.17292 12.714L12.7154 5.17154L10.8298 3.28592L9.41557 4.70013L10.3584 5.64295L9.41557 6.58575L8.47277 5.64295L7.52997 6.58575L8.47277 7.52856L7.52997 8.47136L6.58713 7.52856L5.64433 8.47136L6.58713 9.41416L5.64433 10.357L4.70152 9.41416ZM11.3012 1.87171L14.1296 4.70013C14.39 4.96049 14.39 5.38259 14.1296 5.64295L5.64433 14.1282C5.38397 14.3886 4.96187 14.3886 4.70152 14.1282L1.87309 11.2998C1.61274 11.0394 1.61274 10.6174 1.87309 10.357L10.3584 1.87171C10.6187 1.61136 11.0408 1.61136 11.3012 1.87171ZM9.41557 12.2423L10.3584 11.2995L11.8534 12.7945H12.7962V11.8517L11.3012 10.3567L12.244 9.41383L14.0011 11.171V13.9999H11.1732L9.41557 12.2423ZM3.75861 6.58533L1.87299 4.69971C1.61265 4.43937 1.61265 4.01725 1.87299 3.75691L3.75861 1.87129C4.01896 1.61094 4.44107 1.61094 4.70142 1.87129L6.58704 3.75691L5.64423 4.69971L4.23002 3.2855L3.28721 4.22831L4.70142 5.64253L3.75861 6.58533Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.TRAVEL: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.TRAVEL: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M9.44839 2C9.80198 2 10.1411 2.14047 10.3912 2.39053L13.6101 5.60947C13.8602 5.85953 14.0007 6.19866 14.0007 6.55229V11.3333H15.334V12.6667L9.91652 12.6672C9.62032 13.8171 8.57638 14.6667 7.33398 14.6667C6.0916 14.6667 5.04766 13.8171 4.75146 12.6672L2.00065 12.6667C1.63246 12.6667 1.33398 12.3682 1.33398 12V3.33333C1.33398 2.59695 1.93094 2 2.66732 2H9.44839ZM7.33398 10.6667C6.5976 10.6667 6.00065 11.2636 6.00065 12C6.00065 12.7364 6.5976 13.3333 7.33398 13.3333C8.07038 13.3333 8.66732 12.7364 8.66732 12C8.66732 11.2636 8.07038 10.6667 7.33398 10.6667ZM9.44839 3.33333H2.66732V11.3333L4.75128 11.3335C5.04726 10.1833 6.09136 9.33333 7.33398 9.33333C8.57658 9.33333 9.62072 10.1833 9.91665 11.3335L12.6673 11.3333V6.55229L9.44839 3.33333ZM9.33398 4.66667V8.66667H4.00065V4.66667H9.33398ZM8.00065 6H5.33398V7.33333H8.00065V6Z" fill="#344054"/> <path d="M9.44839 2C9.80198 2 10.1411 2.14047 10.3912 2.39053L13.6101 5.60947C13.8602 5.85953 14.0007 6.19866 14.0007 6.55229V11.3333H15.334V12.6667L9.91652 12.6672C9.62032 13.8171 8.57638 14.6667 7.33398 14.6667C6.0916 14.6667 5.04766 13.8171 4.75146 12.6672L2.00065 12.6667C1.63246 12.6667 1.33398 12.3682 1.33398 12V3.33333C1.33398 2.59695 1.93094 2 2.66732 2H9.44839ZM7.33398 10.6667C6.5976 10.6667 6.00065 11.2636 6.00065 12C6.00065 12.7364 6.5976 13.3333 7.33398 13.3333C8.07038 13.3333 8.66732 12.7364 8.66732 12C8.66732 11.2636 8.07038 10.6667 7.33398 10.6667ZM9.44839 3.33333H2.66732V11.3333L4.75128 11.3335C5.04726 10.1833 6.09136 9.33333 7.33398 9.33333C8.57658 9.33333 9.62072 10.1833 9.91665 11.3335L12.6673 11.3333V6.55229L9.44839 3.33333ZM9.33398 4.66667V8.66667H4.00065V4.66667H9.33398ZM8.00065 6H5.33398V7.33333H8.00065V6Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.SOCIAL: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.SOCIAL: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M13.334 7.99992C13.334 5.0544 10.9462 2.66659 8.00065 2.66659C5.05513 2.66659 2.66732 5.0544 2.66732 7.99992C2.66732 10.9455 5.05513 13.3333 8.00065 13.3333C9.09518 13.3333 10.1127 13.0035 10.9594 12.438L11.699 13.5475C10.6408 14.2545 9.36885 14.6666 8.00065 14.6666C4.31875 14.6666 1.33398 11.6818 1.33398 7.99992C1.33398 4.31802 4.31875 1.33325 8.00065 1.33325C11.6825 1.33325 14.6673 4.31802 14.6673 7.99992V8.99992C14.6673 10.2886 13.6227 11.3333 12.334 11.3333C11.5312 11.3333 10.8231 10.9278 10.4032 10.3105C9.79678 10.9409 8.94452 11.3333 8.00065 11.3333C6.1597 11.3333 4.66732 9.84085 4.66732 7.99992C4.66732 6.15897 6.1597 4.66658 8.00065 4.66658C8.75118 4.66658 9.44378 4.91464 10.001 5.33325H11.334V8.99992C11.334 9.55219 11.7817 9.99992 12.334 9.99992C12.8863 9.99992 13.334 9.55219 13.334 8.99992V7.99992ZM8.00065 5.99992C6.89605 5.99992 6.00065 6.89532 6.00065 7.99992C6.00065 9.10452 6.89605 9.99992 8.00065 9.99992C9.10525 9.99992 10.0007 9.10452 10.0007 7.99992C10.0007 6.89532 9.10525 5.99992 8.00065 5.99992Z" fill="#344054"/> <path d="M13.334 7.99992C13.334 5.0544 10.9462 2.66659 8.00065 2.66659C5.05513 2.66659 2.66732 5.0544 2.66732 7.99992C2.66732 10.9455 5.05513 13.3333 8.00065 13.3333C9.09518 13.3333 10.1127 13.0035 10.9594 12.438L11.699 13.5475C10.6408 14.2545 9.36885 14.6666 8.00065 14.6666C4.31875 14.6666 1.33398 11.6818 1.33398 7.99992C1.33398 4.31802 4.31875 1.33325 8.00065 1.33325C11.6825 1.33325 14.6673 4.31802 14.6673 7.99992V8.99992C14.6673 10.2886 13.6227 11.3333 12.334 11.3333C11.5312 11.3333 10.8231 10.9278 10.4032 10.3105C9.79678 10.9409 8.94452 11.3333 8.00065 11.3333C6.1597 11.3333 4.66732 9.84085 4.66732 7.99992C4.66732 6.15897 6.1597 4.66658 8.00065 4.66658C8.75118 4.66658 9.44378 4.91464 10.001 5.33325H11.334V8.99992C11.334 9.55219 11.7817 9.99992 12.334 9.99992C12.8863 9.99992 13.334 9.55219 13.334 8.99992V7.99992ZM8.00065 5.99992C6.89605 5.99992 6.00065 6.89532 6.00065 7.99992C6.00065 9.10452 6.89605 9.99992 8.00065 9.99992C9.10525 9.99992 10.0007 9.10452 10.0007 7.99992C10.0007 6.89532 9.10525 5.99992 8.00065 5.99992Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.NEWS: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.NEWS: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M10.6673 13.3335V2.66683H2.66732V12.6668C2.66732 13.035 2.9658 13.3335 3.33398 13.3335H10.6673ZM12.6673 14.6668H3.33398C2.22942 14.6668 1.33398 13.7714 1.33398 12.6668V2.00016C1.33398 1.63198 1.63246 1.3335 2.00065 1.3335H11.334C11.7022 1.3335 12.0007 1.63198 12.0007 2.00016V6.66683H14.6673V12.6668C14.6673 13.7714 13.7719 14.6668 12.6673 14.6668ZM12.0007 8.00016V12.6668C12.0007 13.035 12.2991 13.3335 12.6673 13.3335C13.0355 13.3335 13.334 13.035 13.334 12.6668V8.00016H12.0007ZM4.00065 4.00016H8.00065V8.00016H4.00065V4.00016ZM5.33398 5.3335V6.66683H6.66732V5.3335H5.33398ZM4.00065 8.66683H9.33398V10.0002H4.00065V8.66683ZM4.00065 10.6668H9.33398V12.0002H4.00065V10.6668Z" fill="#344054"/> <path d="M10.6673 13.3335V2.66683H2.66732V12.6668C2.66732 13.035 2.9658 13.3335 3.33398 13.3335H10.6673ZM12.6673 14.6668H3.33398C2.22942 14.6668 1.33398 13.7714 1.33398 12.6668V2.00016C1.33398 1.63198 1.63246 1.3335 2.00065 1.3335H11.334C11.7022 1.3335 12.0007 1.63198 12.0007 2.00016V6.66683H14.6673V12.6668C14.6673 13.7714 13.7719 14.6668 12.6673 14.6668ZM12.0007 8.00016V12.6668C12.0007 13.035 12.2991 13.3335 12.6673 13.3335C13.0355 13.3335 13.334 13.035 13.334 12.6668V8.00016H12.0007ZM4.00065 4.00016H8.00065V8.00016H4.00065V4.00016ZM5.33398 5.3335V6.66683H6.66732V5.3335H5.33398ZM4.00065 8.66683H9.33398V10.0002H4.00065V8.66683ZM4.00065 10.6668H9.33398V12.0002H4.00065V10.6668Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.MEDICAL: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.MEDICAL: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M8.79747 1.51186L10.9641 5.26464C11.1482 5.5835 11.0389 5.99122 10.7201 6.17532L9.85373 6.67474L10.5207 7.83001L9.366 8.49668L8.699 7.34141L7.83333 7.84201C7.51447 8.02608 7.10673 7.91681 6.92267 7.59794L5.69747 5.47632C4.32922 5.89145 3.33333 7.16268 3.33333 8.66654C3.33333 9.08348 3.40987 9.48248 3.54965 9.85034C4.06613 9.52254 4.67762 9.33321 5.33333 9.33321C6.45605 9.33321 7.44913 9.88828 8.05313 10.7389L13.1787 7.78014L13.8454 8.93488L8.5932 11.9672C8.64133 12.1927 8.66667 12.4267 8.66667 12.6665C8.66667 12.895 8.64367 13.1181 8.59993 13.3337L14 13.3332V14.6665L2.66703 14.6673C2.2482 14.1101 2 13.4173 2 12.6665C2 11.9951 2.19855 11.3699 2.54014 10.8467C2.19517 10.1964 2 9.45428 2 8.66654C2 6.66968 3.25421 4.96575 5.01785 4.29953L4.75598 3.84519C4.38779 3.20747 4.60629 2.39202 5.24402 2.02382L6.97607 1.02382C7.6138 0.655637 8.42927 0.874138 8.79747 1.51186ZM5.33333 10.6665C4.22877 10.6665 3.33333 11.562 3.33333 12.6665C3.33333 12.9003 3.37343 13.1247 3.44711 13.3331H7.21953C7.29327 13.1247 7.33333 12.9003 7.33333 12.6665C7.33333 11.562 6.4379 10.6665 5.33333 10.6665ZM7.64273 2.17852L5.91068 3.17852L7.744 6.35395L9.47607 5.35395L7.64273 2.17852Z" fill="#344054"/> <path d="M8.79747 1.51186L10.9641 5.26464C11.1482 5.5835 11.0389 5.99122 10.7201 6.17532L9.85373 6.67474L10.5207 7.83001L9.366 8.49668L8.699 7.34141L7.83333 7.84201C7.51447 8.02608 7.10673 7.91681 6.92267 7.59794L5.69747 5.47632C4.32922 5.89145 3.33333 7.16268 3.33333 8.66654C3.33333 9.08348 3.40987 9.48248 3.54965 9.85034C4.06613 9.52254 4.67762 9.33321 5.33333 9.33321C6.45605 9.33321 7.44913 9.88828 8.05313 10.7389L13.1787 7.78014L13.8454 8.93488L8.5932 11.9672C8.64133 12.1927 8.66667 12.4267 8.66667 12.6665C8.66667 12.895 8.64367 13.1181 8.59993 13.3337L14 13.3332V14.6665L2.66703 14.6673C2.2482 14.1101 2 13.4173 2 12.6665C2 11.9951 2.19855 11.3699 2.54014 10.8467C2.19517 10.1964 2 9.45428 2 8.66654C2 6.66968 3.25421 4.96575 5.01785 4.29953L4.75598 3.84519C4.38779 3.20747 4.60629 2.39202 5.24402 2.02382L6.97607 1.02382C7.6138 0.655637 8.42927 0.874138 8.79747 1.51186ZM5.33333 10.6665C4.22877 10.6665 3.33333 11.562 3.33333 12.6665C3.33333 12.9003 3.37343 13.1247 3.44711 13.3331H7.21953C7.29327 13.1247 7.33333 12.9003 7.33333 12.6665C7.33333 11.562 6.4379 10.6665 5.33333 10.6665ZM7.64273 2.17852L5.91068 3.17852L7.744 6.35395L9.47607 5.35395L7.64273 2.17852Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.PRODUCTIVITY: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.PRODUCTIVITY: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M6.64807 11.9999H9.35062C9.43862 11.1989 9.84742 10.5376 10.5111 9.81499C10.5858 9.73365 11.0652 9.23752 11.1221 9.16665C11.6872 8.46199 11.9993 7.58992 11.9993 6.66659C11.9993 4.45745 10.2085 2.66659 7.99935 2.66659C5.79021 2.66659 3.99935 4.45745 3.99935 6.66659C3.99935 7.58945 4.31118 8.46105 4.87576 9.16552C4.93271 9.23659 5.41322 9.73405 5.48704 9.81445C6.15112 10.5375 6.56004 11.1989 6.64807 11.9999ZM9.33268 13.3333H6.66602V13.9999H9.33268V13.3333ZM3.83532 9.99939C3.10365 9.08639 2.66602 7.92759 2.66602 6.66659C2.66602 3.72107 5.05383 1.33325 7.99935 1.33325C10.9449 1.33325 13.3327 3.72107 13.3327 6.66659C13.3327 7.92825 12.8945 9.08759 12.1622 10.0009C11.7487 10.5165 10.666 11.3333 10.666 12.3333V13.9999C10.666 14.7363 10.0691 15.3333 9.33268 15.3333H6.66602C5.92964 15.3333 5.33268 14.7363 5.33268 13.9999V12.3333C5.33268 11.3333 4.24907 10.5157 3.83532 9.99939ZM8.66602 6.66979H10.3327L7.33268 10.6698V8.00312H5.66602L8.66602 3.99992V6.66979Z" fill="#344054"/> <path d="M6.64807 11.9999H9.35062C9.43862 11.1989 9.84742 10.5376 10.5111 9.81499C10.5858 9.73365 11.0652 9.23752 11.1221 9.16665C11.6872 8.46199 11.9993 7.58992 11.9993 6.66659C11.9993 4.45745 10.2085 2.66659 7.99935 2.66659C5.79021 2.66659 3.99935 4.45745 3.99935 6.66659C3.99935 7.58945 4.31118 8.46105 4.87576 9.16552C4.93271 9.23659 5.41322 9.73405 5.48704 9.81445C6.15112 10.5375 6.56004 11.1989 6.64807 11.9999ZM9.33268 13.3333H6.66602V13.9999H9.33268V13.3333ZM3.83532 9.99939C3.10365 9.08639 2.66602 7.92759 2.66602 6.66659C2.66602 3.72107 5.05383 1.33325 7.99935 1.33325C10.9449 1.33325 13.3327 3.72107 13.3327 6.66659C13.3327 7.92825 12.8945 9.08759 12.1622 10.0009C11.7487 10.5165 10.666 11.3333 10.666 12.3333V13.9999C10.666 14.7363 10.0691 15.3333 9.33268 15.3333H6.66602C5.92964 15.3333 5.33268 14.7363 5.33268 13.9999V12.3333C5.33268 11.3333 4.24907 10.5157 3.83532 9.99939ZM8.66602 6.66979H10.3327L7.33268 10.6698V8.00312H5.66602L8.66602 3.99992V6.66979Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.EDUCATION: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.EDUCATION: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M14 2.66683H4.66667C3.93029 2.66683 3.33333 3.26378 3.33333 4.00016C3.33333 4.73654 3.93029 5.3335 4.66667 5.3335H14V14.0002C14 14.3684 13.7015 14.6668 13.3333 14.6668H4.66667C3.19391 14.6668 2 13.4729 2 12.0002V4.00016C2 2.5274 3.19391 1.3335 4.66667 1.3335H13.3333C13.7015 1.3335 14 1.63198 14 2.00016V2.66683ZM3.33333 12.0002C3.33333 12.7366 3.93029 13.3335 4.66667 13.3335H12.6667V6.66683H4.66667C4.18095 6.66683 3.72557 6.53697 3.33333 6.31008V12.0002ZM13.3333 4.66683H4.66667C4.29848 4.66683 4 4.36835 4 4.00016C4 3.63198 4.29848 3.3335 4.66667 3.3335H13.3333V4.66683Z" fill="#344054"/> <path d="M14 2.66683H4.66667C3.93029 2.66683 3.33333 3.26378 3.33333 4.00016C3.33333 4.73654 3.93029 5.3335 4.66667 5.3335H14V14.0002C14 14.3684 13.7015 14.6668 13.3333 14.6668H4.66667C3.19391 14.6668 2 13.4729 2 12.0002V4.00016C2 2.5274 3.19391 1.3335 4.66667 1.3335H13.3333C13.7015 1.3335 14 1.63198 14 2.00016V2.66683ZM3.33333 12.0002C3.33333 12.7366 3.93029 13.3335 4.66667 13.3335H12.6667V6.66683H4.66667C4.18095 6.66683 3.72557 6.53697 3.33333 6.31008V12.0002ZM13.3333 4.66683H4.66667C4.29848 4.66683 4 4.36835 4 4.00016C4 3.63198 4.29848 3.3335 4.66667 3.3335H13.3333V4.66683Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.BUSINESS: """<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" viewBox="0 0 14 14" fill="none"> ToolLabelEnum.BUSINESS: """<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" viewBox="0 0 14 14" fill="none">
<path d="M3.66732 3.33341V1.33341C3.66732 0.965228 3.9658 0.666748 4.33398 0.666748H9.66732C10.0355 0.666748 10.334 0.965228 10.334 1.33341V3.33341H13.0007C13.3689 3.33341 13.6673 3.63189 13.6673 4.00008V13.3334C13.6673 13.7016 13.3689 14.0001 13.0007 14.0001H1.00065C0.632464 14.0001 0.333984 13.7016 0.333984 13.3334V4.00008C0.333984 3.63189 0.632464 3.33341 1.00065 3.33341H3.66732ZM12.334 8.66675H1.66732V12.6667H12.334V8.66675ZM12.334 4.66675H1.66732V7.33341H3.66732V6.00008H5.00065V7.33341H9.00065V6.00008H10.334V7.33341H12.334V4.66675ZM5.00065 2.00008V3.33341H9.00065V2.00008H5.00065Z" fill="#344054"/> <path d="M3.66732 3.33341V1.33341C3.66732 0.965228 3.9658 0.666748 4.33398 0.666748H9.66732C10.0355 0.666748 10.334 0.965228 10.334 1.33341V3.33341H13.0007C13.3689 3.33341 13.6673 3.63189 13.6673 4.00008V13.3334C13.6673 13.7016 13.3689 14.0001 13.0007 14.0001H1.00065C0.632464 14.0001 0.333984 13.7016 0.333984 13.3334V4.00008C0.333984 3.63189 0.632464 3.33341 1.00065 3.33341H3.66732ZM12.334 8.66675H1.66732V12.6667H12.334V8.66675ZM12.334 4.66675H1.66732V7.33341H3.66732V6.00008H5.00065V7.33341H9.00065V6.00008H10.334V7.33341H12.334V4.66675ZM5.00065 2.00008V3.33341H9.00065V2.00008H5.00065Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.ENTERTAINMENT: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.ENTERTAINMENT: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M11.3327 2.66675C13.5418 2.66675 15.3327 4.45761 15.3327 6.66675V9.33342C15.3327 11.5425 13.5418 13.3334 11.3327 13.3334H4.66602C2.45688 13.3334 0.666016 11.5425 0.666016 9.33342V6.66675C0.666016 4.45761 2.45688 2.66675 4.66602 2.66675H11.3327ZM11.3327 4.00008H4.66602C3.23788 4.00008 2.07196 5.12273 2.00262 6.53365L1.99935 6.66675V9.33342C1.99935 10.7615 3.122 11.9275 4.53292 11.9968L4.66602 12.0001H11.3327C12.7608 12.0001 13.9267 10.8774 13.9961 9.46648L13.9993 9.33342V6.66675C13.9993 5.23861 12.8767 4.07269 11.4657 4.00335L11.3327 4.00008ZM6.66602 6.00008V7.33342H7.99935V8.66675H6.66535L6.66602 10.0001H5.33268L5.33202 8.66675H3.99935V7.33342H5.33268V6.00008H6.66602ZM11.9993 8.66675V10.0001H10.666V8.66675H11.9993ZM10.666 6.00008V7.33342H9.33268V6.00008H10.666Z" fill="#344054"/> <path d="M11.3327 2.66675C13.5418 2.66675 15.3327 4.45761 15.3327 6.66675V9.33342C15.3327 11.5425 13.5418 13.3334 11.3327 13.3334H4.66602C2.45688 13.3334 0.666016 11.5425 0.666016 9.33342V6.66675C0.666016 4.45761 2.45688 2.66675 4.66602 2.66675H11.3327ZM11.3327 4.00008H4.66602C3.23788 4.00008 2.07196 5.12273 2.00262 6.53365L1.99935 6.66675V9.33342C1.99935 10.7615 3.122 11.9275 4.53292 11.9968L4.66602 12.0001H11.3327C12.7608 12.0001 13.9267 10.8774 13.9961 9.46648L13.9993 9.33342V6.66675C13.9993 5.23861 12.8767 4.07269 11.4657 4.00335L11.3327 4.00008ZM6.66602 6.00008V7.33342H7.99935V8.66675H6.66535L6.66602 10.0001H5.33268L5.33202 8.66675H3.99935V7.33342H5.33268V6.00008H6.66602ZM11.9993 8.66675V10.0001H10.666V8.66675H11.9993ZM10.666 6.00008V7.33342H9.33268V6.00008H10.666Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.UTILITIES: """<svg xmlns="http://www.w3.org/2000/svg" width="13" height="15" viewBox="0 0 13 15" fill="none"> ToolLabelEnum.UTILITIES: """<svg xmlns="http://www.w3.org/2000/svg" width="13" height="15" viewBox="0 0 13 15" fill="none">
<path d="M12.3346 0.333252C12.7028 0.333252 13.0013 0.631732 13.0013 0.999919V4.33325C13.0013 4.70144 12.7028 4.99992 12.3346 4.99992H9.0013V13.6666C9.0013 14.0348 8.70284 14.3333 8.33463 14.3333H5.66797C5.29978 14.3333 5.0013 14.0348 5.0013 13.6666V4.99992H1.33464C0.966449 4.99992 0.667969 4.70144 0.667969 4.33325V2.74527C0.667969 2.49276 0.810635 2.26192 1.0365 2.14899L4.66797 0.333252H12.3346ZM9.0013 1.66659H4.98273L2.0013 3.1573V3.66659H6.33464V12.9999H7.66797V3.66659H9.0013V1.66659ZM11.668 1.66659H10.3346V3.66659H11.668V1.66659Z" fill="#344054"/> <path d="M12.3346 0.333252C12.7028 0.333252 13.0013 0.631732 13.0013 0.999919V4.33325C13.0013 4.70144 12.7028 4.99992 12.3346 4.99992H9.0013V13.6666C9.0013 14.0348 8.70284 14.3333 8.33463 14.3333H5.66797C5.29978 14.3333 5.0013 14.0348 5.0013 13.6666V4.99992H1.33464C0.966449 4.99992 0.667969 4.70144 0.667969 4.33325V2.74527C0.667969 2.49276 0.810635 2.26192 1.0365 2.14899L4.66797 0.333252H12.3346ZM9.0013 1.66659H4.98273L2.0013 3.1573V3.66659H6.33464V12.9999H7.66797V3.66659H9.0013V1.66659ZM11.668 1.66659H10.3346V3.66659H11.668V1.66659Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
ToolLabelEnum.OTHER: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none"> ToolLabelEnum.OTHER: """<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16" fill="none">
<path d="M8.00052 0.666748L4.00065 7.33342H12.0007L8.00052 0.666748ZM8.00052 3.25828L9.64572 6.00008H6.35553L8.00052 3.25828ZM4.50065 13.3334C3.48813 13.3334 2.66732 12.5126 2.66732 11.5001C2.66732 10.4875 3.48813 9.66675 4.50065 9.66675C5.51317 9.66675 6.33398 10.4875 6.33398 11.5001C6.33398 12.5126 5.51317 13.3334 4.50065 13.3334ZM4.50065 14.6667C6.24955 14.6667 7.66732 13.249 7.66732 11.5001C7.66732 9.75115 6.24955 8.33342 4.50065 8.33342C2.75175 8.33342 1.33398 9.75115 1.33398 11.5001C1.33398 13.249 2.75175 14.6667 4.50065 14.6667ZM10.0007 10.3334V13.0001H12.6673V10.3334H10.0007ZM8.66732 14.3334V9.00008H14.0007V14.3334H8.66732Z" fill="#344054"/> <path d="M8.00052 0.666748L4.00065 7.33342H12.0007L8.00052 0.666748ZM8.00052 3.25828L9.64572 6.00008H6.35553L8.00052 3.25828ZM4.50065 13.3334C3.48813 13.3334 2.66732 12.5126 2.66732 11.5001C2.66732 10.4875 3.48813 9.66675 4.50065 9.66675C5.51317 9.66675 6.33398 10.4875 6.33398 11.5001C6.33398 12.5126 5.51317 13.3334 4.50065 13.3334ZM4.50065 14.6667C6.24955 14.6667 7.66732 13.249 7.66732 11.5001C7.66732 9.75115 6.24955 8.33342 4.50065 8.33342C2.75175 8.33342 1.33398 9.75115 1.33398 11.5001C1.33398 13.249 2.75175 14.6667 4.50065 14.6667ZM10.0007 10.3334V13.0001H12.6673V10.3334H10.0007ZM8.66732 14.3334V9.00008H14.0007V14.3334H8.66732Z" fill="#344054"/>
</svg>""", </svg>""", # noqa: E501
} }
default_tool_label_dict = { default_tool_label_dict = {

View File

@ -46,7 +46,8 @@ class AIPPTGenerateTool(BuiltinTool):
tool_parameters (dict[str, Any]): The parameters for the tool tool_parameters (dict[str, Any]): The parameters for the tool
Returns: Returns:
ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages. ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation,
which can be a single message or a list of messages.
""" """
title = tool_parameters.get("title", "") title = tool_parameters.get("title", "")
if not title: if not title:

View File

@ -104,7 +104,8 @@ class ArxivSearchTool(BuiltinTool):
tool_parameters (dict[str, Any]): The parameters for the tool, including the 'query' parameter. tool_parameters (dict[str, Any]): The parameters for the tool, including the 'query' parameter.
Returns: Returns:
ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages. ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation,
which can be a single message or a list of messages.
""" """
query = tool_parameters.get("query", "") query = tool_parameters.get("query", "")

View File

@ -62,7 +62,8 @@ class ApplyGuardrailTool(BuiltinTool):
if isinstance(policy_data, dict) and "topics" in policy_data: if isinstance(policy_data, dict) and "topics" in policy_data:
for topic in policy_data["topics"]: for topic in policy_data["topics"]:
formatted_assessments.append( formatted_assessments.append(
f"Policy: {policy_type}, Topic: {topic['name']}, Type: {topic['type']}, Action: {topic['action']}" f"Policy: {policy_type}, Topic: {topic['name']}, Type: {topic['type']},"
f" Action: {topic['action']}"
) )
else: else:
formatted_assessments.append(f"Policy: {policy_type}, Data: {policy_data}") formatted_assessments.append(f"Policy: {policy_type}, Data: {policy_data}")

View File

@ -24,7 +24,8 @@ class SearchDevDocsTool(BuiltinTool):
tool_parameters (dict[str, Any]): The parameters for the tool, including 'doc' and 'topic'. tool_parameters (dict[str, Any]): The parameters for the tool, including 'doc' and 'topic'.
Returns: Returns:
ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages. ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation,
which can be a single message or a list of messages.
""" """
doc = tool_parameters.get("doc", "") doc = tool_parameters.get("doc", "")
topic = tool_parameters.get("topic", "") topic = tool_parameters.get("topic", "")

View File

@ -70,7 +70,10 @@ class GitlabFilesTool(BuiltinTool):
) )
else: # It's a file else: # It's a file
if is_repository: if is_repository:
file_url = f"{domain}/api/v4/projects/{encoded_identifier}/repository/files/{item_path}/raw?ref={branch}" file_url = (
f"{domain}/api/v4/projects/{encoded_identifier}/repository/files"
f"/{item_path}/raw?ref={branch}"
)
else: else:
file_url = ( file_url = (
f"{domain}/api/v4/projects/{project_id}/repository/files/{item_path}/raw?ref={branch}" f"{domain}/api/v4/projects/{project_id}/repository/files/{item_path}/raw?ref={branch}"

View File

@ -35,7 +35,8 @@ class GoogleTranslate(BuiltinTool):
params = {"client": "gtx", "sl": "auto", "tl": dest, "dt": "t", "q": content} params = {"client": "gtx", "sl": "auto", "tl": dest, "dt": "t", "q": content}
headers = { headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
" Chrome/91.0.4472.124 Safari/537.36"
} }
response_json = requests.get(url, params=params, headers=headers).json() response_json = requests.get(url, params=params, headers=headers).json()

View File

@ -114,7 +114,8 @@ class GetWorksheetFieldsTool(BuiltinTool):
} }
fields.append(field) fields.append(field)
fields_list.append( fields_list.append(
f"|{field['id']}|{field['name']}|{field['type']}|{field['typeId']}|{field['description']}|{field['options'] if field['options'] else ''}|" f"|{field['id']}|{field['name']}|{field['type']}|{field['typeId']}|{field['description']}"
f"|{field['options'] if field['options'] else ''}|"
) )
fields.append( fields.append(

View File

@ -112,7 +112,10 @@ class ListWorksheetRecordsTool(BuiltinTool):
else: else:
result_text = f"Found {result['total']} rows in worksheet \"{worksheet_name}\"." result_text = f"Found {result['total']} rows in worksheet \"{worksheet_name}\"."
if result["total"] > 0: if result["total"] > 0:
result_text += f" The following are {result['total'] if result['total'] < limit else limit} pieces of data presented in a table format:\n\n{table_header}" result_text += (
f" The following are {result['total'] if result['total'] < limit else limit}"
f" pieces of data presented in a table format:\n\n{table_header}"
)
for row in rows: for row in rows:
result_values = [] result_values = []
for f in fields: for f in fields:

View File

@ -64,7 +64,10 @@ class SearchAPI:
elif type == "link": elif type == "link":
if "answer_box" in res and "organic_result" in res["answer_box"]: if "answer_box" in res and "organic_result" in res["answer_box"]:
if "title" in res["answer_box"]["organic_result"]: if "title" in res["answer_box"]["organic_result"]:
toret = f"[{res['answer_box']['organic_result']['title']}]({res['answer_box']['organic_result']['link']})\n" toret = (
f"[{res['answer_box']['organic_result']['title']}]"
f"({res['answer_box']['organic_result']['link']})\n"
)
elif "organic_results" in res and "link" in res["organic_results"][0]: elif "organic_results" in res and "link" in res["organic_results"][0]:
toret = "" toret = ""
for item in res["organic_results"]: for item in res["organic_results"]:

View File

@ -310,7 +310,8 @@ class StableDiffusionTool(BuiltinTool):
), ),
type=ToolParameter.ToolParameterType.STRING, type=ToolParameter.ToolParameterType.STRING,
form=ToolParameter.ToolParameterForm.LLM, form=ToolParameter.ToolParameterForm.LLM,
llm_description="Image prompt of Stable Diffusion, you should describe the image you want to generate as a list of words as possible as detailed, the prompt must be written in English.", llm_description="Image prompt of Stable Diffusion, you should describe the image you want to generate"
" as a list of words as possible as detailed, the prompt must be written in English.",
required=True, required=True,
), ),
] ]
@ -320,12 +321,14 @@ class StableDiffusionTool(BuiltinTool):
name="image_id", name="image_id",
label=I18nObject(en_US="image_id", zh_Hans="image_id"), label=I18nObject(en_US="image_id", zh_Hans="image_id"),
human_description=I18nObject( human_description=I18nObject(
en_US="Image id of the image you want to generate based on, if you want to generate image based on the default image, you can leave this field empty.", en_US="Image id of the image you want to generate based on, if you want to generate image based"
" on the default image, you can leave this field empty.",
zh_Hans="您想要生成的图像的图像 ID如果您想要基于默认图像生成图像则可以将此字段留空。", zh_Hans="您想要生成的图像的图像 ID如果您想要基于默认图像生成图像则可以将此字段留空。",
), ),
type=ToolParameter.ToolParameterType.STRING, type=ToolParameter.ToolParameterType.STRING,
form=ToolParameter.ToolParameterForm.LLM, form=ToolParameter.ToolParameterForm.LLM,
llm_description="Image id of the original image, you can leave this field empty if you want to generate a new image.", llm_description="Image id of the original image, you can leave this field empty if you want to"
" generate a new image.",
required=True, required=True,
options=[ options=[
ToolParameterOption(value=i.name, label=I18nObject(en_US=i.name, zh_Hans=i.name)) ToolParameterOption(value=i.name, label=I18nObject(en_US=i.name, zh_Hans=i.name))
@ -343,12 +346,14 @@ class StableDiffusionTool(BuiltinTool):
name="model", name="model",
label=I18nObject(en_US="Model", zh_Hans="Model"), label=I18nObject(en_US="Model", zh_Hans="Model"),
human_description=I18nObject( human_description=I18nObject(
en_US="Model of Stable Diffusion, you can check the official documentation of Stable Diffusion", en_US="Model of Stable Diffusion, you can check the official documentation"
" of Stable Diffusion",
zh_Hans="Stable Diffusion 的模型,您可以查看 Stable Diffusion 的官方文档", zh_Hans="Stable Diffusion 的模型,您可以查看 Stable Diffusion 的官方文档",
), ),
type=ToolParameter.ToolParameterType.SELECT, type=ToolParameter.ToolParameterType.SELECT,
form=ToolParameter.ToolParameterForm.FORM, form=ToolParameter.ToolParameterForm.FORM,
llm_description="Model of Stable Diffusion, you can check the official documentation of Stable Diffusion", llm_description="Model of Stable Diffusion, you can check the official documentation"
" of Stable Diffusion",
required=True, required=True,
default=models[0], default=models[0],
options=[ options=[
@ -367,12 +372,14 @@ class StableDiffusionTool(BuiltinTool):
name="sampler_name", name="sampler_name",
label=I18nObject(en_US="Sampling method", zh_Hans="Sampling method"), label=I18nObject(en_US="Sampling method", zh_Hans="Sampling method"),
human_description=I18nObject( human_description=I18nObject(
en_US="Sampling method of Stable Diffusion, you can check the official documentation of Stable Diffusion", en_US="Sampling method of Stable Diffusion, you can check the official documentation"
" of Stable Diffusion",
zh_Hans="Stable Diffusion 的Sampling method您可以查看 Stable Diffusion 的官方文档", zh_Hans="Stable Diffusion 的Sampling method您可以查看 Stable Diffusion 的官方文档",
), ),
type=ToolParameter.ToolParameterType.SELECT, type=ToolParameter.ToolParameterType.SELECT,
form=ToolParameter.ToolParameterForm.FORM, form=ToolParameter.ToolParameterForm.FORM,
llm_description="Sampling method of Stable Diffusion, you can check the official documentation of Stable Diffusion", llm_description="Sampling method of Stable Diffusion, you can check the official documentation"
" of Stable Diffusion",
required=True, required=True,
default=sample_methods[0], default=sample_methods[0],
options=[ options=[

View File

@ -17,7 +17,8 @@ class CreateListOnBoardTool(BuiltinTool):
Args: Args:
user_id (str): The ID of the user invoking the tool. user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID and list name. tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation,
including the board ID and list name.
Returns: Returns:
ToolInvokeMessage: The result of the tool invocation. ToolInvokeMessage: The result of the tool invocation.

View File

@ -17,7 +17,8 @@ class CreateNewCardOnBoardTool(BuiltinTool):
Args: Args:
user_id (str): The ID of the user invoking the tool. user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including details for the new card. tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation,
including details for the new card.
Returns: Returns:
ToolInvokeMessage: The result of the tool invocation. ToolInvokeMessage: The result of the tool invocation.

View File

@ -17,7 +17,8 @@ class DeleteBoardTool(BuiltinTool):
Args: Args:
user_id (str): The ID of the user invoking the tool. user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation,
including the board ID.
Returns: Returns:
ToolInvokeMessage: The result of the tool invocation. ToolInvokeMessage: The result of the tool invocation.

View File

@ -17,7 +17,8 @@ class DeleteCardByIdTool(BuiltinTool):
Args: Args:
user_id (str): The ID of the user invoking the tool. user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the card ID. tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation,
including the card ID.
Returns: Returns:
ToolInvokeMessage: The result of the tool invocation. ToolInvokeMessage: The result of the tool invocation.

View File

@ -17,7 +17,8 @@ class GetBoardActionsTool(BuiltinTool):
Args: Args:
user_id (str): The ID of the user invoking the tool. user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation,
including the board ID.
Returns: Returns:
ToolInvokeMessage: The result of the tool invocation. ToolInvokeMessage: The result of the tool invocation.

View File

@ -17,7 +17,8 @@ class GetBoardByIdTool(BuiltinTool):
Args: Args:
user_id (str): The ID of the user invoking the tool. user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation,
including the board ID.
Returns: Returns:
ToolInvokeMessage: The result of the tool invocation. ToolInvokeMessage: The result of the tool invocation.

View File

@ -17,7 +17,8 @@ class GetBoardCardsTool(BuiltinTool):
Args: Args:
user_id (str): The ID of the user invoking the tool. user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation,
including the board ID.
Returns: Returns:
ToolInvokeMessage: The result of the tool invocation. ToolInvokeMessage: The result of the tool invocation.

View File

@ -17,7 +17,8 @@ class GetFilteredBoardCardsTool(BuiltinTool):
Args: Args:
user_id (str): The ID of the user invoking the tool. user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID and filter. tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation,
including the board ID and filter.
Returns: Returns:
ToolInvokeMessage: The result of the tool invocation. ToolInvokeMessage: The result of the tool invocation.

View File

@ -17,7 +17,8 @@ class GetListsFromBoardTool(BuiltinTool):
Args: Args:
user_id (str): The ID of the user invoking the tool. user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation,
including the board ID.
Returns: Returns:
ToolInvokeMessage: The result of the tool invocation. ToolInvokeMessage: The result of the tool invocation.

View File

@ -17,7 +17,8 @@ class UpdateBoardByIdTool(BuiltinTool):
Args: Args:
user_id (str): The ID of the user invoking the tool. user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including board ID and updates. tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation,
including board ID and updates.
Returns: Returns:
ToolInvokeMessage: The result of the tool invocation. ToolInvokeMessage: The result of the tool invocation.

View File

@ -17,7 +17,8 @@ class UpdateCardByIdTool(BuiltinTool):
Args: Args:
user_id (str): The ID of the user invoking the tool. user_id (str): The ID of the user invoking the tool.
tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including the card ID and updates. tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation,
including the card ID and updates.
Returns: Returns:
ToolInvokeMessage: The result of the tool invocation. ToolInvokeMessage: The result of the tool invocation.

View File

@ -72,7 +72,8 @@ class SendMessageTool(BuiltinTool):
tool_parameters (Dict[str, Any]): The parameters required for sending the message. tool_parameters (Dict[str, Any]): The parameters required for sending the message.
Returns: Returns:
Union[ToolInvokeMessage, List[ToolInvokeMessage]]: The result of invoking the tool, which includes the status of the message sending operation. Union[ToolInvokeMessage, List[ToolInvokeMessage]]: The result of invoking the tool,
which includes the status of the message sending operation.
""" """
def _invoke( def _invoke(

View File

@ -1 +1 @@
VECTORIZER_ICON_PNG = "iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAACXBIWXMAACxLAAAsSwGlPZapAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAboSURBVHgB7Z09bBxFFMffRoAvcQqbguBUxu4wCUikMCZ0TmQK4NLQJCJOlQIkokgEGhQ7NCFIKEhQuIqNnIaGMxRY2GVwmlggDHS+pIHELmIXMTEULPP3eeXz7e7szO7MvE1ufpKV03nuNn7/mfcxH7tEHo/H42lXgqwG1bGw65+/aTQM6K0gpJdCoi7ypCIMui5s9Qv9R1OVTqrVxoL1jPbpvH4hrIp/rnmj5+YOhTQ++1kwmdZgT9ovRi6EF4Xhv/XGL0Sv6OLXYMu0BokjYOSDcBQfJI8xhKFP/HAlqCW8v5vqubBr8yn6maCexxiIDR376LnWmBBzQZtPEvx+L3mMAleOZKb1/XgM2EOnyWMFZJKt78UEQKpJHisk2TYmgM967JFk2z3kYcULwIwXgBkvADNeAGa8AMw8Qcwc6N55/eAh0cYmGaOzQtR/kOhQX+M6+/c23r+3RlT/i2ipTrSyRqw4F+CwMMbgANHQwG7jRywLw/wqDDNzI79xYPjqa2L262jjtYzaT0QT3xEbsck4MXUakgWOvUx08liy0ZPYEKNhel4Y6AZpgR7/8Tvq1wEQ+sMJN6Nh9kqwy+bWYwAM8elZovNv6xmlU7iLs280RNO9ls51os/h/8eBVQEig8Dt5OXUsNrno2tluZw0cI3qUXKONQHy9sYkVHqnjntLA2LnFTAv1gSA+zBhfIDvkfVO/B4xRgWZn4fbe2WAnGJFAAxn03+I7PtUXdzE90Sjl4ne+6L4d5nCigAyYyHPn7tFdPN30uJwX/qI6jtISkQZFVLdhd9SrtNPTrFSB6QZBAaYntsptpAyfvk+KYOCamVR/XrNtLqepduiFnkh3g4iIw6YLAhlOJmKwB9zaarhApr/MPREjAZVisSU1s/KYsGzhmKXClYEWLm/8xpV7btXhcv5I7lt2vtJFA3q/T07r1HopdG5l5xhxQVdn28YFn8kBJCBOZmiPHio1m5QuJzlu9ntXApgZwSsNYJslvGjtjrfm8Sq4neceFUtz3dZCzwW09Gqo2hreuPN7HZRnNqa1BP1x8lhczVNK+zT0TqkjYAF4e7Okxoo2PZX5K4IrhNpb/P8FTK2S1+TcUq1HpBFmquJYo1qEYU6RVarJE0c2ooL7C5IRwBZ5nJ9joyRtk5hA3YBdHqWzG1gBKgE/bzMaK5LqMIugKrbUDHu59/YWVRBsWhrsYZdANV5HBUXYGNlC9dFBW8LdgH6FQVYUnQvkQgm3NH8YuO7bM4LsWZBfT3qRY9OxRyJgJRz+Ij+FDPEQ1C3GVMiWAVQ7f31u/ncytxi4wdZTbRGgdcHnpYLD/FcwSrAoOKizfKfVAiIF4kBMPK+Opfe1iWsMUB1BJh2BRgBabSNAOiFqkXYbcNFUF9P+u82FGdWTcEmgGrvh0FUppB1kC073muXEaDq/21kIjLxV9tFAC7/n5X6tkUM0PH/dcP+P0v41fvkFBYBVHs/MD0CDmVsOzEdb7JgEYDT/8uq4rpj44NSjwDTc/CyzV1gxbH7Ac4F0PH/S4ZHAOaFZLiY+2nFuQA6/t9kQMTCz1CG66tbWvWS4VwAVf9vugAbel6efqrsYbKBcwFeVNz8ajobyTppw2F84FQAnfl/kwER6wJZcWdBc7e2KZwKoOP/TVakWb0f7md+kVhwOwI0BDCFyq42rt4PSiuAiRGAEXdK4ZQlV+8HTgVwefwHvR7nhbOA0FwBGDgTIM/Z3SLXUj2hOW1wR10eSrs7Ou9eTB3jo/dzuh/gTABdn35c8dhpM3BxOmeTuXs/cDoCdDY4qe7l32pbaZxL1jF+GXo/cLotBcWVTiZU3T7RMn8rHiijW9FgauP4Ef1TLdhHWgacCgAj6tYCqGKjU/DNbqxIkMYZNs7MpxmnLuhmwYJna1dbdzHjY42hDL4/wqkA6HWuDkAngRH0iYVjRkVwnoZO/0gsuLwpkw7OBcAtwlwvfESHxctmfMBSiOG0oStj4HCF7T3+RWARwIU7QK/HbWlqls52mYJtezqMj3v34C5VOveFy8Ll4QoTsJ8Txp0RsW8/Os2im2LCtSC1RIqLw3RldTVplOKkPEYDhMAPqttnune2rzTv5Y+WKdEem2ixkWqZYSeDSUp3qwIYNOrR7cBjcbOORxkvADNeAGa8AMx4AZjxAjATf5Ab0Tp5rJBk2/iD3PAwYo8Vkmyb9CjDGfLYIaCp1rdiAnT8S5PeDVkgoDuVCsWeJxwToHZ163m3Z8hjloDGk54vn5gFbT/5eZw8phifvZz8XPlA9qmRj8JRCumi+OkljzbbrvxM0qPMm9rIqY6FXZubVBUinMbzcP3jbuXA6Mh2kMx07KPJJLfj8Xg8Hg/4H+KfFYb2WM4MAAAAAElFTkSuQmCC" VECTORIZER_ICON_PNG = 
"iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAACXBIWXMAACxLAAAsSwGlPZapAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAboSURBVHgB7Z09bBxFFMffRoAvcQqbguBUxu4wCUikMCZ0TmQK4NLQJCJOlQIkokgEGhQ7NCFIKEhQuIqNnIaGMxRY2GVwmlggDHS+pIHELmIXMTEULPP3eeXz7e7szO7MvE1ufpKV03nuNn7/mfcxH7tEHo/H42lXgqwG1bGw65+/aTQM6K0gpJdCoi7ypCIMui5s9Qv9R1OVTqrVxoL1jPbpvH4hrIp/rnmj5+YOhTQ++1kwmdZgT9ovRi6EF4Xhv/XGL0Sv6OLXYMu0BokjYOSDcBQfJI8xhKFP/HAlqCW8v5vqubBr8yn6maCexxiIDR376LnWmBBzQZtPEvx+L3mMAleOZKb1/XgM2EOnyWMFZJKt78UEQKpJHisk2TYmgM967JFk2z3kYcULwIwXgBkvADNeAGa8AMw8Qcwc6N55/eAh0cYmGaOzQtR/kOhQX+M6+/c23r+3RlT/i2ipTrSyRqw4F+CwMMbgANHQwG7jRywLw/wqDDNzI79xYPjqa2L262jjtYzaT0QT3xEbsck4MXUakgWOvUx08liy0ZPYEKNhel4Y6AZpgR7/8Tvq1wEQ+sMJN6Nh9kqwy+bWYwAM8elZovNv6xmlU7iLs280RNO9ls51os/h/8eBVQEig8Dt5OXUsNrno2tluZw0cI3qUXKONQHy9sYkVHqnjntLA2LnFTAv1gSA+zBhfIDvkfVO/B4xRgWZn4fbe2WAnGJFAAxn03+I7PtUXdzE90Sjl4ne+6L4d5nCigAyYyHPn7tFdPN30uJwX/qI6jtISkQZFVLdhd9SrtNPTrFSB6QZBAaYntsptpAyfvk+KYOCamVR/XrNtLqepduiFnkh3g4iIw6YLAhlOJmKwB9zaarhApr/MPREjAZVisSU1s/KYsGzhmKXClYEWLm/8xpV7btXhcv5I7lt2vtJFA3q/T07r1HopdG5l5xhxQVdn28YFn8kBJCBOZmiPHio1m5QuJzlu9ntXApgZwSsNYJslvGjtjrfm8Sq4neceFUtz3dZCzwW09Gqo2hreuPN7HZRnNqa1BP1x8lhczVNK+zT0TqkjYAF4e7Okxoo2PZX5K4IrhNpb/P8FTK2S1+TcUq1HpBFmquJYo1qEYU6RVarJE0c2ooL7C5IRwBZ5nJ9joyRtk5hA3YBdHqWzG1gBKgE/bzMaK5LqMIugKrbUDHu59/YWVRBsWhrsYZdANV5HBUXYGNlC9dFBW8LdgH6FQVYUnQvkQgm3NH8YuO7bM4LsWZBfT3qRY9OxRyJgJRz+Ij+FDPEQ1C3GVMiWAVQ7f31u/ncytxi4wdZTbRGgdcHnpYLD/FcwSrAoOKizfKfVAiIF4kBMPK+Opfe1iWsMUB1BJh2BRgBabSNAOiFqkXYbcNFUF9P+u82FGdWTcEmgGrvh0FUppB1kC073muXEaDq/21kIjLxV9tFAC7/n5X6tkUM0PH/dcP+P0v41fvkFBYBVHs/MD0CDmVsOzEdb7JgEYDT/8uq4rpj44NSjwDTc/CyzV1gxbH7Ac4F0PH/S4ZHAOaFZLiY+2nFuQA6/t9kQMTCz1CG66tbWvWS4VwAVf9vugAbel6efqrsYbKBcwFeVNz8ajobyTppw2F84FQAnfl/kwER6wJZcWdBc7e2KZwKoOP/TVakWb0f7md+kVhwOwI0BDCFyq42rt4PSiuAiRGAEXdK4ZQlV+8HTgVwefwHvR7nhbOA0FwBGDgTIM/Z3SLXUj2hOW1wR10eSrs7Ou9eTB3jo/dzuh/gTABdn35c8dhpM3BxOmeTuXs/cDoCdDY4qe7l32pbaZxL1jF+GXo/cLotBcWVTiZU3T7RMn8rHiijW9FgauP4Ef1TLdhHWgacCgAj6tYCqGKjU/DNbqxIkMYZNs7MpxmnLuhmwYJna1dbdzHjY42hDL4/wqkA6HWuDkAngRH0iYVjRkVwnoZO/0gsuLwpkw7OBcAtwlwvfESHxctmfMBSiOG0oStj4HCF7T3+RWARwIU7QK/HbWlqls52mYJtezqMj3v34C5VOveFy8Ll4QoTsJ8Txp0RsW8/Os2im2LCtSC1RIqLw3RldTVplOKkPEYDhMAPqttnune2rzTv5Y+WKdEem2ixkWqZYSeDSUp3qwIYNOrR7cBjcbOORxkvADNeAGa8AMx4AZjxAjATf5Ab0Tp5rJBk2/iD3PAwYo8Vkmyb9CjDGfLYIaCp1rdiAnT8S5PeDVkgoDuVCsWeJxwToHZ163m3Z8hjloDGk54vn5gFbT/5eZw8phifvZz8XPlA9qmRj8JRCumi+OkljzbbrvxM0qPMm9rIqY6FXZubVBUinMbzcP3jbuXA6Mh2kMx07KPJJLfj8Xg8Hg/4H+KfFYb2WM4MAAAAAElFTkSuQmCC" # noqa: E501

View File

@ -193,7 +193,10 @@ class ToolEngine:
response.type == ToolInvokeMessage.MessageType.IMAGE_LINK response.type == ToolInvokeMessage.MessageType.IMAGE_LINK
or response.type == ToolInvokeMessage.MessageType.IMAGE or response.type == ToolInvokeMessage.MessageType.IMAGE
): ):
result += "image has been created and sent to user already, you do not need to create it, just tell the user to check it now." result += (
"image has been created and sent to user already, you do not need to create it,"
" just tell the user to check it now."
)
elif response.type == ToolInvokeMessage.MessageType.JSON: elif response.type == ToolInvokeMessage.MessageType.JSON:
result += f"tool response: {json.dumps(response.message, ensure_ascii=False)}." result += f"tool response: {json.dumps(response.message, ensure_ascii=False)}."
else: else:

View File

@ -89,7 +89,7 @@ class FeishuRequest:
"content": "云文档\n多人实时协同,插入一切元素。不仅是在线文档,更是强大的创作和互动工具\n云文档:专为协作而生\n" "content": "云文档\n多人实时协同,插入一切元素。不仅是在线文档,更是强大的创作和互动工具\n云文档:专为协作而生\n"
} }
} }
""" """ # noqa: E501
params = { params = {
"document_id": document_id, "document_id": document_id,
} }

View File

@ -43,7 +43,7 @@ class ToolFileMessageTransformer:
result.append( result.append(
ToolInvokeMessage( ToolInvokeMessage(
type=ToolInvokeMessage.MessageType.TEXT, type=ToolInvokeMessage.MessageType.TEXT,
message=f"Failed to download image: {message.message}, you can try to download it yourself.", message=f"Failed to download image: {message.message}, please try to download it manually.",
meta=message.meta.copy() if message.meta is not None else {}, meta=message.meta.copy() if message.meta is not None else {},
save_as=message.save_as, save_as=message.save_as,
) )

View File

@ -315,7 +315,8 @@ class ApiBasedToolSchemaParser:
yaml_error = e yaml_error = e
if loaded_content is None: if loaded_content is None:
raise ToolApiSchemaError( raise ToolApiSchemaError(
f"Invalid api schema, schema is neither json nor yaml. json error: {str(json_error)}, yaml error: {str(yaml_error)}" f"Invalid api schema, schema is neither json nor yaml. json error: {str(json_error)},"
f" yaml error: {str(yaml_error)}"
) )
swagger_error = None swagger_error = None
@ -355,5 +356,6 @@ class ApiBasedToolSchemaParser:
openapi_plugin_error = e openapi_plugin_error = e
raise ToolApiSchemaError( raise ToolApiSchemaError(
f"Invalid api schema, openapi error: {str(openapi_error)}, swagger error: {str(swagger_error)}, openapi plugin error: {str(openapi_plugin_error)}" f"Invalid api schema, openapi error: {str(openapi_error)}, swagger error: {str(swagger_error)},"
f" openapi plugin error: {str(openapi_plugin_error)}"
) )

View File

@ -38,7 +38,8 @@ def page_result(text: str, cursor: int, max_length: int) -> str:
def get_url(url: str, user_agent: str = None) -> str: def get_url(url: str, user_agent: str = None) -> str:
"""Fetch URL and return the contents as a string.""" """Fetch URL and return the contents as a string."""
headers = { headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
" Chrome/91.0.4472.124 Safari/537.36"
} }
if user_agent: if user_agent:
headers["User-Agent"] = user_agent headers["User-Agent"] = user_agent

View File

@ -179,7 +179,8 @@ class CodeNode(BaseNode):
) )
else: else:
raise ValueError( raise ValueError(
f"Output {prefix}.{output_name} is not a valid array. make sure all elements are of the same type." f"Output {prefix}.{output_name} is not a valid array."
f" make sure all elements are of the same type."
) )
elif isinstance(output_value, type(None)): elif isinstance(output_value, type(None)):
pass pass
@ -201,7 +202,8 @@ class CodeNode(BaseNode):
transformed_result[output_name] = None transformed_result[output_name] = None
else: else:
raise ValueError( raise ValueError(
f"Output {prefix}{dot}{output_name} is not an object, got {type(result.get(output_name))} instead." f"Output {prefix}{dot}{output_name} is not an object,"
f" got {type(result.get(output_name))} instead."
) )
else: else:
transformed_result[output_name] = self._transform_result( transformed_result[output_name] = self._transform_result(
@ -228,7 +230,8 @@ class CodeNode(BaseNode):
transformed_result[output_name] = None transformed_result[output_name] = None
else: else:
raise ValueError( raise ValueError(
f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." f"Output {prefix}{dot}{output_name} is not an array,"
f" got {type(result.get(output_name))} instead."
) )
else: else:
if len(result[output_name]) > dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH: if len(result[output_name]) > dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH:
@ -248,7 +251,8 @@ class CodeNode(BaseNode):
transformed_result[output_name] = None transformed_result[output_name] = None
else: else:
raise ValueError( raise ValueError(
f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." f"Output {prefix}{dot}{output_name} is not an array,"
f" got {type(result.get(output_name))} instead."
) )
else: else:
if len(result[output_name]) > dify_config.CODE_MAX_STRING_ARRAY_LENGTH: if len(result[output_name]) > dify_config.CODE_MAX_STRING_ARRAY_LENGTH:
@ -268,7 +272,8 @@ class CodeNode(BaseNode):
transformed_result[output_name] = None transformed_result[output_name] = None
else: else:
raise ValueError( raise ValueError(
f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." f"Output {prefix}{dot}{output_name} is not an array,"
f" got {type(result.get(output_name))} instead."
) )
else: else:
if len(result[output_name]) > dify_config.CODE_MAX_OBJECT_ARRAY_LENGTH: if len(result[output_name]) > dify_config.CODE_MAX_OBJECT_ARRAY_LENGTH:
@ -283,7 +288,8 @@ class CodeNode(BaseNode):
pass pass
else: else:
raise ValueError( raise ValueError(
f"Output {prefix}{dot}{output_name}[{i}] is not an object, got {type(value)} instead at index {i}." f"Output {prefix}{dot}{output_name}[{i}] is not an object,"
f" got {type(value)} instead at index {i}."
) )
transformed_result[output_name] = [ transformed_result[output_name] = [

View File

@ -128,11 +128,12 @@ class KnowledgeRetrievalNode(BaseNode):
weights = None weights = None
elif node_data.multiple_retrieval_config.reranking_mode == "weighted_score": elif node_data.multiple_retrieval_config.reranking_mode == "weighted_score":
reranking_model = None reranking_model = None
vector_setting = node_data.multiple_retrieval_config.weights.vector_setting
weights = { weights = {
"vector_setting": { "vector_setting": {
"vector_weight": node_data.multiple_retrieval_config.weights.vector_setting.vector_weight, "vector_weight": vector_setting.vector_weight,
"embedding_provider_name": node_data.multiple_retrieval_config.weights.vector_setting.embedding_provider_name, "embedding_provider_name": vector_setting.embedding_provider_name,
"embedding_model_name": node_data.multiple_retrieval_config.weights.vector_setting.embedding_model_name, "embedding_model_name": vector_setting.embedding_model_name,
}, },
"keyword_setting": { "keyword_setting": {
"keyword_weight": node_data.multiple_retrieval_config.weights.keyword_setting.keyword_weight "keyword_weight": node_data.multiple_retrieval_config.weights.keyword_setting.keyword_weight

View File

@ -23,7 +23,7 @@ Steps:
To illustrate, if the task involves extracting a user's name and their request, your function call might look like this: Ensure your output follows a similar structure to examples. To illustrate, if the task involves extracting a user's name and their request, your function call might look like this: Ensure your output follows a similar structure to examples.
### Final Output ### Final Output
Produce well-formatted function calls in json without XML tags, as shown in the example. Produce well-formatted function calls in json without XML tags, as shown in the example.
""" """ # noqa: E501
FUNCTION_CALLING_EXTRACTOR_USER_TEMPLATE = f"""extract structured information from context inside <context></context> XML tags by calling the function {FUNCTION_CALLING_EXTRACTOR_NAME} with the correct parameters with structure inside <structure></structure> XML tags. FUNCTION_CALLING_EXTRACTOR_USER_TEMPLATE = f"""extract structured information from context inside <context></context> XML tags by calling the function {FUNCTION_CALLING_EXTRACTOR_NAME} with the correct parameters with structure inside <structure></structure> XML tags.
<context> <context>
@ -33,7 +33,7 @@ FUNCTION_CALLING_EXTRACTOR_USER_TEMPLATE = f"""extract structured information fr
<structure> <structure>
\x7bstructure\x7d \x7bstructure\x7d
</structure> </structure>
""" """ # noqa: E501
FUNCTION_CALLING_EXTRACTOR_EXAMPLE = [ FUNCTION_CALLING_EXTRACTOR_EXAMPLE = [
{ {
@ -55,7 +55,8 @@ FUNCTION_CALLING_EXTRACTOR_EXAMPLE = [
}, },
}, },
"assistant": { "assistant": {
"text": "I need always call the function with the correct parameters. in this case, I need to call the function with the location parameter.", "text": "I need always call the function with the correct parameters."
" in this case, I need to call the function with the location parameter.",
"function_call": {"name": FUNCTION_CALLING_EXTRACTOR_NAME, "parameters": {"location": "San Francisco"}}, "function_call": {"name": FUNCTION_CALLING_EXTRACTOR_NAME, "parameters": {"location": "San Francisco"}},
}, },
}, },
@ -72,7 +73,8 @@ FUNCTION_CALLING_EXTRACTOR_EXAMPLE = [
}, },
}, },
"assistant": { "assistant": {
"text": "I need always call the function with the correct parameters. in this case, I need to call the function with the food parameter.", "text": "I need always call the function with the correct parameters."
" in this case, I need to call the function with the food parameter.",
"function_call": {"name": FUNCTION_CALLING_EXTRACTOR_NAME, "parameters": {"food": "apple pie"}}, "function_call": {"name": FUNCTION_CALLING_EXTRACTOR_NAME, "parameters": {"food": "apple pie"}},
}, },
}, },
@ -117,7 +119,7 @@ Inside <text></text> XML tags, there is a text that I should extract parameters
### Answer ### Answer
I should always output a valid JSON object. Output nothing other than the JSON object. I should always output a valid JSON object. Output nothing other than the JSON object.
```JSON ```JSON
""" """ # noqa: E501
CHAT_GENERATE_JSON_PROMPT = """You should always follow the instructions and output a valid JSON object. CHAT_GENERATE_JSON_PROMPT = """You should always follow the instructions and output a valid JSON object.
The structure of the JSON object you can found in the instructions. The structure of the JSON object you can found in the instructions.

View File

@ -12,13 +12,13 @@ QUESTION_CLASSIFIER_SYSTEM_PROMPT = """
<histories> <histories>
{histories} {histories}
</histories> </histories>
""" """ # noqa: E501
QUESTION_CLASSIFIER_USER_PROMPT_1 = """ QUESTION_CLASSIFIER_USER_PROMPT_1 = """
{ "input_text": ["I recently had a great experience with your company. The service was prompt and the staff was very friendly."], { "input_text": ["I recently had a great experience with your company. The service was prompt and the staff was very friendly."],
"categories": [{"category_id":"f5660049-284f-41a7-b301-fd24176a711c","category_name":"Customer Service"},{"category_id":"8d007d06-f2c9-4be5-8ff6-cd4381c13c60","category_name":"Satisfaction"},{"category_id":"5fbbbb18-9843-466d-9b8e-b9bfbb9482c8","category_name":"Sales"},{"category_id":"23623c75-7184-4a2e-8226-466c2e4631e4","category_name":"Product"}], "categories": [{"category_id":"f5660049-284f-41a7-b301-fd24176a711c","category_name":"Customer Service"},{"category_id":"8d007d06-f2c9-4be5-8ff6-cd4381c13c60","category_name":"Satisfaction"},{"category_id":"5fbbbb18-9843-466d-9b8e-b9bfbb9482c8","category_name":"Sales"},{"category_id":"23623c75-7184-4a2e-8226-466c2e4631e4","category_name":"Product"}],
"classification_instructions": ["classify the text based on the feedback provided by customer"]} "classification_instructions": ["classify the text based on the feedback provided by customer"]}
""" """ # noqa: E501
QUESTION_CLASSIFIER_ASSISTANT_PROMPT_1 = """ QUESTION_CLASSIFIER_ASSISTANT_PROMPT_1 = """
```json ```json
@ -32,7 +32,7 @@ QUESTION_CLASSIFIER_USER_PROMPT_2 = """
{"input_text": ["bad service, slow to bring the food"], {"input_text": ["bad service, slow to bring the food"],
"categories": [{"category_id":"80fb86a0-4454-4bf5-924c-f253fdd83c02","category_name":"Food Quality"},{"category_id":"f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name":"Experience"},{"category_id":"cc771f63-74e7-4c61-882e-3eda9d8ba5d7","category_name":"Price"}], "categories": [{"category_id":"80fb86a0-4454-4bf5-924c-f253fdd83c02","category_name":"Food Quality"},{"category_id":"f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name":"Experience"},{"category_id":"cc771f63-74e7-4c61-882e-3eda9d8ba5d7","category_name":"Price"}],
"classification_instructions": []} "classification_instructions": []}
""" """ # noqa: E501
QUESTION_CLASSIFIER_ASSISTANT_PROMPT_2 = """ QUESTION_CLASSIFIER_ASSISTANT_PROMPT_2 = """
```json ```json
@ -73,4 +73,4 @@ Here is the chat histories between human and assistant, inside <histories></hist
### User Input ### User Input
{{"input_text" : ["{input_text}"], "categories" : {categories},"classification_instruction" : ["{classification_instructions}"]}} {{"input_text" : ["{input_text}"], "categories" : {categories},"classification_instruction" : ["{classification_instructions}"]}}
### Assistant Output ### Assistant Output
""" """ # noqa: E501

View File

@ -204,7 +204,8 @@ class PKCS1OAepCipher:
def new(key, hashAlgo=None, mgfunc=None, label=b"", randfunc=None): def new(key, hashAlgo=None, mgfunc=None, label=b"", randfunc=None):
"""Return a cipher object :class:`PKCS1OAEP_Cipher` that can be used to perform PKCS#1 OAEP encryption or decryption. """Return a cipher object :class:`PKCS1OAEP_Cipher`
that can be used to perform PKCS#1 OAEP encryption or decryption.
:param key: :param key:
The key object to use to encrypt or decrypt the message. The key object to use to encrypt or decrypt the message.

View File

@ -65,7 +65,10 @@ class Provider(db.Model):
updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)")) updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text("CURRENT_TIMESTAMP(0)"))
def __repr__(self): def __repr__(self):
return f"<Provider(id={self.id}, tenant_id={self.tenant_id}, provider_name='{self.provider_name}', provider_type='{self.provider_type}')>" return (
f"<Provider(id={self.id}, tenant_id={self.tenant_id}, provider_name='{self.provider_name}',"
f" provider_type='{self.provider_type}')>"
)
@property @property
def token_is_set(self): def token_is_set(self):
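The same parenthesized concatenation also works on a return statement, as in this __repr__: the wrapped f-strings still evaluate to a single string. A quick sketch with a simplified stand-in class rather than the real SQLAlchemy model:

class Provider:
    """Simplified stand-in for the ORM model, for illustration only."""

    def __init__(self, id: int, tenant_id: int, provider_name: str, provider_type: str):
        self.id = id
        self.tenant_id = tenant_id
        self.provider_name = provider_name
        self.provider_type = provider_type

    def __repr__(self) -> str:
        # Parentheses let the single return value span two physical lines.
        return (
            f"<Provider(id={self.id}, tenant_id={self.tenant_id},"
            f" provider_name='{self.provider_name}', provider_type='{self.provider_type}')>"
        )


print(Provider(1, 7, "openai", "custom"))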

View File

@ -62,7 +62,8 @@ class PublishedAppTool(db.Model):
description = db.Column(db.Text, nullable=False) description = db.Column(db.Text, nullable=False)
# llm_description of the tool, for LLM # llm_description of the tool, for LLM
llm_description = db.Column(db.Text, nullable=False) llm_description = db.Column(db.Text, nullable=False)
# query description, query will be seem as a parameter of the tool, to describe this parameter to llm, we need this field # query description, query will be seem as a parameter of the tool,
# to describe this parameter to llm, we need this field
query_description = db.Column(db.Text, nullable=False) query_description = db.Column(db.Text, nullable=False)
# query name, the name of the query parameter # query name, the name of the query parameter
query_name = db.Column(db.String(40), nullable=False) query_name = db.Column(db.String(40), nullable=False)

View File

@ -246,7 +246,8 @@ class Workflow(db.Model):
if any(var for var in value if not var.id): if any(var for var in value if not var.id):
raise ValueError("environment variable require a unique id") raise ValueError("environment variable require a unique id")
# Compare inputs and origin variables, if the value is HIDDEN_VALUE, use the origin variable value (only update `name`). # Compare inputs and origin variables,
# if the value is HIDDEN_VALUE, use the origin variable value (only update `name`).
origin_variables_dictionary = {var.id: var for var in self.environment_variables} origin_variables_dictionary = {var.id: var for var in self.environment_variables}
for i, variable in enumerate(value): for i, variable in enumerate(value):
if variable.id in origin_variables_dictionary and variable.value == HIDDEN_VALUE: if variable.id in origin_variables_dictionary and variable.value == HIDDEN_VALUE:

api/poetry.lock generated
View File

@ -10388,4 +10388,4 @@ cffi = ["cffi (>=1.11)"]
[metadata] [metadata]
lock-version = "2.0" lock-version = "2.0"
python-versions = ">=3.10,<3.13" python-versions = ">=3.10,<3.13"
content-hash = "2dbff415c3c9ca95c8dcfb59fc088ce2c0d00037c44f386a34c87c98e1d8b942" content-hash = "8179c7e3f91b5a00054e26297040b1969f59b37cb9a707fbaa9c2ea419954718"

View File

@ -27,7 +27,6 @@ select = [
"W605", # invalid-escape-sequence "W605", # invalid-escape-sequence
] ]
ignore = [ ignore = [
"E501", # line-too-long
"E402", # module-import-not-at-top-of-file "E402", # module-import-not-at-top-of-file
"E711", # none-comparison "E711", # none-comparison
"E712", # true-false-comparison "E712", # true-false-comparison
@ -68,16 +67,19 @@ ignore = [
"F401", # unused-import "F401", # unused-import
"F811", # redefined-while-unused "F811", # redefined-while-unused
] ]
"tests/*" = [
"F401", # unused-import
"F811", # redefined-while-unused
]
"configs/*" = [ "configs/*" = [
"N802", # invalid-function-name "N802", # invalid-function-name
] ]
"libs/gmpy2_pkcs10aep_cipher.py" = [ "libs/gmpy2_pkcs10aep_cipher.py" = [
"N803", # invalid-argument-name "N803", # invalid-argument-name
] ]
"migrations/versions/*" = [
"E501", # line-too-long
]
"tests/*" = [
"F401", # unused-import
"F811", # redefined-while-unused
]
[tool.ruff.format] [tool.ruff.format]
exclude = [ exclude = [
@ -270,4 +272,4 @@ optional = true
[tool.poetry.group.lint.dependencies] [tool.poetry.group.lint.dependencies]
dotenv-linter = "~0.5.0" dotenv-linter = "~0.5.0"
ruff = "~0.6.1" ruff = "~0.6.4"

View File

@ -176,7 +176,8 @@ class ApiToolManageService:
get api tool provider remote schema get api tool provider remote schema
""" """
headers = { headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0", "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko)"
" Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0",
"Accept": "*/*", "Accept": "*/*",
} }

File diff suppressed because one or more lines are too long

View File

@ -21,9 +21,9 @@ def test_segment_group_to_text():
segments_group = parser.convert_template(template=template, variable_pool=variable_pool) segments_group = parser.convert_template(template=template, variable_pool=variable_pool)
assert segments_group.text == "Hello, fake-user-id! Your query is fake-user-query. And your key is fake-secret-key." assert segments_group.text == "Hello, fake-user-id! Your query is fake-user-query. And your key is fake-secret-key."
assert ( assert segments_group.log == (
segments_group.log f"Hello, fake-user-id! Your query is fake-user-query."
== f"Hello, fake-user-id! Your query is fake-user-query. And your key is {encrypter.obfuscated_token('fake-secret-key')}." f" And your key is {encrypter.obfuscated_token('fake-secret-key')}."
) )