From c69f5b07ba241e6ef08c7028c12b1125c93c1d01 Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Thu, 12 Sep 2024 14:00:36 +0800 Subject: [PATCH] chore: apply ruff E501 line-too-long linter rule (#8275) Co-authored-by: -LAN- --- api/commands.py | 3 +- api/controllers/console/app/statistic.py | 170 +++++++++++------- .../console/app/workflow_statistic.py | 102 ++++++----- api/controllers/console/wraps.py | 3 +- api/controllers/web/wraps.py | 3 +- api/core/agent/prompt/template.py | 6 +- .../app/apps/workflow_logging_callback.py | 19 +- api/core/file/message_file_parser.py | 3 +- .../helper/code_executor/code_executor.py | 3 +- api/core/helper/tool_parameter_cache.py | 5 +- api/core/llm_generator/prompts.py | 19 +- api/core/model_runtime/entities/defaults.py | 16 +- .../model_providers/__base/ai_model.py | 4 +- .../__base/large_language_model.py | 5 +- .../model_providers/anthropic/llm/llm.py | 2 +- .../model_providers/azure_openai/_constant.py | 69 +++---- .../baichuan/llm/baichuan_tokenizer.py | 3 +- .../model_providers/bedrock/llm/llm.py | 6 +- .../model_providers/google/llm/llm.py | 2 +- .../huggingface_tei/tei_helper.py | 3 +- .../model_providers/hunyuan/llm/llm.py | 3 +- .../model_providers/nvidia/llm/llm.py | 3 +- .../model_providers/oci/llm/llm.py | 3 +- .../oci/text_embedding/text_embedding.py | 3 +- .../model_providers/ollama/llm/llm.py | 7 +- .../model_providers/openai/llm/llm.py | 2 +- .../openai_api_compatible/llm/llm.py | 6 +- .../model_providers/spark/llm/_client.py | 5 +- .../model_providers/upstage/llm/llm.py | 2 +- .../model_providers/vertex_ai/llm/llm.py | 6 +- .../model_providers/wenxin/llm/llm.py | 2 +- .../model_providers/xinference/llm/llm.py | 9 +- .../xinference/speech2text/speech2text.py | 10 +- .../xinference/xinference_helper.py | 6 +- .../model_providers/zhipuai/llm/llm.py | 2 +- .../zhipuai/zhipuai_sdk/core/_base_type.py | 3 +- .../schema_validators/common_validator.py | 3 +- .../advanced_prompt_templates.py | 8 +- .../rag/datasource/vdb/oracle/oraclevector.py | 6 +- .../rag/datasource/vdb/pgvector/pgvector.py | 3 +- api/core/rag/extractor/extract_processor.py | 5 +- .../router/multi_dataset_react_route.py | 9 +- api/core/tools/entities/tool_bundle.py | 3 +- api/core/tools/entities/values.py | 32 ++-- .../provider/builtin/aippt/tools/aippt.py | 3 +- .../builtin/arxiv/tools/arxiv_search.py | 3 +- .../builtin/aws/tools/apply_guardrail.py | 3 +- .../builtin/devdocs/tools/searchDevDocs.py | 3 +- .../builtin/gitlab/tools/gitlab_files.py | 5 +- .../google_translate/tools/translate.py | 3 +- .../builtin/hap/tools/get_worksheet_fields.py | 3 +- .../hap/tools/list_worksheet_records.py | 5 +- .../builtin/searchapi/tools/google.py | 5 +- .../stablediffusion/tools/stable_diffusion.py | 21 ++- .../trello/tools/create_list_on_board.py | 3 +- .../trello/tools/create_new_card_on_board.py | 3 +- .../builtin/trello/tools/delete_board.py | 3 +- .../builtin/trello/tools/delete_card.py | 3 +- .../builtin/trello/tools/get_board_actions.py | 3 +- .../builtin/trello/tools/get_board_by_id.py | 3 +- .../builtin/trello/tools/get_board_cards.py | 3 +- .../trello/tools/get_filterd_board_cards.py | 3 +- .../trello/tools/get_lists_on_board.py | 3 +- .../builtin/trello/tools/update_board.py | 3 +- .../builtin/trello/tools/update_card.py | 3 +- .../builtin/twilio/tools/send_message.py | 3 +- .../builtin/vectorizer/tools/test_data.py | 2 +- api/core/tools/tool_engine.py | 5 +- api/core/tools/utils/feishu_api_utils.py | 2 +- api/core/tools/utils/message_transformer.py | 2 +- 
api/core/tools/utils/parser.py | 6 +- api/core/tools/utils/web_reader_tool.py | 3 +- api/core/workflow/nodes/code/code_node.py | 18 +- .../knowledge_retrieval_node.py | 7 +- .../nodes/parameter_extractor/prompts.py | 12 +- .../question_classifier/template_prompts.py | 8 +- api/libs/gmpy2_pkcs10aep_cipher.py | 3 +- api/models/provider.py | 5 +- api/models/tools.py | 3 +- api/models/workflow.py | 3 +- api/poetry.lock | 2 +- api/pyproject.toml | 14 +- .../tools/api_tools_manage_service.py | 3 +- .../model_runtime/__mock/openai_embeddings.py | 2 +- .../core/app/segments/test_segment.py | 6 +- 85 files changed, 459 insertions(+), 324 deletions(-) diff --git a/api/commands.py b/api/commands.py index 3bf8bc0ecc..db96fbae46 100644 --- a/api/commands.py +++ b/api/commands.py @@ -411,7 +411,8 @@ def migrate_knowledge_vector_database(): try: click.echo( click.style( - f"Start to created vector index with {len(documents)} documents of {segments_count} segments for dataset {dataset.id}.", + f"Start to created vector index with {len(documents)} documents of {segments_count}" + f" segments for dataset {dataset.id}.", fg="green", ) ) diff --git a/api/controllers/console/app/statistic.py b/api/controllers/console/app/statistic.py index 4806b02b55..3ef442812d 100644 --- a/api/controllers/console/app/statistic.py +++ b/api/controllers/console/app/statistic.py @@ -29,10 +29,13 @@ class DailyMessageStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(*) AS message_count - FROM messages where app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(*) AS message_count +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -45,7 +48,7 @@ class DailyMessageStatistic(Resource): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -55,10 +58,10 @@ class DailyMessageStatistic(Resource): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -83,10 +86,13 @@ class DailyConversationStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct messages.conversation_id) AS conversation_count - FROM messages where app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(DISTINCT messages.conversation_id) AS conversation_count +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -99,7 +105,7 @@ class DailyConversationStatistic(Resource): start_datetime_timezone = 
timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -109,10 +115,10 @@ class DailyConversationStatistic(Resource): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -137,10 +143,13 @@ class DailyTerminalsStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct messages.from_end_user_id) AS terminal_count - FROM messages where app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(DISTINCT messages.from_end_user_id) AS terminal_count +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -153,7 +162,7 @@ class DailyTerminalsStatistic(Resource): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -163,10 +172,10 @@ class DailyTerminalsStatistic(Resource): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -191,12 +200,14 @@ class DailyTokenCostStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - (sum(messages.message_tokens) + sum(messages.answer_tokens)) as token_count, - sum(total_price) as total_price - FROM messages where app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + (SUM(messages.message_tokens) + SUM(messages.answer_tokens)) AS token_count, + SUM(total_price) AS total_price +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -209,7 +220,7 @@ class DailyTokenCostStatistic(Resource): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -219,10 +230,10 @@ class DailyTokenCostStatistic(Resource): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - 
sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -249,12 +260,22 @@ class AverageSessionInteractionStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """SELECT date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, -AVG(subquery.message_count) AS interactions -FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count - FROM conversations c - JOIN messages m ON c.id = m.conversation_id - WHERE c.override_model_configs IS NULL AND c.app_id = :app_id""" + sql_query = """SELECT + DATE(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + AVG(subquery.message_count) AS interactions +FROM + ( + SELECT + m.conversation_id, + COUNT(m.id) AS message_count + FROM + conversations c + JOIN + messages m + ON c.id = m.conversation_id + WHERE + c.override_model_configs IS NULL + AND c.app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -267,7 +288,7 @@ FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and c.created_at >= :start" + sql_query += " AND c.created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -277,14 +298,19 @@ FROM (SELECT m.conversation_id, COUNT(m.id) AS message_count end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and c.created_at < :end" + sql_query += " AND c.created_at < :end" arg_dict["end"] = end_datetime_utc sql_query += """ - GROUP BY m.conversation_id) subquery -LEFT JOIN conversations c on c.id=subquery.conversation_id -GROUP BY date -ORDER BY date""" + GROUP BY m.conversation_id + ) subquery +LEFT JOIN + conversations c + ON c.id = subquery.conversation_id +GROUP BY + date +ORDER BY + date""" response_data = [] @@ -311,13 +337,17 @@ class UserSatisfactionRateStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', m.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - COUNT(m.id) as message_count, COUNT(mf.id) as feedback_count - FROM messages m - LEFT JOIN message_feedbacks mf on mf.message_id=m.id and mf.rating='like' - WHERE m.app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', m.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(m.id) AS message_count, + COUNT(mf.id) AS feedback_count +FROM + messages m +LEFT JOIN + message_feedbacks mf + ON mf.message_id=m.id AND mf.rating='like' +WHERE + m.app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -330,7 +360,7 @@ class UserSatisfactionRateStatistic(Resource): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and m.created_at >= :start" + sql_query += " AND m.created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -340,10 +370,10 @@ class UserSatisfactionRateStatistic(Resource): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and 
m.created_at < :end" + sql_query += " AND m.created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -373,12 +403,13 @@ class AverageResponseTimeStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - AVG(provider_response_latency) as latency - FROM messages - WHERE app_id = :app_id - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + AVG(provider_response_latency) AS latency +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -391,7 +422,7 @@ class AverageResponseTimeStatistic(Resource): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -401,10 +432,10 @@ class AverageResponseTimeStatistic(Resource): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -429,13 +460,16 @@ class TokensPerSecondStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - CASE + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + CASE WHEN SUM(provider_response_latency) = 0 THEN 0 ELSE (SUM(answer_tokens) / SUM(provider_response_latency)) END as tokens_per_second -FROM messages -WHERE app_id = :app_id""" +FROM + messages +WHERE + app_id = :app_id""" arg_dict = {"tz": account.timezone, "app_id": app_model.id} timezone = pytz.timezone(account.timezone) @@ -448,7 +482,7 @@ WHERE app_id = :app_id""" start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -458,10 +492,10 @@ WHERE app_id = :app_id""" end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] diff --git a/api/controllers/console/app/workflow_statistic.py b/api/controllers/console/app/workflow_statistic.py index 942271a634..c7e54f2be0 100644 --- a/api/controllers/console/app/workflow_statistic.py +++ b/api/controllers/console/app/workflow_statistic.py @@ -30,12 +30,14 @@ class WorkflowDailyRunsStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT 
date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(id) AS runs - FROM workflow_runs - WHERE app_id = :app_id - AND triggered_from = :triggered_from - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(id) AS runs +FROM + workflow_runs +WHERE + app_id = :app_id + AND triggered_from = :triggered_from""" arg_dict = { "tz": account.timezone, "app_id": app_model.id, @@ -52,7 +54,7 @@ class WorkflowDailyRunsStatistic(Resource): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -62,10 +64,10 @@ class WorkflowDailyRunsStatistic(Resource): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -90,12 +92,14 @@ class WorkflowDailyTerminalsStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, count(distinct workflow_runs.created_by) AS terminal_count - FROM workflow_runs - WHERE app_id = :app_id - AND triggered_from = :triggered_from - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + COUNT(DISTINCT workflow_runs.created_by) AS terminal_count +FROM + workflow_runs +WHERE + app_id = :app_id + AND triggered_from = :triggered_from""" arg_dict = { "tz": account.timezone, "app_id": app_model.id, @@ -112,7 +116,7 @@ class WorkflowDailyTerminalsStatistic(Resource): start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -122,10 +126,10 @@ class WorkflowDailyTerminalsStatistic(Resource): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -150,14 +154,14 @@ class WorkflowDailyTokenCostStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT - date(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - SUM(workflow_runs.total_tokens) as token_count - FROM workflow_runs - WHERE app_id = :app_id - AND triggered_from = :triggered_from - """ + sql_query = """SELECT + DATE(DATE_TRUNC('day', created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + SUM(workflow_runs.total_tokens) AS token_count +FROM + workflow_runs +WHERE + app_id = :app_id + AND triggered_from = :triggered_from""" arg_dict = { "tz": account.timezone, "app_id": app_model.id, @@ -174,7 +178,7 @@ class WorkflowDailyTokenCostStatistic(Resource): 
start_datetime_timezone = timezone.localize(start_datetime) start_datetime_utc = start_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at >= :start" + sql_query += " AND created_at >= :start" arg_dict["start"] = start_datetime_utc if args["end"]: @@ -184,10 +188,10 @@ class WorkflowDailyTokenCostStatistic(Resource): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query += " and created_at < :end" + sql_query += " AND created_at < :end" arg_dict["end"] = end_datetime_utc - sql_query += " GROUP BY date order by date" + sql_query += " GROUP BY date ORDER BY date" response_data = [] @@ -217,23 +221,27 @@ class WorkflowAverageAppInteractionStatistic(Resource): parser.add_argument("end", type=DatetimeString("%Y-%m-%d %H:%M"), location="args") args = parser.parse_args() - sql_query = """ - SELECT - AVG(sub.interactions) as interactions, - sub.date - FROM - (SELECT - date(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, - c.created_by, - COUNT(c.id) AS interactions - FROM workflow_runs c - WHERE c.app_id = :app_id - AND c.triggered_from = :triggered_from - {{start}} - {{end}} - GROUP BY date, c.created_by) sub - GROUP BY sub.date - """ + sql_query = """SELECT + AVG(sub.interactions) AS interactions, + sub.date +FROM + ( + SELECT + DATE(DATE_TRUNC('day', c.created_at AT TIME ZONE 'UTC' AT TIME ZONE :tz )) AS date, + c.created_by, + COUNT(c.id) AS interactions + FROM + workflow_runs c + WHERE + c.app_id = :app_id + AND c.triggered_from = :triggered_from + {{start}} + {{end}} + GROUP BY + date, c.created_by + ) sub +GROUP BY + sub.date""" arg_dict = { "tz": account.timezone, "app_id": app_model.id, @@ -262,7 +270,7 @@ class WorkflowAverageAppInteractionStatistic(Resource): end_datetime_timezone = timezone.localize(end_datetime) end_datetime_utc = end_datetime_timezone.astimezone(utc_timezone) - sql_query = sql_query.replace("{{end}}", " and c.created_at < :end") + sql_query = sql_query.replace("{{end}}", " AND c.created_at < :end") arg_dict["end"] = end_datetime_utc else: sql_query = sql_query.replace("{{end}}", "") diff --git a/api/controllers/console/wraps.py b/api/controllers/console/wraps.py index 7667b30e34..46223d104f 100644 --- a/api/controllers/console/wraps.py +++ b/api/controllers/console/wraps.py @@ -64,7 +64,8 @@ def cloud_edition_billing_resource_check(resource: str): elif resource == "vector_space" and 0 < vector_space.limit <= vector_space.size: abort(403, "The capacity of the vector space has reached the limit of your subscription.") elif resource == "documents" and 0 < documents_upload_quota.limit <= documents_upload_quota.size: - # The api of file upload is used in the multiple places, so we need to check the source of the request from datasets + # The file upload API is used in multiple places, + # so we need to check the source of the request from datasets source = request.args.get("source") if source == "datasets": abort(403, "The number of documents has reached the limit of your subscription.") diff --git a/api/controllers/web/wraps.py b/api/controllers/web/wraps.py index 93dc691d62..c327c3df18 100644 --- a/api/controllers/web/wraps.py +++ b/api/controllers/web/wraps.py @@ -80,7 +80,8 @@ def _validate_web_sso_token(decoded, system_features, app_code): if not source or source != "sso": raise WebSSOAuthRequiredError() - # Check if SSO is not enforced for web, and if the token source is SSO, raise an error and redirect to normal passport login
+ # Check if SSO is not enforced for web, and if the token source is SSO, + # raise an error and redirect to normal passport login if not system_features.sso_enforced_for_web or not app_web_sso_enabled: source = decoded.get("token_source") if source and source == "sso": diff --git a/api/core/agent/prompt/template.py b/api/core/agent/prompt/template.py index cb98f5501d..ef64fd29fc 100644 --- a/api/core/agent/prompt/template.py +++ b/api/core/agent/prompt/template.py @@ -41,7 +41,8 @@ Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use {{historic_messages}} Question: {{query}} {{agent_scratchpad}} -Thought:""" +Thought:""" # noqa: E501 + ENGLISH_REACT_COMPLETION_AGENT_SCRATCHPAD_TEMPLATES = """Observation: {{observation}} Thought:""" @@ -86,7 +87,8 @@ Action: ``` Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. -""" +""" # noqa: E501 + ENGLISH_REACT_CHAT_AGENT_SCRATCHPAD_TEMPLATES = "" diff --git a/api/core/app/apps/workflow_logging_callback.py b/api/core/app/apps/workflow_logging_callback.py index cdd21bf7c2..388cb83180 100644 --- a/api/core/app/apps/workflow_logging_callback.py +++ b/api/core/app/apps/workflow_logging_callback.py @@ -84,10 +84,12 @@ class WorkflowLoggingCallback(WorkflowCallback): if route_node_state.node_run_result: node_run_result = route_node_state.node_run_result self.print_text( - f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", color="green" + f"Inputs: " f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", + color="green", ) self.print_text( - f"Process Data: {jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", + f"Process Data: " + f"{jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", color="green", ) self.print_text( @@ -114,14 +116,17 @@ class WorkflowLoggingCallback(WorkflowCallback): node_run_result = route_node_state.node_run_result self.print_text(f"Error: {node_run_result.error}", color="red") self.print_text( - f"Inputs: {jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", color="red" - ) - self.print_text( - f"Process Data: {jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", + f"Inputs: " f"" f"{jsonable_encoder(node_run_result.inputs) if node_run_result.inputs else ''}", color="red", ) self.print_text( - f"Outputs: {jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}", color="red" + f"Process Data: " + f"{jsonable_encoder(node_run_result.process_data) if node_run_result.process_data else ''}", + color="red", + ) + self.print_text( + f"Outputs: " f"{jsonable_encoder(node_run_result.outputs) if node_run_result.outputs else ''}", + color="red", ) def on_node_text_chunk(self, event: NodeRunStreamChunkEvent) -> None: diff --git a/api/core/file/message_file_parser.py b/api/core/file/message_file_parser.py index 8feaabedbb..83059b216e 100644 --- a/api/core/file/message_file_parser.py +++ b/api/core/file/message_file_parser.py @@ -188,7 +188,8 @@ class MessageFileParser: def _check_image_remote_url(self, url): try: headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)" + " 
Chrome/91.0.4472.124 Safari/537.36" } def is_s3_presigned_url(url): diff --git a/api/core/helper/code_executor/code_executor.py b/api/core/helper/code_executor/code_executor.py index 7ee6e63817..4932284540 100644 --- a/api/core/helper/code_executor/code_executor.py +++ b/api/core/helper/code_executor/code_executor.py @@ -89,7 +89,8 @@ class CodeExecutor: raise CodeExecutionError("Code execution service is unavailable") elif response.status_code != 200: raise Exception( - f"Failed to execute code, got status code {response.status_code}, please check if the sandbox service is running" + f"Failed to execute code, got status code {response.status_code}," + f" please check if the sandbox service is running" ) except CodeExecutionError as e: raise e diff --git a/api/core/helper/tool_parameter_cache.py b/api/core/helper/tool_parameter_cache.py index 4c3b736186..e848b46c56 100644 --- a/api/core/helper/tool_parameter_cache.py +++ b/api/core/helper/tool_parameter_cache.py @@ -14,7 +14,10 @@ class ToolParameterCache: def __init__( self, tenant_id: str, provider: str, tool_name: str, cache_type: ToolParameterCacheType, identity_id: str ): - self.cache_key = f"{cache_type.value}_secret:tenant_id:{tenant_id}:provider:{provider}:tool_name:{tool_name}:identity_id:{identity_id}" + self.cache_key = ( + f"{cache_type.value}_secret:tenant_id:{tenant_id}:provider:{provider}:tool_name:{tool_name}" + f":identity_id:{identity_id}" + ) def get(self) -> Optional[dict]: """ diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index 7ab257872f..c40b6d1808 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -59,24 +59,27 @@ User Input: yo, 你今天咋样? } User Input: -""" +""" # noqa: E501 SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = ( "Please help me predict the three most likely questions that human would ask, " "and keeping each question under 20 characters.\n" - "MAKE SURE your output is the SAME language as the Assistant's latest response(if the main response is written in Chinese, then the language of your output must be using Chinese.)!\n" + "MAKE SURE your output is the SAME language as the Assistant's latest response" + "(if the main response is written in Chinese, then the language of your output must be using Chinese.)!\n" "The output must be an array in JSON format following the specified schema:\n" '["question1","question2","question3"]\n' ) GENERATOR_QA_PROMPT = ( - " The user will send a long text. Generate a Question and Answer pairs only using the knowledge in the long text. Please think step by step." + " The user will send a long text. Generate a Question and Answer pairs only using the knowledge" + " in the long text. Please think step by step." "Step 1: Understand and summarize the main content of this text.\n" "Step 2: What key information or concepts are mentioned in this text?\n" "Step 3: Decompose or combine multiple pieces of information and concepts.\n" "Step 4: Generate questions and answers based on these key information and concepts.\n" " The questions should be clear and detailed, and the answers should be detailed and complete. " - "You must answer in {language}, in a style that is clear and detailed in {language}. No language other than {language} should be used. \n" + "You must answer in {language}, in a style that is clear and detailed in {language}." + " No language other than {language} should be used. 
\n" " Use the following format: Q1:\nA1:\nQ2:\nA2:...\n" "" ) @@ -94,7 +97,7 @@ Based on task description, please create a well-structured prompt template that - Use the same language as task description. - Output in ``` xml ``` and start with Please generate the full prompt template with at least 300 words and output only the prompt template. -""" +""" # noqa: E501 RULE_CONFIG_PROMPT_GENERATE_TEMPLATE = """ Here is a task description for which I would like you to create a high-quality prompt template for: @@ -109,7 +112,7 @@ Based on task description, please create a well-structured prompt template that - Use the same language as task description. - Output in ``` xml ``` and start with Please generate the full prompt template and output only the prompt template. -""" +""" # noqa: E501 RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE = """ I need to extract the following information from the input text. The tag specifies the 'type', 'description' and 'required' of the information to be extracted. @@ -134,7 +137,7 @@ Inside XML tags, there is a text that I should extract parameters ### Answer I should always output a valid list. Output nothing other than the list of variable_name. Output an empty list if there is no variable name in input text. -""" +""" # noqa: E501 RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE = """ @@ -150,4 +153,4 @@ Welcome! I'm here to assist you with any questions or issues you might have with Here is the task description: {{INPUT_TEXT}} You just need to generate the output -""" +""" # noqa: E501 diff --git a/api/core/model_runtime/entities/defaults.py b/api/core/model_runtime/entities/defaults.py index e94be6f918..4d0c9aa08f 100644 --- a/api/core/model_runtime/entities/defaults.py +++ b/api/core/model_runtime/entities/defaults.py @@ -8,8 +8,11 @@ PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = { }, "type": "float", "help": { - "en_US": "Controls randomness. Lower temperature results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive. Higher temperature results in more random completions.", - "zh_Hans": "温度控制随机性。较低的温度会导致较少的随机完成。随着温度接近零,模型将变得确定性和重复性。较高的温度会导致更多的随机完成。", + "en_US": "Controls randomness. Lower temperature results in less random completions." + " As the temperature approaches zero, the model will become deterministic and repetitive." + " Higher temperature results in more random completions.", + "zh_Hans": "温度控制随机性。较低的温度会导致较少的随机完成。随着温度接近零,模型将变得确定性和重复性。" + "较高的温度会导致更多的随机完成。", }, "required": False, "default": 0.0, @@ -24,7 +27,8 @@ PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = { }, "type": "float", "help": { - "en_US": "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options are considered.", + "en_US": "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options" + " are considered.", "zh_Hans": "通过核心采样控制多样性:0.5表示考虑了一半的所有可能性加权选项。", }, "required": False, @@ -88,7 +92,8 @@ PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = { }, "type": "int", "help": { - "en_US": "Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.", + "en_US": "Specifies the upper limit on the length of generated results." 
+ " If the generated results are truncated, you can increase this parameter.", "zh_Hans": "指定生成结果长度的上限。如果生成结果截断,可以调大该参数。", }, "required": False, @@ -104,7 +109,8 @@ PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = { }, "type": "string", "help": { - "en_US": "Set a response format, ensure the output from llm is a valid code block as possible, such as JSON, XML, etc.", + "en_US": "Set a response format, ensure the output from llm is a valid code block as possible," + " such as JSON, XML, etc.", "zh_Hans": "设置一个返回格式,确保llm的输出尽可能是有效的代码块,如JSON、XML等", }, "required": False, diff --git a/api/core/model_runtime/model_providers/__base/ai_model.py b/api/core/model_runtime/model_providers/__base/ai_model.py index 09d2d7e54d..e7e343f00d 100644 --- a/api/core/model_runtime/model_providers/__base/ai_model.py +++ b/api/core/model_runtime/model_providers/__base/ai_model.py @@ -72,7 +72,9 @@ class AIModel(ABC): if isinstance(error, tuple(model_errors)): if invoke_error == InvokeAuthorizationError: return invoke_error( - description=f"[{provider_name}] Incorrect model credentials provided, please check and try again. " + description=( + f"[{provider_name}] Incorrect model credentials provided, please check and try again." + ) ) return invoke_error(description=f"[{provider_name}] {invoke_error.description}, {str(error)}") diff --git a/api/core/model_runtime/model_providers/__base/large_language_model.py b/api/core/model_runtime/model_providers/__base/large_language_model.py index 5c39186e65..e8789ec7df 100644 --- a/api/core/model_runtime/model_providers/__base/large_language_model.py +++ b/api/core/model_runtime/model_providers/__base/large_language_model.py @@ -187,7 +187,7 @@ if you are not sure about the structure. {{instructions}} -""" +""" # noqa: E501 code_block = model_parameters.get("response_format", "") if not code_block: @@ -830,7 +830,8 @@ if you are not sure about the structure. else: if parameter_value != round(parameter_value, parameter_rule.precision): raise ValueError( - f"Model Parameter {parameter_name} should be round to {parameter_rule.precision} decimal places." + f"Model Parameter {parameter_name} should be round to {parameter_rule.precision}" + f" decimal places." ) # validate parameter value range diff --git a/api/core/model_runtime/model_providers/anthropic/llm/llm.py b/api/core/model_runtime/model_providers/anthropic/llm/llm.py index 30e9d2e9f2..0cb66842e7 100644 --- a/api/core/model_runtime/model_providers/anthropic/llm/llm.py +++ b/api/core/model_runtime/model_providers/anthropic/llm/llm.py @@ -51,7 +51,7 @@ if you are not sure about the structure. {{instructions}} -""" +""" # noqa: E501 class AnthropicLargeLanguageModel(LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/azure_openai/_constant.py b/api/core/model_runtime/model_providers/azure_openai/_constant.py index c2744691c3..0dada70cc5 100644 --- a/api/core/model_runtime/model_providers/azure_openai/_constant.py +++ b/api/core/model_runtime/model_providers/azure_openai/_constant.py @@ -16,6 +16,15 @@ from core.model_runtime.entities.model_entities import ( AZURE_OPENAI_API_VERSION = "2024-02-15-preview" +AZURE_DEFAULT_PARAM_SEED_HELP = I18nObject( + zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性," + "您应该参考 system_fingerprint 响应参数来监视变化。", + en_US="If specified, model will make a best effort to sample deterministically," + " such that repeated requests with the same seed and parameters should return the same result." 
+ " Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter" + " to monitor changes in the backend.", +) + def _get_max_tokens(default: int, min_val: int, max_val: int) -> ParameterRule: rule = ParameterRule( @@ -229,10 +238,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -297,10 +303,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -365,10 +368,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -433,10 +433,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -502,10 +499,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. 
Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -571,10 +565,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -650,10 +641,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -719,10 +707,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -788,10 +773,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -867,10 +849,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. 
Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -936,10 +915,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, @@ -1000,10 +976,7 @@ LLM_BASE_MODELS = [ name="seed", label=I18nObject(zh_Hans="种子", en_US="Seed"), type="int", - help=I18nObject( - zh_Hans="如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint 响应参数来监视变化。", - en_US="If specified, model will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.", - ), + help=AZURE_DEFAULT_PARAM_SEED_HELP, required=False, precision=2, min=0, diff --git a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py b/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py index bea6777f83..a7ca28d49d 100644 --- a/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py +++ b/api/core/model_runtime/model_providers/baichuan/llm/baichuan_tokenizer.py @@ -15,6 +15,7 @@ class BaichuanTokenizer: @classmethod def _get_num_tokens(cls, text: str) -> int: - # tokens = number of Chinese characters + number of English words * 1.3 (for estimation only, subject to actual return) + # tokens = number of Chinese characters + number of English words * 1.3 + # (for estimation only, subject to actual return) # https://platform.baichuan-ai.com/docs/text-Embedding return int(cls.count_chinese_characters(text) + cls.count_english_vocabularies(text) * 1.3) diff --git a/api/core/model_runtime/model_providers/bedrock/llm/llm.py b/api/core/model_runtime/model_providers/bedrock/llm/llm.py index e07f2a419a..239ae52b4c 100644 --- a/api/core/model_runtime/model_providers/bedrock/llm/llm.py +++ b/api/core/model_runtime/model_providers/bedrock/llm/llm.py @@ -52,7 +52,7 @@ if you are not sure about the structure. {{instructions}} -""" +""" # noqa: E501 class BedrockLargeLanguageModel(LargeLanguageModel): @@ -541,7 +541,9 @@ class BedrockLargeLanguageModel(LargeLanguageModel): "max_tokens": 32, } elif "ai21" in model: - # ValidationException: Malformed input request: #/temperature: expected type: Number, found: Null#/maxTokens: expected type: Integer, found: Null#/topP: expected type: Number, found: Null, please reformat your input and try again. + # ValidationException: Malformed input request: #/temperature: expected type: Number, + # found: Null#/maxTokens: expected type: Integer, found: Null#/topP: expected type: Number, found: Null, + # please reformat your input and try again. 
required_params = { "temperature": 0.7, "topP": 0.9, diff --git a/api/core/model_runtime/model_providers/google/llm/llm.py b/api/core/model_runtime/model_providers/google/llm/llm.py index 307c15e1fd..b10d0edba3 100644 --- a/api/core/model_runtime/model_providers/google/llm/llm.py +++ b/api/core/model_runtime/model_providers/google/llm/llm.py @@ -45,7 +45,7 @@ if you are not sure about the structure. {{instructions}} -""" +""" # noqa: E501 class GoogleLargeLanguageModel(LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py b/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py index 56c51e8888..288637495f 100644 --- a/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py +++ b/api/core/model_runtime/model_providers/huggingface_tei/tei_helper.py @@ -54,7 +54,8 @@ class TeiHelper: url = str(URL(server_url) / "info") - # this method is surrounded by a lock, and default requests may hang forever, so we just set a Adapter with max_retries=3 + # this method is surrounded by a lock, and default requests may hang forever, + # so we just set an Adapter with max_retries=3 session = Session() session.mount("http://", HTTPAdapter(max_retries=3)) session.mount("https://", HTTPAdapter(max_retries=3)) diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/llm.py b/api/core/model_runtime/model_providers/hunyuan/llm/llm.py index c056ab7a08..b57e5e1c2b 100644 --- a/api/core/model_runtime/model_providers/hunyuan/llm/llm.py +++ b/api/core/model_runtime/model_providers/hunyuan/llm/llm.py @@ -131,7 +131,8 @@ class HunyuanLargeLanguageModel(LargeLanguageModel): { "Role": message.role.value, # fix set content = "" while tool_call request - # fix [hunyuan] None, [TencentCloudSDKException] code:InvalidParameter message:Messages Content and Contents not allowed empty at the same time. + # fix [hunyuan] None, [TencentCloudSDKException] code:InvalidParameter + # message:Messages Content and Contents not allowed empty at the same time. "Content": " ", # message.content if (message.content is not None) else "", "ToolCalls": dict_tool_calls, } diff --git a/api/core/model_runtime/model_providers/nvidia/llm/llm.py b/api/core/model_runtime/model_providers/nvidia/llm/llm.py index 4d3747dc84..1c98c6be6c 100644 --- a/api/core/model_runtime/model_providers/nvidia/llm/llm.py +++ b/api/core/model_runtime/model_providers/nvidia/llm/llm.py @@ -93,7 +93,8 @@ class NVIDIALargeLanguageModel(OAIAPICompatLargeLanguageModel): def _validate_credentials(self, model: str, credentials: dict) -> None: """ - Validate model credentials using requests to ensure compatibility with all providers following OpenAI's API standard. + Validate model credentials using requests to ensure compatibility with all providers following + OpenAI's API standard.
:param model: model name :param credentials: model credentials diff --git a/api/core/model_runtime/model_providers/oci/llm/llm.py b/api/core/model_runtime/model_providers/oci/llm/llm.py index 51b634c6cf..1e1fc5b3ea 100644 --- a/api/core/model_runtime/model_providers/oci/llm/llm.py +++ b/api/core/model_runtime/model_providers/oci/llm/llm.py @@ -239,7 +239,8 @@ class OCILargeLanguageModel(LargeLanguageModel): config_items = oci_config_content.split("/") if len(config_items) != 5: raise CredentialsValidateFailedError( - "oci_config_content should be base64.b64encode('user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" + "oci_config_content should be base64.b64encode(" + "'user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" ) oci_config["user"] = config_items[0] oci_config["fingerprint"] = config_items[1] diff --git a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py b/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py index df77db47d9..80ad2be9f5 100644 --- a/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py +++ b/api/core/model_runtime/model_providers/oci/text_embedding/text_embedding.py @@ -146,7 +146,8 @@ class OCITextEmbeddingModel(TextEmbeddingModel): config_items = oci_config_content.split("/") if len(config_items) != 5: raise CredentialsValidateFailedError( - "oci_config_content should be base64.b64encode('user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" + "oci_config_content should be base64.b64encode(" + "'user_ocid/fingerprint/tenancy_ocid/region/compartment_ocid'.encode('utf-8'))" ) oci_config["user"] = config_items[0] oci_config["fingerprint"] = config_items[1] diff --git a/api/core/model_runtime/model_providers/ollama/llm/llm.py b/api/core/model_runtime/model_providers/ollama/llm/llm.py index 160eea0148..3f32f454e4 100644 --- a/api/core/model_runtime/model_providers/ollama/llm/llm.py +++ b/api/core/model_runtime/model_providers/ollama/llm/llm.py @@ -639,9 +639,10 @@ class OllamaLargeLanguageModel(LargeLanguageModel): type=ParameterType.STRING, help=I18nObject( en_US="Sets how long the model is kept in memory after generating a response. " - "This must be a duration string with a unit (e.g., '10m' for 10 minutes or '24h' for 24 hours). " - "A negative number keeps the model loaded indefinitely, and '0' unloads the model immediately after generating a response. " - "Valid time units are 's','m','h'. (Default: 5m)" + "This must be a duration string with a unit (e.g., '10m' for 10 minutes or '24h' for 24 hours)." + " A negative number keeps the model loaded indefinitely, and '0' unloads the model" + " immediately after generating a response." + " Valid time units are 's','m','h'. (Default: 5m)" ), ), ParameterRule( diff --git a/api/core/model_runtime/model_providers/openai/llm/llm.py b/api/core/model_runtime/model_providers/openai/llm/llm.py index 5950b77a96..578687b5d3 100644 --- a/api/core/model_runtime/model_providers/openai/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai/llm/llm.py @@ -37,7 +37,7 @@ if you are not sure about the structure. 
{{instructions}} -""" +""" # noqa: E501 class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py index 24317b488c..41ca163a92 100644 --- a/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py +++ b/api/core/model_runtime/model_providers/openai_api_compatible/llm/llm.py @@ -103,7 +103,8 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel): def validate_credentials(self, model: str, credentials: dict) -> None: """ - Validate model credentials using requests to ensure compatibility with all providers following OpenAI's API standard. + Validate model credentials using requests to ensure compatibility with all providers following + OpenAI's API standard. :param model: model name :param credentials: model credentials @@ -262,7 +263,8 @@ class OAIAPICompatLargeLanguageModel(_CommonOaiApiCompat, LargeLanguageModel): return entity - # validate_credentials method has been rewritten to use the requests library for compatibility with all providers following OpenAI's API standard. + # validate_credentials method has been rewritten to use the requests library for compatibility with all providers + # following OpenAI's API standard. def _generate( self, model: str, diff --git a/api/core/model_runtime/model_providers/spark/llm/_client.py b/api/core/model_runtime/model_providers/spark/llm/_client.py index 25223e8340..b99a657e71 100644 --- a/api/core/model_runtime/model_providers/spark/llm/_client.py +++ b/api/core/model_runtime/model_providers/spark/llm/_client.py @@ -61,7 +61,10 @@ class SparkLLMClient: signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding="utf-8") - authorization_origin = f'api_key="{api_key}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"' + authorization_origin = ( + f'api_key="{api_key}", algorithm="hmac-sha256", headers="host date request-line",' + f' signature="{signature_sha_base64}"' + ) authorization = base64.b64encode(authorization_origin.encode("utf-8")).decode(encoding="utf-8") diff --git a/api/core/model_runtime/model_providers/upstage/llm/llm.py b/api/core/model_runtime/model_providers/upstage/llm/llm.py index 1014b53f39..9646e209b2 100644 --- a/api/core/model_runtime/model_providers/upstage/llm/llm.py +++ b/api/core/model_runtime/model_providers/upstage/llm/llm.py @@ -34,7 +34,7 @@ if you are not sure about the structure. 
{{instructions}} -""" +""" # noqa: E501 class UpstageLargeLanguageModel(_CommonUpstage, LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py index 110028a288..1b9931d2c3 100644 --- a/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py +++ b/api/core/model_runtime/model_providers/vertex_ai/llm/llm.py @@ -114,7 +114,8 @@ class VertexAiLargeLanguageModel(LargeLanguageModel): credentials.refresh(request) token = credentials.token - # Vertex AI Anthropic Claude3 Opus model available in us-east5 region, Sonnet and Haiku available in us-central1 region + # Vertex AI Anthropic Claude3 Opus model available in us-east5 region, Sonnet and Haiku available + # in us-central1 region if "opus" in model or "claude-3-5-sonnet" in model: location = "us-east5" else: @@ -123,7 +124,8 @@ class VertexAiLargeLanguageModel(LargeLanguageModel): # use access token to authenticate if token: client = AnthropicVertex(region=location, project_id=project_id, access_token=token) - # When access token is empty, try to use the Google Cloud VM's built-in service account or the GOOGLE_APPLICATION_CREDENTIALS environment variable + # When access token is empty, try to use the Google Cloud VM's built-in service account + # or the GOOGLE_APPLICATION_CREDENTIALS environment variable else: client = AnthropicVertex( region=location, diff --git a/api/core/model_runtime/model_providers/wenxin/llm/llm.py b/api/core/model_runtime/model_providers/wenxin/llm/llm.py index 1ff0ac7ad2..8feedbfe55 100644 --- a/api/core/model_runtime/model_providers/wenxin/llm/llm.py +++ b/api/core/model_runtime/model_providers/wenxin/llm/llm.py @@ -28,7 +28,7 @@ if you are not sure about the structure. You should also complete the text started with ``` but not tell ``` directly. -""" +""" # noqa: E501 class ErnieBotLargeLanguageModel(LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/xinference/llm/llm.py b/api/core/model_runtime/model_providers/xinference/llm/llm.py index bc7531ee20..7ad236880b 100644 --- a/api/core/model_runtime/model_providers/xinference/llm/llm.py +++ b/api/core/model_runtime/model_providers/xinference/llm/llm.py @@ -130,7 +130,8 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): credentials["completion_type"] = "completion" else: raise ValueError( - f"xinference model ability {extra_param.model_ability} is not supported, check if you have the right model type" + f"xinference model ability {extra_param.model_ability} is not supported," + f" check if you have the right model type" ) if extra_param.support_function_call: @@ -358,7 +359,8 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): help=I18nObject( en_US="Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they " "appear in the text so far, increasing the model's likelihood to talk about new topics.", - zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词是否已出现在文本中对其进行惩罚,从而增加模型谈论新话题的可能性。", + zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词是否已出现在文本中对其进行惩罚," + "从而增加模型谈论新话题的可能性。", ), default=0.0, min=-2.0, @@ -378,7 +380,8 @@ class XinferenceAILargeLanguageModel(LargeLanguageModel): en_US="Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their " "existing frequency in the text so far, decreasing the model's likelihood to repeat the " "same line verbatim.", - zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词在文本中的现有频率对其进行惩罚,从而降低模型逐字重复相同内容的可能性。", + zh_Hans="介于 -2.0 和 2.0 之间的数字。正值会根据新词在文本中的现有频率对其进行惩罚," "从而降低模型逐字重复相同内容的可能性。", ), default=0.0, min=-2.0, diff --git a/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py b/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py index 54c8b51654..18efde758c 100644 --- a/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py +++ b/api/core/model_runtime/model_providers/xinference/speech2text/speech2text.py @@ -101,12 +101,16 @@ class XinferenceSpeech2TextModel(Speech2TextModel): :param model: model name :param credentials: model credentials - :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpe g,mpga, m4a, ogg, wav, or webm. + :param file: The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, + mpga, m4a, ogg, wav, or webm. :param language: The language of the input audio. Supplying the input language in ISO-639-1 :param prompt: An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language. - :param response_format: The format of the transcript output, in one of these options: json, text, srt, verbose _json, or vtt. - :param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output mor e random,while lower values like 0.2 will make it more focused and deterministic.If set to 0, the model wi ll use log probability to automatically increase the temperature until certain thresholds are hit. + :param response_format: The format of the transcript output, in one of these options: json, text, srt, + verbose_json, or vtt. + :param temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more + random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use + log probability to automatically increase the temperature until certain thresholds are hit.
:return: text for given audio file """ server_url = credentials["server_url"] diff --git a/api/core/model_runtime/model_providers/xinference/xinference_helper.py b/api/core/model_runtime/model_providers/xinference/xinference_helper.py index 6ad10e690d..1e05da9c56 100644 --- a/api/core/model_runtime/model_providers/xinference/xinference_helper.py +++ b/api/core/model_runtime/model_providers/xinference/xinference_helper.py @@ -76,7 +76,8 @@ class XinferenceHelper: url = str(URL(server_url) / "v1" / "models" / model_uid) - # this method is surrounded by a lock, and default requests may hang forever, so we just set a Adapter with max_retries=3 + # this method is surrounded by a lock, and default requests may hang forever, + # so we just set an Adapter with max_retries=3 session = Session() session.mount("http://", HTTPAdapter(max_retries=3)) session.mount("https://", HTTPAdapter(max_retries=3)) @@ -88,7 +89,8 @@ class XinferenceHelper: raise RuntimeError(f"get xinference model extra parameter failed, url: {url}, error: {e}") if response.status_code != 200: raise RuntimeError( - f"get xinference model extra parameter failed, status code: {response.status_code}, response: {response.text}" + f"get xinference model extra parameter failed, status code: {response.status_code}," + f" response: {response.text}" ) response_json = response.json() diff --git a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py index 498962bd0f..29b873fd06 100644 --- a/api/core/model_runtime/model_providers/zhipuai/llm/llm.py +++ b/api/core/model_runtime/model_providers/zhipuai/llm/llm.py @@ -31,7 +31,7 @@ And you should always end the block with a "```" to indicate the end of the JSON {{instructions}} -```JSON""" +```JSON""" # noqa: E501 class ZhipuAILargeLanguageModel(_CommonZhipuaiAI, LargeLanguageModel): diff --git a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py index b7cf6bb7fd..7a91f9b796 100644 --- a/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py +++ b/api/core/model_runtime/model_providers/zhipuai/zhipuai_sdk/core/_base_type.py @@ -75,7 +75,8 @@ Headers = Mapping[str, Union[str, Omit]] ResponseT = TypeVar( "ResponseT", - bound="Union[str, None, BaseModel, list[Any], Dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol, BinaryResponseContent]", + bound="Union[str, None, BaseModel, list[Any], Dict[str, Any], Response, UnknownResponse, ModelBuilderProtocol," + " BinaryResponseContent]", ) # for user input files diff --git a/api/core/model_runtime/schema_validators/common_validator.py b/api/core/model_runtime/schema_validators/common_validator.py index e4f3541475..c05edb72e3 100644 --- a/api/core/model_runtime/schema_validators/common_validator.py +++ b/api/core/model_runtime/schema_validators/common_validator.py @@ -67,7 +67,8 @@ class CommonValidator: if credential_form_schema.max_length: if len(value) > credential_form_schema.max_length: raise ValueError( - f"Variable {credential_form_schema.variable} length should not greater than {credential_form_schema.max_length}" + f"Variable {credential_form_schema.variable} length should not be" + f" greater than {credential_form_schema.max_length}" ) # check the type of value diff --git a/api/core/prompt/prompt_templates/advanced_prompt_templates.py b/api/core/prompt/prompt_templates/advanced_prompt_templates.py index e4b3a61cb4..0ab7f526cc 100644 ---
a/api/core/prompt/prompt_templates/advanced_prompt_templates.py +++ b/api/core/prompt/prompt_templates/advanced_prompt_templates.py @@ -1,11 +1,11 @@ -CONTEXT = "Use the following context as your learned knowledge, inside <context></context> XML tags.\n\n<context>\n{{#context#}}\n</context>\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n" +CONTEXT = "Use the following context as your learned knowledge, inside <context></context> XML tags.\n\n<context>\n{{#context#}}\n</context>\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification.\nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n" # noqa: E501 -BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{#context#}}\n```\n\n" +BAICHUAN_CONTEXT = "用户在与一个客观的助手对话。助手会尊重找到的材料,给出全面专业的解释,但不会过度演绎。同时回答中不会暴露引用的材料:\n\n```\n{{#context#}}\n```\n\n" # noqa: E501 CHAT_APP_COMPLETION_PROMPT_CONFIG = { "completion_prompt_config": { "prompt": { - "text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: " + "text": "{{#pre_prompt#}}\nHere is the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>\n{{#histories#}}\n</histories>\n\n\nHuman: {{#query#}}\n\nAssistant: " # noqa: E501 }, "conversation_histories_role": {"user_prefix": "Human", "assistant_prefix": "Assistant"}, }, @@ -24,7 +24,7 @@ COMPLETION_APP_COMPLETION_PROMPT_CONFIG = { BAICHUAN_CHAT_APP_COMPLETION_PROMPT_CONFIG = { "completion_prompt_config": { "prompt": { - "text": "{{#pre_prompt#}}\n\n用户和助手的历史对话内容如下:\n```\n{{#histories#}}\n```\n\n\n\n用户:{{#query#}}" + "text": "{{#pre_prompt#}}\n\n用户和助手的历史对话内容如下:\n```\n{{#histories#}}\n```\n\n\n\n用户:{{#query#}}" # noqa: E501 }, "conversation_histories_role": {"user_prefix": "用户", "assistant_prefix": "助手"}, }, diff --git a/api/core/rag/datasource/vdb/oracle/oraclevector.py b/api/core/rag/datasource/vdb/oracle/oraclevector.py index 06c20ceb5f..d223b0decf 100644 --- a/api/core/rag/datasource/vdb/oracle/oraclevector.py +++ b/api/core/rag/datasource/vdb/oracle/oraclevector.py @@ -195,7 +195,8 @@ class OracleVector(BaseVector): top_k = kwargs.get("top_k", 5) with self._get_cursor() as cur: cur.execute( - f"SELECT meta, text, vector_distance(embedding,:1) AS distance FROM {self.table_name} ORDER BY distance fetch first {top_k} rows only", + f"SELECT meta, text, vector_distance(embedding,:1) AS distance FROM {self.table_name}" + f" ORDER BY distance fetch first {top_k} rows only", [numpy.array(query_vector)], ) docs = [] @@ -254,7 +255,8 @@ class OracleVector(BaseVector): entities.append(token) with self._get_cursor() as cur: cur.execute( - f"select meta, text, embedding FROM {self.table_name} WHERE CONTAINS(text, :1, 1) > 0 order by score(1) desc fetch first {top_k} rows only", + f"select meta, text, embedding FROM {self.table_name}" + f" WHERE CONTAINS(text, :1, 1) > 0 order by score(1) desc fetch first {top_k} rows only", [" ACCUM ".join(entities)], ) docs = [] diff --git a/api/core/rag/datasource/vdb/pgvector/pgvector.py b/api/core/rag/datasource/vdb/pgvector/pgvector.py index 38dfd24b56..d2d9e5238b 100644 --- a/api/core/rag/datasource/vdb/pgvector/pgvector.py +++ b/api/core/rag/datasource/vdb/pgvector/pgvector.py
@@ -139,7 +139,8 @@ class PGVector(BaseVector): with self._get_cursor() as cur: cur.execute( - f"SELECT meta, text, embedding <=> %s AS distance FROM {self.table_name} ORDER BY distance LIMIT {top_k}", + f"SELECT meta, text, embedding <=> %s AS distance FROM {self.table_name}" + f" ORDER BY distance LIMIT {top_k}", (json.dumps(query_vector),), ) docs = [] diff --git a/api/core/rag/extractor/extract_processor.py b/api/core/rag/extractor/extract_processor.py index a00b3cba53..244ef9614a 100644 --- a/api/core/rag/extractor/extract_processor.py +++ b/api/core/rag/extractor/extract_processor.py @@ -30,7 +30,10 @@ from extensions.ext_storage import storage from models.model import UploadFile SUPPORT_URL_CONTENT_TYPES = ["application/pdf", "text/plain", "application/json"] -USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" +USER_AGENT = ( + "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124" + " Safari/537.36" +) class ExtractProcessor: diff --git a/api/core/rag/retrieval/router/multi_dataset_react_route.py b/api/core/rag/retrieval/router/multi_dataset_react_route.py index 33841cac06..a0494adc60 100644 --- a/api/core/rag/retrieval/router/multi_dataset_react_route.py +++ b/api/core/rag/retrieval/router/multi_dataset_react_route.py @@ -14,7 +14,7 @@ from core.workflow.nodes.llm.llm_node import LLMNode PREFIX = """Respond to the human as helpfully and accurately as possible. You have access to the following tools:""" SUFFIX = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. -Thought:""" +Thought:""" # noqa: E501 FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). The nouns in the format of "Thought", "Action", "Action Input", "Final Answer" must be expressed in English. @@ -46,7 +46,7 @@ Action: "action": "Final Answer", "action_input": "Final response to human" }} -```""" +```""" # noqa: E501 class ReactMultiDatasetRouter: @@ -204,7 +204,8 @@ class ReactMultiDatasetRouter: tool_strings = [] for tool in tools: tool_strings.append( - f"{tool.name}: {tool.description}, args: {{'query': {{'title': 'Query', 'description': 'Query for the dataset to be used to retrieve the dataset.', 'type': 'string'}}}}" + f"{tool.name}: {tool.description}, args: {{'query': {{'title': 'Query'," + f" 'description': 'Query for the dataset to be used to retrieve the dataset.', 'type': 'string'}}}}" ) formatted_tools = "\n".join(tool_strings) unique_tool_names = {tool.name for tool in tools} @@ -236,7 +237,7 @@ class ReactMultiDatasetRouter: suffix = """Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:```$JSON_BLOB```then Observation:. 
Question: {input} Thought: {agent_scratchpad} -""" +""" # noqa: E501 tool_strings = "\n".join([f"{tool.name}: {tool.description}" for tool in tools]) tool_names = ", ".join([tool.name for tool in tools]) diff --git a/api/core/tools/entities/tool_bundle.py b/api/core/tools/entities/tool_bundle.py index da6201c5aa..0c15b2a371 100644 --- a/api/core/tools/entities/tool_bundle.py +++ b/api/core/tools/entities/tool_bundle.py @@ -7,7 +7,8 @@ from core.tools.entities.tool_entities import ToolParameter class ApiToolBundle(BaseModel): """ - This class is used to store the schema information of an api based tool. such as the url, the method, the parameters, etc. + This class is used to store the schema information of an api based tool. + such as the url, the method, the parameters, etc. """ # server_url diff --git a/api/core/tools/entities/values.py b/api/core/tools/entities/values.py index f9db190f91..f460df7e25 100644 --- a/api/core/tools/entities/values.py +++ b/api/core/tools/entities/values.py @@ -4,52 +4,52 @@ from core.tools.entities.tool_entities import ToolLabel, ToolLabelEnum ICONS = { ToolLabelEnum.SEARCH: """ -""", +""", # noqa: E501 ToolLabelEnum.IMAGE: """ -""", +""", # noqa: E501 ToolLabelEnum.VIDEOS: """ -""", +""", # noqa: E501 ToolLabelEnum.WEATHER: """ -""", +""", # noqa: E501 ToolLabelEnum.FINANCE: """ -""", +""", # noqa: E501 ToolLabelEnum.DESIGN: """ -""", +""", # noqa: E501 ToolLabelEnum.TRAVEL: """ -""", +""", # noqa: E501 ToolLabelEnum.SOCIAL: """ -""", +""", # noqa: E501 ToolLabelEnum.NEWS: """ -""", +""", # noqa: E501 ToolLabelEnum.MEDICAL: """ -""", +""", # noqa: E501 ToolLabelEnum.PRODUCTIVITY: """ -""", +""", # noqa: E501 ToolLabelEnum.EDUCATION: """ -""", +""", # noqa: E501 ToolLabelEnum.BUSINESS: """ -""", +""", # noqa: E501 ToolLabelEnum.ENTERTAINMENT: """ -""", +""", # noqa: E501 ToolLabelEnum.UTILITIES: """ -""", +""", # noqa: E501 ToolLabelEnum.OTHER: """ -""", +""", # noqa: E501 } default_tool_label_dict = { diff --git a/api/core/tools/provider/builtin/aippt/tools/aippt.py b/api/core/tools/provider/builtin/aippt/tools/aippt.py index 7cee8f9f79..a2d69fbcd1 100644 --- a/api/core/tools/provider/builtin/aippt/tools/aippt.py +++ b/api/core/tools/provider/builtin/aippt/tools/aippt.py @@ -46,7 +46,8 @@ class AIPPTGenerateTool(BuiltinTool): tool_parameters (dict[str, Any]): The parameters for the tool Returns: - ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages. + ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, + which can be a single message or a list of messages. """ title = tool_parameters.get("title", "") if not title: diff --git a/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py b/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py index 98d82c233e..2d65ba2d6f 100644 --- a/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py +++ b/api/core/tools/provider/builtin/arxiv/tools/arxiv_search.py @@ -104,7 +104,8 @@ class ArxivSearchTool(BuiltinTool): tool_parameters (dict[str, Any]): The parameters for the tool, including the 'query' parameter. Returns: - ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages. + ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, + which can be a single message or a list of messages. 
""" query = tool_parameters.get("query", "") diff --git a/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py b/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py index d6a65b1708..a04f5c0fe9 100644 --- a/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py +++ b/api/core/tools/provider/builtin/aws/tools/apply_guardrail.py @@ -62,7 +62,8 @@ class ApplyGuardrailTool(BuiltinTool): if isinstance(policy_data, dict) and "topics" in policy_data: for topic in policy_data["topics"]: formatted_assessments.append( - f"Policy: {policy_type}, Topic: {topic['name']}, Type: {topic['type']}, Action: {topic['action']}" + f"Policy: {policy_type}, Topic: {topic['name']}, Type: {topic['type']}," + f" Action: {topic['action']}" ) else: formatted_assessments.append(f"Policy: {policy_type}, Data: {policy_data}") diff --git a/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py b/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py index e1effd066c..57cf6d7a30 100644 --- a/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py +++ b/api/core/tools/provider/builtin/devdocs/tools/searchDevDocs.py @@ -24,7 +24,8 @@ class SearchDevDocsTool(BuiltinTool): tool_parameters (dict[str, Any]): The parameters for the tool, including 'doc' and 'topic'. Returns: - ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, which can be a single message or a list of messages. + ToolInvokeMessage | list[ToolInvokeMessage]: The result of the tool invocation, + which can be a single message or a list of messages. """ doc = tool_parameters.get("doc", "") topic = tool_parameters.get("topic", "") diff --git a/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py b/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py index 7606eee7af..1e77f3c6df 100644 --- a/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py +++ b/api/core/tools/provider/builtin/gitlab/tools/gitlab_files.py @@ -70,7 +70,10 @@ class GitlabFilesTool(BuiltinTool): ) else: # It's a file if is_repository: - file_url = f"{domain}/api/v4/projects/{encoded_identifier}/repository/files/{item_path}/raw?ref={branch}" + file_url = ( + f"{domain}/api/v4/projects/{encoded_identifier}/repository/files" + f"/{item_path}/raw?ref={branch}" + ) else: file_url = ( f"{domain}/api/v4/projects/{project_id}/repository/files/{item_path}/raw?ref={branch}" diff --git a/api/core/tools/provider/builtin/google_translate/tools/translate.py b/api/core/tools/provider/builtin/google_translate/tools/translate.py index 5d57b5fabf..ea3f2077d5 100644 --- a/api/core/tools/provider/builtin/google_translate/tools/translate.py +++ b/api/core/tools/provider/builtin/google_translate/tools/translate.py @@ -35,7 +35,8 @@ class GoogleTranslate(BuiltinTool): params = {"client": "gtx", "sl": "auto", "tl": dest, "dt": "t", "q": content} headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)" + " Chrome/91.0.4472.124 Safari/537.36" } response_json = requests.get(url, params=params, headers=headers).json() diff --git a/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py b/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py index 69cf8aa740..9e0918afa9 100644 --- a/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py +++ b/api/core/tools/provider/builtin/hap/tools/get_worksheet_fields.py @@ 
-114,7 +114,8 @@ class GetWorksheetFieldsTool(BuiltinTool): } fields.append(field) fields_list.append( - f"|{field['id']}|{field['name']}|{field['type']}|{field['typeId']}|{field['description']}|{field['options'] if field['options'] else ''}|" + f"|{field['id']}|{field['name']}|{field['type']}|{field['typeId']}|{field['description']}" + f"|{field['options'] if field['options'] else ''}|" ) fields.append( diff --git a/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py b/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py index 71f8356ab8..5888f7443f 100644 --- a/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py +++ b/api/core/tools/provider/builtin/hap/tools/list_worksheet_records.py @@ -112,7 +112,10 @@ class ListWorksheetRecordsTool(BuiltinTool): else: result_text = f"Found {result['total']} rows in worksheet \"{worksheet_name}\"." if result["total"] > 0: - result_text += f" The following are {result['total'] if result['total'] < limit else limit} pieces of data presented in a table format:\n\n{table_header}" + result_text += ( + f" The following are {result['total'] if result['total'] < limit else limit}" + f" pieces of data presented in a table format:\n\n{table_header}" + ) for row in rows: result_values = [] for f in fields: diff --git a/api/core/tools/provider/builtin/searchapi/tools/google.py b/api/core/tools/provider/builtin/searchapi/tools/google.py index 6d88d74635..16ae14549d 100644 --- a/api/core/tools/provider/builtin/searchapi/tools/google.py +++ b/api/core/tools/provider/builtin/searchapi/tools/google.py @@ -64,7 +64,10 @@ class SearchAPI: elif type == "link": if "answer_box" in res and "organic_result" in res["answer_box"]: if "title" in res["answer_box"]["organic_result"]: - toret = f"[{res['answer_box']['organic_result']['title']}]({res['answer_box']['organic_result']['link']})\n" + toret = ( + f"[{res['answer_box']['organic_result']['title']}]" + f"({res['answer_box']['organic_result']['link']})\n" + ) elif "organic_results" in res and "link" in res["organic_results"][0]: toret = "" for item in res["organic_results"]: diff --git a/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py b/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py index 344f916494..64fdc961b4 100644 --- a/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py +++ b/api/core/tools/provider/builtin/stablediffusion/tools/stable_diffusion.py @@ -310,7 +310,8 @@ class StableDiffusionTool(BuiltinTool): ), type=ToolParameter.ToolParameterType.STRING, form=ToolParameter.ToolParameterForm.LLM, - llm_description="Image prompt of Stable Diffusion, you should describe the image you want to generate as a list of words as possible as detailed, the prompt must be written in English.", + llm_description="Image prompt of Stable Diffusion, you should describe the image you want to generate" + " as a list of words as possible as detailed, the prompt must be written in English.", required=True, ), ] @@ -320,12 +321,14 @@ class StableDiffusionTool(BuiltinTool): name="image_id", label=I18nObject(en_US="image_id", zh_Hans="image_id"), human_description=I18nObject( - en_US="Image id of the image you want to generate based on, if you want to generate image based on the default image, you can leave this field empty.", + en_US="Image id of the image you want to generate based on, if you want to generate image based" + " on the default image, you can leave this field empty.", zh_Hans="您想要生成的图像的图像 
ID,如果您想要基于默认图像生成图像,则可以将此字段留空。", ), type=ToolParameter.ToolParameterType.STRING, form=ToolParameter.ToolParameterForm.LLM, - llm_description="Image id of the original image, you can leave this field empty if you want to generate a new image.", + llm_description="Image id of the original image, you can leave this field empty if you want to" + " generate a new image.", required=True, options=[ ToolParameterOption(value=i.name, label=I18nObject(en_US=i.name, zh_Hans=i.name)) @@ -343,12 +346,14 @@ class StableDiffusionTool(BuiltinTool): name="model", label=I18nObject(en_US="Model", zh_Hans="Model"), human_description=I18nObject( - en_US="Model of Stable Diffusion, you can check the official documentation of Stable Diffusion", + en_US="Model of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", zh_Hans="Stable Diffusion 的模型,您可以查看 Stable Diffusion 的官方文档", ), type=ToolParameter.ToolParameterType.SELECT, form=ToolParameter.ToolParameterForm.FORM, - llm_description="Model of Stable Diffusion, you can check the official documentation of Stable Diffusion", + llm_description="Model of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", required=True, default=models[0], options=[ @@ -367,12 +372,14 @@ class StableDiffusionTool(BuiltinTool): name="sampler_name", label=I18nObject(en_US="Sampling method", zh_Hans="Sampling method"), human_description=I18nObject( - en_US="Sampling method of Stable Diffusion, you can check the official documentation of Stable Diffusion", + en_US="Sampling method of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", zh_Hans="Stable Diffusion 的Sampling method,您可以查看 Stable Diffusion 的官方文档", ), type=ToolParameter.ToolParameterType.SELECT, form=ToolParameter.ToolParameterForm.FORM, - llm_description="Sampling method of Stable Diffusion, you can check the official documentation of Stable Diffusion", + llm_description="Sampling method of Stable Diffusion, you can check the official documentation" + " of Stable Diffusion", required=True, default=sample_methods[0], options=[ diff --git a/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py b/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py index 26f12864c3..b32b0124dd 100644 --- a/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py +++ b/api/core/tools/provider/builtin/trello/tools/create_list_on_board.py @@ -17,7 +17,8 @@ class CreateListOnBoardTool(BuiltinTool): Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID and list name. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID and list name. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py b/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py index dfc013a6b8..e98efb81ca 100644 --- a/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py +++ b/api/core/tools/provider/builtin/trello/tools/create_new_card_on_board.py @@ -17,7 +17,8 @@ class CreateNewCardOnBoardTool(BuiltinTool): Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including details for the new card. 
+ tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, + including details for the new card. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/delete_board.py b/api/core/tools/provider/builtin/trello/tools/delete_board.py index 9dbd8f78d5..7fc9d1f13c 100644 --- a/api/core/tools/provider/builtin/trello/tools/delete_board.py +++ b/api/core/tools/provider/builtin/trello/tools/delete_board.py @@ -17,7 +17,8 @@ class DeleteBoardTool(BuiltinTool): Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/delete_card.py b/api/core/tools/provider/builtin/trello/tools/delete_card.py index 960c3055fe..1de98d639e 100644 --- a/api/core/tools/provider/builtin/trello/tools/delete_card.py +++ b/api/core/tools/provider/builtin/trello/tools/delete_card.py @@ -17,7 +17,8 @@ class DeleteCardByIdTool(BuiltinTool): Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the card ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the card ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_board_actions.py b/api/core/tools/provider/builtin/trello/tools/get_board_actions.py index 03510f1964..cabc7ce093 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_board_actions.py +++ b/api/core/tools/provider/builtin/trello/tools/get_board_actions.py @@ -17,7 +17,8 @@ class GetBoardActionsTool(BuiltinTool): Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py b/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py index 5b41b128d0..fe42cd9c5c 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py +++ b/api/core/tools/provider/builtin/trello/tools/get_board_by_id.py @@ -17,7 +17,8 @@ class GetBoardByIdTool(BuiltinTool): Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_board_cards.py b/api/core/tools/provider/builtin/trello/tools/get_board_cards.py index e3bed2e6e6..ff2b1221e7 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_board_cards.py +++ b/api/core/tools/provider/builtin/trello/tools/get_board_cards.py @@ -17,7 +17,8 @@ class GetBoardCardsTool(BuiltinTool): Args: user_id (str): The ID of the user invoking the tool. 
- tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py b/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py index 4d8854747c..3d7f9f4ad1 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py +++ b/api/core/tools/provider/builtin/trello/tools/get_filterd_board_cards.py @@ -17,7 +17,8 @@ class GetFilteredBoardCardsTool(BuiltinTool): Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID and filter. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID and filter. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py b/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py index ca8aa9c2d5..ccf404068f 100644 --- a/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py +++ b/api/core/tools/provider/builtin/trello/tools/get_lists_on_board.py @@ -17,7 +17,8 @@ class GetListsFromBoardTool(BuiltinTool): Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, including the board ID. + tool_parameters (dict[str, Union[str, int, bool]]): The parameters for the tool invocation, + including the board ID. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/update_board.py b/api/core/tools/provider/builtin/trello/tools/update_board.py index 62681eea6b..1e358b00f4 100644 --- a/api/core/tools/provider/builtin/trello/tools/update_board.py +++ b/api/core/tools/provider/builtin/trello/tools/update_board.py @@ -17,7 +17,8 @@ class UpdateBoardByIdTool(BuiltinTool): Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including board ID and updates. + tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, + including board ID and updates. Returns: ToolInvokeMessage: The result of the tool invocation. diff --git a/api/core/tools/provider/builtin/trello/tools/update_card.py b/api/core/tools/provider/builtin/trello/tools/update_card.py index 26113f1229..d25fcbafaa 100644 --- a/api/core/tools/provider/builtin/trello/tools/update_card.py +++ b/api/core/tools/provider/builtin/trello/tools/update_card.py @@ -17,7 +17,8 @@ class UpdateCardByIdTool(BuiltinTool): Args: user_id (str): The ID of the user invoking the tool. - tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, including the card ID and updates. + tool_parameters (dict[str, Union[str, int, bool, None]]): The parameters for the tool invocation, + including the card ID and updates. Returns: ToolInvokeMessage: The result of the tool invocation. 
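Two techniques recur throughout the hunks above. Long messages are wrapped by splitting one (f-)string into adjacent literals inside parentheses, which Python concatenates automatically, so the runtime value is unchanged; strings that cannot be wrapped sensibly (prompt templates, icons, data URIs) instead carry a per-line # noqa: E501 suppression. A minimal sketch of both, using illustrative names that are not taken from the patch:

# Adjacent string literals inside parentheses are concatenated by the
# compiler, so the wrapped form builds exactly the same string as the
# original single long line.
def failure_message(status_code: int, body: str) -> str:
    return (
        f"request failed, status code: {status_code},"
        f" response: {body}"
    )

assert failure_message(500, "oops") == "request failed, status code: 500, response: oops"

# Where wrapping would hurt readability, the rule is silenced for that
# one line instead of being disabled project-wide:
ICON = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."  # noqa: E501

The pyproject.toml hunk near the end of the patch is what makes this enforceable: E501 is removed from ruff's global ignore list and re-ignored only for migrations/versions/*.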
diff --git a/api/core/tools/provider/builtin/twilio/tools/send_message.py b/api/core/tools/provider/builtin/twilio/tools/send_message.py index 822d0c0ebd..156249bc96 100644 --- a/api/core/tools/provider/builtin/twilio/tools/send_message.py +++ b/api/core/tools/provider/builtin/twilio/tools/send_message.py @@ -72,7 +72,8 @@ class SendMessageTool(BuiltinTool): tool_parameters (Dict[str, Any]): The parameters required for sending the message. Returns: - Union[ToolInvokeMessage, List[ToolInvokeMessage]]: The result of invoking the tool, which includes the status of the message sending operation. + Union[ToolInvokeMessage, List[ToolInvokeMessage]]: The result of invoking the tool, + which includes the status of the message sending operation. """ def _invoke( diff --git a/api/core/tools/provider/builtin/vectorizer/tools/test_data.py b/api/core/tools/provider/builtin/vectorizer/tools/test_data.py index 8e1b097776..8effa9818a 100644 --- a/api/core/tools/provider/builtin/vectorizer/tools/test_data.py +++ b/api/core/tools/provider/builtin/vectorizer/tools/test_data.py @@ -1 +1 @@ -VECTORIZER_ICON_PNG = "iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAACXBIWXMAACxLAAAsSwGlPZapAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAboSURBVHgB7Z09bBxFFMffRoAvcQqbguBUxu4wCUikMCZ0TmQK4NLQJCJOlQIkokgEGhQ7NCFIKEhQuIqNnIaGMxRY2GVwmlggDHS+pIHELmIXMTEULPP3eeXz7e7szO7MvE1ufpKV03nuNn7/mfcxH7tEHo/H42lXgqwG1bGw65+/aTQM6K0gpJdCoi7ypCIMui5s9Qv9R1OVTqrVxoL1jPbpvH4hrIp/rnmj5+YOhTQ++1kwmdZgT9ovRi6EF4Xhv/XGL0Sv6OLXYMu0BokjYOSDcBQfJI8xhKFP/HAlqCW8v5vqubBr8yn6maCexxiIDR376LnWmBBzQZtPEvx+L3mMAleOZKb1/XgM2EOnyWMFZJKt78UEQKpJHisk2TYmgM967JFk2z3kYcULwIwXgBkvADNeAGa8AMw8Qcwc6N55/eAh0cYmGaOzQtR/kOhQX+M6+/c23r+3RlT/i2ipTrSyRqw4F+CwMMbgANHQwG7jRywLw/wqDDNzI79xYPjqa2L262jjtYzaT0QT3xEbsck4MXUakgWOvUx08liy0ZPYEKNhel4Y6AZpgR7/8Tvq1wEQ+sMJN6Nh9kqwy+bWYwAM8elZovNv6xmlU7iLs280RNO9ls51os/h/8eBVQEig8Dt5OXUsNrno2tluZw0cI3qUXKONQHy9sYkVHqnjntLA2LnFTAv1gSA+zBhfIDvkfVO/B4xRgWZn4fbe2WAnGJFAAxn03+I7PtUXdzE90Sjl4ne+6L4d5nCigAyYyHPn7tFdPN30uJwX/qI6jtISkQZFVLdhd9SrtNPTrFSB6QZBAaYntsptpAyfvk+KYOCamVR/XrNtLqepduiFnkh3g4iIw6YLAhlOJmKwB9zaarhApr/MPREjAZVisSU1s/KYsGzhmKXClYEWLm/8xpV7btXhcv5I7lt2vtJFA3q/T07r1HopdG5l5xhxQVdn28YFn8kBJCBOZmiPHio1m5QuJzlu9ntXApgZwSsNYJslvGjtjrfm8Sq4neceFUtz3dZCzwW09Gqo2hreuPN7HZRnNqa1BP1x8lhczVNK+zT0TqkjYAF4e7Okxoo2PZX5K4IrhNpb/P8FTK2S1+TcUq1HpBFmquJYo1qEYU6RVarJE0c2ooL7C5IRwBZ5nJ9joyRtk5hA3YBdHqWzG1gBKgE/bzMaK5LqMIugKrbUDHu59/YWVRBsWhrsYZdANV5HBUXYGNlC9dFBW8LdgH6FQVYUnQvkQgm3NH8YuO7bM4LsWZBfT3qRY9OxRyJgJRz+Ij+FDPEQ1C3GVMiWAVQ7f31u/ncytxi4wdZTbRGgdcHnpYLD/FcwSrAoOKizfKfVAiIF4kBMPK+Opfe1iWsMUB1BJh2BRgBabSNAOiFqkXYbcNFUF9P+u82FGdWTcEmgGrvh0FUppB1kC073muXEaDq/21kIjLxV9tFAC7/n5X6tkUM0PH/dcP+P0v41fvkFBYBVHs/MD0CDmVsOzEdb7JgEYDT/8uq4rpj44NSjwDTc/CyzV1gxbH7Ac4F0PH/S4ZHAOaFZLiY+2nFuQA6/t9kQMTCz1CG66tbWvWS4VwAVf9vugAbel6efqrsYbKBcwFeVNz8ajobyTppw2F84FQAnfl/kwER6wJZcWdBc7e2KZwKoOP/TVakWb0f7md+kVhwOwI0BDCFyq42rt4PSiuAiRGAEXdK4ZQlV+8HTgVwefwHvR7nhbOA0FwBGDgTIM/Z3SLXUj2hOW1wR10eSrs7Ou9eTB3jo/dzuh/gTABdn35c8dhpM3BxOmeTuXs/cDoCdDY4qe7l32pbaZxL1jF+GXo/cLotBcWVTiZU3T7RMn8rHiijW9FgauP4Ef1TLdhHWgacCgAj6tYCqGKjU/DNbqxIkMYZNs7MpxmnLuhmwYJna1dbdzHjY42hDL4/wqkA6HWuDkAngRH0iYVjRkVwnoZO/0gsuLwpkw7OBcAtwlwvfESHxctmfMBSiOG0oStj4HCF7T3+RWARwIU7QK/HbWlqls52mYJtezqMj3v34C5VOveFy8Ll4QoTsJ8Txp0RsW8/Os2im2LCtSC1RIqLw3RldTVplOKkPEYDhMAPqttnune2rzTv5Y+WKdEem2ixkWqZYSeDSUp3qwIYNOrR7cBjcbOORxkvADNeAGa8AMx4AZjxAjATf5Ab0Tp5rJBk2/iD3PAwYo8Vkmyb9CjDGfLYIaCp1rdiAnT8S5PeDVkgoDuVCsWeJxwToHZ163m3Z8hjloDGk54vn5gFbT/5eZw8phifvZz8XPlA9qmRj8JRCumi+OkljzbbrvxM0qPMm9rIqY6FXZubVBUinMbzcP3j
buXA6Mh2kMx07KPJJLfj8Xg8Hg/4H+KfFYb2WM4MAAAAAElFTkSuQmCC" +VECTORIZER_ICON_PNG = "iVBORw0KGgoAAAANSUhEUgAAAGAAAABgCAYAAADimHc4AAAACXBIWXMAACxLAAAsSwGlPZapAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAboSURBVHgB7Z09bBxFFMffRoAvcQqbguBUxu4wCUikMCZ0TmQK4NLQJCJOlQIkokgEGhQ7NCFIKEhQuIqNnIaGMxRY2GVwmlggDHS+pIHELmIXMTEULPP3eeXz7e7szO7MvE1ufpKV03nuNn7/mfcxH7tEHo/H42lXgqwG1bGw65+/aTQM6K0gpJdCoi7ypCIMui5s9Qv9R1OVTqrVxoL1jPbpvH4hrIp/rnmj5+YOhTQ++1kwmdZgT9ovRi6EF4Xhv/XGL0Sv6OLXYMu0BokjYOSDcBQfJI8xhKFP/HAlqCW8v5vqubBr8yn6maCexxiIDR376LnWmBBzQZtPEvx+L3mMAleOZKb1/XgM2EOnyWMFZJKt78UEQKpJHisk2TYmgM967JFk2z3kYcULwIwXgBkvADNeAGa8AMw8Qcwc6N55/eAh0cYmGaOzQtR/kOhQX+M6+/c23r+3RlT/i2ipTrSyRqw4F+CwMMbgANHQwG7jRywLw/wqDDNzI79xYPjqa2L262jjtYzaT0QT3xEbsck4MXUakgWOvUx08liy0ZPYEKNhel4Y6AZpgR7/8Tvq1wEQ+sMJN6Nh9kqwy+bWYwAM8elZovNv6xmlU7iLs280RNO9ls51os/h/8eBVQEig8Dt5OXUsNrno2tluZw0cI3qUXKONQHy9sYkVHqnjntLA2LnFTAv1gSA+zBhfIDvkfVO/B4xRgWZn4fbe2WAnGJFAAxn03+I7PtUXdzE90Sjl4ne+6L4d5nCigAyYyHPn7tFdPN30uJwX/qI6jtISkQZFVLdhd9SrtNPTrFSB6QZBAaYntsptpAyfvk+KYOCamVR/XrNtLqepduiFnkh3g4iIw6YLAhlOJmKwB9zaarhApr/MPREjAZVisSU1s/KYsGzhmKXClYEWLm/8xpV7btXhcv5I7lt2vtJFA3q/T07r1HopdG5l5xhxQVdn28YFn8kBJCBOZmiPHio1m5QuJzlu9ntXApgZwSsNYJslvGjtjrfm8Sq4neceFUtz3dZCzwW09Gqo2hreuPN7HZRnNqa1BP1x8lhczVNK+zT0TqkjYAF4e7Okxoo2PZX5K4IrhNpb/P8FTK2S1+TcUq1HpBFmquJYo1qEYU6RVarJE0c2ooL7C5IRwBZ5nJ9joyRtk5hA3YBdHqWzG1gBKgE/bzMaK5LqMIugKrbUDHu59/YWVRBsWhrsYZdANV5HBUXYGNlC9dFBW8LdgH6FQVYUnQvkQgm3NH8YuO7bM4LsWZBfT3qRY9OxRyJgJRz+Ij+FDPEQ1C3GVMiWAVQ7f31u/ncytxi4wdZTbRGgdcHnpYLD/FcwSrAoOKizfKfVAiIF4kBMPK+Opfe1iWsMUB1BJh2BRgBabSNAOiFqkXYbcNFUF9P+u82FGdWTcEmgGrvh0FUppB1kC073muXEaDq/21kIjLxV9tFAC7/n5X6tkUM0PH/dcP+P0v41fvkFBYBVHs/MD0CDmVsOzEdb7JgEYDT/8uq4rpj44NSjwDTc/CyzV1gxbH7Ac4F0PH/S4ZHAOaFZLiY+2nFuQA6/t9kQMTCz1CG66tbWvWS4VwAVf9vugAbel6efqrsYbKBcwFeVNz8ajobyTppw2F84FQAnfl/kwER6wJZcWdBc7e2KZwKoOP/TVakWb0f7md+kVhwOwI0BDCFyq42rt4PSiuAiRGAEXdK4ZQlV+8HTgVwefwHvR7nhbOA0FwBGDgTIM/Z3SLXUj2hOW1wR10eSrs7Ou9eTB3jo/dzuh/gTABdn35c8dhpM3BxOmeTuXs/cDoCdDY4qe7l32pbaZxL1jF+GXo/cLotBcWVTiZU3T7RMn8rHiijW9FgauP4Ef1TLdhHWgacCgAj6tYCqGKjU/DNbqxIkMYZNs7MpxmnLuhmwYJna1dbdzHjY42hDL4/wqkA6HWuDkAngRH0iYVjRkVwnoZO/0gsuLwpkw7OBcAtwlwvfESHxctmfMBSiOG0oStj4HCF7T3+RWARwIU7QK/HbWlqls52mYJtezqMj3v34C5VOveFy8Ll4QoTsJ8Txp0RsW8/Os2im2LCtSC1RIqLw3RldTVplOKkPEYDhMAPqttnune2rzTv5Y+WKdEem2ixkWqZYSeDSUp3qwIYNOrR7cBjcbOORxkvADNeAGa8AMx4AZjxAjATf5Ab0Tp5rJBk2/iD3PAwYo8Vkmyb9CjDGfLYIaCp1rdiAnT8S5PeDVkgoDuVCsWeJxwToHZ163m3Z8hjloDGk54vn5gFbT/5eZw8phifvZz8XPlA9qmRj8JRCumi+OkljzbbrvxM0qPMm9rIqY6FXZubVBUinMbzcP3jbuXA6Mh2kMx07KPJJLfj8Xg8Hg/4H+KfFYb2WM4MAAAAAElFTkSuQmCC" # noqa: E501 diff --git a/api/core/tools/tool_engine.py b/api/core/tools/tool_engine.py index 9a6a49d8f4..645f0861fa 100644 --- a/api/core/tools/tool_engine.py +++ b/api/core/tools/tool_engine.py @@ -193,7 +193,10 @@ class ToolEngine: response.type == ToolInvokeMessage.MessageType.IMAGE_LINK or response.type == ToolInvokeMessage.MessageType.IMAGE ): - result += "image has been created and sent to user already, you do not need to create it, just tell the user to check it now." + result += ( + "image has been created and sent to user already, you do not need to create it," + " just tell the user to check it now." + ) elif response.type == ToolInvokeMessage.MessageType.JSON: result += f"tool response: {json.dumps(response.message, ensure_ascii=False)}." 
else: diff --git a/api/core/tools/utils/feishu_api_utils.py b/api/core/tools/utils/feishu_api_utils.py index 7bb026a383..44803d7d65 100644 --- a/api/core/tools/utils/feishu_api_utils.py +++ b/api/core/tools/utils/feishu_api_utils.py @@ -89,7 +89,7 @@ class FeishuRequest: "content": "云文档\n多人实时协同,插入一切元素。不仅是在线文档,更是强大的创作和互动工具\n云文档:专为协作而生\n" } } - """ + """ # noqa: E501 params = { "document_id": document_id, } diff --git a/api/core/tools/utils/message_transformer.py b/api/core/tools/utils/message_transformer.py index 1109ed7df2..bf040d91d3 100644 --- a/api/core/tools/utils/message_transformer.py +++ b/api/core/tools/utils/message_transformer.py @@ -43,7 +43,7 @@ class ToolFileMessageTransformer: result.append( ToolInvokeMessage( type=ToolInvokeMessage.MessageType.TEXT, - message=f"Failed to download image: {message.message}, you can try to download it yourself.", + message=f"Failed to download image: {message.message}, please try to download it manually.", meta=message.meta.copy() if message.meta is not None else {}, save_as=message.save_as, ) diff --git a/api/core/tools/utils/parser.py b/api/core/tools/utils/parser.py index 654c9acaf9..210b84b29a 100644 --- a/api/core/tools/utils/parser.py +++ b/api/core/tools/utils/parser.py @@ -315,7 +315,8 @@ class ApiBasedToolSchemaParser: yaml_error = e if loaded_content is None: raise ToolApiSchemaError( - f"Invalid api schema, schema is neither json nor yaml. json error: {str(json_error)}, yaml error: {str(yaml_error)}" + f"Invalid api schema, schema is neither json nor yaml. json error: {str(json_error)}," + f" yaml error: {str(yaml_error)}" ) swagger_error = None @@ -355,5 +356,6 @@ class ApiBasedToolSchemaParser: openapi_plugin_error = e raise ToolApiSchemaError( - f"Invalid api schema, openapi error: {str(openapi_error)}, swagger error: {str(swagger_error)}, openapi plugin error: {str(openapi_plugin_error)}" + f"Invalid api schema, openapi error: {str(openapi_error)}, swagger error: {str(swagger_error)}," + f" openapi plugin error: {str(openapi_plugin_error)}" ) diff --git a/api/core/tools/utils/web_reader_tool.py b/api/core/tools/utils/web_reader_tool.py index 3639b5fff7..fc2f63a241 100644 --- a/api/core/tools/utils/web_reader_tool.py +++ b/api/core/tools/utils/web_reader_tool.py @@ -38,7 +38,8 @@ def page_result(text: str, cursor: int, max_length: int) -> str: def get_url(url: str, user_agent: str = None) -> str: """Fetch URL and return the contents as a string.""" headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36" + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)" + " Chrome/91.0.4472.124 Safari/537.36" } if user_agent: headers["User-Agent"] = user_agent diff --git a/api/core/workflow/nodes/code/code_node.py b/api/core/workflow/nodes/code/code_node.py index a07ba2f740..73164fff9a 100644 --- a/api/core/workflow/nodes/code/code_node.py +++ b/api/core/workflow/nodes/code/code_node.py @@ -179,7 +179,8 @@ class CodeNode(BaseNode): ) else: raise ValueError( - f"Output {prefix}.{output_name} is not a valid array. make sure all elements are of the same type." + f"Output {prefix}.{output_name} is not a valid array." + f" make sure all elements are of the same type." 
) elif isinstance(output_value, type(None)): pass @@ -201,7 +202,8 @@ class CodeNode(BaseNode): transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an object, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an object," + f" got {type(result.get(output_name))} instead." ) else: transformed_result[output_name] = self._transform_result( @@ -228,7 +230,8 @@ class CodeNode(BaseNode): transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an array," + f" got {type(result.get(output_name))} instead." ) else: if len(result[output_name]) > dify_config.CODE_MAX_NUMBER_ARRAY_LENGTH: @@ -248,7 +251,8 @@ class CodeNode(BaseNode): transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an array," + f" got {type(result.get(output_name))} instead." ) else: if len(result[output_name]) > dify_config.CODE_MAX_STRING_ARRAY_LENGTH: @@ -268,7 +272,8 @@ class CodeNode(BaseNode): transformed_result[output_name] = None else: raise ValueError( - f"Output {prefix}{dot}{output_name} is not an array, got {type(result.get(output_name))} instead." + f"Output {prefix}{dot}{output_name} is not an array," + f" got {type(result.get(output_name))} instead." ) else: if len(result[output_name]) > dify_config.CODE_MAX_OBJECT_ARRAY_LENGTH: @@ -283,7 +288,8 @@ class CodeNode(BaseNode): pass else: raise ValueError( - f"Output {prefix}{dot}{output_name}[{i}] is not an object, got {type(value)} instead at index {i}." + f"Output {prefix}{dot}{output_name}[{i}] is not an object," + f" got {type(value)} instead at index {i}." 
) transformed_result[output_name] = [ diff --git a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py index 53e8be6415..af55688a52 100644 --- a/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py +++ b/api/core/workflow/nodes/knowledge_retrieval/knowledge_retrieval_node.py @@ -128,11 +128,12 @@ class KnowledgeRetrievalNode(BaseNode): weights = None elif node_data.multiple_retrieval_config.reranking_mode == "weighted_score": reranking_model = None + vector_setting = node_data.multiple_retrieval_config.weights.vector_setting weights = { "vector_setting": { - "vector_weight": node_data.multiple_retrieval_config.weights.vector_setting.vector_weight, - "embedding_provider_name": node_data.multiple_retrieval_config.weights.vector_setting.embedding_provider_name, - "embedding_model_name": node_data.multiple_retrieval_config.weights.vector_setting.embedding_model_name, + "vector_weight": vector_setting.vector_weight, + "embedding_provider_name": vector_setting.embedding_provider_name, + "embedding_model_name": vector_setting.embedding_model_name, }, "keyword_setting": { "keyword_weight": node_data.multiple_retrieval_config.weights.keyword_setting.keyword_weight diff --git a/api/core/workflow/nodes/parameter_extractor/prompts.py b/api/core/workflow/nodes/parameter_extractor/prompts.py index c63fded4d0..58fcecc53b 100644 --- a/api/core/workflow/nodes/parameter_extractor/prompts.py +++ b/api/core/workflow/nodes/parameter_extractor/prompts.py @@ -23,7 +23,7 @@ Steps: To illustrate, if the task involves extracting a user's name and their request, your function call might look like this: Ensure your output follows a similar structure to examples. ### Final Output Produce well-formatted function calls in json without XML tags, as shown in the example. -""" +""" # noqa: E501 FUNCTION_CALLING_EXTRACTOR_USER_TEMPLATE = f"""extract structured information from context inside XML tags by calling the function {FUNCTION_CALLING_EXTRACTOR_NAME} with the correct parameters with structure inside XML tags. @@ -33,7 +33,7 @@ FUNCTION_CALLING_EXTRACTOR_USER_TEMPLATE = f"""extract structured information fr \x7bstructure\x7d -""" +""" # noqa: E501 FUNCTION_CALLING_EXTRACTOR_EXAMPLE = [ { @@ -55,7 +55,8 @@ FUNCTION_CALLING_EXTRACTOR_EXAMPLE = [ }, }, "assistant": { - "text": "I need always call the function with the correct parameters. in this case, I need to call the function with the location parameter.", + "text": "I need always call the function with the correct parameters." + " in this case, I need to call the function with the location parameter.", "function_call": {"name": FUNCTION_CALLING_EXTRACTOR_NAME, "parameters": {"location": "San Francisco"}}, }, }, @@ -72,7 +73,8 @@ FUNCTION_CALLING_EXTRACTOR_EXAMPLE = [ }, }, "assistant": { - "text": "I need always call the function with the correct parameters. in this case, I need to call the function with the food parameter.", + "text": "I need always call the function with the correct parameters." + " in this case, I need to call the function with the food parameter.", "function_call": {"name": FUNCTION_CALLING_EXTRACTOR_NAME, "parameters": {"food": "apple pie"}}, }, }, @@ -117,7 +119,7 @@ Inside XML tags, there is a text that I should extract parameters ### Answer I should always output a valid JSON object. Output nothing other than the JSON object. 
```JSON -""" +""" # noqa: E501 CHAT_GENERATE_JSON_PROMPT = """You should always follow the instructions and output a valid JSON object. The structure of the JSON object you can found in the instructions. diff --git a/api/core/workflow/nodes/question_classifier/template_prompts.py b/api/core/workflow/nodes/question_classifier/template_prompts.py index 581f986922..ce32b01aa4 100644 --- a/api/core/workflow/nodes/question_classifier/template_prompts.py +++ b/api/core/workflow/nodes/question_classifier/template_prompts.py @@ -12,13 +12,13 @@ QUESTION_CLASSIFIER_SYSTEM_PROMPT = """ {histories} -""" +""" # noqa: E501 QUESTION_CLASSIFIER_USER_PROMPT_1 = """ { "input_text": ["I recently had a great experience with your company. The service was prompt and the staff was very friendly."], "categories": [{"category_id":"f5660049-284f-41a7-b301-fd24176a711c","category_name":"Customer Service"},{"category_id":"8d007d06-f2c9-4be5-8ff6-cd4381c13c60","category_name":"Satisfaction"},{"category_id":"5fbbbb18-9843-466d-9b8e-b9bfbb9482c8","category_name":"Sales"},{"category_id":"23623c75-7184-4a2e-8226-466c2e4631e4","category_name":"Product"}], "classification_instructions": ["classify the text based on the feedback provided by customer"]} -""" +""" # noqa: E501 QUESTION_CLASSIFIER_ASSISTANT_PROMPT_1 = """ ```json @@ -32,7 +32,7 @@ QUESTION_CLASSIFIER_USER_PROMPT_2 = """ {"input_text": ["bad service, slow to bring the food"], "categories": [{"category_id":"80fb86a0-4454-4bf5-924c-f253fdd83c02","category_name":"Food Quality"},{"category_id":"f6ff5bc3-aca0-4e4a-8627-e760d0aca78f","category_name":"Experience"},{"category_id":"cc771f63-74e7-4c61-882e-3eda9d8ba5d7","category_name":"Price"}], "classification_instructions": []} -""" +""" # noqa: E501 QUESTION_CLASSIFIER_ASSISTANT_PROMPT_2 = """ ```json @@ -73,4 +73,4 @@ Here is the chat histories between human and assistant, inside " + return ( + f"" + ) @property def token_is_set(self): diff --git a/api/models/tools.py b/api/models/tools.py index 6b69a219b1..861066a2d5 100644 --- a/api/models/tools.py +++ b/api/models/tools.py @@ -62,7 +62,8 @@ class PublishedAppTool(db.Model): description = db.Column(db.Text, nullable=False) # llm_description of the tool, for LLM llm_description = db.Column(db.Text, nullable=False) - # query description, query will be seem as a parameter of the tool, to describe this parameter to llm, we need this field + # query description, query will be seem as a parameter of the tool, + # to describe this parameter to llm, we need this field query_description = db.Column(db.Text, nullable=False) # query name, the name of the query parameter query_name = db.Column(db.String(40), nullable=False) diff --git a/api/models/workflow.py b/api/models/workflow.py index d52749f0ff..9c93ea4cea 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -246,7 +246,8 @@ class Workflow(db.Model): if any(var for var in value if not var.id): raise ValueError("environment variable require a unique id") - # Compare inputs and origin variables, if the value is HIDDEN_VALUE, use the origin variable value (only update `name`). + # Compare inputs and origin variables, + # if the value is HIDDEN_VALUE, use the origin variable value (only update `name`). 
origin_variables_dictionary = {var.id: var for var in self.environment_variables} for i, variable in enumerate(value): if variable.id in origin_variables_dictionary and variable.value == HIDDEN_VALUE: diff --git a/api/poetry.lock b/api/poetry.lock index 103423e5c7..6023f98e2a 100644 --- a/api/poetry.lock +++ b/api/poetry.lock @@ -10388,4 +10388,4 @@ cffi = ["cffi (>=1.11)"] [metadata] lock-version = "2.0" python-versions = ">=3.10,<3.13" -content-hash = "2dbff415c3c9ca95c8dcfb59fc088ce2c0d00037c44f386a34c87c98e1d8b942" +content-hash = "8179c7e3f91b5a00054e26297040b1969f59b37cb9a707fbaa9c2ea419954718" diff --git a/api/pyproject.toml b/api/pyproject.toml index 3d100ebc58..616794cf3a 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -27,7 +27,6 @@ select = [ "W605", # invalid-escape-sequence ] ignore = [ - "E501", # line-too-long "E402", # module-import-not-at-top-of-file "E711", # none-comparison "E712", # true-false-comparison @@ -68,16 +67,19 @@ ignore = [ "F401", # unused-import "F811", # redefined-while-unused ] -"tests/*" = [ - "F401", # unused-import - "F811", # redefined-while-unused -] "configs/*" = [ "N802", # invalid-function-name ] "libs/gmpy2_pkcs10aep_cipher.py" = [ "N803", # invalid-argument-name ] +"migrations/versions/*" = [ + "E501", # line-too-long +] +"tests/*" = [ + "F401", # unused-import + "F811", # redefined-while-unused +] [tool.ruff.format] exclude = [ @@ -270,4 +272,4 @@ optional = true [tool.poetry.group.lint.dependencies] dotenv-linter = "~0.5.0" -ruff = "~0.6.1" +ruff = "~0.6.4" diff --git a/api/services/tools/api_tools_manage_service.py b/api/services/tools/api_tools_manage_service.py index 3ded9c0989..6f6074f596 100644 --- a/api/services/tools/api_tools_manage_service.py +++ b/api/services/tools/api_tools_manage_service.py @@ -176,7 +176,8 @@ class ApiToolManageService: get api tool provider remote schema """ headers = { - "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0", + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko)" + " Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0", "Accept": "*/*", } diff --git a/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py b/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py index 4138cdd40d..025913cb17 100644 --- a/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py +++ b/api/tests/integration_tests/model_runtime/__mock/openai_embeddings.py @@ -40,7 +40,7 @@ class MockEmbeddingsClass: usage=Usage(prompt_tokens=2, total_tokens=2), ) - embeddings = 
"VEfNvMLUnrwFleO8hcj9vEE/yrzyjOA84E1MvNfoCrxjrI+8sZUKvNgrBT17uY07gJ/IvNvhHLrUemc8KXXGumalIT3YKwU7ZsnbPMhATrwTt6u8JEwRPNMmCjxGREW7TRKvu6/MG7zAyDU8wXLkuuMDZDsXsL28zHzaOw0IArzOiMO8LtASvPKM4Dul5l+80V0bPGVDZ7wYNrI89ucsvJZdYztzRm+8P8ysOyGbc7zrdgK9sdiEPKQ8sbulKdq7KIgdvKIMDj25dNc8k0AXPBn/oLzrdgK8IXe5uz0Dvrt50V68tTjLO4ZOcjoG9x29oGfZufiwmzwMDXy8EL6ZPHvdx7nKjzE8+LCbPG22hTs3EZq7TM+0POrRzTxVZo084wPkO8Nak7z8cpw8pDwxvA2T8LvBC7C72fltvC8Atjp3fYE8JHDLvEYgC7xAdls8YiabPPkEeTzPUbK8gOLCPEBSIbyt5Oy8CpreusNakzywUhA824vLPHRlr7zAhTs7IZtzvHd9AT2xY/O6ok8IvOihqrql5l88K4EvuknWorvYKwW9iXkbvGMTRLw5qPG7onPCPLgNIzwAbK67ftbZPMxYILvAyDW9TLB0vIid1buzCKi7u+d0u8iDSLxNVam8PZyJPNxnETvVANw8Oi5mu9nVszzl65I7DIKNvLGVirxsMJE7tPXQu2PvCT1zRm87p1l9uyRMkbsdfqe8U52ePHRlr7wt9Mw8/C8ivTu02rwJFGq8tpoFPWnC7blWumq7sfy+vG1zCzy9Nlg8iv+PuvxT3DuLU228kVhoOkmTqDrv1kg8ocmTu1WpBzsKml48DzglvI8ECzxwTd27I+pWvIWkQ7xUR007GqlPPBFEDrzGECu865q8PI7BkDwNxYc8tgG6ullMSLsIajs84lk1PNLjD70mv648ZmInO2tnIjzvb5Q8o5KCPLo9xrwKMyq9QqGEvI8ECzxO2508ATUdPRAlTry5kxc8KVGMPJyBHjxIUC476KGqvIU9DzwX87c88PUIParrWrzdlzS/G3K+uzEw2TxB2BU86AhfPAMiRj2dK808a85WPPCft7xU4Bg95Q9NPDxZjzwrpek7yNkZvHa0EjyQ0nM6Nq9fuyjvUbsRq8I7CAMHO3VSWLyuauE7U1qkvPkEeTxs7ZY7B6FMO48Eizy75/S7ieBPvB07rTxmyVu8onPCO5rc6Tu7XIa7oEMfPYngT7u24vk7/+W5PE8eGDxJ1iI9t4cuvBGHiLyH1GY7jfghu+oUSDwa7Mk7iXmbuut2grrq8I2563v8uyofdTxRTrs44lm1vMeWnzukf6s7r4khvEKhhDyhyZO8G5Z4Oy56wTz4sBs81Zknuz3fg7wnJuO74n1vvASEADu98128gUl3vBtyvrtZCU47yep8u5FYaDx2G0e8a85WO5cmUjz3kds8qgqbPCUaerx50d67WKIZPI7BkDua3Om74vKAvL3zXbzXpRA9CI51vLo9xryKzXg7tXtFO9RWLTwnJuM854LqPEIs8zuO5cq8d8V1u9P0cjrQ++C8cGwdPDdUlLoOGeW8auEtu8Z337nlzFK8aRg/vFCkDD0nRSM879bIvKUFID1iStU8EL6ZvLufgLtKgNE7KVEMvJOnSzwahRU895HbvJiIjLvc8n88bmC0PPLP2rywM9C7jTscOoS3mjy/Znu7dhvHuu5Q1Dyq61o6CI71u09hkry0jhw8gb6IPI8EC7uoVAM8gs9rvGM3fjx2G8e81FYtu/ojubyYRRK72Riuu83elDtNNmk70/TyuzUFsbvgKZI7onNCvAehzLumr8679R6+urr6SztX2So8Bl5SOwSEgLv5NpA8LwC2PGPvibzJ6vw7H2tQvOtXwrzXpRC8j0z/uxwcbTy2vr+8VWYNu+t2ArwKmt68NKN2O3XrIzw9A747UU47vaavzjwU+qW8YBqyvE02aTyEt5o8cCmjOxtyPrxs7ZY775NOu+SJWLxMJQY8/bWWu6IMDrzSSsQ7GSPbPLlQnbpVzcE7Pka4PJ96sLycxJg8v/9GPO2HZTyeW3C8Vpawtx2iYTwWBg87/qI/OviwGzxyWcY7M9WNPIA4FD32C2e8tNGWPJ43trxCoYS8FGHavItTbbu7n4C80NemPLm30Ty1OMu7vG1pvG3aPztBP0o75Q/NPJhFEj2V9i683PL/O97+aLz6iu27cdPRum/mKLwvVgc89fqDu3LA+jvm2Ls8mVZ1PIuFBD3ZGK47Cpreut7+aLziWTU8XSEgPMvSKzzO73e5040+vBlmVTxS1K+8mQ4BPZZ8o7w8FpW6OR0DPSSPCz21Vwu99fqDOjMYiDy7XAY8oYaZO+aVwTyX49c84OaXOqdZfTunEQk7B8AMvMDs7zo/D6e8OP5CvN9gIzwNCII8FefOPE026TpzIjU8XsvOO+J9b7rkIiQ8is34O+e0AbxBpv67hcj9uiPq1jtCoQQ8JfY/u86nAz0Wkf28LnrBPJlW9Tt8P4K7BbSjO9grhbyAOJS8G3K+vJLe3LzXpZA7NQUxPJs+JDz6vAS8QHZbvYNVYDrj3yk88PWIPOJ97zuSIVc8ZUPnPMqPsbx2cZi7QfzPOxYGDz2hqtO6H2tQO543NjyFPY+7JRUAOt0wgDyJeZu8MpKTu6AApTtg1ze82JI5vKllZjvrV0I7HX6nu7vndDxg1ze8jwQLu1ZTNjuJvBU7BXGpvAP+C7xJk6g8j2u/vBABlLzlqBi8M9WNutRWLTx0zGM9sHbKPLoZDDtmyVu8tpqFOvPumjyuRqe87lBUvFU0drxs7Za8ejMZOzJPGbyC7qu863v8PDPVjTxJ1iI7Ca01PLuAQLuNHFy7At9LOwP+i7tYxlO80NemO9elkDx45LU8h9TmuzxZjzz/5bk8p84OurvndLwAkGi7XL9luCSzRTwMgg08vrxMPKIwyDwdomG8K6VpPGPvCTxkmTi7M/lHPGxUSzxwKSM8wQuwvOqtkzrLFSa8SbdivAMixjw2r9+7xWt2vAyCDT1NEi87B8CMvG1zi7xpwm27MrbNO9R6Z7xJt+K7jNnhu9ZiFrve/ug55CKkvCwHJLqsOr47+ortvPwvIr2v8NW8YmmVOE+FTLywUhA8MTBZvMiDyLtx8hG8OEE9vMDsbzroCF88DelBOobnPbx+b6U8sbnEOywr3ro93wO9dMzjup2xwbwnRaO7cRZMu8Z337vS44+7VpYwvFWphzxKgNE8L1aHPLPFLbunzo66zFggPN+jHbs7tFo8nW7HO9JKRLyoeD28Fm1DPGZip7u5dNe7KMsXvFnlkzxQpAw7MrZNPHpX0zwSyoK7ayQovPR0Dz3gClK8/juLPDjaCLvqrZO7a4vcO9HEzzvife88KKzXvDmocbwpMkw7t2huvaIMjjznguo7Gy/EOzxZjzoLuZ48qi5VvCjLFzuDmNo654LquyrXgDy7XAa8e7mNvJ7QAb0Rq8K7ojBIvBN0MTuOfha8GoUVveb89bxMsHS8jV9WPPKM4LyAOJS8me9AvZv7qbsbcr47tu
L5uaXmXzweKNa7rkYnPINV4Lxcv+W8tVcLvI8oxbzvbxS7oYaZu9+jHT0cHO08c7uAPCSzRTywUhA85xu2u+wBcTuJvJU8PBYVusTghzsnAim8acJtPFQE0zzFIwI9C7meO1DIRry7XAY8MKpkPJZd47suN0e5JTm6u6BDn7zfx1e8AJDoOr9CQbwaQps7x/1TPLTRFryqLtU8JybjPIXI/Tz6I7k6mVb1PMWKNryd1fs8Ok0mPHt2kzy9Ep48TTZpvPS3ibwGOpi8Ns4fPBqFlbr3Kqc8+QR5vHLA+rt7uY289YXyPI6iULxL4gu8Tv/XuycCKbwCnFG8C7kevVG1b7zIXw68GoWVO4rNeDnrM4i8MxgIPUNLs7zSoJW86ScfO+rRzbs6Cqw8NxGautP0cjw0wjY8CGq7vAkU6rxKgNG5+uA+vJXXbrwKM6o86vCNOu+yjjoQAZS8xATCOQVxKbynzo68wxcZvMhATjzS4488ArsRvNEaobwRh4i7t4euvAvd2DwnAik8UtQvvBFEDrz4sJs79gtnvOknnzy+vEy8D3sfPLH8vjzmLo28KVGMvOtXwjvpapm8HBxtPH3K8Lu753Q8/l9FvLvn9DomoG48fET8u9zy/7wMpke8zmQJu3oU2TzlD828KteAPAwNfLu+mBI5ldduPNZDVjq+vEy8eEvqvDHJpLwUPaC6qi7VPABsLjwFcSm72sJcu+bYO7v41NW8RiALvYB7DjzL0is7qLs3us1FSbzaf2K8MnNTuxABFDzF8Wo838fXvOBNzDzre3w8afQEvQE1nbulBaC78zEVvG5B9LzH/VM82Riuuwu5nrwsByQ8Y6yPvHXro7yQ0nM8nStNPJkyOzwnJmM80m7+O1VmjTzqrZM8dhvHOyAQBbz3baG8KTJMPOlqmbxsVEs8Pq3suy56QbzUVq08X3CDvAE1nTwUHuA7hue9vF8tCbvwOAO6F7A9ugd9kryqLtW7auEtu9ONPryPa7+8o9r2O570OzyFpEO8ntCBPOqtk7sykhO7lC1AOw2TcLswhiq6vx4HvP5fRbwuesG7Mk8ZvA4Z5TlfcAM9DrIwPL//xrzMm5q8JEwRPHBsnbxL4gu8jyjFu99gozrkZZ483GeRPLuAwDuYiIw8iv8PvK5Gpzx+b6W87Yflu3NGbzyE+hQ8a4tcPItT7bsoy5e8L1YHvWQyBDwrga86kPEzvBQ9oDxtl0W8lwKYvGpIYrxQ5wY8AJDovOLyALyw3f489JjJvMdTpTkKMyo8V9mqvH3K8LpyNYy8JHDLOixu2LpQ54Y8Q0uzu8LUnrs0wrY84vIAveihqjwfihA8DIKNvLDd/jywM1C7FB7gOxsLirxAUqE7sulnvH3K8DkAkGg8jsGQvO+TzrynWf287CCxvK4Drbwg8UQ8JRr6vFEqAbskjwu76q2TPNP0cjopDhK8dVJYvFIXKrxLn5G8AK8oPAb3HbxbOXE8Bvedun5Q5ThHyjk8QdiVvBXDlLw0o/Y7aLGKupkOgTxKPdc81kNWPtUAXLxUR827X1FDPf47izxsEVE8akhiPIhaWzxYX5+7hT0PPSrXgLxQC0E8i4WEvKUp2jtCLHM8DcWHO768zLxnK5a89R6+vH9czrorpem73h0pvAnwr7yKzXi8gDgUPf47Czq9zyO8728UOf34EDy6PUY76OSkvKZIGr2ZDgE8gzEmPG3av7v77Ce7/oP/O3MiNTtas/w8x1OlO/D1CDvDfs27ll1jO2Ufrbv1hXK8WINZuxN0sbuxlYq8OYS3uia/rjyiTwi9O7TaO+/WyDyiDA49E7erO3fF9bj6I7k7qHi9O3SoKbyBSfc7drSSvGPvCT2pQay7t2huPGnC7byUCQY8CEaBu6rHoDhx8hE8/fgQvCjLl7zdeHS8x/3TO0Isc7tas3y8jwQLvUKhhDz+foU8fCDCPC+ZgTywD5Y7ZR8tOla66rtCCLm8gWg3vDoKrLxbWDE76SefPBkj2zrlqJi7pebfuv6Df7zWQ9a7lHA6PGDXtzzMv1Q8mtxpOwJ4lzxKGZ28mGnMPDw6z7yxY/O7m2Leu7juYjwvVge8zFigPGpIYjtWumo5xs2wOgyCjbxrZ6K8bbaFvKzTCbsks8W7C7mePIU9DzxQyEY8posUvAW0ozrHlh88CyBTPJRwursxySQ757SBuqcRCbwNCIK8EL6ZvIG+iLsIRgE8rF74vOJZtbuUcDq8r/DVPMpMt7sL3Vi8eWqquww/kzqj2vY5auGtu85kiTwMPxM66KGqvBIxNzuwUpA8v2b7u09C0rx7ms08NUirvFYQPLxKPdc68mimvP5fRTtoPPm7XuqOOgOJ+jxfLYm7u58AvXz8B72PR4W6ldfuuys+tbvYKwW7pkiaPLB2SjvKj7G875POvA6yML7qFEg9Eu68O6Up2rz77Kc84CmSPP6ivzz4sJu6/C+iOaUpWjwq14A84E3MOYB7Dr2d1Xu775NOvC6e+7spUYw8PzPhO5TGizt29ww9yNkZPY7lyrz020M7QRsQu3z8BzwkCZe79YXyO8jZmTzvGUM8HgQcO9kYrrzxBmy8hLeaPLYBOjz+oj88flBlO6GqUzuiMMi8fxlUvCr7ujz41NU8DA38PBeMAzx7uY28TTZpvFG1bzxtc4s89ucsPEereTwfipC82p4iPKtNFbzo5KQ7pcKlOW5gtDzO73c7B6FMOzRbgjxCXoo8v0JBOSl1RrwxDJ+7XWSaPD3Aw7sOsjA8tuJ5vKw6Pry5k5c8ZUNnvG/H6DyVTAA8Shkdvd7+aDvtpiW9qUGsPFTgmDwbcr68TTbpO1DnhryNX9a7mrivvIqpPjxsqhy81HrnOzv31Dvth+U6UtQvPBz4MrvtpqW84OYXvRz4sjxwkFe8zSGPuycCqbyFPY8818nKOw84JTy8bWk8USqBvBGHiLtosQo8BOs0u9skl7xQ54Y8uvrLPOknn7w705o8Jny0PAd9EjxhoKa8Iv2tu2M3/jtsVEs8DcUHPQSEADs3eE48GkKbupRR+rvdeHQ7Xy2JvO1jKz0xMFm8sWPzux07LbyrTZW7bdq/O6Pa9r0ahRW9CyDTOjSjdjyQ8bO8yaIIPfupLTz/CfQ7xndfvJs+JD0zPEK8KO/RvMpw8bwObzY7fm+lPJtiXrz5BHm8WmsIvKlBrLuDdKA7hWHJOgd9Ers0o/Y7nlvwu5NAl7u8BrW6utYRO2SZuDxyNYw8CppevAY6GDxVqQe9oGdZPFa6ary3RLS70NcmO2PQSb36ZrM86q2TPML42LwewaE8k2RRPDmocTsi/S29o/k2PHRlr7zjnC+8gHsOPUpcFzxtl8W6tuL5vHw/gry/2wy9yaIIvINV4Dx3fQG7ISFoPO7pnzwGXlK8HPiyPGAaMjzBC7A7MQyfu+eC6jyV1+67pDyxvBWkVLxrJKg754LqOScCKbwpUQy8KIgdOJDSc7zDfk08tLLWvNZDVjyh7c28ShmdvMnlgjs2NdS8ISHovP5+hbxGIIs8ayQouyKnXDzBcmS6zw44u
86IQ7yl5l+7cngGvWvOVrsEhIC7yNkZPJODkbuAn0g8XN6lPOaVwbuTgxG8OR2DPAb3HTzlqJi8nUoNvCAVf73Mmxo9afSEu4FotzveHSk8c0ZvOMFOqjwP9Sq87iwavIEBg7xIUK68IbozuozZ4btg17c7vx4Hvarr2rtp9IQ8Rt0QO+1jqzyeNzY8kNLzO8sVpry98108OCL9uyisV7vhr4Y8FgaPvLFjczw42og8gWg3vPX6gzsNk/C83GeRPCUVgDy0jpw7yNkZu2VD5zvh93o81h+cuw3Fhzyl5t+86Y7TvHa0EjyzCCi7WmsIPIy1Jzy00Ra6NUiru50rTTx50d47/HKcO2wwETw0f7y8sFIQvNxnkbzS4w855pVBu9FdGzx9yvC6TM80vFQjkzy/Zvs7BhtYPLjKKLqPa787A/6LOyiInbzooSq8728UPIFJ97wq+7q8R6v5u1tYMbwdomG6iSPKPAb3HTx3oTu7fGO8POqtk7ze/ug84wNkPMnq/DsB8iK9ogwOu6lBrDznguo8NQUxvHKcwDo28tm7yNmZPN1UurxCoYS80m7+Oy+9OzzGzTC836MdvCDNCrtaawi7dVLYPEfKuTxzRm88cCmjOyXSBbwGOpi879ZIO8dTJbtqnrO8NMI2vR1+J7xwTV087umfPFG17zsC30s8oYaZPKllZrzZGK47zss9vP21FryZywa9bbYFPVNapDt2G0e7E3SxPMUjgry5dNc895Hbu0H8z7ueN7a7OccxPFhfH7vC1B48n3owvEhQLrzu6Z+8HTutvEBSITw6Taa5g1XgPCzEqbxfLYk9OYQ3vBlm1bvPUTI8wIU7PIy1pzyFyP07gzGmO3NGb7yS3ty7O5CguyEhaLyWoF28pmxUOaZImrz+g/87mnU1vFbsgTxvo668PFmPO2KNTzy09VC8LG5YPHhL6rsvJPC7kTQuvEGCxDlhB9s6u58AvfCAd7z0t4k7kVjoOCkOkrxMjDq8iPOmPL0SnrxsMJG7OEG9vCUa+rvx4rE7cpxAPDCGqjukf6u8TEnAvNn57TweBBw7JdKFvIy1p7vIg8i7" + embeddings = "VEfNvMLUnrwFleO8hcj9vEE/yrzyjOA84E1MvNfoCrxjrI+8sZUKvNgrBT17uY07gJ/IvNvhHLrUemc8KXXGumalIT3YKwU7ZsnbPMhATrwTt6u8JEwRPNMmCjxGREW7TRKvu6/MG7zAyDU8wXLkuuMDZDsXsL28zHzaOw0IArzOiMO8LtASvPKM4Dul5l+80V0bPGVDZ7wYNrI89ucsvJZdYztzRm+8P8ysOyGbc7zrdgK9sdiEPKQ8sbulKdq7KIgdvKIMDj25dNc8k0AXPBn/oLzrdgK8IXe5uz0Dvrt50V68tTjLO4ZOcjoG9x29oGfZufiwmzwMDXy8EL6ZPHvdx7nKjzE8+LCbPG22hTs3EZq7TM+0POrRzTxVZo084wPkO8Nak7z8cpw8pDwxvA2T8LvBC7C72fltvC8Atjp3fYE8JHDLvEYgC7xAdls8YiabPPkEeTzPUbK8gOLCPEBSIbyt5Oy8CpreusNakzywUhA824vLPHRlr7zAhTs7IZtzvHd9AT2xY/O6ok8IvOihqrql5l88K4EvuknWorvYKwW9iXkbvGMTRLw5qPG7onPCPLgNIzwAbK67ftbZPMxYILvAyDW9TLB0vIid1buzCKi7u+d0u8iDSLxNVam8PZyJPNxnETvVANw8Oi5mu9nVszzl65I7DIKNvLGVirxsMJE7tPXQu2PvCT1zRm87p1l9uyRMkbsdfqe8U52ePHRlr7wt9Mw8/C8ivTu02rwJFGq8tpoFPWnC7blWumq7sfy+vG1zCzy9Nlg8iv+PuvxT3DuLU228kVhoOkmTqDrv1kg8ocmTu1WpBzsKml48DzglvI8ECzxwTd27I+pWvIWkQ7xUR007GqlPPBFEDrzGECu865q8PI7BkDwNxYc8tgG6ullMSLsIajs84lk1PNLjD70mv648ZmInO2tnIjzvb5Q8o5KCPLo9xrwKMyq9QqGEvI8ECzxO2508ATUdPRAlTry5kxc8KVGMPJyBHjxIUC476KGqvIU9DzwX87c88PUIParrWrzdlzS/G3K+uzEw2TxB2BU86AhfPAMiRj2dK808a85WPPCft7xU4Bg95Q9NPDxZjzwrpek7yNkZvHa0EjyQ0nM6Nq9fuyjvUbsRq8I7CAMHO3VSWLyuauE7U1qkvPkEeTxs7ZY7B6FMO48Eizy75/S7ieBPvB07rTxmyVu8onPCO5rc6Tu7XIa7oEMfPYngT7u24vk7/+W5PE8eGDxJ1iI9t4cuvBGHiLyH1GY7jfghu+oUSDwa7Mk7iXmbuut2grrq8I2563v8uyofdTxRTrs44lm1vMeWnzukf6s7r4khvEKhhDyhyZO8G5Z4Oy56wTz4sBs81Zknuz3fg7wnJuO74n1vvASEADu98128gUl3vBtyvrtZCU47yep8u5FYaDx2G0e8a85WO5cmUjz3kds8qgqbPCUaerx50d67WKIZPI7BkDua3Om74vKAvL3zXbzXpRA9CI51vLo9xryKzXg7tXtFO9RWLTwnJuM854LqPEIs8zuO5cq8d8V1u9P0cjrQ++C8cGwdPDdUlLoOGeW8auEtu8Z337nlzFK8aRg/vFCkDD0nRSM879bIvKUFID1iStU8EL6ZvLufgLtKgNE7KVEMvJOnSzwahRU895HbvJiIjLvc8n88bmC0PPLP2rywM9C7jTscOoS3mjy/Znu7dhvHuu5Q1Dyq61o6CI71u09hkry0jhw8gb6IPI8EC7uoVAM8gs9rvGM3fjx2G8e81FYtu/ojubyYRRK72Riuu83elDtNNmk70/TyuzUFsbvgKZI7onNCvAehzLumr8679R6+urr6SztX2So8Bl5SOwSEgLv5NpA8LwC2PGPvibzJ6vw7H2tQvOtXwrzXpRC8j0z/uxwcbTy2vr+8VWYNu+t2ArwKmt68NKN2O3XrIzw9A747UU47vaavzjwU+qW8YBqyvE02aTyEt5o8cCmjOxtyPrxs7ZY775NOu+SJWLxMJQY8/bWWu6IMDrzSSsQ7GSPbPLlQnbpVzcE7Pka4PJ96sLycxJg8v/9GPO2HZTyeW3C8Vpawtx2iYTwWBg87/qI/OviwGzxyWcY7M9WNPIA4FD32C2e8tNGWPJ43trxCoYS8FGHavItTbbu7n4C80NemPLm30Ty1OMu7vG1pvG3aPztBP0o75Q/NPJhFEj2V9i683PL/O97+aLz6iu27cdPRum/mKLwvVgc89fqDu3LA+jvm2Ls8mVZ1PIuFBD3ZGK47Cpreut7+aLziWTU8XSEgPMvSKzzO73e5040+vBlmVTxS1K+8mQ4BPZZ8o7w8FpW6OR0DPSSPCz21Vwu99fqDOjMYiDy7XAY8oYaZO+aVwTyX49c84OaXOqdZfTunEQk7B8AMvMDs7zo/D6e8OP5CvN9gIzwNCII8FefOPE026TpzIjU8XsvOO+J9b7rkIiQ8is34O+
e0AbxBpv67hcj9uiPq1jtCoQQ8JfY/u86nAz0Wkf28LnrBPJlW9Tt8P4K7BbSjO9grhbyAOJS8G3K+vJLe3LzXpZA7NQUxPJs+JDz6vAS8QHZbvYNVYDrj3yk88PWIPOJ97zuSIVc8ZUPnPMqPsbx2cZi7QfzPOxYGDz2hqtO6H2tQO543NjyFPY+7JRUAOt0wgDyJeZu8MpKTu6AApTtg1ze82JI5vKllZjvrV0I7HX6nu7vndDxg1ze8jwQLu1ZTNjuJvBU7BXGpvAP+C7xJk6g8j2u/vBABlLzlqBi8M9WNutRWLTx0zGM9sHbKPLoZDDtmyVu8tpqFOvPumjyuRqe87lBUvFU0drxs7Za8ejMZOzJPGbyC7qu863v8PDPVjTxJ1iI7Ca01PLuAQLuNHFy7At9LOwP+i7tYxlO80NemO9elkDx45LU8h9TmuzxZjzz/5bk8p84OurvndLwAkGi7XL9luCSzRTwMgg08vrxMPKIwyDwdomG8K6VpPGPvCTxkmTi7M/lHPGxUSzxwKSM8wQuwvOqtkzrLFSa8SbdivAMixjw2r9+7xWt2vAyCDT1NEi87B8CMvG1zi7xpwm27MrbNO9R6Z7xJt+K7jNnhu9ZiFrve/ug55CKkvCwHJLqsOr47+ortvPwvIr2v8NW8YmmVOE+FTLywUhA8MTBZvMiDyLtx8hG8OEE9vMDsbzroCF88DelBOobnPbx+b6U8sbnEOywr3ro93wO9dMzjup2xwbwnRaO7cRZMu8Z337vS44+7VpYwvFWphzxKgNE8L1aHPLPFLbunzo66zFggPN+jHbs7tFo8nW7HO9JKRLyoeD28Fm1DPGZip7u5dNe7KMsXvFnlkzxQpAw7MrZNPHpX0zwSyoK7ayQovPR0Dz3gClK8/juLPDjaCLvqrZO7a4vcO9HEzzvife88KKzXvDmocbwpMkw7t2huvaIMjjznguo7Gy/EOzxZjzoLuZ48qi5VvCjLFzuDmNo654LquyrXgDy7XAa8e7mNvJ7QAb0Rq8K7ojBIvBN0MTuOfha8GoUVveb89bxMsHS8jV9WPPKM4LyAOJS8me9AvZv7qbsbcr47tuL5uaXmXzweKNa7rkYnPINV4Lxcv+W8tVcLvI8oxbzvbxS7oYaZu9+jHT0cHO08c7uAPCSzRTywUhA85xu2u+wBcTuJvJU8PBYVusTghzsnAim8acJtPFQE0zzFIwI9C7meO1DIRry7XAY8MKpkPJZd47suN0e5JTm6u6BDn7zfx1e8AJDoOr9CQbwaQps7x/1TPLTRFryqLtU8JybjPIXI/Tz6I7k6mVb1PMWKNryd1fs8Ok0mPHt2kzy9Ep48TTZpvPS3ibwGOpi8Ns4fPBqFlbr3Kqc8+QR5vHLA+rt7uY289YXyPI6iULxL4gu8Tv/XuycCKbwCnFG8C7kevVG1b7zIXw68GoWVO4rNeDnrM4i8MxgIPUNLs7zSoJW86ScfO+rRzbs6Cqw8NxGautP0cjw0wjY8CGq7vAkU6rxKgNG5+uA+vJXXbrwKM6o86vCNOu+yjjoQAZS8xATCOQVxKbynzo68wxcZvMhATjzS4488ArsRvNEaobwRh4i7t4euvAvd2DwnAik8UtQvvBFEDrz4sJs79gtnvOknnzy+vEy8D3sfPLH8vjzmLo28KVGMvOtXwjvpapm8HBxtPH3K8Lu753Q8/l9FvLvn9DomoG48fET8u9zy/7wMpke8zmQJu3oU2TzlD828KteAPAwNfLu+mBI5ldduPNZDVjq+vEy8eEvqvDHJpLwUPaC6qi7VPABsLjwFcSm72sJcu+bYO7v41NW8RiALvYB7DjzL0is7qLs3us1FSbzaf2K8MnNTuxABFDzF8Wo838fXvOBNzDzre3w8afQEvQE1nbulBaC78zEVvG5B9LzH/VM82Riuuwu5nrwsByQ8Y6yPvHXro7yQ0nM8nStNPJkyOzwnJmM80m7+O1VmjTzqrZM8dhvHOyAQBbz3baG8KTJMPOlqmbxsVEs8Pq3suy56QbzUVq08X3CDvAE1nTwUHuA7hue9vF8tCbvwOAO6F7A9ugd9kryqLtW7auEtu9ONPryPa7+8o9r2O570OzyFpEO8ntCBPOqtk7sykhO7lC1AOw2TcLswhiq6vx4HvP5fRbwuesG7Mk8ZvA4Z5TlfcAM9DrIwPL//xrzMm5q8JEwRPHBsnbxL4gu8jyjFu99gozrkZZ483GeRPLuAwDuYiIw8iv8PvK5Gpzx+b6W87Yflu3NGbzyE+hQ8a4tcPItT7bsoy5e8L1YHvWQyBDwrga86kPEzvBQ9oDxtl0W8lwKYvGpIYrxQ5wY8AJDovOLyALyw3f489JjJvMdTpTkKMyo8V9mqvH3K8LpyNYy8JHDLOixu2LpQ54Y8Q0uzu8LUnrs0wrY84vIAveihqjwfihA8DIKNvLDd/jywM1C7FB7gOxsLirxAUqE7sulnvH3K8DkAkGg8jsGQvO+TzrynWf287CCxvK4Drbwg8UQ8JRr6vFEqAbskjwu76q2TPNP0cjopDhK8dVJYvFIXKrxLn5G8AK8oPAb3HbxbOXE8Bvedun5Q5ThHyjk8QdiVvBXDlLw0o/Y7aLGKupkOgTxKPdc81kNWPtUAXLxUR827X1FDPf47izxsEVE8akhiPIhaWzxYX5+7hT0PPSrXgLxQC0E8i4WEvKUp2jtCLHM8DcWHO768zLxnK5a89R6+vH9czrorpem73h0pvAnwr7yKzXi8gDgUPf47Czq9zyO8728UOf34EDy6PUY76OSkvKZIGr2ZDgE8gzEmPG3av7v77Ce7/oP/O3MiNTtas/w8x1OlO/D1CDvDfs27ll1jO2Ufrbv1hXK8WINZuxN0sbuxlYq8OYS3uia/rjyiTwi9O7TaO+/WyDyiDA49E7erO3fF9bj6I7k7qHi9O3SoKbyBSfc7drSSvGPvCT2pQay7t2huPGnC7byUCQY8CEaBu6rHoDhx8hE8/fgQvCjLl7zdeHS8x/3TO0Isc7tas3y8jwQLvUKhhDz+foU8fCDCPC+ZgTywD5Y7ZR8tOla66rtCCLm8gWg3vDoKrLxbWDE76SefPBkj2zrlqJi7pebfuv6Df7zWQ9a7lHA6PGDXtzzMv1Q8mtxpOwJ4lzxKGZ28mGnMPDw6z7yxY/O7m2Leu7juYjwvVge8zFigPGpIYjtWumo5xs2wOgyCjbxrZ6K8bbaFvKzTCbsks8W7C7mePIU9DzxQyEY8posUvAW0ozrHlh88CyBTPJRwursxySQ757SBuqcRCbwNCIK8EL6ZvIG+iLsIRgE8rF74vOJZtbuUcDq8r/DVPMpMt7sL3Vi8eWqquww/kzqj2vY5auGtu85kiTwMPxM66KGqvBIxNzuwUpA8v2b7u09C0rx7ms08NUirvFYQPLxKPdc68mimvP5fRTtoPPm7XuqOOgOJ+jxfLYm7u58AvXz8B72PR4W6ldfuuys+tbvYKwW7pkiaPLB2SjvKj7G875POvA6yML7qFEg9Eu68O6Up2rz77Kc84CmSPP6iv
zz4sJu6/C+iOaUpWjwq14A84E3MOYB7Dr2d1Xu775NOvC6e+7spUYw8PzPhO5TGizt29ww9yNkZPY7lyrz020M7QRsQu3z8BzwkCZe79YXyO8jZmTzvGUM8HgQcO9kYrrzxBmy8hLeaPLYBOjz+oj88flBlO6GqUzuiMMi8fxlUvCr7ujz41NU8DA38PBeMAzx7uY28TTZpvFG1bzxtc4s89ucsPEereTwfipC82p4iPKtNFbzo5KQ7pcKlOW5gtDzO73c7B6FMOzRbgjxCXoo8v0JBOSl1RrwxDJ+7XWSaPD3Aw7sOsjA8tuJ5vKw6Pry5k5c8ZUNnvG/H6DyVTAA8Shkdvd7+aDvtpiW9qUGsPFTgmDwbcr68TTbpO1DnhryNX9a7mrivvIqpPjxsqhy81HrnOzv31Dvth+U6UtQvPBz4MrvtpqW84OYXvRz4sjxwkFe8zSGPuycCqbyFPY8818nKOw84JTy8bWk8USqBvBGHiLtosQo8BOs0u9skl7xQ54Y8uvrLPOknn7w705o8Jny0PAd9EjxhoKa8Iv2tu2M3/jtsVEs8DcUHPQSEADs3eE48GkKbupRR+rvdeHQ7Xy2JvO1jKz0xMFm8sWPzux07LbyrTZW7bdq/O6Pa9r0ahRW9CyDTOjSjdjyQ8bO8yaIIPfupLTz/CfQ7xndfvJs+JD0zPEK8KO/RvMpw8bwObzY7fm+lPJtiXrz5BHm8WmsIvKlBrLuDdKA7hWHJOgd9Ers0o/Y7nlvwu5NAl7u8BrW6utYRO2SZuDxyNYw8CppevAY6GDxVqQe9oGdZPFa6ary3RLS70NcmO2PQSb36ZrM86q2TPML42LwewaE8k2RRPDmocTsi/S29o/k2PHRlr7zjnC+8gHsOPUpcFzxtl8W6tuL5vHw/gry/2wy9yaIIvINV4Dx3fQG7ISFoPO7pnzwGXlK8HPiyPGAaMjzBC7A7MQyfu+eC6jyV1+67pDyxvBWkVLxrJKg754LqOScCKbwpUQy8KIgdOJDSc7zDfk08tLLWvNZDVjyh7c28ShmdvMnlgjs2NdS8ISHovP5+hbxGIIs8ayQouyKnXDzBcmS6zw44u86IQ7yl5l+7cngGvWvOVrsEhIC7yNkZPJODkbuAn0g8XN6lPOaVwbuTgxG8OR2DPAb3HTzlqJi8nUoNvCAVf73Mmxo9afSEu4FotzveHSk8c0ZvOMFOqjwP9Sq87iwavIEBg7xIUK68IbozuozZ4btg17c7vx4Hvarr2rtp9IQ8Rt0QO+1jqzyeNzY8kNLzO8sVpry98108OCL9uyisV7vhr4Y8FgaPvLFjczw42og8gWg3vPX6gzsNk/C83GeRPCUVgDy0jpw7yNkZu2VD5zvh93o81h+cuw3Fhzyl5t+86Y7TvHa0EjyzCCi7WmsIPIy1Jzy00Ra6NUiru50rTTx50d47/HKcO2wwETw0f7y8sFIQvNxnkbzS4w855pVBu9FdGzx9yvC6TM80vFQjkzy/Zvs7BhtYPLjKKLqPa787A/6LOyiInbzooSq8728UPIFJ97wq+7q8R6v5u1tYMbwdomG6iSPKPAb3HTx3oTu7fGO8POqtk7ze/ug84wNkPMnq/DsB8iK9ogwOu6lBrDznguo8NQUxvHKcwDo28tm7yNmZPN1UurxCoYS80m7+Oy+9OzzGzTC836MdvCDNCrtaawi7dVLYPEfKuTxzRm88cCmjOyXSBbwGOpi879ZIO8dTJbtqnrO8NMI2vR1+J7xwTV087umfPFG17zsC30s8oYaZPKllZrzZGK47zss9vP21FryZywa9bbYFPVNapDt2G0e7E3SxPMUjgry5dNc895Hbu0H8z7ueN7a7OccxPFhfH7vC1B48n3owvEhQLrzu6Z+8HTutvEBSITw6Taa5g1XgPCzEqbxfLYk9OYQ3vBlm1bvPUTI8wIU7PIy1pzyFyP07gzGmO3NGb7yS3ty7O5CguyEhaLyWoF28pmxUOaZImrz+g/87mnU1vFbsgTxvo668PFmPO2KNTzy09VC8LG5YPHhL6rsvJPC7kTQuvEGCxDlhB9s6u58AvfCAd7z0t4k7kVjoOCkOkrxMjDq8iPOmPL0SnrxsMJG7OEG9vCUa+rvx4rE7cpxAPDCGqjukf6u8TEnAvNn57TweBBw7JdKFvIy1p7vIg8i7" # noqa: E501 data = [] for i, text in enumerate(input): diff --git a/api/tests/unit_tests/core/app/segments/test_segment.py b/api/tests/unit_tests/core/app/segments/test_segment.py index 7cc339d212..73002623f0 100644 --- a/api/tests/unit_tests/core/app/segments/test_segment.py +++ b/api/tests/unit_tests/core/app/segments/test_segment.py @@ -21,9 +21,9 @@ def test_segment_group_to_text(): segments_group = parser.convert_template(template=template, variable_pool=variable_pool) assert segments_group.text == "Hello, fake-user-id! Your query is fake-user-query. And your key is fake-secret-key." - assert ( - segments_group.log - == f"Hello, fake-user-id! Your query is fake-user-query. And your key is {encrypter.obfuscated_token('fake-secret-key')}." + assert segments_group.log == ( + f"Hello, fake-user-id! Your query is fake-user-query." + f" And your key is {encrypter.obfuscated_token('fake-secret-key')}." )