mirror of https://github.com/langgenius/dify.git
synced 2024-11-16 11:42:29 +08:00

chore: apply ruff rules on tests and app.py (#3605)

This commit is contained in:
parent d5448e07ab
commit d9b821cecc

api/app.py  12
@@ -17,10 +17,13 @@ import warnings

from flask import Flask, Response, request
from flask_cors import CORS

from werkzeug.exceptions import Unauthorized

from commands import register_commands
from config import CloudEditionConfig, Config

# DO NOT REMOVE BELOW
from events import event_handlers
from extensions import (
    ext_celery,
    ext_code_based_extension,

@@ -37,11 +40,8 @@ from extensions import (
from extensions.ext_database import db
from extensions.ext_login import login_manager
from libs.passport import PassportService
from services.account_service import AccountService

# DO NOT REMOVE BELOW
from events import event_handlers
from models import account, dataset, model, source, task, tool, tools, web
from services.account_service import AccountService

# DO NOT REMOVE ABOVE

@@ -151,9 +151,9 @@ def unauthorized_handler():
def register_blueprints(app):
    from controllers.console import bp as console_app_bp
    from controllers.files import bp as files_bp
    from controllers.inner_api import bp as inner_api_bp
    from controllers.service_api import bp as service_api_bp
    from controllers.web import bp as web_bp
    from controllers.inner_api import bp as inner_api_bp

    CORS(service_api_bp,
         allow_headers=['Content-Type', 'Authorization', 'X-App-Code'],
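The "DO NOT REMOVE" blocks above are the reason app.py could not simply be reformatted wholesale: the module imports names it never references (and re-imports some of them) purely so that event handlers and models register themselves when the module loads. Ruff reports exactly this pattern as F401 (unused-import) and F811 (redefined-while-unused). A self-contained illustration of the two codes, using throwaway standard-library names rather than repository code:

    import string        # F401: 'string' is imported but never used
    import json
    import json          # F811: 'json' is re-imported while the first binding was still unused

    print(json.loads('{"ok": true}'))   # only the final 'json' binding is ever used

Rather than scattering inline suppressions through app.py, the commit ignores these two codes for the file in pyproject.toml, shown further down.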
@@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
@@ -1,24 +1,57 @@
from flask import Blueprint

from libs.external_api import ExternalApi

bp = Blueprint('console', __name__, url_prefix='/console/api')
api = ExternalApi(bp)

# Import other controllers
from . import admin, apikey, extension, feature, setup, version, ping
from . import admin, apikey, extension, feature, ping, setup, version

# Import app controllers
from .app import (advanced_prompt_template, annotation, app, audio, completion, conversation, generator, message,
    model_config, site, statistic, workflow, workflow_run, workflow_app_log, workflow_statistic, agent)
from .app import (
    advanced_prompt_template,
    agent,
    annotation,
    app,
    audio,
    completion,
    conversation,
    generator,
    message,
    model_config,
    site,
    statistic,
    workflow,
    workflow_app_log,
    workflow_run,
    workflow_statistic,
)

# Import auth controllers
from .auth import activate, data_source_oauth, login, oauth

# Import billing controllers
from .billing import billing

# Import datasets controllers
from .datasets import data_source, datasets, datasets_document, datasets_segments, file, hit_testing
# Import explore controllers
from .explore import (audio, completion, conversation, installed_app, message, parameter, recommended_app,
    saved_message, workflow)
# Import workspace controllers
from .workspace import account, members, model_providers, models, tool_providers, workspace

# Import enterprise controllers
from .enterprise import enterprise_sso

# Import explore controllers
from .explore import (
    audio,
    completion,
    conversation,
    installed_app,
    message,
    parameter,
    recommended_app,
    saved_message,
    workflow,
)

# Import workspace controllers
from .workspace import account, members, model_providers, models, tool_providers, workspace
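The hunk above is representative of most of this commit: ruff's import-sorting rules order names alphabetically and, when a list is long, rewrite it as a parenthesized block with one name per line and a trailing comma. A hedged, self-contained sketch using a standard-library module instead of repository code:

    # Hand-wrapped form that the sorter rewrites:
    #   from os.path import (join, basename,
    #                        dirname, splitext)
    # Sorted, one-name-per-line form that ruff produces:
    from os.path import (
        basename,
        dirname,
        join,
        splitext,
    )

    print(splitext(basename(join(dirname("/tmp/demo"), "example.txt"))))  # ('example', '.txt')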
@@ -2,13 +2,15 @@ import json

from flask_login import current_user
from flask_restful import Resource, inputs, marshal_with, reqparse
from werkzeug.exceptions import Forbidden, BadRequest
from werkzeug.exceptions import BadRequest, Forbidden

from controllers.console import api
from controllers.console.app.wraps import get_app_model
from controllers.console.setup import setup_required
from controllers.console.wraps import account_initialization_required, cloud_edition_billing_resource_check
from core.agent.entities import AgentToolEntity
from core.tools.tool_manager import ToolManager
from core.tools.utils.configuration import ToolParameterConfigurationManager
from extensions.ext_database import db
from fields.app_fields import (
    app_detail_fields,

@@ -16,11 +18,8 @@ from fields.app_fields import (
    app_pagination_fields,
)
from libs.login import login_required
from models.model import App, AppMode, AppModelConfig
from services.app_service import AppService
from models.model import App, AppModelConfig, AppMode
from core.tools.utils.configuration import ToolParameterConfigurationManager
from core.tools.tool_manager import ToolManager


ALLOW_CREATE_APP_MODES = ['chat', 'agent-chat', 'advanced-chat', 'workflow', 'completion']
@@ -1,5 +1,5 @@
# -*- coding:utf-8 -*-
from flask import Blueprint

from libs.external_api import ExternalApi

bp = Blueprint('files', __name__)

@@ -1,4 +1,5 @@
from flask import Blueprint

from libs.external_api import ExternalApi

bp = Blueprint('inner_api', __name__, url_prefix='/inner/api')

@@ -1,5 +1,5 @@
# -*- coding:utf-8 -*-
from flask import Blueprint

from libs.external_api import ExternalApi

bp = Blueprint('service_api', __name__, url_prefix='/v1')

@@ -1,5 +1,5 @@
# -*- coding:utf-8 -*-
from flask import Blueprint

from libs.external_api import ExternalApi

bp = Blueprint('web', __name__, url_prefix='/api')
@@ -7,7 +7,7 @@ from controllers.web import api
from controllers.web.error import AppUnavailableError
from controllers.web.wraps import WebApiResource
from extensions.ext_database import db
from models.model import App, AppModelConfig, AppMode
from models.model import App, AppMode, AppModelConfig
from models.tools import ApiToolProvider
from services.app_service import AppService
@@ -1,6 +1,15 @@

from .__version__ import __version__
from ._client import ZhipuAI
from .core._errors import (APIAuthenticationError, APIInternalError, APIReachLimitError, APIRequestFailedError,
    APIResponseError, APIResponseValidationError, APIServerFlowExceedError, APIStatusError,
    APITimeoutError, ZhipuAIError)
from .core._errors import (
    APIAuthenticationError,
    APIInternalError,
    APIReachLimitError,
    APIRequestFailedError,
    APIResponseError,
    APIResponseValidationError,
    APIServerFlowExceedError,
    APIStatusError,
    APITimeoutError,
    ZhipuAIError,
)
@@ -6,5 +6,5 @@ from .create_site_record_when_app_created import handle
from .deduct_quota_when_messaeg_created import handle
from .delete_installed_app_when_app_deleted import handle
from .update_app_dataset_join_when_app_model_config_updated import handle
from .update_provider_last_used_at_when_messaeg_created import handle
from .update_app_dataset_join_when_app_published_workflow_updated import handle
from .update_provider_last_used_at_when_messaeg_created import handle

@@ -1 +0,0 @@
# -*- coding:utf-8 -*-
@@ -3,9 +3,6 @@ requires-python = ">=3.10"

[tool.ruff]
exclude = [
    "app.py",
    "__init__.py",
    "tests/",
]
line-length = 120

@@ -26,6 +23,20 @@ ignore = [
    "UP032", # f-string
]

[tool.ruff.lint.per-file-ignores]
"app.py" = [
    "F401", # unused-import
    "F811", # redefined-while-unused
]
"__init__.py" = [
    "F401", # unused-import
    "F811", # redefined-while-unused
]
"tests/*" = [
    "F401", # unused-import
    "F811", # redefined-while-unused
]

[tool.pytest_env]
OPENAI_API_KEY = "sk-IamNotARealKeyJustForMockTestKawaiiiiiiiiii"
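Taken together, the two pyproject.toml hunks above trade the blanket exclude of app.py, __init__.py and tests/ for targeted per-file ignores: those files are now linted, but F401 (unused-import) and F811 (redefined-while-unused) stay silent where side-effect imports and re-exports are intentional. The commit does not record the exact command used to lint locally; something like `ruff check .` run from the api directory is an assumed invocation, not taken from the diff. As a hedged aside, the same effect is available per line rather than per file, though the commit prefers the configuration table:

    import string  # noqa: F401  -- hypothetical one-off suppression, not repository code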
@@ -1,2 +1 @@
# -*- coding:utf-8 -*-
import services.errors

@@ -1,4 +1,3 @@
# -*- coding:utf-8 -*-
__all__ = [
    'base', 'conversation', 'message', 'index', 'app_model_config', 'account', 'document', 'dataset',
    'app', 'completion', 'audio', 'file'
@@ -1,22 +1,32 @@
import os
from collections.abc import Iterable
from time import sleep
from typing import Any, Literal, Union, Iterable

from anthropic.resources import Messages
from anthropic.types.message_delta_event import Delta
from typing import Any, Literal, Union

import anthropic
import pytest
from _pytest.monkeypatch import MonkeyPatch
from anthropic import Anthropic, Stream
from anthropic.types import MessageParam, Message, MessageStreamEvent, \
    ContentBlock, MessageStartEvent, Usage, TextDelta, MessageDeltaEvent, MessageStopEvent, ContentBlockDeltaEvent, \
    MessageDeltaUsage
from anthropic.resources import Messages
from anthropic.types import (
    ContentBlock,
    ContentBlockDeltaEvent,
    Message,
    MessageDeltaEvent,
    MessageDeltaUsage,
    MessageParam,
    MessageStartEvent,
    MessageStopEvent,
    MessageStreamEvent,
    TextDelta,
    Usage,
)
from anthropic.types.message_delta_event import Delta

MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'


class MockAnthropicClass(object):
class MockAnthropicClass:
    @staticmethod
    def mocked_anthropic_chat_create_sync(model: str) -> Message:
        return Message(
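Two other ruff rule families appear throughout these mock classes: the redundant object base is dropped from class definitions, and container types move off typing onto the built-ins and collections.abc. A small self-contained sketch of both changes (placeholder class, not taken from the test suite):

    from collections.abc import Iterable   # was: from typing import Iterable

    class MockClient:                        # was: class MockClient(object):
        def shout(self, words: Iterable[str]) -> list[str]:   # was: -> List[str]
            return [word.upper() for word in words]

    print(MockClient().shout(["mock", "client"]))  # ['MOCK', 'CLIENT']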
@@ -1,4 +1,4 @@
from typing import Generator, List
from collections.abc import Generator

import google.generativeai.types.content_types as content_types
import google.generativeai.types.generation_types as generation_config_types

@@ -6,15 +6,15 @@ import google.generativeai.types.safety_types as safety_types
import pytest
from _pytest.monkeypatch import MonkeyPatch
from google.ai import generativelanguage as glm
from google.ai.generativelanguage_v1beta.types import content as gag_content
from google.generativeai import GenerativeModel
from google.generativeai.client import _ClientManager, configure
from google.generativeai.types import GenerateContentResponse
from google.generativeai.types.generation_types import BaseGenerateContentResponse
from google.ai.generativelanguage_v1beta.types import content as gag_content

current_api_key = ''

class MockGoogleResponseClass(object):
class MockGoogleResponseClass:
    _done = False

    def __iter__(self):

@@ -41,7 +41,7 @@ class MockGoogleResponseClass(object):
        chunks=[]
    )

class MockGoogleResponseCandidateClass(object):
class MockGoogleResponseCandidateClass:
    finish_reason = 'stop'

    @property

@@ -52,7 +52,7 @@ class MockGoogleResponseCandidateClass(object):
        ]
    )

class MockGoogleClass(object):
class MockGoogleClass:
    @staticmethod
    def generate_content_sync() -> GenerateContentResponse:
        return GenerateContentResponse(

@@ -91,7 +91,7 @@ class MockGoogleClass(object):
        return 'it\'s google!'

    @property
    def generative_response_candidates(self) -> List[MockGoogleResponseCandidateClass]:
    def generative_response_candidates(self) -> list[MockGoogleResponseCandidateClass]:
        return [MockGoogleResponseCandidateClass()]

    def make_client(self: _ClientManager, name: str):
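The Generator import is the single most repeated change in the rest of the diff: from typing import Generator becomes from collections.abc import Generator, and List[...] annotations become list[...]. A self-contained sketch of the modernised form (the function is a placeholder, not test-suite code):

    from collections.abc import Generator  # was: from typing import Generator

    def count_up(limit: int) -> Generator[int, None, None]:
        # Yield 0..limit-1, annotated with the collections.abc generic.
        for value in range(limit):
            yield value

    print(list(count_up(3)))  # [0, 1, 2]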
@@ -1,9 +1,9 @@
import os
from typing import Any, Dict, List

import pytest
from _pytest.monkeypatch import MonkeyPatch
from huggingface_hub import InferenceClient

from tests.integration_tests.model_runtime.__mock.huggingface_chat import MockHuggingfaceChatClass

MOCK = os.getenv('MOCK_SWITCH', 'false').lower() == 'true'

@@ -1,14 +1,20 @@
import re
from typing import Any, Generator, List, Literal, Optional, Union
from collections.abc import Generator
from typing import Any, Literal, Optional, Union

from _pytest.monkeypatch import MonkeyPatch
from huggingface_hub import InferenceClient
from huggingface_hub.inference._text_generation import (Details, StreamDetails, TextGenerationResponse,
    TextGenerationStreamResponse, Token)
from huggingface_hub.inference._text_generation import (
    Details,
    StreamDetails,
    TextGenerationResponse,
    TextGenerationStreamResponse,
    Token,
)
from huggingface_hub.utils import BadRequestError


class MockHuggingfaceChatClass(object):
class MockHuggingfaceChatClass:
    @staticmethod
    def generate_create_sync(model: str) -> TextGenerationResponse:
        response = TextGenerationResponse(
@@ -1,7 +1,9 @@
import os
from typing import Callable, List, Literal
from collections.abc import Callable
from typing import Literal

import pytest

# import monkeypatch
from _pytest.monkeypatch import MonkeyPatch
from openai.resources.audio.transcriptions import Transcriptions

@@ -10,6 +12,7 @@ from openai.resources.completions import Completions
from openai.resources.embeddings import Embeddings
from openai.resources.models import Models
from openai.resources.moderations import Moderations

from tests.integration_tests.model_runtime.__mock.openai_chat import MockChatClass
from tests.integration_tests.model_runtime.__mock.openai_completion import MockCompletionsClass
from tests.integration_tests.model_runtime.__mock.openai_embeddings import MockEmbeddingsClass

@@ -18,7 +21,7 @@ from tests.integration_tests.model_runtime.__mock.openai_remote import MockModel
from tests.integration_tests.model_runtime.__mock.openai_speech2text import MockSpeech2TextClass


def mock_openai(monkeypatch: MonkeyPatch, methods: List[Literal["completion", "chat", "remote", "moderation", "speech2text", "text_embedding"]]) -> Callable[[], None]:
def mock_openai(monkeypatch: MonkeyPatch, methods: list[Literal["completion", "chat", "remote", "moderation", "speech2text", "text_embedding"]]) -> Callable[[], None]:
    """
    mock openai module
@@ -1,31 +1,44 @@
import re
from collections.abc import Generator
from json import dumps, loads
from time import sleep, time

# import monkeypatch
from typing import Any, Generator, List, Literal, Optional, Union
from typing import Any, Literal, Optional, Union

import openai.types.chat.completion_create_params as completion_create_params
from core.model_runtime.errors.invoke import InvokeAuthorizationError
from openai import AzureOpenAI, OpenAI
from openai._types import NOT_GIVEN, NotGiven
from openai.resources.chat.completions import Completions
from openai.types import Completion as CompletionMessage
from openai.types.chat import (ChatCompletion, ChatCompletionChunk, ChatCompletionMessageParam,
    ChatCompletionMessageToolCall, ChatCompletionToolChoiceOptionParam,
    ChatCompletionToolParam)
from openai.types.chat import (
    ChatCompletion,
    ChatCompletionChunk,
    ChatCompletionMessageParam,
    ChatCompletionMessageToolCall,
    ChatCompletionToolChoiceOptionParam,
    ChatCompletionToolParam,
)
from openai.types.chat.chat_completion import ChatCompletion as _ChatCompletion
from openai.types.chat.chat_completion import Choice as _ChatCompletionChoice
from openai.types.chat.chat_completion_chunk import (Choice, ChoiceDelta, ChoiceDeltaFunctionCall, ChoiceDeltaToolCall,
    ChoiceDeltaToolCallFunction)
from openai.types.chat.chat_completion_chunk import (
    Choice,
    ChoiceDelta,
    ChoiceDeltaFunctionCall,
    ChoiceDeltaToolCall,
    ChoiceDeltaToolCallFunction,
)
from openai.types.chat.chat_completion_message import ChatCompletionMessage, FunctionCall
from openai.types.chat.chat_completion_message_tool_call import Function
from openai.types.completion_usage import CompletionUsage

from core.model_runtime.errors.invoke import InvokeAuthorizationError

class MockChatClass(object):

class MockChatClass:
    @staticmethod
    def generate_function_call(
        functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
    ) -> Optional[FunctionCall]:
        if not functions or len(functions) == 0:
            return None

@@ -61,8 +74,8 @@ class MockChatClass(object):

    @staticmethod
    def generate_tool_calls(
        tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
    ) -> Optional[List[ChatCompletionMessageToolCall]]:
        tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
    ) -> Optional[list[ChatCompletionMessageToolCall]]:
        list_tool_calls = []
        if not tools or len(tools) == 0:
            return None

@@ -91,8 +104,8 @@ class MockChatClass(object):
    @staticmethod
    def mocked_openai_chat_create_sync(
        model: str,
        functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
    ) -> CompletionMessage:
        tool_calls = []
        function_call = MockChatClass.generate_function_call(functions=functions)

@@ -128,8 +141,8 @@ class MockChatClass(object):
    @staticmethod
    def mocked_openai_chat_create_stream(
        model: str,
        functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
    ) -> Generator[ChatCompletionChunk, None, None]:
        tool_calls = []
        function_call = MockChatClass.generate_function_call(functions=functions)

@@ -197,17 +210,17 @@ class MockChatClass(object):
    )

    def chat_create(self: Completions, *,
        messages: List[ChatCompletionMessageParam],
        messages: list[ChatCompletionMessageParam],
        model: Union[str,Literal[
            "gpt-4-1106-preview", "gpt-4-vision-preview", "gpt-4", "gpt-4-0314", "gpt-4-0613",
            "gpt-4-32k", "gpt-4-32k-0314", "gpt-4-32k-0613",
            "gpt-3.5-turbo-1106", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0301",
            "gpt-3.5-turbo-0613", "gpt-3.5-turbo-16k-0613"],
        ],
        functions: List[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        functions: list[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        tools: List[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        tools: list[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        **kwargs: Any,
    ):
        openai_models = [
@@ -1,9 +1,10 @@
import re
from collections.abc import Generator
from time import sleep, time
# import monkeypatch
from typing import Any, Generator, List, Literal, Optional, Union

from core.model_runtime.errors.invoke import InvokeAuthorizationError
# import monkeypatch
from typing import Any, Literal, Optional, Union

from openai import AzureOpenAI, BadRequestError, OpenAI
from openai._types import NOT_GIVEN, NotGiven
from openai.resources.completions import Completions

@@ -11,8 +12,10 @@ from openai.types import Completion as CompletionMessage
from openai.types.completion import CompletionChoice
from openai.types.completion_usage import CompletionUsage

from core.model_runtime.errors.invoke import InvokeAuthorizationError

class MockCompletionsClass(object):

class MockCompletionsClass:
    @staticmethod
    def mocked_openai_completion_create_sync(
        model: str

@@ -90,7 +93,7 @@ class MockCompletionsClass(object):
            "code-davinci-002", "text-curie-001", "text-babbage-001",
            "text-ada-001"],
        ],
        prompt: Union[str, List[str], List[int], List[List[int]], None],
        prompt: Union[str, list[str], list[int], list[list[int]], None],
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        **kwargs: Any
    ):

@@ -1,18 +1,19 @@
import re
from typing import Any, List, Literal, Union
from typing import Any, Literal, Union

from core.model_runtime.errors.invoke import InvokeAuthorizationError
from openai import OpenAI
from openai._types import NOT_GIVEN, NotGiven
from openai.resources.embeddings import Embeddings
from openai.types.create_embedding_response import CreateEmbeddingResponse, Usage
from openai.types.embedding import Embedding

from core.model_runtime.errors.invoke import InvokeAuthorizationError

class MockEmbeddingsClass(object):

class MockEmbeddingsClass:
    def create_embeddings(
        self: Embeddings, *,
        input: Union[str, List[str], List[int], List[List[int]]],
        input: Union[str, list[str], list[int], list[list[int]]],
        model: Union[str, Literal["text-embedding-ada-002"]],
        encoding_format: Literal["float", "base64"] | NotGiven = NOT_GIVEN,
        **kwargs: Any
@@ -1,16 +1,17 @@
import re
from typing import Any, List, Literal, Union
from typing import Any, Literal, Union

from core.model_runtime.errors.invoke import InvokeAuthorizationError
from openai._types import NOT_GIVEN, NotGiven
from openai.resources.moderations import Moderations
from openai.types import ModerationCreateResponse
from openai.types.moderation import Categories, CategoryScores, Moderation

from core.model_runtime.errors.invoke import InvokeAuthorizationError

class MockModerationClass(object):

class MockModerationClass:
    def moderation_create(self: Moderations,*,
        input: Union[str, List[str]],
        input: Union[str, list[str]],
        model: Union[str, Literal["text-moderation-latest", "text-moderation-stable"]] | NotGiven = NOT_GIVEN,
        **kwargs: Any
    ) -> ModerationCreateResponse:

@@ -1,18 +1,17 @@
from time import time
from typing import List

from openai.resources.models import Models
from openai.types.model import Model


class MockModelClass(object):
class MockModelClass:
    """
    mock class for openai.models.Models
    """
    def list(
        self,
        **kwargs,
    ) -> List[Model]:
    ) -> list[Model]:
        return [
            Model(
                id='ft:gpt-3.5-turbo-0613:personal::8GYJLPDQ',
@@ -1,13 +1,14 @@
import re
from typing import Any, List, Literal, Union
from typing import Any, Literal, Union

from core.model_runtime.errors.invoke import InvokeAuthorizationError
from openai._types import NOT_GIVEN, FileTypes, NotGiven
from openai.resources.audio.transcriptions import Transcriptions
from openai.types.audio.transcription import Transcription

from core.model_runtime.errors.invoke import InvokeAuthorizationError

class MockSpeech2TextClass(object):

class MockSpeech2TextClass:
    def speech2text_create(self: Transcriptions,
        *,
        file: FileTypes,
@@ -1,19 +1,24 @@
import os
import re
from typing import List, Union
from typing import Union

import pytest
from _pytest.monkeypatch import MonkeyPatch
from requests import Response
from requests.exceptions import ConnectionError
from requests.sessions import Session
from xinference_client.client.restful.restful_client import (Client, RESTfulChatglmCppChatModelHandle,
    RESTfulChatModelHandle, RESTfulEmbeddingModelHandle,
    RESTfulGenerateModelHandle, RESTfulRerankModelHandle)
from xinference_client.client.restful.restful_client import (
    Client,
    RESTfulChatglmCppChatModelHandle,
    RESTfulChatModelHandle,
    RESTfulEmbeddingModelHandle,
    RESTfulGenerateModelHandle,
    RESTfulRerankModelHandle,
)
from xinference_client.types import Embedding, EmbeddingData, EmbeddingUsage


class MockXinferenceClass(object):
class MockXinferenceClass:
    def get_chat_model(self: Client, model_uid: str) -> Union[RESTfulChatglmCppChatModelHandle, RESTfulGenerateModelHandle, RESTfulChatModelHandle]:
        if not re.match(r'https?:\/\/[^\s\/$.?#].[^\s]*$', self.base_url):
            raise RuntimeError('404 Not Found')

@@ -101,7 +106,7 @@ class MockXinferenceClass(object):
    def _check_cluster_authenticated(self):
        self._cluster_authed = True

    def rerank(self: RESTfulRerankModelHandle, documents: List[str], query: str, top_n: int) -> dict:
    def rerank(self: RESTfulRerankModelHandle, documents: list[str], query: str, top_n: int) -> dict:
        # check if self._model_uid is a valid uuid
        if not re.match(r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', self._model_uid) and \
                self._model_uid != 'rerank':

@@ -126,7 +131,7 @@ class MockXinferenceClass(object):

    def create_embedding(
        self: RESTfulGenerateModelHandle,
        input: Union[str, List[str]],
        input: Union[str, list[str]],
        **kwargs
    ) -> dict:
        # check if self._model_uid is a valid uuid
@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.anthropic.anthropic import AnthropicProvider
from tests.integration_tests.model_runtime.__mock.anthropic import setup_anthropic_mock

@@ -1,11 +1,17 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
    PromptMessageTool, SystemPromptMessage,
    TextPromptMessageContent, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    ImagePromptMessageContent,
    PromptMessageTool,
    SystemPromptMessage,
    TextPromptMessageContent,
    UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.azure_openai.llm.llm import AzureOpenAILargeLanguageModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.azure_openai.text_embedding.text_embedding import AzureOpenAITextEmbeddingModel

@@ -1,8 +1,9 @@
import os
from collections.abc import Generator
from time import sleep
from typing import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import AIModelEntity

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.baichuan.baichuan import BaichuanProvider

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.baichuan.text_embedding.text_embedding import BaichuanTextEmbeddingModel

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.bedrock.bedrock import BedrockProvider
@@ -1,11 +1,16 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
    SystemPromptMessage, TextPromptMessageContent,
    UserPromptMessage)
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageTool,
    SystemPromptMessage,
    TextPromptMessageContent,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.chatglm.llm.llm import ChatGLMLargeLanguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.chatglm.chatglm import ChatGLMProvider
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.cohere.cohere import CohereProvider

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.rerank_entities import RerankResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.cohere.rerank.rerank import CohereRerankModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.cohere.text_embedding.text_embedding import CohereTextEmbeddingModel

@@ -1,11 +1,16 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
    SystemPromptMessage, TextPromptMessageContent,
    UserPromptMessage)
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    ImagePromptMessageContent,
    SystemPromptMessage,
    TextPromptMessageContent,
    UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.google.llm.llm import GoogleLargeLanguageModel
from tests.integration_tests.model_runtime.__mock.google import setup_google_mock

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.google.google import GoogleProvider
from tests.integration_tests.model_runtime.__mock.google import setup_google_mock

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError
@@ -1,10 +1,12 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.huggingface_hub.text_embedding.text_embedding import \
    HuggingfaceHubTextEmbeddingModel
from core.model_runtime.model_providers.huggingface_hub.text_embedding.text_embedding import (
    HuggingfaceHubTextEmbeddingModel,
)


def test_hosted_inference_api_validate_credentials():

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.jina.jina import JinaProvider

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.jina.text_embedding.text_embedding import JinaTextEmbeddingModel

@@ -1,11 +1,16 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
    SystemPromptMessage, TextPromptMessageContent,
    UserPromptMessage)
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageTool,
    SystemPromptMessage,
    TextPromptMessageContent,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import ParameterRule
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.localai.llm.llm import LocalAILarguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.minimax.text_embedding.text_embedding import MinimaxTextEmbeddingModel

@@ -1,8 +1,9 @@
import os
from collections.abc import Generator
from time import sleep
from typing import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import AIModelEntity

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.minimax.minimax import MinimaxProvider

@@ -1,11 +1,16 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
    SystemPromptMessage, TextPromptMessageContent,
    UserPromptMessage)
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    ImagePromptMessageContent,
    SystemPromptMessage,
    TextPromptMessageContent,
    UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.ollama.llm.llm import OllamaLargeLanguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.ollama.text_embedding.text_embedding import OllamaEmbeddingModel
@@ -1,11 +1,17 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, ImagePromptMessageContent,
    PromptMessageTool, SystemPromptMessage,
    TextPromptMessageContent, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    ImagePromptMessageContent,
    PromptMessageTool,
    SystemPromptMessage,
    TextPromptMessageContent,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity, ModelType
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.moderation.moderation import OpenAIModerationModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.openai import OpenAIProvider
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.speech2text.speech2text import OpenAISpeech2TextModel
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai.text_embedding.text_embedding import OpenAITextEmbeddingModel

@@ -1,10 +1,15 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
    SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageTool,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAIAPICompatLargeLanguageModel

@@ -1,10 +1,12 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import \
    OAICompatEmbeddingModel
from core.model_runtime.model_providers.openai_api_compatible.text_embedding.text_embedding import (
    OAICompatEmbeddingModel,
)

"""
Using OpenAI's API as testing endpoint

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openllm.text_embedding.text_embedding import OpenLLMTextEmbeddingModel
@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,10 +1,15 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
    SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageTool,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openrouter.llm.llm import OpenRouterLargeLanguageModel

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.replicate.text_embedding.text_embedding import ReplicateEmbeddingModel

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.spark.spark import SparkProvider

@@ -1,10 +1,15 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
    SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageTool,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.togetherai.llm.llm import TogetherAILargeLanguageModel

@@ -1,7 +1,8 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.errors.validate import CredentialsValidateFailedError

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.tongyi.tongyi import TongyiProvider
@@ -1,8 +1,9 @@
import os
from collections.abc import Generator
from time import sleep
from typing import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, SystemPromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import AIModelEntity

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.wenxin.wenxin import WenxinProvider

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.text_embedding.text_embedding import XinferenceTextEmbeddingModel

@@ -1,11 +1,16 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
    SystemPromptMessage, TextPromptMessageContent,
    UserPromptMessage)
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageTool,
    SystemPromptMessage,
    TextPromptMessageContent,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.llm.llm import XinferenceAILargeLanguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.rerank_entities import RerankResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.rerank.rerank import XinferenceRerankModel

@@ -1,10 +1,15 @@
import os
from typing import Generator
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
    SystemPromptMessage, UserPromptMessage)
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageTool,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.zhipuai.llm.llm import ZhipuAILargeLanguageModel

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.zhipuai.zhipuai import ZhipuaiProvider

@@ -1,6 +1,7 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.zhipuai.text_embedding.text_embedding import ZhipuAITextEmbeddingModel

@@ -1,4 +1,5 @@
import pytest

from core.tools.tool_manager import ToolManager

provider_generator = ToolManager.list_builtin_providers()
@@ -1,6 +1,6 @@
import os

from core.utils.module_import_helper import load_single_subclass_from_source, import_module_from_source
from core.utils.module_import_helper import import_module_from_source, load_single_subclass_from_source
from tests.integration_tests.utils.parent_class import ParentClass

@@ -1,8 +1,9 @@
import os
import pytest

from typing import Literal

import pytest
from _pytest.monkeypatch import MonkeyPatch

from core.helper.code_executor.code_executor import CodeExecutor

MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'

@@ -1,14 +1,14 @@
import os
from json import dumps
from typing import Literal

import httpx._api as httpx
import pytest
import requests.api as requests
import httpx._api as httpx
from requests import Response as RequestsResponse
from httpx import Request as HttpxRequest
from yarl import URL

from typing import Literal
from _pytest.monkeypatch import MonkeyPatch
from json import dumps
from httpx import Request as HttpxRequest
from requests import Response as RequestsResponse
from yarl import URL

MOCK = os.getenv('MOCK_SWITCH', 'false') == 'true'

@@ -1,13 +1,13 @@
import pytest
from core.app.entities.app_invoke_entities import InvokeFrom
from os import getenv

import pytest

from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.nodes.code.code_node import CodeNode
from models.workflow import WorkflowNodeExecutionStatus
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock

from os import getenv

CODE_MAX_STRING_LENGTH = int(getenv('CODE_MAX_STRING_LENGTH', '10000'))

@pytest.mark.parametrize('setup_code_executor_mock', [['none']], indirect=True)
@@ -1,8 +1,8 @@
import pytest

from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities.variable_pool import VariablePool
from core.workflow.nodes.http_request.http_request_node import HttpRequestNode

from tests.integration_tests.workflow.nodes.__mock.http import setup_http_mock

BASIC_NODE_DATA = {

@@ -4,8 +4,8 @@ from unittest.mock import MagicMock
import pytest

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.entities.provider_configuration import ProviderModelBundle, ProviderConfiguration
from core.entities.provider_entities import SystemConfiguration, CustomConfiguration, CustomProviderConfiguration
from core.entities.provider_configuration import ProviderConfiguration, ProviderModelBundle
from core.entities.provider_entities import CustomConfiguration, CustomProviderConfiguration, SystemConfiguration
from core.model_manager import ModelInstance
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.model_providers import ModelProviderFactory

@@ -6,6 +6,7 @@ from core.workflow.nodes.template_transform.template_transform_node import Templ
from models.workflow import WorkflowNodeExecutionStatus
from tests.integration_tests.workflow.nodes.__mock.code_executor import setup_code_executor_mock


@pytest.mark.parametrize('setup_code_executor_mock', [['none']], indirect=True)
def test_execute_code(setup_code_executor_mock):
    code = '''{{args2}}'''

@@ -1,9 +1,9 @@
from core.app.entities.app_invoke_entities import InvokeFrom

from core.workflow.entities.variable_pool import VariablePool
from core.workflow.nodes.tool.tool_node import ToolNode
from models.workflow import WorkflowNodeExecutionStatus


def test_tool_variable_invoke():
    pool = VariablePool(system_variables={}, user_inputs={})
    pool.append_variable(node_id='1', variable_key_list=['123', 'args1'], value='1+1')
@@ -2,12 +2,12 @@ from unittest.mock import MagicMock

import pytest

from core.app.app_config.entities import ModelConfigEntity, FileExtraConfig
from core.file.file_obj import FileVar, FileType, FileTransferMethod
from core.app.app_config.entities import FileExtraConfig, ModelConfigEntity
from core.file.file_obj import FileTransferMethod, FileType, FileVar
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.message_entities import UserPromptMessage, AssistantPromptMessage, PromptMessageRole
from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessageRole, UserPromptMessage
from core.prompt.advanced_prompt_transform import AdvancedPromptTransform
from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig, ChatModelMessage
from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
from models.model import Conversation

@@ -3,7 +3,7 @@ from unittest.mock import MagicMock
from core.app.app_config.entities import ModelConfigEntity
from core.entities.provider_configuration import ProviderModelBundle
from core.model_runtime.entities.message_entities import UserPromptMessage
from core.model_runtime.entities.model_entities import ModelPropertyKey, AIModelEntity, ParameterRule
from core.model_runtime.entities.model_entities import AIModelEntity, ModelPropertyKey, ParameterRule
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.prompt.prompt_transform import PromptTransform

@@ -2,7 +2,7 @@ from unittest.mock import MagicMock

from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity
from core.memory.token_buffer_memory import TokenBufferMemory
from core.model_runtime.entities.message_entities import UserPromptMessage, AssistantPromptMessage
from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.prompt.simple_prompt_transform import SimplePromptTransform
from models.model import AppMode, Conversation

@@ -4,9 +4,17 @@ from unittest.mock import MagicMock

import pytest

from core.app.app_config.entities import VariableEntity, ExternalDataVariableEntity, DatasetEntity, \
    DatasetRetrieveConfigEntity, ModelConfigEntity, PromptTemplateEntity, AdvancedChatPromptTemplateEntity, \
    AdvancedChatMessageEntity, AdvancedCompletionPromptTemplateEntity
from core.app.app_config.entities import (
    AdvancedChatMessageEntity,
    AdvancedChatPromptTemplateEntity,
    AdvancedCompletionPromptTemplateEntity,
    DatasetEntity,
    DatasetRetrieveConfigEntity,
    ExternalDataVariableEntity,
    ModelConfigEntity,
    PromptTemplateEntity,
    VariableEntity,
)
from core.helper import encrypter
from core.model_runtime.entities.llm_entities import LLMMode
from core.model_runtime.entities.message_entities import PromptMessageRole