diff --git a/api/core/model_runtime/docs/zh_Hans/customizable_model_scale_out.md b/api/core/model_runtime/docs/zh_Hans/customizable_model_scale_out.md
index 1a138861c1..97b3720e8c 100644
--- a/api/core/model_runtime/docs/zh_Hans/customizable_model_scale_out.md
+++ b/api/core/model_runtime/docs/zh_Hans/customizable_model_scale_out.md
@@ -260,7 +260,7 @@ provider_credential_schema:
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_type=model_type,
             model_properties={
-                'mode': ModelType.LLM,
+                ModelPropertyKey.MODE: ModelType.LLM,
             },
             parameter_rules=rules
         )
diff --git a/api/core/model_runtime/model_providers/azure_openai/_constant.py b/api/core/model_runtime/model_providers/azure_openai/_constant.py
index 3ffcd96769..f9d94c9612 100644
--- a/api/core/model_runtime/model_providers/azure_openai/_constant.py
+++ b/api/core/model_runtime/model_providers/azure_openai/_constant.py
@@ -40,8 +40,8 @@ LLM_BASE_MODELS = [
             ],
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                'mode': LLMMode.CHAT.value,
-                'context_size': 4096,
+                ModelPropertyKey.MODE: LLMMode.CHAT.value,
+                ModelPropertyKey.CONTEXT_SIZE: 4096,
             },
             parameter_rules=[
                 ParameterRule(
@@ -84,8 +84,8 @@
             ],
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                'mode': LLMMode.CHAT.value,
-                'context_size': 16385,
+                ModelPropertyKey.MODE: LLMMode.CHAT.value,
+                ModelPropertyKey.CONTEXT_SIZE: 16385,
             },
             parameter_rules=[
                 ParameterRule(
@@ -128,8 +128,8 @@
             ],
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                'mode': LLMMode.CHAT.value,
-                'context_size': 8192,
+                ModelPropertyKey.MODE: LLMMode.CHAT.value,
+                ModelPropertyKey.CONTEXT_SIZE: 8192,
             },
             parameter_rules=[
                 ParameterRule(
@@ -202,8 +202,8 @@
             ],
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                'mode': LLMMode.CHAT.value,
-                'context_size': 32768,
+                ModelPropertyKey.MODE: LLMMode.CHAT.value,
+                ModelPropertyKey.CONTEXT_SIZE: 32768,
             },
             parameter_rules=[
                 ParameterRule(
@@ -276,8 +276,8 @@
             ],
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                'mode': LLMMode.CHAT.value,
-                'context_size': 128000,
+                ModelPropertyKey.MODE: LLMMode.CHAT.value,
+                ModelPropertyKey.CONTEXT_SIZE: 128000,
             },
             parameter_rules=[
                 ParameterRule(
@@ -349,8 +349,8 @@
             ],
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                'mode': LLMMode.CHAT.value,
-                'context_size': 128000,
+                ModelPropertyKey.MODE: LLMMode.CHAT.value,
+                ModelPropertyKey.CONTEXT_SIZE: 128000,
             },
             parameter_rules=[
                 ParameterRule(
@@ -419,8 +419,8 @@
             model_type=ModelType.LLM,
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                'mode': LLMMode.COMPLETION.value,
-                'context_size': 4096,
+                ModelPropertyKey.MODE: LLMMode.COMPLETION.value,
+                ModelPropertyKey.CONTEXT_SIZE: 4096,
             },
             parameter_rules=[
                 ParameterRule(
@@ -459,8 +459,8 @@
             model_type=ModelType.LLM,
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_properties={
-                'mode': LLMMode.COMPLETION.value,
-                'context_size': 4096,
+                ModelPropertyKey.MODE: LLMMode.COMPLETION.value,
+                ModelPropertyKey.CONTEXT_SIZE: 4096,
             },
             parameter_rules=[
                 ParameterRule(
diff --git a/api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py b/api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py
index 33df2ec340..8f4b9d903c 100644
--- a/api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py
+++ b/api/core/model_runtime/model_providers/huggingface_hub/llm/llm.py
@@ -10,7 +10,7 @@ from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk,
 from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool, AssistantPromptMessage, \
     UserPromptMessage, SystemPromptMessage
 from core.model_runtime.entities.model_entities import ParameterRule, DefaultParameterName, AIModelEntity, ModelType, \
-    FetchFrom
+    FetchFrom, ModelPropertyKey
 from core.model_runtime.errors.validate import CredentialsValidateFailedError
 from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
 from core.model_runtime.model_providers.huggingface_hub._common import _CommonHuggingfaceHub
@@ -97,7 +97,7 @@ class HuggingfaceHubLargeLanguageModel(_CommonHuggingfaceHub, LargeLanguageModel
             fetch_from=FetchFrom.CUSTOMIZABLE_MODEL,
             model_type=ModelType.LLM,
             model_properties={
-                'mode': LLMMode.COMPLETION.value
+                ModelPropertyKey.MODE: LLMMode.COMPLETION.value
             },
             parameter_rules=self._get_customizable_model_parameter_rules()
         )
diff --git a/api/requirements.txt b/api/requirements.txt
index faf8ea06a7..0780ef3644 100644
--- a/api/requirements.txt
+++ b/api/requirements.txt
@@ -57,8 +57,8 @@ qdrant-client==1.6.4
 cohere~=4.32
 pyyaml~=6.0.1
 numpy~=1.25.2
-unstructured~=0.10.27
 unstructured[docx,pptx,msg,md,ppt]~=0.10.27
 bs4~=0.0.1
 markdown~=3.5.1
-google-generativeai~=0.3.2
\ No newline at end of file
+google-generativeai~=0.3.2
+httpx[socks]~=0.24.1
\ No newline at end of file
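
Note for reviewers (not part of the patch itself): the substantive change above is mechanical. Every provider's model_properties dict now uses ModelPropertyKey enum members instead of bare string keys such as 'mode' and 'context_size', and huggingface_hub/llm/llm.py gains the matching import. The sketch below is a minimal, self-contained illustration of why the two key styles are not interchangeable; the ModelPropertyKey and LLMMode classes here are simplified stand-ins for the real enums in core.model_runtime.entities (their member values are assumptions for illustration only).

    from enum import Enum


    class ModelPropertyKey(Enum):
        # Simplified stand-in for the runtime enum; member values are assumed.
        MODE = "mode"
        CONTEXT_SIZE = "context_size"


    class LLMMode(Enum):
        # Simplified stand-in; only the members used below.
        CHAT = "chat"
        COMPLETION = "completion"


    # Old style: plain string keys, as some providers declared before this patch.
    legacy_properties = {
        "mode": LLMMode.CHAT.value,
        "context_size": 4096,
    }

    # A consumer that looks up a property by the enum member gets nothing back,
    # because a plain Enum member hashes as itself, not as its string value.
    assert legacy_properties.get(ModelPropertyKey.MODE) is None

    # New style: enum members as keys, so declaration and lookup always agree.
    model_properties = {
        ModelPropertyKey.MODE: LLMMode.CHAT.value,
        ModelPropertyKey.CONTEXT_SIZE: 4096,
    }
    assert model_properties[ModelPropertyKey.MODE] == "chat"
    assert model_properties[ModelPropertyKey.CONTEXT_SIZE] == 4096

If the real ModelPropertyKey happens to subclass str as well as Enum, string keys would still resolve, but keeping every provider on the enum form removes that ambiguity.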