mirror of
https://github.com/RockChinQ/QChatGPT.git
synced 2024-11-16 11:42:44 +08:00
chore: 更改 provider.json 格式
This commit is contained in:
parent
82763f8ec5
commit
867093cc88
47
pkg/config/migrations/m2_openai_config_migration.py
Normal file
47
pkg/config/migrations/m2_openai_config_migration.py
Normal file
|
@ -0,0 +1,47 @@
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from .. import migration
|
||||||
|
|
||||||
|
|
||||||
|
@migration.migration_class("openai-config-migration", 2)
class OpenAIConfigMigration(migration.Migration):
    """Migrate the legacy ``openai-config`` section of provider.json.

    Rewrites the old flat layout into the new ``keys`` / ``model`` /
    ``requester`` structure introduced by config schema version 2.
    """

    async def need_migrate(self) -> bool:
        """Return True when this migration still needs to run.

        Presence of the legacy ``openai-config`` key is the sole trigger.
        """
        return 'openai-config' in self.ap.provider_cfg.data

    async def run(self):
        """Perform the migration and persist the rewritten config.

        Moves the API key list to ``keys.openai``, promotes the chat model
        name to a top-level ``model`` entry, relocates base-url/args/timeout
        under ``requester.openai-chat-completions``, removes the legacy
        section, and dumps the config to disk.
        """
        data = self.ap.provider_cfg.data

        # Shallow copy: nested dicts are still shared with the legacy
        # section, which is acceptable because that section is deleted
        # at the end of this method.
        old_openai_config = data['openai-config'].copy()

        # The legacy flat api-keys list becomes keys['openai'].
        # NOTE: the previous code also pre-initialized data['keys']['openai']
        # to [] and then overwrote it unconditionally — that guard was dead
        # code and has been dropped; setdefault covers the container itself.
        data.setdefault('keys', {})
        data['keys']['openai'] = old_openai_config['api-keys']

        # Promote the model name to a top-level entry and strip it from the
        # per-request args so it is not carried twice.
        data['model'] = old_openai_config['chat-completions-params']['model']
        del old_openai_config['chat-completions-params']['model']

        # Requester-specific settings move under their own namespace.
        # (As above, the redundant inner-key pre-initialization was dead
        # code and has been removed.)
        data.setdefault('requester', {})
        data['requester']['openai-chat-completions'] = {
            'base-url': old_openai_config['base_url'],
            'args': old_openai_config['chat-completions-params'],
            'timeout': old_openai_config['request-timeout'],
        }

        # Drop the legacy section and persist the migrated config.
        del data['openai-config']

        await self.ap.provider_cfg.dump_config()
|
|
@ -4,7 +4,7 @@ import importlib
|
||||||
|
|
||||||
from .. import stage, app
|
from .. import stage, app
|
||||||
from ...config import migration
|
from ...config import migration
|
||||||
from ...config.migrations import m1_sensitive_word_migration
|
from ...config.migrations import m1_sensitive_word_migration, m2_openai_config_migration
|
||||||
|
|
||||||
|
|
||||||
@stage.stage_class("MigrationStage")
|
@stage.stage_class("MigrationStage")
|
||||||
|
|
|
@ -17,8 +17,8 @@ from ... import entities as llm_entities
|
||||||
from ...tools import entities as tools_entities
|
from ...tools import entities as tools_entities
|
||||||
|
|
||||||
|
|
||||||
@api.requester_class("openai-chat-completion")
|
@api.requester_class("openai-chat-completions")
|
||||||
class OpenAIChatCompletion(api.LLMAPIRequester):
|
class OpenAIChatCompletions(api.LLMAPIRequester):
|
||||||
"""OpenAI ChatCompletion API 请求器"""
|
"""OpenAI ChatCompletion API 请求器"""
|
||||||
|
|
||||||
client: openai.AsyncClient
|
client: openai.AsyncClient
|
||||||
|
@ -26,8 +26,8 @@ class OpenAIChatCompletion(api.LLMAPIRequester):
|
||||||
async def initialize(self):
|
async def initialize(self):
|
||||||
self.client = openai.AsyncClient(
|
self.client = openai.AsyncClient(
|
||||||
api_key="",
|
api_key="",
|
||||||
base_url=self.ap.provider_cfg.data['openai-config']['base_url'],
|
base_url=self.ap.provider_cfg.data['requester']['openai-chat-completions']['base-url'],
|
||||||
timeout=self.ap.provider_cfg.data['openai-config']['request-timeout'],
|
timeout=self.ap.provider_cfg.data['requester']['openai-chat-completions']['timeout'],
|
||||||
http_client=httpx.AsyncClient(
|
http_client=httpx.AsyncClient(
|
||||||
proxies=self.ap.proxy_mgr.get_forward_proxies()
|
proxies=self.ap.proxy_mgr.get_forward_proxies()
|
||||||
)
|
)
|
||||||
|
@ -58,7 +58,7 @@ class OpenAIChatCompletion(api.LLMAPIRequester):
|
||||||
) -> llm_entities.Message:
|
) -> llm_entities.Message:
|
||||||
self.client.api_key = use_model.token_mgr.get_token()
|
self.client.api_key = use_model.token_mgr.get_token()
|
||||||
|
|
||||||
args = self.ap.provider_cfg.data['openai-config']['chat-completions-params'].copy()
|
args = self.ap.provider_cfg.data['requester']['openai-chat-completions']['args'].copy()
|
||||||
args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
|
args["model"] = use_model.name if use_model.model_name is None else use_model.model_name
|
||||||
|
|
||||||
if use_model.tool_call_supported:
|
if use_model.tool_call_supported:
|
||||||
|
|
|
@ -39,9 +39,8 @@ class ModelManager:
|
||||||
async def initialize(self):
|
async def initialize(self):
|
||||||
|
|
||||||
# 初始化token_mgr, requester
|
# 初始化token_mgr, requester
|
||||||
self.token_mgrs = {
|
for k, v in self.ap.provider_cfg.data['keys'].items():
|
||||||
"openai": token.TokenManager("openai", list(self.ap.provider_cfg.data['openai-config']['api-keys']))
|
self.token_mgrs[k] = token.TokenManager(k, v)
|
||||||
}
|
|
||||||
|
|
||||||
for api_cls in api.preregistered_requesters:
|
for api_cls in api.preregistered_requesters:
|
||||||
api_inst = api_cls(self.ap)
|
api_inst = api_cls(self.ap)
|
||||||
|
|
|
@ -50,7 +50,7 @@ class SessionManager:
|
||||||
conversation = core_entities.Conversation(
|
conversation = core_entities.Conversation(
|
||||||
prompt=await self.ap.prompt_mgr.get_prompt(session.use_prompt_name),
|
prompt=await self.ap.prompt_mgr.get_prompt(session.use_prompt_name),
|
||||||
messages=[],
|
messages=[],
|
||||||
use_model=await self.ap.model_mgr.get_model_by_name(self.ap.provider_cfg.data['openai-config']['chat-completions-params']['model']),
|
use_model=await self.ap.model_mgr.get_model_by_name(self.ap.provider_cfg.data['model']),
|
||||||
use_funcs=await self.ap.tool_mgr.get_all_functions(),
|
use_funcs=await self.ap.tool_mgr.get_all_functions(),
|
||||||
)
|
)
|
||||||
session.conversations.append(conversation)
|
session.conversations.append(conversation)
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
"list": [
|
"list": [
|
||||||
{
|
{
|
||||||
"name": "default",
|
"name": "default",
|
||||||
"requester": "openai-chat-completion",
|
"requester": "openai-chat-completions",
|
||||||
"token_mgr": "openai",
|
"token_mgr": "openai",
|
||||||
"tool_call_supported": false
|
"tool_call_supported": false
|
||||||
},
|
},
|
||||||
|
|
|
@ -1,15 +1,18 @@
|
||||||
{
|
{
|
||||||
"enable-chat": true,
|
"enable-chat": true,
|
||||||
"openai-config": {
|
"keys": {
|
||||||
"api-keys": [
|
"openai": [
|
||||||
"sk-1234567890"
|
"sk-1234567890"
|
||||||
],
|
]
|
||||||
"base_url": "https://api.openai.com/v1",
|
|
||||||
"chat-completions-params": {
|
|
||||||
"model": "gpt-3.5-turbo"
|
|
||||||
},
|
},
|
||||||
"request-timeout": 120
|
"requester": {
|
||||||
|
"openai-chat-completions": {
|
||||||
|
"base-url": "https://api.openai.com/v1",
|
||||||
|
"args": {},
|
||||||
|
"timeout": 120
|
||||||
|
}
|
||||||
},
|
},
|
||||||
|
"model": "gpt-3.5-turbo",
|
||||||
"prompt-mode": "normal",
|
"prompt-mode": "normal",
|
||||||
"prompt": {
|
"prompt": {
|
||||||
"default": ""
|
"default": ""
|
||||||
|
|
Loading…
Reference in New Issue
Block a user