From caa72fa40c9dc1472757c9d8538d36446a22e186 Mon Sep 17 00:00:00 2001 From: Rock Chin <1010553892@qq.com> Date: Thu, 27 Jul 2023 14:27:36 +0800 Subject: [PATCH 01/16] =?UTF-8?q?feat:=20=E5=9C=A8=E6=8F=92=E4=BB=B6?= =?UTF-8?q?=E5=B1=82=E9=9D=A2=E5=88=9D=E6=AD=A5=E6=94=AF=E6=8C=81=E5=86=85?= =?UTF-8?q?=E5=AE=B9=E5=87=BD=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- main.py | 2 +- pkg/plugin/host.py | 7 +++++++ pkg/plugin/models.py | 42 +++++++++++++++++++++++++++++++++--------- requirements.txt | 3 ++- 4 files changed, 43 insertions(+), 11 deletions(-) diff --git a/main.py b/main.py index 218021c..2f18421 100644 --- a/main.py +++ b/main.py @@ -47,7 +47,7 @@ def init_db(): def ensure_dependencies(): import pkg.utils.pkgmgr as pkgmgr - pkgmgr.run_pip(["install", "openai", "Pillow", "nakuru-project-idk", "--upgrade", + pkgmgr.run_pip(["install", "openai", "Pillow", "nakuru-project-idk", "CallingGPT", "--upgrade", "-i", "https://pypi.tuna.tsinghua.edu.cn/simple", "--trusted-host", "pypi.tuna.tsinghua.edu.cn"]) diff --git a/pkg/plugin/host.py b/pkg/plugin/host.py index 4249c37..7343fcf 100644 --- a/pkg/plugin/host.py +++ b/pkg/plugin/host.py @@ -16,6 +16,8 @@ import pkg.qqbot.adapter as msadapter from mirai import Mirai +from CallingGPT.session.session import Session + __plugins__ = {} """插件列表 @@ -42,6 +44,9 @@ __plugins__ = {} __plugins_order__ = [] """插件顺序""" +__callable_functions__ = [] +"""供GPT调用的函数""" + def generate_plugin_order(): """根据__plugin__生成插件初始顺序,无视是否启用""" @@ -300,7 +305,9 @@ class PluginHost: """插件宿主""" def __init__(self): + """初始化插件宿主""" context.set_plugin_host(self) + self.calling_gpt_session = Session([]) def get_runtime_context(self) -> context: """获取运行时上下文(pkg.utils.context模块的对象) diff --git a/pkg/plugin/models.py b/pkg/plugin/models.py index 180f074..3f5f9ec 100644 --- a/pkg/plugin/models.py +++ b/pkg/plugin/models.py @@ -132,6 +132,13 @@ KeySwitched = "key_switched" key_list: list[str] api-key列表 """ +ContentFunction = "content_function" +"""声明此函数为一个内容函数,在对话中将发送此函数给GPT以供其调用 + 此函数可以具有任意的参数,但必须按照[此文档](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format) + 所述的格式编写函数的docstring。 + 此功能仅支持在使用gpt-3.5或gpt-4系列模型时使用。 +""" + def on(event: str): """注册事件监听器 @@ -161,20 +168,37 @@ class Plugin: """ global __current_registering_plugin__ - def wrapper(func): - plugin_hooks = host.__plugins__[__current_registering_plugin__]["hooks"] + if event != ContentFunction: + def wrapper(func): + plugin_hooks = host.__plugins__[__current_registering_plugin__]["hooks"] - if event not in plugin_hooks: - plugin_hooks[event] = [] - plugin_hooks[event].append(func) + if event not in plugin_hooks: + plugin_hooks[event] = [] + plugin_hooks[event].append(func) - # print("registering hook: p='{}', e='{}', f={}".format(__current_registering_plugin__, event, func)) + # print("registering hook: p='{}', e='{}', f={}".format(__current_registering_plugin__, event, func)) - host.__plugins__[__current_registering_plugin__]["hooks"] = plugin_hooks + host.__plugins__[__current_registering_plugin__]["hooks"] = plugin_hooks - return func + return func - return wrapper + return wrapper + else: + from CallingGPT.entities.namespace import get_func_schema + + def wrapper(func): + + function_schema = get_func_schema(func) + + # logging.debug("registering content function: p='{}', f='{}', s={}".format(__current_registering_plugin__, func, function_schema)) + + host.__callable_functions__.append( + function_schema + ) + + return func + + return 
wrapper def register(name: str, description: str, version: str, author: str): diff --git a/requirements.txt b/requirements.txt index 64160e9..60a072f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,4 +7,5 @@ websockets urllib3~=1.26.10 func_timeout~=4.3.5 Pillow -nakuru-project-idk \ No newline at end of file +nakuru-project-idk +CallingGPT \ No newline at end of file From ae6994e24107303818b651afc23b85b2a4925ba6 Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Fri, 28 Jul 2023 19:03:02 +0800 Subject: [PATCH 02/16] =?UTF-8?q?feat(contentPlugin):=20=E5=AE=8C=E6=88=90?= =?UTF-8?q?=E5=9F=BA=E6=9C=AC=E7=9A=84=E5=86=85=E5=AE=B9=E5=87=BD=E6=95=B0?= =?UTF-8?q?=E8=B0=83=E7=94=A8=E5=8A=9F=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/openai/api/__init__.py | 0 pkg/openai/api/chat_completion.py | 182 +++++++++++++++++++++ pkg/openai/api/completion.py | 111 +++++++++++++ pkg/openai/api/model.py | 42 +++++ pkg/openai/funcmgr.py | 37 +++++ pkg/openai/manager.py | 83 ++++++---- pkg/openai/modelmgr.py | 257 ++++++++++++++++-------------- pkg/openai/sess.py | 79 +++++++++ pkg/openai/session.py | 61 ++++++- pkg/plugin/host.py | 9 +- pkg/plugin/models.py | 5 + 11 files changed, 700 insertions(+), 166 deletions(-) create mode 100644 pkg/openai/api/__init__.py create mode 100644 pkg/openai/api/chat_completion.py create mode 100644 pkg/openai/api/completion.py create mode 100644 pkg/openai/api/model.py create mode 100644 pkg/openai/funcmgr.py create mode 100644 pkg/openai/sess.py diff --git a/pkg/openai/api/__init__.py b/pkg/openai/api/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pkg/openai/api/chat_completion.py b/pkg/openai/api/chat_completion.py new file mode 100644 index 0000000..836cac5 --- /dev/null +++ b/pkg/openai/api/chat_completion.py @@ -0,0 +1,182 @@ +import openai +import json + +from .model import RequestBase + +from ..funcmgr import get_func_schema_list, execute_function, get_func, get_func_schema, ContentFunctionNotFoundError + + +class ChatCompletionRequest(RequestBase): + """调用ChatCompletion接口的请求类。 + + 此类保证每一次返回的角色为assistant的信息的finish_reason一定为stop。 + 若有函数调用响应,本类的返回瀑布是:函数调用请求->函数调用结果->...->assistant的信息->stop。 + """ + model: str + messages: list[dict[str, str]] + kwargs: dict + + stopped: bool = False + + pending_func_call: dict = None + + pending_msg: str + + def flush_pending_msg(self): + self.append_message( + role="assistant", + content=self.pending_msg + ) + self.pending_msg = "" + + def append_message(self, role: str, content: str, name: str=None): + msg = { + "role": role, + "content": content + } + + if name is not None: + msg['name'] = name + + self.messages.append(msg) + + def __init__( + self, + model: str, + messages: list[dict[str, str]], + **kwargs + ): + self.model = model + self.messages = messages.copy() + + self.kwargs = kwargs + + self.req_func = openai.ChatCompletion.acreate + + self.pending_func_call = None + + self.stopped = False + + self.pending_msg = "" + + def __iter__(self): + return self + + def __next__(self) -> dict: + if self.stopped: + raise StopIteration() + + if self.pending_func_call is None: # 没有待处理的函数调用请求 + + resp = self._req( + model=self.model, + messages=self.messages, + functions=get_func_schema_list(), + **self.kwargs + ) + + choice0 = resp["choices"][0] + + # 如果不是函数调用,且finish_reason为stop,则停止迭代 + if 'function_call' not in choice0['message'] and choice0["finish_reason"] == "stop": + self.stopped = True + + if 'function_call' in 
choice0['message']: + self.pending_func_call = choice0['message']['function_call'] + + self.append_message( + role="assistant", + content="function call: "+json.dumps(self.pending_func_call) + ) + + return { + "id": resp["id"], + "choices": [ + { + "index": choice0["index"], + "message": { + "role": "assistant", + "type": "function_call", + "content": None, + "function_call": choice0['message']['function_call'] + }, + "finish_reason": "function_call" + } + ], + "usage": resp["usage"] + } + else: + + # self.pending_msg += choice0['message']['content'] + # 普通回复一定处于最后方,故不用再追加进内部messages + + return { + "id": resp["id"], + "choices": [ + { + "index": choice0["index"], + "message": { + "role": "assistant", + "type": "text", + "content": choice0['message']['content'] + }, + "finish_reason": "stop" + } + ], + "usage": resp["usage"] + } + else: # 处理函数调用请求 + + cp_pending_func_call = self.pending_func_call.copy() + + self.pending_func_call = None + + func_name = cp_pending_func_call['name'] + arguments = {} + + try: + + try: + arguments = json.loads(cp_pending_func_call['arguments']) + # 若不是json格式的异常处理 + except json.decoder.JSONDecodeError: + # 获取函数的参数列表 + func_schema = get_func_schema(func_name) + + arguments = { + func_schema['parameters']['required'][0]: cp_pending_func_call['arguments'] + } + + # 执行函数调用 + ret = execute_function(func_name, arguments) + + self.append_message( + role="function", + content=json.dumps(ret), + name=func_name + ) + + return { + "id": -1, + "choices": [ + { + "index": -1, + "message": { + "role": "function", + "type": "function_return", + "function_name": func_name, + "content": json.dumps(ret) + }, + "finish_reason": "function_return" + } + ], + "usage": { + "prompt_tokens": 0, + "completion_tokens": 0, + "total_tokens": 0 + } + } + + except ContentFunctionNotFoundError: + raise Exception("没有找到函数: {}".format(func_name)) + diff --git a/pkg/openai/api/completion.py b/pkg/openai/api/completion.py new file mode 100644 index 0000000..ee0d34e --- /dev/null +++ b/pkg/openai/api/completion.py @@ -0,0 +1,111 @@ +import openai + +from .model import RequestBase + + +class CompletionRequest(RequestBase): + """调用Completion接口的请求类。 + + 调用方可以一直next completion直到finish_reason为stop。 + """ + + model: str + prompt: str + kwargs: dict + + stopped: bool = False + + def __init__( + self, + model: str, + messages: list[dict[str, str]], + **kwargs + ): + self.model = model + self.prompt = "" + + for message in messages: + self.prompt += message["role"] + ": " + message["content"] + "\n" + + self.prompt += "assistant: " + + self.kwargs = kwargs + + self.req_func = openai.Completion.acreate + + def __iter__(self): + return self + + def __next__(self) -> dict: + """调用Completion接口,返回生成的文本 + + { + "id": "id", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "type": "text", + "content": "message" + }, + "finish_reason": "reason" + } + ], + "usage": { + "prompt_tokens": 10, + "completion_tokens": 20, + "total_tokens": 30 + } + } + """ + + if self.stopped: + raise StopIteration() + + resp = self._req( + model=self.model, + prompt=self.prompt, + **self.kwargs + ) + + if resp["choices"][0]["finish_reason"] == "stop": + self.stopped = True + + choice0 = resp["choices"][0] + + self.prompt += choice0["text"] + + return { + "id": resp["id"], + "choices": [ + { + "index": choice0["index"], + "message": { + "role": "assistant", + "type": "text", + "content": choice0["text"] + }, + "finish_reason": choice0["finish_reason"] + } + ], + "usage": resp["usage"] + } + +if __name__ == 
"__main__": + import os + + openai.api_key = os.environ["OPENAI_API_KEY"] + + for resp in CompletionRequest( + model="text-davinci-003", + messages=[ + { + "role": "user", + "content": "Hello, who are you?" + } + ] + ): + print(resp) + if resp["choices"][0]["finish_reason"] == "stop": + break diff --git a/pkg/openai/api/model.py b/pkg/openai/api/model.py new file mode 100644 index 0000000..bf5afab --- /dev/null +++ b/pkg/openai/api/model.py @@ -0,0 +1,42 @@ +# 定义不同接口请求的模型 +import threading +import asyncio + +import openai + + +class RequestBase: + + req_func: callable + + def __init__(self, *args, **kwargs): + raise NotImplementedError + + def _req(self, **kwargs): + """处理代理问题""" + + ret: dict = {} + + async def awrapper(**kwargs): + nonlocal ret + + ret = await self.req_func(**kwargs) + return ret + + loop = asyncio.new_event_loop() + + thr = threading.Thread( + target=loop.run_until_complete, + args=(awrapper(**kwargs),) + ) + + thr.start() + thr.join() + + return ret + + def __iter__(self): + raise self + + def __next__(self): + raise NotImplementedError diff --git a/pkg/openai/funcmgr.py b/pkg/openai/funcmgr.py new file mode 100644 index 0000000..c3cfcb9 --- /dev/null +++ b/pkg/openai/funcmgr.py @@ -0,0 +1,37 @@ +# 封装了function calling的一些支持函数 +import logging + + +from pkg.plugin.host import __callable_functions__, __function_inst_map__ + + +class ContentFunctionNotFoundError(Exception): + pass + + +def get_func_schema_list() -> list: + """从plugin包中的函数结构中获取并处理成受GPT支持的格式""" + + schemas = __callable_functions__ + + return schemas + +def get_func(name: str) -> callable: + if name not in __function_inst_map__: + raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name)) + + return __function_inst_map__[name] + +def get_func_schema(name: str) -> dict: + for func in __callable_functions__: + if func['name'] == name: + return func + raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name)) + +def execute_function(name: str, kwargs: dict) -> any: + """执行函数调用""" + + logging.debug("executing function: name='{}', kwargs={}".format(name, kwargs)) + + func = get_func(name) + return func(**kwargs) diff --git a/pkg/openai/manager.py b/pkg/openai/manager.py index 8a368c3..ebd94b6 100644 --- a/pkg/openai/manager.py +++ b/pkg/openai/manager.py @@ -5,7 +5,9 @@ import openai import pkg.openai.keymgr import pkg.utils.context import pkg.audit.gatherer -from pkg.openai.modelmgr import ModelRequest, create_openai_model_request +from pkg.openai.modelmgr import select_request_cls + +from pkg.openai.api.model import RequestBase class OpenAIInteract: @@ -33,45 +35,58 @@ class OpenAIInteract: pkg.utils.context.set_openai_manager(self) - # 请求OpenAI Completion - def request_completion(self, prompts) -> tuple[str, int]: - """请求补全接口回复 - - Parameters: - prompts (str): 提示语 - - Returns: - str: 回复 + def request_completion(self, messages: list): + """请求补全接口回复= """ - + # 选择接口请求类 config = pkg.utils.context.get_config() - # 根据模型选择使用的接口 - ai: ModelRequest = create_openai_model_request( - config.completion_api_params['model'], - 'user', - config.openai_config["http_proxy"] if "http_proxy" in config.openai_config else None - ) - ai.request( - prompts, - **config.completion_api_params - ) - response = ai.get_response() + request: RequestBase - logging.debug("OpenAI response: %s", response) + model: str = config.completion_api_params['model'] - # 记录使用量 - current_round_token = 0 - if 'model' in config.completion_api_params: - self.audit_mgr.report_text_model_usage(config.completion_api_params['model'], - 
ai.get_total_tokens()) - current_round_token = ai.get_total_tokens() - elif 'engine' in config.completion_api_params: - self.audit_mgr.report_text_model_usage(config.completion_api_params['engine'], - response['usage']['total_tokens']) - current_round_token = response['usage']['total_tokens'] + cp_parmas = config.completion_api_params.copy() + del cp_parmas['model'] - return ai.get_message(), current_round_token + request = select_request_cls(model, messages, cp_parmas) + + # 请求接口 + for resp in request: + yield resp + + # 请求OpenAI Completion + # def request_completion(self, prompts): + # """请求补全接口回复 + # """ + + # config = pkg.utils.context.get_config() + + # # 根据模型选择使用的接口 + # ai: ModelRequest = create_openai_model_request( + # config.completion_api_params['model'], + # 'user', + # config.openai_config["http_proxy"] if "http_proxy" in config.openai_config else None + # ) + # ai.request( + # prompts, + # **config.completion_api_params + # ) + # response = ai.get_response() + + # logging.debug("OpenAI response: %s", response) + + # # 记录使用量 + # current_round_token = 0 + # if 'model' in config.completion_api_params: + # self.audit_mgr.report_text_model_usage(config.completion_api_params['model'], + # ai.get_total_tokens()) + # current_round_token = ai.get_total_tokens() + # elif 'engine' in config.completion_api_params: + # self.audit_mgr.report_text_model_usage(config.completion_api_params['engine'], + # response['usage']['total_tokens']) + # current_round_token = response['usage']['total_tokens'] + + # return ai.get_message(), current_round_token def request_image(self, prompt) -> dict: """请求图片接口回复 diff --git a/pkg/openai/modelmgr.py b/pkg/openai/modelmgr.py index f16105c..a73c869 100644 --- a/pkg/openai/modelmgr.py +++ b/pkg/openai/modelmgr.py @@ -8,6 +8,10 @@ Completion - text-davinci-003 等模型 import openai, logging, threading, asyncio import openai.error as aiE +from pkg.openai.api.model import RequestBase +from pkg.openai.api.completion import CompletionRequest +from pkg.openai.api.chat_completion import ChatCompletionRequest + COMPLETION_MODELS = { 'text-davinci-003', 'text-davinci-002', @@ -39,153 +43,160 @@ IMAGE_MODELS = { } -class ModelRequest: - """模型接口请求父类""" +# class ModelRequest: +# """模型接口请求父类""" - can_chat = False - runtime: threading.Thread = None - ret = {} - proxy: str = None - request_ready = True - error_info: str = "若在没有任何错误的情况下看到这句话,请带着配置文件上报Issues" +# can_chat = False +# runtime: threading.Thread = None +# ret = {} +# proxy: str = None +# request_ready = True +# error_info: str = "若在没有任何错误的情况下看到这句话,请带着配置文件上报Issues" - def __init__(self, model_name, user_name, request_fun, http_proxy:str = None, time_out = None): - self.model_name = model_name - self.user_name = user_name - self.request_fun = request_fun - self.time_out = time_out - if http_proxy != None: - self.proxy = http_proxy - openai.proxy = self.proxy - self.request_ready = False +# def __init__(self, model_name, user_name, request_fun, http_proxy:str = None, time_out = None): +# self.model_name = model_name +# self.user_name = user_name +# self.request_fun = request_fun +# self.time_out = time_out +# if http_proxy != None: +# self.proxy = http_proxy +# openai.proxy = self.proxy +# self.request_ready = False - async def __a_request__(self, **kwargs): - """异步请求""" +# async def __a_request__(self, **kwargs): +# """异步请求""" - try: - self.ret: dict = await self.request_fun(**kwargs) - self.request_ready = True - except aiE.APIConnectionError as e: - self.error_info = "{}\n请检查网络连接或代理是否正常".format(e) - raise 
ConnectionError(self.error_info) - except ValueError as e: - self.error_info = "{}\n该错误可能是由于http_proxy格式设置错误引起的" - except Exception as e: - self.error_info = "{}\n由于请求异常产生的未知错误,请查看日志".format(e) - raise type(e)(self.error_info) +# try: +# self.ret: dict = await self.request_fun(**kwargs) +# self.request_ready = True +# except aiE.APIConnectionError as e: +# self.error_info = "{}\n请检查网络连接或代理是否正常".format(e) +# raise ConnectionError(self.error_info) +# except ValueError as e: +# self.error_info = "{}\n该错误可能是由于http_proxy格式设置错误引起的" +# except Exception as e: +# self.error_info = "{}\n由于请求异常产生的未知错误,请查看日志".format(e) +# raise type(e)(self.error_info) - def request(self, **kwargs): - """向接口发起请求""" +# def request(self, **kwargs): +# """向接口发起请求""" - if self.proxy != None: #异步请求 - self.request_ready = False - loop = asyncio.new_event_loop() - self.runtime = threading.Thread( - target=loop.run_until_complete, - args=(self.__a_request__(**kwargs),) - ) - self.runtime.start() - else: #同步请求 - self.ret = self.request_fun(**kwargs) +# if self.proxy != None: #异步请求 +# self.request_ready = False +# loop = asyncio.new_event_loop() +# self.runtime = threading.Thread( +# target=loop.run_until_complete, +# args=(self.__a_request__(**kwargs),) +# ) +# self.runtime.start() +# else: #同步请求 +# self.ret = self.request_fun(**kwargs) - def __msg_handle__(self, msg): - """将prompt dict转换成接口需要的格式""" - return msg +# def __msg_handle__(self, msg): +# """将prompt dict转换成接口需要的格式""" +# return msg - def ret_handle(self): - ''' - API消息返回处理函数 - 若重写该方法,应检查异步线程状态,或在需要检查处super该方法 - ''' - if self.runtime != None and isinstance(self.runtime, threading.Thread): - self.runtime.join(self.time_out) - if self.request_ready: - return - raise Exception(self.error_info) +# def ret_handle(self): +# ''' +# API消息返回处理函数 +# 若重写该方法,应检查异步线程状态,或在需要检查处super该方法 +# ''' +# if self.runtime != None and isinstance(self.runtime, threading.Thread): +# self.runtime.join(self.time_out) +# if self.request_ready: +# return +# raise Exception(self.error_info) - def get_total_tokens(self): - try: - return self.ret['usage']['total_tokens'] - except: - return 0 +# def get_total_tokens(self): +# try: +# return self.ret['usage']['total_tokens'] +# except: +# return 0 - def get_message(self): - return self.message +# def get_message(self): +# return self.message - def get_response(self): - return self.ret +# def get_response(self): +# return self.ret -class ChatCompletionModel(ModelRequest): - """ChatCompletion接口的请求实现""" +# class ChatCompletionModel(ModelRequest): +# """ChatCompletion接口的请求实现""" - Chat_role = ['system', 'user', 'assistant'] - def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs): - if http_proxy == None: - request_fun = openai.ChatCompletion.create - else: - request_fun = openai.ChatCompletion.acreate - self.can_chat = True - super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs) +# Chat_role = ['system', 'user', 'assistant'] +# def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs): +# if http_proxy == None: +# request_fun = openai.ChatCompletion.create +# else: +# request_fun = openai.ChatCompletion.acreate +# self.can_chat = True +# super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs) - def request(self, prompts, **kwargs): - prompts = self.__msg_handle__(prompts) - kwargs['messages'] = prompts - super().request(**kwargs) - self.ret_handle() +# def request(self, prompts, **kwargs): +# prompts = self.__msg_handle__(prompts) +# kwargs['messages'] = prompts +# 
super().request(**kwargs) +# self.ret_handle() - def __msg_handle__(self, msgs): - temp_msgs = [] - # 把msgs拷贝进temp_msgs - for msg in msgs: - temp_msgs.append(msg.copy()) - return temp_msgs +# def __msg_handle__(self, msgs): +# temp_msgs = [] +# # 把msgs拷贝进temp_msgs +# for msg in msgs: +# temp_msgs.append(msg.copy()) +# return temp_msgs - def get_message(self): - return self.ret["choices"][0]["message"]['content'] #需要时直接加载加快请求速度,降低内存消耗 +# def get_message(self): +# return self.ret["choices"][0]["message"]['content'] #需要时直接加载加快请求速度,降低内存消耗 -class CompletionModel(ModelRequest): - """Completion接口的请求实现""" +# class CompletionModel(ModelRequest): +# """Completion接口的请求实现""" - def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs): - if http_proxy == None: - request_fun = openai.Completion.create - else: - request_fun = openai.Completion.acreate - super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs) +# def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs): +# if http_proxy == None: +# request_fun = openai.Completion.create +# else: +# request_fun = openai.Completion.acreate +# super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs) - def request(self, prompts, **kwargs): - prompts = self.__msg_handle__(prompts) - kwargs['prompt'] = prompts - super().request(**kwargs) - self.ret_handle() +# def request(self, prompts, **kwargs): +# prompts = self.__msg_handle__(prompts) +# kwargs['prompt'] = prompts +# super().request(**kwargs) +# self.ret_handle() - def __msg_handle__(self, msgs): - prompt = '' - for msg in msgs: - prompt = prompt + "{}: {}\n".format(msg['role'], msg['content']) - # for msg in msgs: - # if msg['role'] == 'assistant': - # prompt = prompt + "{}\n".format(msg['content']) - # else: - # prompt = prompt + "{}:{}\n".format(msg['role'] , msg['content']) - prompt = prompt + "assistant: " - return prompt +# def __msg_handle__(self, msgs): +# prompt = '' +# for msg in msgs: +# prompt = prompt + "{}: {}\n".format(msg['role'], msg['content']) +# # for msg in msgs: +# # if msg['role'] == 'assistant': +# # prompt = prompt + "{}\n".format(msg['content']) +# # else: +# # prompt = prompt + "{}:{}\n".format(msg['role'] , msg['content']) +# prompt = prompt + "assistant: " +# return prompt - def get_message(self): - return self.ret["choices"][0]["text"] +# def get_message(self): +# return self.ret["choices"][0]["text"] -def create_openai_model_request(model_name: str, user_name: str = 'user', http_proxy:str = None) -> ModelRequest: - """使用给定的模型名称创建模型请求对象""" +# def create_openai_model_request(model_name: str, user_name: str = 'user', http_proxy:str = None) -> ModelRequest: +# """使用给定的模型名称创建模型请求对象""" +# if model_name in CHAT_COMPLETION_MODELS: +# model = ChatCompletionModel(model_name, user_name, http_proxy) +# elif model_name in COMPLETION_MODELS: +# model = CompletionModel(model_name, user_name, http_proxy) +# else : +# log = "找不到模型[{}],请检查配置文件".format(model_name) +# logging.error(log) +# raise IndexError(log) +# logging.debug("使用接口[{}]创建模型请求[{}]".format(model.__class__.__name__, model_name)) +# return model + +def select_request_cls(model_name: str, messages: list, args: dict) -> RequestBase: if model_name in CHAT_COMPLETION_MODELS: - model = ChatCompletionModel(model_name, user_name, http_proxy) + return ChatCompletionRequest(model_name, messages, **args) elif model_name in COMPLETION_MODELS: - model = CompletionModel(model_name, user_name, http_proxy) - else : - log = "找不到模型[{}],请检查配置文件".format(model_name) - 
logging.error(log) - raise IndexError(log) - logging.debug("使用接口[{}]创建模型请求[{}]".format(model.__class__.__name__, model_name)) - return model + return CompletionRequest(model_name, messages, **args) + raise ValueError("不支持模型[{}],请检查配置文件".format(model_name)) \ No newline at end of file diff --git a/pkg/openai/sess.py b/pkg/openai/sess.py new file mode 100644 index 0000000..898e8b0 --- /dev/null +++ b/pkg/openai/sess.py @@ -0,0 +1,79 @@ +import time +import threading +import logging + + +sessions = {} + + +class SessionOfflineStatus: + ON_GOING = "on_going" + EXPLICITLY_CLOSED = "explicitly_closed" + + +def reset_session_prompt(session_name, prompt): + pass + + +def load_sessions(): + pass + + +def get_session(session_name: str) -> 'Session': + pass + + +def dump_session(session_name: str): + pass + + +class Session: + name: str = '' + + default_prompt: list = [] + """会话系统提示语""" + + messages: list = [] + """保存消息历史记录""" + + token_counts: list = [] + """记录每回合的token数量""" + + create_ts: int = 0 + """会话创建时间戳""" + + last_active_ts: int = 0 + """会话最后活跃时间戳""" + + just_switched_to_exist_session: bool = False + + response_lock = None + + def __init__(self, name: str): + self.name = name + self.default_prompt = self.get_runtime_default_prompt() + logging.debug("prompt is: {}".format(self.default_prompt)) + self.messages = [] + self.token_counts = [] + self.create_ts = int(time.time()) + self.last_active_ts = int(time.time()) + + self.response_lock = threading.Lock() + + self.schedule() + + def get_runtime_default_prompt(self, use_default: str = None) -> list: + """从提示词管理器中获取所需提示词""" + import pkg.openai.dprompt as dprompt + + if use_default is None: + use_default = dprompt.mode_inst().get_using_name() + + current_default_prompt, _ = dprompt.mode_inst().get_prompt(use_default) + return current_default_prompt + + def schedule(self): + """定时会话过期检查任务""" + + def expire_check_timer_loop(self): + """会话过期检查任务""" diff --git a/pkg/openai/session.py b/pkg/openai/session.py index 06b0075..9c718f5 100644 --- a/pkg/openai/session.py +++ b/pkg/openai/session.py @@ -222,22 +222,67 @@ class Session: for token_count in counts: total_token_before_query += token_count + res_text = "" + + pending_msgs = [] + + total_tokens = 0 + + for resp in pkg.utils.context.get_openai_manager().request_completion(prompts): + if resp['choices'][0]['message']['type'] == 'text': # 普通回复 + res_text += resp['choices'][0]['message']['content'] + + total_tokens += resp['usage']['total_tokens'] + + pending_msgs.append( + { + "role": "assistant", + "content": resp['choices'][0]['message']['content'] + } + ) + + elif resp['choices'][0]['message']['type'] == 'function_call': + # self.prompt.append( + # { + # "role": "assistant", + # "content": "function call: "+json.dumps(resp['choices'][0]['message']['function_call']) + # } + # ) + + total_tokens += resp['usage']['total_tokens'] + elif resp['choices'][0]['message']['type'] == 'function_return': + # self.prompt.append( + # { + # "role": "function", + # "name": resp['choices'][0]['message']['function_name'], + # "content": json.dumps(resp['choices'][0]['message']['content']) + # } + # ) + + # total_tokens += resp['usage']['total_tokens'] + pass + + + # 向API请求补全 - message, total_token = pkg.utils.context.get_openai_manager().request_completion( - prompts, - ) + # message, total_token = pkg.utils.context.get_openai_manager().request_completion( + # prompts, + # ) # 成功获取,处理回复 - res_test = message - res_ans = res_test.strip() + # res_test = message + res_ans = res_text.strip() # 将此次对话的双方内容加入到prompt中 + # 
self.prompt.append({'role': 'user', 'content': text}) + # self.prompt.append({'role': 'assistant', 'content': res_ans}) self.prompt.append({'role': 'user', 'content': text}) - self.prompt.append({'role': 'assistant', 'content': res_ans}) + # 添加pending_msgs + self.prompt += pending_msgs # 向token_counts中添加本回合的token数量 - self.token_counts.append(total_token-total_token_before_query) - logging.debug("本回合使用token: {}, session counts: {}".format(total_token-total_token_before_query, self.token_counts)) + self.token_counts.append(total_tokens-total_token_before_query) + logging.debug("本回合使用token: {}, session counts: {}".format(total_tokens-total_token_before_query, self.token_counts)) if self.just_switched_to_exist_session: self.just_switched_to_exist_session = False diff --git a/pkg/plugin/host.py b/pkg/plugin/host.py index 7343fcf..e0c00b8 100644 --- a/pkg/plugin/host.py +++ b/pkg/plugin/host.py @@ -45,7 +45,10 @@ __plugins_order__ = [] """插件顺序""" __callable_functions__ = [] -"""供GPT调用的函数""" +"""供GPT调用的函数结构""" + +__function_inst_map__: dict[str, callable] = {} +"""函数名:实例 映射""" def generate_plugin_order(): @@ -107,6 +110,10 @@ def load_plugins(): # 加载插件顺序 settings.load_settings() + # 输出已注册的内容函数列表 + logging.debug("registered content functions: {}".format(__callable_functions__)) + logging.debug("function instance map: {}".format(__function_inst_map__)) + def initialize_plugins(): """初始化插件""" diff --git a/pkg/plugin/models.py b/pkg/plugin/models.py index 3f5f9ec..17ed209 100644 --- a/pkg/plugin/models.py +++ b/pkg/plugin/models.py @@ -189,6 +189,11 @@ class Plugin: def wrapper(func): function_schema = get_func_schema(func) + function_schema['name'] = __current_registering_plugin__ + '-' + func.__name__ + + host.__function_inst_map__[function_schema['name']] = function_schema['function'] + + del function_schema['function'] # logging.debug("registering content function: p='{}', f='{}', s={}".format(__current_registering_plugin__, func, function_schema)) From 8c69b8a1d9d5c499af6f8d6fa1777ab502a80a15 Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sat, 29 Jul 2023 16:28:18 +0800 Subject: [PATCH 03/16] =?UTF-8?q?feat:=20=E5=86=85=E5=AE=B9=E5=87=BD?= =?UTF-8?q?=E6=95=B0=E5=85=A8=E5=B1=80=E5=BC=80=E5=85=B3=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/openai/api/chat_completion.py | 11 ++- pkg/openai/api/model.py | 13 ++- pkg/openai/funcmgr.py | 12 ++- pkg/openai/modelmgr.py | 151 ------------------------------ pkg/plugin/host.py | 3 + pkg/plugin/settings.py | 21 ++++- 6 files changed, 48 insertions(+), 163 deletions(-) diff --git a/pkg/openai/api/chat_completion.py b/pkg/openai/api/chat_completion.py index 836cac5..c33c19a 100644 --- a/pkg/openai/api/chat_completion.py +++ b/pkg/openai/api/chat_completion.py @@ -1,5 +1,6 @@ import openai import json +import logging from .model import RequestBase @@ -86,7 +87,7 @@ class ChatCompletionRequest(RequestBase): self.append_message( role="assistant", - content="function call: "+json.dumps(self.pending_func_call) + content="function call: "+json.dumps(self.pending_func_call, ensure_ascii=False) ) return { @@ -147,12 +148,16 @@ class ChatCompletionRequest(RequestBase): func_schema['parameters']['required'][0]: cp_pending_func_call['arguments'] } + logging.info("执行函数调用: name={}, arguments={}".format(func_name, arguments)) + # 执行函数调用 ret = execute_function(func_name, arguments) + logging.info("函数执行完成。") + self.append_message( role="function", - content=json.dumps(ret), + 
content=json.dumps(ret, ensure_ascii=False), name=func_name ) @@ -165,7 +170,7 @@ class ChatCompletionRequest(RequestBase): "role": "function", "type": "function_return", "function_name": func_name, - "content": json.dumps(ret) + "content": json.dumps(ret, ensure_ascii=False) }, "finish_reason": "function_return" } diff --git a/pkg/openai/api/model.py b/pkg/openai/api/model.py index bf5afab..f58ece0 100644 --- a/pkg/openai/api/model.py +++ b/pkg/openai/api/model.py @@ -16,12 +16,16 @@ class RequestBase: """处理代理问题""" ret: dict = {} + exception: Exception = None async def awrapper(**kwargs): - nonlocal ret + nonlocal ret, exception - ret = await self.req_func(**kwargs) - return ret + try: + ret = await self.req_func(**kwargs) + return ret + except Exception as e: + exception = e loop = asyncio.new_event_loop() @@ -33,6 +37,9 @@ class RequestBase: thr.start() thr.join() + if exception is not None: + raise exception + return ret def __iter__(self): diff --git a/pkg/openai/funcmgr.py b/pkg/openai/funcmgr.py index c3cfcb9..8f4969a 100644 --- a/pkg/openai/funcmgr.py +++ b/pkg/openai/funcmgr.py @@ -2,7 +2,7 @@ import logging -from pkg.plugin.host import __callable_functions__, __function_inst_map__ +from pkg.plugin import host class ContentFunctionNotFoundError(Exception): @@ -11,19 +11,21 @@ class ContentFunctionNotFoundError(Exception): def get_func_schema_list() -> list: """从plugin包中的函数结构中获取并处理成受GPT支持的格式""" + if not host.__enable_content_functions__: + return [] - schemas = __callable_functions__ + schemas = host.__callable_functions__ return schemas def get_func(name: str) -> callable: - if name not in __function_inst_map__: + if name not in host.__function_inst_map__: raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name)) - return __function_inst_map__[name] + return host.__function_inst_map__[name] def get_func_schema(name: str) -> dict: - for func in __callable_functions__: + for func in host.__callable_functions__: if func['name'] == name: return func raise ContentFunctionNotFoundError("没有找到内容函数: {}".format(name)) diff --git a/pkg/openai/modelmgr.py b/pkg/openai/modelmgr.py index a73c869..8e6c7f2 100644 --- a/pkg/openai/modelmgr.py +++ b/pkg/openai/modelmgr.py @@ -43,157 +43,6 @@ IMAGE_MODELS = { } -# class ModelRequest: -# """模型接口请求父类""" - -# can_chat = False -# runtime: threading.Thread = None -# ret = {} -# proxy: str = None -# request_ready = True -# error_info: str = "若在没有任何错误的情况下看到这句话,请带着配置文件上报Issues" - -# def __init__(self, model_name, user_name, request_fun, http_proxy:str = None, time_out = None): -# self.model_name = model_name -# self.user_name = user_name -# self.request_fun = request_fun -# self.time_out = time_out -# if http_proxy != None: -# self.proxy = http_proxy -# openai.proxy = self.proxy -# self.request_ready = False - -# async def __a_request__(self, **kwargs): -# """异步请求""" - -# try: -# self.ret: dict = await self.request_fun(**kwargs) -# self.request_ready = True -# except aiE.APIConnectionError as e: -# self.error_info = "{}\n请检查网络连接或代理是否正常".format(e) -# raise ConnectionError(self.error_info) -# except ValueError as e: -# self.error_info = "{}\n该错误可能是由于http_proxy格式设置错误引起的" -# except Exception as e: -# self.error_info = "{}\n由于请求异常产生的未知错误,请查看日志".format(e) -# raise type(e)(self.error_info) - -# def request(self, **kwargs): -# """向接口发起请求""" - -# if self.proxy != None: #异步请求 -# self.request_ready = False -# loop = asyncio.new_event_loop() -# self.runtime = threading.Thread( -# target=loop.run_until_complete, -# args=(self.__a_request__(**kwargs),) -# ) -# 
self.runtime.start() -# else: #同步请求 -# self.ret = self.request_fun(**kwargs) - -# def __msg_handle__(self, msg): -# """将prompt dict转换成接口需要的格式""" -# return msg - -# def ret_handle(self): -# ''' -# API消息返回处理函数 -# 若重写该方法,应检查异步线程状态,或在需要检查处super该方法 -# ''' -# if self.runtime != None and isinstance(self.runtime, threading.Thread): -# self.runtime.join(self.time_out) -# if self.request_ready: -# return -# raise Exception(self.error_info) - -# def get_total_tokens(self): -# try: -# return self.ret['usage']['total_tokens'] -# except: -# return 0 - -# def get_message(self): -# return self.message - -# def get_response(self): -# return self.ret - - -# class ChatCompletionModel(ModelRequest): -# """ChatCompletion接口的请求实现""" - -# Chat_role = ['system', 'user', 'assistant'] -# def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs): -# if http_proxy == None: -# request_fun = openai.ChatCompletion.create -# else: -# request_fun = openai.ChatCompletion.acreate -# self.can_chat = True -# super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs) - -# def request(self, prompts, **kwargs): -# prompts = self.__msg_handle__(prompts) -# kwargs['messages'] = prompts -# super().request(**kwargs) -# self.ret_handle() - -# def __msg_handle__(self, msgs): -# temp_msgs = [] -# # 把msgs拷贝进temp_msgs -# for msg in msgs: -# temp_msgs.append(msg.copy()) -# return temp_msgs - -# def get_message(self): -# return self.ret["choices"][0]["message"]['content'] #需要时直接加载加快请求速度,降低内存消耗 - - -# class CompletionModel(ModelRequest): -# """Completion接口的请求实现""" - -# def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs): -# if http_proxy == None: -# request_fun = openai.Completion.create -# else: -# request_fun = openai.Completion.acreate -# super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs) - -# def request(self, prompts, **kwargs): -# prompts = self.__msg_handle__(prompts) -# kwargs['prompt'] = prompts -# super().request(**kwargs) -# self.ret_handle() - -# def __msg_handle__(self, msgs): -# prompt = '' -# for msg in msgs: -# prompt = prompt + "{}: {}\n".format(msg['role'], msg['content']) -# # for msg in msgs: -# # if msg['role'] == 'assistant': -# # prompt = prompt + "{}\n".format(msg['content']) -# # else: -# # prompt = prompt + "{}:{}\n".format(msg['role'] , msg['content']) -# prompt = prompt + "assistant: " -# return prompt - -# def get_message(self): -# return self.ret["choices"][0]["text"] - - -# def create_openai_model_request(model_name: str, user_name: str = 'user', http_proxy:str = None) -> ModelRequest: -# """使用给定的模型名称创建模型请求对象""" -# if model_name in CHAT_COMPLETION_MODELS: -# model = ChatCompletionModel(model_name, user_name, http_proxy) -# elif model_name in COMPLETION_MODELS: -# model = CompletionModel(model_name, user_name, http_proxy) -# else : -# log = "找不到模型[{}],请检查配置文件".format(model_name) -# logging.error(log) -# raise IndexError(log) -# logging.debug("使用接口[{}]创建模型请求[{}]".format(model.__class__.__name__, model_name)) -# return model - def select_request_cls(model_name: str, messages: list, args: dict) -> RequestBase: if model_name in CHAT_COMPLETION_MODELS: return ChatCompletionRequest(model_name, messages, **args) diff --git a/pkg/plugin/host.py b/pkg/plugin/host.py index e0c00b8..12269bf 100644 --- a/pkg/plugin/host.py +++ b/pkg/plugin/host.py @@ -44,6 +44,9 @@ __plugins__ = {} __plugins_order__ = [] """插件顺序""" +__enable_content_functions__ = True +"""是否启用内容函数""" + __callable_functions__ = [] """供GPT调用的函数结构""" diff --git 
a/pkg/plugin/settings.py b/pkg/plugin/settings.py index f68ef2c..beb4168 100644 --- a/pkg/plugin/settings.py +++ b/pkg/plugin/settings.py @@ -8,7 +8,10 @@ import logging def wrapper_dict_from_runtime_context() -> dict: """从变量中包装settings.json的数据字典""" settings = { - "order": [] + "order": [], + "functions": { + "enable": host.__enable_content_functions__ + } } for plugin_name in host.__plugins_order__: @@ -22,6 +25,11 @@ def apply_settings(settings: dict): if "order" in settings: host.__plugins_order__ = settings["order"] + if "functions" in settings: + if "enable" in settings["functions"]: + host.__enable_content_functions__ = settings["functions"]["enable"] + # logging.debug("set content function enable: {}".format(host.__enable_content_functions__)) + def dump_settings(): """保存settings.json数据""" @@ -78,6 +86,17 @@ def load_settings(): settings["order"].append(plugin_name) settings_modified = True + if "functions" not in settings: + settings["functions"] = { + "enable": host.__enable_content_functions__ + } + settings_modified = True + elif "enable" not in settings["functions"]: + settings["functions"]["enable"] = host.__enable_content_functions__ + settings_modified = True + + logging.info("已全局{}内容函数。".format("启用" if settings["functions"]["enable"] else "禁用")) + apply_settings(settings) if settings_modified: From 3c894fe70e5950e9227f2cde515f07790ad6ff17 Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sat, 29 Jul 2023 16:29:16 +0800 Subject: [PATCH 04/16] =?UTF-8?q?feat:=20chat=5Fcompletion=E7=9A=84?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E5=BC=80=E5=85=B3=E6=94=AF=E6=8C=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/openai/api/chat_completion.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/pkg/openai/api/chat_completion.py b/pkg/openai/api/chat_completion.py index c33c19a..f9cab13 100644 --- a/pkg/openai/api/chat_completion.py +++ b/pkg/openai/api/chat_completion.py @@ -69,12 +69,20 @@ class ChatCompletionRequest(RequestBase): if self.pending_func_call is None: # 没有待处理的函数调用请求 - resp = self._req( - model=self.model, - messages=self.messages, - functions=get_func_schema_list(), - **self.kwargs - ) + args = { + "model": self.model, + "messages": self.messages, + } + + funcs = get_func_schema_list() + + if len(funcs) > 0: + args['functions'] = funcs + + # 拼接kwargs + args = {**args, **self.kwargs} + + resp = self._req(**args) choice0 = resp["choices"][0] From a002f93f7b1abd8f1856fd9d770ea8c6ff33e144 Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sat, 29 Jul 2023 16:30:09 +0800 Subject: [PATCH 05/16] =?UTF-8?q?chore:=20=E5=88=A0=E9=99=A4=E8=BF=87?= =?UTF-8?q?=E6=97=B6=E4=BB=A3=E7=A0=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/openai/manager.py | 34 ---------------------------------- 1 file changed, 34 deletions(-) diff --git a/pkg/openai/manager.py b/pkg/openai/manager.py index ebd94b6..c607f22 100644 --- a/pkg/openai/manager.py +++ b/pkg/openai/manager.py @@ -54,40 +54,6 @@ class OpenAIInteract: for resp in request: yield resp - # 请求OpenAI Completion - # def request_completion(self, prompts): - # """请求补全接口回复 - # """ - - # config = pkg.utils.context.get_config() - - # # 根据模型选择使用的接口 - # ai: ModelRequest = create_openai_model_request( - # config.completion_api_params['model'], - # 'user', - # config.openai_config["http_proxy"] if "http_proxy" in config.openai_config else None - # ) - # ai.request( - # prompts, - 
# **config.completion_api_params - # ) - # response = ai.get_response() - - # logging.debug("OpenAI response: %s", response) - - # # 记录使用量 - # current_round_token = 0 - # if 'model' in config.completion_api_params: - # self.audit_mgr.report_text_model_usage(config.completion_api_params['model'], - # ai.get_total_tokens()) - # current_round_token = ai.get_total_tokens() - # elif 'engine' in config.completion_api_params: - # self.audit_mgr.report_text_model_usage(config.completion_api_params['engine'], - # response['usage']['total_tokens']) - # current_round_token = response['usage']['total_tokens'] - - # return ai.get_message(), current_round_token - def request_image(self, prompt) -> dict: """请求图片接口回复 From 0481167dc641428a7a461af2468c46d7685a1eda Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sat, 29 Jul 2023 16:36:31 +0800 Subject: [PATCH 06/16] =?UTF-8?q?feat:=20=E6=94=B9=E4=B8=BA=E5=9C=A8start?= =?UTF-8?q?=E6=B5=81=E7=A8=8B=E8=AE=BE=E7=BD=AEopenai.proxy?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- main.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/main.py b/main.py index 2f18421..15d5701 100644 --- a/main.py +++ b/main.py @@ -178,9 +178,14 @@ def start(first_time_init=False): logging.error(e) traceback.print_exc() + # 配置OpenAI proxy + import openai + openai.proxy = None # 先重置,因为重载后可能需要清除proxy + if "http_proxy" in config.openai_config and config.openai_config["http_proxy"] is not None: + openai.proxy = config.openai_config["http_proxy"] + # 配置openai api_base if "reverse_proxy" in config.openai_config and config.openai_config["reverse_proxy"] is not None: - import openai openai.api_base = config.openai_config["reverse_proxy"] # 主启动流程 From dce6734ba2b73afa503012c780f89b57c3bba388 Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sat, 29 Jul 2023 16:51:19 +0800 Subject: [PATCH 07/16] =?UTF-8?q?feat:=20=E6=94=B9=E4=B8=BA=E6=8E=A8?= =?UTF-8?q?=E8=8D=90=E4=BD=BF=E7=94=A8func()=E8=A3=85=E9=A5=B0=E5=99=A8?= =?UTF-8?q?=E6=B3=A8=E5=86=8C=E5=86=85=E5=AE=B9=E5=87=BD=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/plugin/models.py | 37 +++++++++++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 4 deletions(-) diff --git a/pkg/plugin/models.py b/pkg/plugin/models.py index 17ed209..d7c8ab2 100644 --- a/pkg/plugin/models.py +++ b/pkg/plugin/models.py @@ -140,12 +140,15 @@ ContentFunction = "content_function" """ -def on(event: str): +def on(*args, **kwargs): """注册事件监听器 - :param - event: str 事件名称 """ - return Plugin.on(event) + return Plugin.on(*args, **kwargs) + +def func(*args, **kwargs): + """注册内容函数 + """ + return Plugin.func(*args, **kwargs) __current_registering_plugin__ = "" @@ -205,6 +208,32 @@ class Plugin: return wrapper + @classmethod + def func(cls, name: str=None): + """内容函数装饰器 + """ + global __current_registering_plugin__ + from CallingGPT.entities.namespace import get_func_schema + + def wrapper(func): + + function_schema = get_func_schema(func) + function_schema['name'] = __current_registering_plugin__ + '-' + (func.__name__ if name is None else name) + + host.__function_inst_map__[function_schema['name']] = function_schema['function'] + + del function_schema['function'] + + # logging.debug("registering content function: p='{}', f='{}', s={}".format(__current_registering_plugin__, func, function_schema)) + + host.__callable_functions__.append( + function_schema + ) + + return func + + return wrapper + 
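+# 用法示意(以下为假设的示例函数,仅用于说明 @func 的注册方式,docstring 格式见上方 CallingGPT 文档链接):
+#
+#     @func(name="echo")
+#     def _(text: str):
+#         """原样返回传入的文本。
+#
+#         Args:
+#             text(str): 要返回的文本
+#
+#         Returns:
+#             str: 与传入内容相同的文本
+#         """
+#         return text
+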
def register(name: str, description: str, version: str, author: str): """注册插件, 此函数作为装饰器使用 From 833d29b101e473fe446ef6f89e737d358bbd093f Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sat, 29 Jul 2023 16:55:01 +0800 Subject: [PATCH 08/16] typo: enable->enabled --- pkg/plugin/settings.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/plugin/settings.py b/pkg/plugin/settings.py index beb4168..92fcfe7 100644 --- a/pkg/plugin/settings.py +++ b/pkg/plugin/settings.py @@ -10,7 +10,7 @@ def wrapper_dict_from_runtime_context() -> dict: settings = { "order": [], "functions": { - "enable": host.__enable_content_functions__ + "enabled": host.__enable_content_functions__ } } @@ -26,9 +26,9 @@ def apply_settings(settings: dict): host.__plugins_order__ = settings["order"] if "functions" in settings: - if "enable" in settings["functions"]: - host.__enable_content_functions__ = settings["functions"]["enable"] - # logging.debug("set content function enable: {}".format(host.__enable_content_functions__)) + if "enabled" in settings["functions"]: + host.__enable_content_functions__ = settings["functions"]["enabled"] + # logging.debug("set content function enabled: {}".format(host.__enable_content_functions__)) def dump_settings(): @@ -88,14 +88,14 @@ def load_settings(): if "functions" not in settings: settings["functions"] = { - "enable": host.__enable_content_functions__ + "enabled": host.__enable_content_functions__ } settings_modified = True - elif "enable" not in settings["functions"]: - settings["functions"]["enable"] = host.__enable_content_functions__ + elif "enabled" not in settings["functions"]: + settings["functions"]["enabled"] = host.__enable_content_functions__ settings_modified = True - logging.info("已全局{}内容函数。".format("启用" if settings["functions"]["enable"] else "禁用")) + logging.info("已全局{}内容函数。".format("启用" if settings["functions"]["enabled"] else "禁用")) apply_settings(settings) From 96e474a5552cd626e397bcd02f0caf7fee845f2b Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sat, 29 Jul 2023 17:10:47 +0800 Subject: [PATCH 09/16] =?UTF-8?q?feat:=20=E6=8F=92=E4=BB=B6=E5=BC=80?= =?UTF-8?q?=E5=85=B3=E5=AF=B9=E5=85=B6=E5=86=85=E5=AE=B9=E5=87=BD=E6=95=B0?= =?UTF-8?q?=E7=94=9F=E6=95=88?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/openai/funcmgr.py | 10 +++++++- pkg/plugin/models.py | 54 ++++++++++++------------------------------- pkg/plugin/switch.py | 5 ++++ 3 files changed, 29 insertions(+), 40 deletions(-) diff --git a/pkg/openai/funcmgr.py b/pkg/openai/funcmgr.py index 8f4969a..06c72e2 100644 --- a/pkg/openai/funcmgr.py +++ b/pkg/openai/funcmgr.py @@ -14,7 +14,15 @@ def get_func_schema_list() -> list: if not host.__enable_content_functions__: return [] - schemas = host.__callable_functions__ + schemas = [] + + for func in host.__callable_functions__: + if func['enabled']: + fun_cp = func.copy() + + del fun_cp['enabled'] + + schemas.append(fun_cp) return schemas diff --git a/pkg/plugin/models.py b/pkg/plugin/models.py index d7c8ab2..4075675 100644 --- a/pkg/plugin/models.py +++ b/pkg/plugin/models.py @@ -132,13 +132,6 @@ KeySwitched = "key_switched" key_list: list[str] api-key列表 """ -ContentFunction = "content_function" -"""声明此函数为一个内容函数,在对话中将发送此函数给GPT以供其调用 - 此函数可以具有任意的参数,但必须按照[此文档](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format) - 所述的格式编写函数的docstring。 - 此功能仅支持在使用gpt-3.5或gpt-4系列模型时使用。 -""" - def on(*args, **kwargs): """注册事件监听器 @@ -146,7 
+139,10 @@ def on(*args, **kwargs): return Plugin.on(*args, **kwargs) def func(*args, **kwargs): - """注册内容函数 + """注册内容函数,声明此函数为一个内容函数,在对话中将发送此函数给GPT以供其调用 + 此函数可以具有任意的参数,但必须按照[此文档](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format) + 所述的格式编写函数的docstring。 + 此功能仅支持在使用gpt-3.5或gpt-4系列模型时使用。 """ return Plugin.func(*args, **kwargs) @@ -171,42 +167,20 @@ class Plugin: """ global __current_registering_plugin__ - if event != ContentFunction: - def wrapper(func): - plugin_hooks = host.__plugins__[__current_registering_plugin__]["hooks"] + def wrapper(func): + plugin_hooks = host.__plugins__[__current_registering_plugin__]["hooks"] - if event not in plugin_hooks: - plugin_hooks[event] = [] - plugin_hooks[event].append(func) + if event not in plugin_hooks: + plugin_hooks[event] = [] + plugin_hooks[event].append(func) - # print("registering hook: p='{}', e='{}', f={}".format(__current_registering_plugin__, event, func)) + # print("registering hook: p='{}', e='{}', f={}".format(__current_registering_plugin__, event, func)) - host.__plugins__[__current_registering_plugin__]["hooks"] = plugin_hooks + host.__plugins__[__current_registering_plugin__]["hooks"] = plugin_hooks - return func + return func - return wrapper - else: - from CallingGPT.entities.namespace import get_func_schema - - def wrapper(func): - - function_schema = get_func_schema(func) - function_schema['name'] = __current_registering_plugin__ + '-' + func.__name__ - - host.__function_inst_map__[function_schema['name']] = function_schema['function'] - - del function_schema['function'] - - # logging.debug("registering content function: p='{}', f='{}', s={}".format(__current_registering_plugin__, func, function_schema)) - - host.__callable_functions__.append( - function_schema - ) - - return func - - return wrapper + return wrapper @classmethod def func(cls, name: str=None): @@ -220,6 +194,8 @@ class Plugin: function_schema = get_func_schema(func) function_schema['name'] = __current_registering_plugin__ + '-' + (func.__name__ if name is None else name) + function_schema['enabled'] = True + host.__function_inst_map__[function_schema['name']] = function_schema['function'] del function_schema['function'] diff --git a/pkg/plugin/switch.py b/pkg/plugin/switch.py index 1b15a11..041ec12 100644 --- a/pkg/plugin/switch.py +++ b/pkg/plugin/switch.py @@ -28,6 +28,11 @@ def apply_switch(switch: dict): for plugin_name in switch: host.__plugins__[plugin_name]["enabled"] = switch[plugin_name]["enabled"] + # 查找此插件的所有内容函数 + for func in host.__callable_functions__: + if func['name'].startswith(plugin_name + '-'): + func['enabled'] = switch[plugin_name]["enabled"] + def dump_switch(): """保存开关数据""" From f1bb3045aa0ba4a7a39bc4ca8b64e7280228253f Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sat, 29 Jul 2023 17:26:07 +0800 Subject: [PATCH 10/16] =?UTF-8?q?feat:=20=E6=B7=BB=E5=8A=A0func=E5=91=BD?= =?UTF-8?q?=E4=BB=A4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/qqbot/cmds/funcs/func.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 pkg/qqbot/cmds/funcs/func.py diff --git a/pkg/qqbot/cmds/funcs/func.py b/pkg/qqbot/cmds/funcs/func.py new file mode 100644 index 0000000..414f406 --- /dev/null +++ b/pkg/qqbot/cmds/funcs/func.py @@ -0,0 +1,29 @@ +from ..aamgr import AbstractCommandNode, Context +import logging + + +@AbstractCommandNode.register( + parent=None, + name="func", + description="管理内容函数", + usage="!func", + aliases=[], + 
privilege=1 +) +class FuncCommand(AbstractCommandNode): + @classmethod + def process(cls, ctx: Context) -> tuple[bool, list]: + from pkg.plugin.models import host + + reply = [] + + reply_str = "当前已加载的内容函数:\n\n" + + index = 1 + for func in host.__callable_functions__: + reply_str += "{}. {}{}:\n{}\n\n".format(index, ("(已禁用) " if not func['enabled'] else ""), func['name'], func['description']) + + reply = [reply_str] + + return True, reply + \ No newline at end of file From f4735e5e3050fe01d4985411e678f79272e5a159 Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sat, 29 Jul 2023 17:28:11 +0800 Subject: [PATCH 11/16] =?UTF-8?q?ci(cmd=5Fpriv):=20=E6=B7=BB=E5=8A=A0Calli?= =?UTF-8?q?ngGPT=E4=BE=9D=E8=B5=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/update-cmdpriv-template.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/update-cmdpriv-template.yml b/.github/workflows/update-cmdpriv-template.yml index 04862f3..b8e77f0 100644 --- a/.github/workflows/update-cmdpriv-template.yml +++ b/.github/workflows/update-cmdpriv-template.yml @@ -26,7 +26,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip - python -m pip install --upgrade yiri-mirai openai colorlog func_timeout dulwich Pillow + python -m pip install --upgrade yiri-mirai openai colorlog func_timeout dulwich Pillow CallingGPT - name: Copy Scripts run: | From 89a01378e742365c96ca6ecb3a972b3ee70b8502 Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sat, 29 Jul 2023 17:29:52 +0800 Subject: [PATCH 12/16] =?UTF-8?q?ci:=20=E8=B7=91=E5=B7=A5=E4=BD=9C?= =?UTF-8?q?=E6=B5=81?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- pkg/qqbot/cmds/funcs/func.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/qqbot/cmds/funcs/func.py b/pkg/qqbot/cmds/funcs/func.py index 414f406..9199292 100644 --- a/pkg/qqbot/cmds/funcs/func.py +++ b/pkg/qqbot/cmds/funcs/func.py @@ -26,4 +26,3 @@ class FuncCommand(AbstractCommandNode): reply = [reply_str] return True, reply - \ No newline at end of file From 796eb7c95d5ead741218ad6d02db757648da191f Mon Sep 17 00:00:00 2001 From: GitHub Actions Bot Date: Sat, 29 Jul 2023 09:30:22 +0000 Subject: [PATCH 13/16] Update cmdpriv-template.json --- res/templates/cmdpriv-template.json | 1 + 1 file changed, 1 insertion(+) diff --git a/res/templates/cmdpriv-template.json b/res/templates/cmdpriv-template.json index fea372b..18bc3db 100644 --- a/res/templates/cmdpriv-template.json +++ b/res/templates/cmdpriv-template.json @@ -1,6 +1,7 @@ { "comment": "以下为命令权限,请设置到cmdpriv.json中。关于此功能的说明,请查看:https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%91%BD%E4%BB%A4%E6%9D%83%E9%99%90%E6%8E%A7%E5%88%B6", "draw": 1, + "func": 1, "plugin": 2, "plugin.get": 2, "plugin.update": 2, From 5fbf369f8234c1d2aa093bd8bffd99ad95e95d2f Mon Sep 17 00:00:00 2001 From: RockChinQ <1010553892@qq.com> Date: Sat, 29 Jul 2023 18:37:03 +0800 Subject: [PATCH 14/16] =?UTF-8?q?doc(wiki):=20=E6=9B=B4=E6=96=B0=E6=8F=92?= =?UTF-8?q?=E4=BB=B6=E9=A1=B5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- res/wiki/插件使用.md | 2 + res/wiki/插件开发.md | 176 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 178 insertions(+) diff --git a/res/wiki/插件使用.md b/res/wiki/插件使用.md index f0db3f1..2469584 100644 --- a/res/wiki/插件使用.md +++ b/res/wiki/插件使用.md @@ -33,6 +33,8 @@ QChatGPT 插件使用Wiki 
!plugin del <插件名> 删除插件(需要管理员权限) !plugin on <插件名> 启用插件(需要管理员权限) !plugin off <插件名> 禁用插件(需要管理员权限) + +!func 列出所有内容函数 ``` ### 控制插件执行顺序 diff --git a/res/wiki/插件开发.md b/res/wiki/插件开发.md index e3c6a7b..6888f72 100644 --- a/res/wiki/插件开发.md +++ b/res/wiki/插件开发.md @@ -113,6 +113,182 @@ class HelloPlugin(Plugin): - 一个目录内可以存放多个Python程序文件,以独立出插件的各个功能,便于开发者管理,但不建议在一个目录内注册多个插件 - 插件需要的依赖库请在插件目录下的`requirements.txt`中指定,程序从储存库获取此插件时将自动安装依赖 +## 🪝内容函数 + +通过[GPT的Function Calling能力](https://platform.openai.com/docs/guides/gpt/function-calling)实现的`内容函数`,这是一种嵌入对话中,由GPT自动调用的函数。 + +
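+在插件中,内容函数通过 `@func` 装饰器注册,其 docstring 须按照 [CallingGPT 的函数格式](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format) 编写。下面是一个最简的示意(`get_current_time` 为假设的示例函数):
+
+```python
+@func(name="get_current_time")
+def _():
+    """获取当前时间,在回答与时间相关的问题前调用此函数。
+
+    Returns:
+        str: 当前时间字符串
+    """
+    import datetime
+    return str(datetime.datetime.now())
+```
+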
+<details>
+<summary>Example: a web-browsing plugin</summary>
+
+Load [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin), a plugin whose content function can access the web, then ask the bot about online content
+
+```
+# Console output
+[2023-07-29 17:37:18.698] message.py (26) - [INFO] : [person_1010553892] sent a message: introduce this project: https://git...
+[2023-07-29 17:37:21.292] util.py (67) - [INFO] : message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=1902 request_id=941afc13b2e1bba1e7877b92a970cdea response_code=200
+[2023-07-29 17:37:21.293] chat_completion.py (159) - [INFO] : executing function call: name=Webwlkr-access_the_web, arguments={'url': 'https://github.com/RockChinQ/QChatGPT', 'brief_len': 512}
+[2023-07-29 17:37:21.848] chat_completion.py (164) - [INFO] : function call finished.
+```
+
+![Webwlkr plugin](https://github.com/RockChinQ/QChatGPT/blob/master/res/screenshots/webwlkr_plugin.png?raw=true)
+
+</details>
+
+### Steps to write a content function
+
+1️⃣ First, build your plugin's basic structure following the steps above, then delete the `@on`-decorated class methods from the plugin shown earlier (you may also keep them; deleting is only for brevity)
+
+<details>
+<summary>Structure after deletion</summary>
+
+```python
+from pkg.plugin.models import *
+from pkg.plugin.host import EventContext, PluginHost
+
+"""
+When the message "hello" is received in a private or group chat, reply "hello, <sender id>!" or "hello, everyone!"
+"""
+
+
+# Register the plugin
+@register(name="Hello", description="hello world", version="0.1", author="RockChinQ")
+class HelloPlugin(Plugin):
+
+    # Triggered when the plugin is loaded
+    # plugin_host (pkg.plugin.host.PluginHost) provides methods for interacting with the main program; see its source for details
+    def __init__(self, plugin_host: PluginHost):
+        pass
+
+    # Triggered when the plugin is unloaded
+    def __del__(self):
+        pass
+```
+
+</details>
+
+2️⃣ Now add the following function where the deleted methods used to be
+
+```python
+
+# The function to add
+
+@func(name="access_the_web")  # set the function name
+def _(url: str):
+    """Call this function to search about the question before you answer any questions.
+    - Do not search through baidu.com at any time.
+    - If you need to search something, visit https://www.google.com/search?q=xxx.
+    - If the user asks you to open a url (starting with http:// or https://), visit it directly.
+    - Summarize the plain content result by yourself, DO NOT directly output anything in the result you got.
+
+    Args:
+        url(str): url to visit
+
+    Returns:
+        str: plain text content of the web page
+    """
+    import re
+    import requests
+    from bs4 import BeautifulSoup
+    # Install the dependency first with:
+    # pip install beautifulsoup4
+
+    r = requests.get(
+        url,
+        timeout=10,
+        headers={
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.183"
+        }
+    )
+    soup = BeautifulSoup(r.text, 'html.parser')
+
+    s = soup.get_text()
+
+    # remove redundant blank lines and lines containing only \t and spaces
+    s = re.sub(r'\n\s*\n', '\n', s)
+
+    if len(s) >= 512:  # truncate the page's plain text to the first 512 characters
+        return s[:512]
+
+    return s
+
+```
+<details>
+<summary>The file should now look like this</summary>
+
+```python
+from pkg.plugin.models import *
+from pkg.plugin.host import EventContext, PluginHost
+
+"""
+When the message "hello" is received in a private or group chat, reply "hello, <sender id>!" or "hello, everyone!"
+"""
+
+
+# Register the plugin
+@register(name="Hello", description="hello world", version="0.1", author="RockChinQ")
+class HelloPlugin(Plugin):
+
+    # Triggered when the plugin is loaded
+    # plugin_host (pkg.plugin.host.PluginHost) provides methods for interacting with the main program; see its source for details
+    def __init__(self, plugin_host: PluginHost):
+        pass
+
+    @func(name="access_the_web")
+    def _(url: str):
+        """Call this function to search about the question before you answer any questions.
+        - Do not search through baidu.com at any time.
+        - If you need to search something, visit https://www.google.com/search?q=xxx.
+        - If the user asks you to open a url (starting with http:// or https://), visit it directly.
+        - Summarize the plain content result by yourself, DO NOT directly output anything in the result you got.
+
+        Args:
+            url(str): url to visit
+
+        Returns:
+            str: plain text content of the web page
+        """
+        import re
+        import requests
+        from bs4 import BeautifulSoup
+        # Install the dependency first with:
+        # pip install beautifulsoup4
+
+        r = requests.get(
+            url,
+            timeout=10,
+            headers={
+                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.183"
+            }
+        )
+        soup = BeautifulSoup(r.text, 'html.parser')
+
+        s = soup.get_text()
+
+        # remove redundant blank lines and lines containing only \t and spaces
+        s = re.sub(r'\n\s*\n', '\n', s)
+
+        if len(s) >= 512:  # truncate the page's plain text to the first 512 characters
+            return s[:512]
+
+        return s
+
+    # Triggered when the plugin is unloaded
+    def __del__(self):
+        pass
+```
+
+</details>
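+
+For reference, CallingGPT parses the docstring above into a function schema that is sent to the model with each request. The exact output may differ; assuming the standard OpenAI function-calling format, the schema for `access_the_web` looks roughly like this:
+
+```python
+# Approximate generated schema (illustrative sketch, not verbatim output)
+{
+    "name": "access_the_web",
+    "description": "Call this function to search about the question before you answer any questions. ...",
+    "parameters": {
+        "type": "object",
+        "properties": {
+            "url": {
+                "type": "string",
+                "description": "url to visit"
+            }
+        },
+        "required": ["url"]
+    }
+}
+```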
+
+#### Please note:
+
+- The function's docstring must strictly follow the required format; see [this document](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format) for the specification
+- Content functions and `@on`-decorated behavior functions can coexist in the same plugin, and both are controlled by the plugin switches in `switch.json`
+- Make sure the model you use supports function calling; you can change the model in `completion_api_params` in `config.py`, and `gpt-3.5-turbo-16k` is recommended
+
+3️⃣ Your program can now access the web. Restart it, then ask the bot about online content, or send it an article link and ask for a summary.
+
+- This is only an example; for a more capable web-access plugin, see [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin)
+
 ## 📄API Reference
 
 ### Overview

From fe6275000ec2ff6aa5d501d3b6cac9c2ed74da7d Mon Sep 17 00:00:00 2001
From: RockChinQ <1010553892@qq.com>
Date: Sat, 29 Jul 2023 18:40:49 +0800
Subject: [PATCH 15/16] doc(wiki): update the wiki plugin usage page
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 res/wiki/插件使用.md | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/res/wiki/插件使用.md b/res/wiki/插件使用.md
index 2469584..143c7e8 100644
--- a/res/wiki/插件使用.md
+++ b/res/wiki/插件使用.md
@@ -44,4 +44,9 @@ QChatGPT Plugin Usage Wiki
 ### Enabling or disabling plugins
 
 Manage plugin on/off state without uninstalling
-Edit the `switch.json` file in the `plugins` directory and set the `enabled` field of the corresponding plugin to `true`/`false` (on/off); then restart the program or hot-reload to apply the change
\ No newline at end of file
+Edit the `switch.json` file in the `plugins` directory and set the `enabled` field of the corresponding plugin to `true`/`false` (on/off); then restart the program or hot-reload to apply the change
+
+### Global content function switch
+
+Content functions are built on [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling); they are embedded in the conversation and called automatically by GPT.
+Each plugin can register its own content functions; set `enabled` under `functions` in `settings.json` in the `plugins` directory to `true` or `false` to enable or disable them all.
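+
+A sketch of the relevant part of `plugins/settings.json`, assuming no other keys (the actual file may contain more settings):
+
+```json
+{
+    "functions": {
+        "enabled": true
+    }
+}
+```
\ No newline at end of file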

From 24273ac158ffac0c550ae34d8f10ad7c9f06f27b Mon Sep 17 00:00:00 2001
From: RockChinQ <1010553892@qq.com>
Date: Sat, 29 Jul 2023 18:55:18 +0800
Subject: [PATCH 16/16] doc: add content function notes to README
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 README.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/README.md b/README.md
index e296d3d..e70b264 100644
--- a/README.md
+++ b/README.md
@@ -10,6 +10,7 @@
 
 ![Wakapi Count](https://wakapi.dev/api/badge/RockChinQ/interval:any/project:QChatGPT)
 
+> 2023/7/29 GPT's Function Calling feature can now be used for a ChatGPT-Plugin-like effect; see the [content function section of the Wiki](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91#%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0)
 > 2023/4/24 Logging in to QQ via go-cqhttp is now supported; see [this document](https://github.com/RockChinQ/QChatGPT/wiki/go-cqhttp%E9%85%8D%E7%BD%AE)
 > 2023/3/18 The GPT-4 API is now supported (beta); see `completion_api_params` in `config-template.py`
 > 2023/3/15 The reverse-engineered library now supports New Bing; see the [plugin documentation](https://github.com/RockChinQ/revLibs) for usage
@@ -111,6 +112,7 @@
 
 ✅Plugin loading supported🧩
 
   - Implements its own plugin loader and related support
+  - Supports GPT's Function Calling feature
   - See the [plugin usage page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8) for details
 
@@ -280,6 +282,8 @@ python3 main.py
 See the [Wiki plugin usage page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8) for details
 For a development tutorial, see the [Wiki plugin development page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91)
 
+⭐We now support [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling); see the plugin development page of the wiki for how to use it in QChatGPT
+
 <details>
 
 <summary>View the plugin list</summary>
 
@@ -295,6 +299,7 @@ python3 main.py
 
 [Plugin list](https://github.com/stars/RockChinQ/lists/qchatgpt-%E6%8F%92%E4%BB%B6) - feel free to open an issue to submit new plugins
 
+- [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin) - Lets the bot browse the web!!
 - [revLibs](https://github.com/RockChinQ/revLibs) - Connects the ChatGPT web version to this project; see [how the official API and the web version differ](https://github.com/RockChinQ/QChatGPT/wiki/%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E3%80%81ChatGPT%E7%BD%91%E9%A1%B5%E7%89%88%E3%80%81ChatGPT-API%E5%8C%BA%E5%88%AB)
 - [Switcher](https://github.com/RockChinQ/Switcher) - Switch the model in use via a command
 - [hello_plugin](https://github.com/RockChinQ/hello_plugin) - Repository form of `hello_plugin`; a plugin development template