Merge pull request #513 from RockChinQ/feat-function-calling-integration

[Feat] Support GPT's function calling capability
Junyan Qin 2023-07-29 18:57:29 +08:00 committed by GitHub
commit 7473cdfe16
21 changed files with 865 additions and 200 deletions

View File

@ -26,7 +26,7 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade yiri-mirai openai colorlog func_timeout dulwich Pillow
python -m pip install --upgrade yiri-mirai openai colorlog func_timeout dulwich Pillow CallingGPT
- name: Copy Scripts
run: |

View File

@ -10,6 +10,7 @@
![Wakapi Count](https://wakapi.dev/api/badge/RockChinQ/interval:any/project:QChatGPT)
> 2023/7/29 GPT's Function Calling is now supported, enabling ChatGPT-plugin-like behavior; see the [content functions section of the Wiki](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91#%E5%86%85%E5%AE%B9%E5%87%BD%E6%95%B0)
> 2023/4/24 Logging in to QQ via go-cqhttp is supported; see [this document](https://github.com/RockChinQ/QChatGPT/wiki/go-cqhttp%E9%85%8D%E7%BD%AE)
> 2023/3/18 The GPT-4 API (beta) is now supported; see `completion_api_params` in `config-template.py`
> 2023/3/15 The reverse-engineered library now supports New Bing; see the [plugin documentation](https://github.com/RockChinQ/revLibs) for usage
@ -111,6 +112,7 @@
<summary>✅ Plugin loading supported 🧩</summary>
- Implements its own plugin loader and related support
- Supports GPT's Function Calling
- See the [plugin usage page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8) for details
</details>
<details>
@ -280,6 +282,8 @@ python3 main.py
See the [Wiki plugin usage page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E4%BD%BF%E7%94%A8) for details
For a development tutorial, see the [Wiki plugin development page](https://github.com/RockChinQ/QChatGPT/wiki/%E6%8F%92%E4%BB%B6%E5%BC%80%E5%8F%91)
⭐ [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling) is now supported; see the wiki's plugin development page for how to use it in QChatGPT
<details>
<summary>View the plugin list</summary>
@ -295,6 +299,7 @@ python3 main.py
[Plugin list](https://github.com/stars/RockChinQ/lists/qchatgpt-%E6%8F%92%E4%BB%B6); issues proposing new plugins are welcome
- [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin) - Gives the bot internet access!!
- [revLibs](https://github.com/RockChinQ/revLibs) - Connects the ChatGPT web version to this project; see [how the official API and the web version differ](https://github.com/RockChinQ/QChatGPT/wiki/%E5%AE%98%E6%96%B9%E6%8E%A5%E5%8F%A3%E3%80%81ChatGPT%E7%BD%91%E9%A1%B5%E7%89%88%E3%80%81ChatGPT-API%E5%8C%BA%E5%88%AB)
- [Switcher](https://github.com/RockChinQ/Switcher) - Switch the model in use via commands
- [hello_plugin](https://github.com/RockChinQ/hello_plugin) - Repository form of `hello_plugin`, a plugin development template

View File

@ -47,7 +47,7 @@ def init_db():
def ensure_dependencies():
import pkg.utils.pkgmgr as pkgmgr
pkgmgr.run_pip(["install", "openai", "Pillow", "nakuru-project-idk", "--upgrade",
pkgmgr.run_pip(["install", "openai", "Pillow", "nakuru-project-idk", "CallingGPT", "--upgrade",
"-i", "https://pypi.tuna.tsinghua.edu.cn/simple",
"--trusted-host", "pypi.tuna.tsinghua.edu.cn"])
@ -178,9 +178,14 @@ def start(first_time_init=False):
logging.error(e)
traceback.print_exc()
# Configure the OpenAI proxy
import openai
openai.proxy = None # reset first; a reload may need to clear the proxy
if "http_proxy" in config.openai_config and config.openai_config["http_proxy"] is not None:
openai.proxy = config.openai_config["http_proxy"]
# Configure the openai api_base
if "reverse_proxy" in config.openai_config and config.openai_config["reverse_proxy"] is not None:
import openai
openai.api_base = config.openai_config["reverse_proxy"]
# Main startup flow
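For reference, a sketch of the `config.py` fields this block reads (the `http_proxy`/`reverse_proxy` keys follow the code above; the shape of the `api_key` entry is an assumption based on the rest of the project):

```python
# config.py (excerpt; values are illustrative)
openai_config = {
    "api_key": {"default": "sk-..."},       # assumed shape of the key dict
    "http_proxy": "http://127.0.0.1:7890",  # set to None to disable the proxy
    "reverse_proxy": None,                  # e.g. an api_base such as "https://mirror.example.com/v1"
}
```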

View File

View File

@ -0,0 +1,195 @@
import openai
import json
import logging
from .model import RequestBase
from ..funcmgr import get_func_schema_list, execute_function, get_func, get_func_schema, ContentFunctionNotFoundError
class ChatCompletionRequest(RequestBase):
"""调用ChatCompletion接口的请求类。
此类保证每一次返回的角色为assistant的信息的finish_reason一定为stop
若有函数调用响应本类的返回瀑布是函数调用请求->函数调用结果->...->assistant的信息->stop
"""
model: str
messages: list[dict[str, str]]
kwargs: dict
stopped: bool = False
pending_func_call: dict = None
pending_msg: str
def flush_pending_msg(self):
self.append_message(
role="assistant",
content=self.pending_msg
)
self.pending_msg = ""
def append_message(self, role: str, content: str, name: str=None):
msg = {
"role": role,
"content": content
}
if name is not None:
msg['name'] = name
self.messages.append(msg)
def __init__(
self,
model: str,
messages: list[dict[str, str]],
**kwargs
):
self.model = model
self.messages = messages.copy()
self.kwargs = kwargs
self.req_func = openai.ChatCompletion.acreate
self.pending_func_call = None
self.stopped = False
self.pending_msg = ""
def __iter__(self):
return self
def __next__(self) -> dict:
if self.stopped:
raise StopIteration()
if self.pending_func_call is None: # no pending function call request
args = {
"model": self.model,
"messages": self.messages,
}
funcs = get_func_schema_list()
if len(funcs) > 0:
args['functions'] = funcs
# merge in kwargs
args = {**args, **self.kwargs}
resp = self._req(**args)
choice0 = resp["choices"][0]
# stop iterating if this is not a function call and finish_reason is stop
if 'function_call' not in choice0['message'] and choice0["finish_reason"] == "stop":
self.stopped = True
if 'function_call' in choice0['message']:
self.pending_func_call = choice0['message']['function_call']
self.append_message(
role="assistant",
content="function call: "+json.dumps(self.pending_func_call, ensure_ascii=False)
)
return {
"id": resp["id"],
"choices": [
{
"index": choice0["index"],
"message": {
"role": "assistant",
"type": "function_call",
"content": None,
"function_call": choice0['message']['function_call']
},
"finish_reason": "function_call"
}
],
"usage": resp["usage"]
}
else:
# self.pending_msg += choice0['message']['content']
# a plain reply is always the final message, so it need not be appended to the internal messages
return {
"id": resp["id"],
"choices": [
{
"index": choice0["index"],
"message": {
"role": "assistant",
"type": "text",
"content": choice0['message']['content']
},
"finish_reason": "stop"
}
],
"usage": resp["usage"]
}
else: # handle the pending function call
cp_pending_func_call = self.pending_func_call.copy()
self.pending_func_call = None
func_name = cp_pending_func_call['name']
arguments = {}
try:
try:
arguments = json.loads(cp_pending_func_call['arguments'])
# fallback for arguments that are not valid JSON
except json.decoder.JSONDecodeError:
# look up the function's parameter list
func_schema = get_func_schema(func_name)
arguments = {
func_schema['parameters']['required'][0]: cp_pending_func_call['arguments']
}
logging.info("executing function call: name={}, arguments={}".format(func_name, arguments))
# execute the function call
ret = execute_function(func_name, arguments)
logging.info("function execution finished.")
self.append_message(
role="function",
content=json.dumps(ret, ensure_ascii=False),
name=func_name
)
return {
"id": -1,
"choices": [
{
"index": -1,
"message": {
"role": "function",
"type": "function_return",
"function_name": func_name,
"content": json.dumps(ret, ensure_ascii=False)
},
"finish_reason": "function_return"
}
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0
}
}
except ContentFunctionNotFoundError:
raise Exception("function not found: {}".format(func_name))
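For orientation, a minimal sketch of driving this iterator directly (assuming an API key is configured; the model name and message are illustrative):

```python
import openai

from pkg.openai.api.chat_completion import ChatCompletionRequest

openai.api_key = "sk-..."  # normally configured by the main program

# Each next() yields a plain assistant reply, a function_call request,
# or a function_return record, until finish_reason == "stop".
for resp in ChatCompletionRequest(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "What's the weather like in Berlin?"}],
):
    msg = resp["choices"][0]["message"]
    print(msg["type"], msg.get("content"))
```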

View File

@ -0,0 +1,111 @@
import openai
from .model import RequestBase
class CompletionRequest(RequestBase):
"""调用Completion接口的请求类。
调用方可以一直next completion直到finish_reason为stop
"""
model: str
prompt: str
kwargs: dict
stopped: bool = False
def __init__(
self,
model: str,
messages: list[dict[str, str]],
**kwargs
):
self.model = model
self.prompt = ""
for message in messages:
self.prompt += message["role"] + ": " + message["content"] + "\n"
self.prompt += "assistant: "
self.kwargs = kwargs
self.req_func = openai.Completion.acreate
def __iter__(self):
return self
def __next__(self) -> dict:
"""调用Completion接口返回生成的文本
{
"id": "id",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"type": "text",
"content": "message"
},
"finish_reason": "reason"
}
],
"usage": {
"prompt_tokens": 10,
"completion_tokens": 20,
"total_tokens": 30
}
}
"""
if self.stopped:
raise StopIteration()
resp = self._req(
model=self.model,
prompt=self.prompt,
**self.kwargs
)
if resp["choices"][0]["finish_reason"] == "stop":
self.stopped = True
choice0 = resp["choices"][0]
self.prompt += choice0["text"]
return {
"id": resp["id"],
"choices": [
{
"index": choice0["index"],
"message": {
"role": "assistant",
"type": "text",
"content": choice0["text"]
},
"finish_reason": choice0["finish_reason"]
}
],
"usage": resp["usage"]
}
if __name__ == "__main__":
import os
openai.api_key = os.environ["OPENAI_API_KEY"]
for resp in CompletionRequest(
model="text-davinci-003",
messages=[
{
"role": "user",
"content": "Hello, who are you?"
}
]
):
print(resp)
if resp["choices"][0]["finish_reason"] == "stop":
break

49
pkg/openai/api/model.py Normal file
View File

@ -0,0 +1,49 @@
# Base model definitions for the different API request classes
import threading
import asyncio
import openai
class RequestBase:
req_func: callable
def __init__(self, *args, **kwargs):
raise NotImplementedError
def _req(self, **kwargs):
"""处理代理问题"""
ret: dict = {}
exception: Exception = None
async def awrapper(**kwargs):
nonlocal ret, exception
try:
ret = await self.req_func(**kwargs)
return ret
except Exception as e:
exception = e
loop = asyncio.new_event_loop()
thr = threading.Thread(
target=loop.run_until_complete,
args=(awrapper(**kwargs),)
)
thr.start()
thr.join()
if exception is not None:
raise exception
return ret
def __iter__(self):
return self
def __next__(self):
raise NotImplementedError
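To make the `RequestBase` contract concrete, a toy subclass (`EchoRequest` is invented for illustration; the real subclasses are `CompletionRequest` and `ChatCompletionRequest`):

```python
class EchoRequest(RequestBase):
    """Toy subclass: wires up an async req_func and yields a single result."""

    def __init__(self, text: str):
        self.text = text
        self.done = False

        async def fake_api(**kwargs):  # stands in for openai.*.acreate
            return {"echo": kwargs["text"]}

        self.req_func = fake_api

    def __iter__(self):
        return self

    def __next__(self) -> dict:
        if self.done:
            raise StopIteration()
        self.done = True
        # _req runs req_func in a fresh event loop on a worker thread
        return self._req(text=self.text)

# for r in EchoRequest("hi"): print(r)   # -> {'echo': 'hi'}
```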

47
pkg/openai/funcmgr.py Normal file
View File

@ -0,0 +1,47 @@
# Helper functions supporting function calling
import logging
from pkg.plugin import host
class ContentFunctionNotFoundError(Exception):
pass
def get_func_schema_list() -> list:
"""从plugin包中的函数结构中获取并处理成受GPT支持的格式"""
if not host.__enable_content_functions__:
return []
schemas = []
for func in host.__callable_functions__:
if func['enabled']:
fun_cp = func.copy()
del fun_cp['enabled']
schemas.append(fun_cp)
return schemas
def get_func(name: str) -> callable:
if name not in host.__function_inst_map__:
raise ContentFunctionNotFoundError("content function not found: {}".format(name))
return host.__function_inst_map__[name]
def get_func_schema(name: str) -> dict:
for func in host.__callable_functions__:
if func['name'] == name:
return func
raise ContentFunctionNotFoundError("content function not found: {}".format(name))
def execute_function(name: str, kwargs: dict) -> any:
"""执行函数调用"""
logging.debug("executing function: name='{}', kwargs={}".format(name, kwargs))
func = get_func(name)
return func(**kwargs)
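A sketch of how these helpers fit together, registering a hypothetical content function by hand (plugins normally do this via the `@func` decorator introduced later in this diff):

```python
from pkg.plugin import host
from pkg.openai import funcmgr

# hypothetical content function, registered the way the plugin host would
host.__function_inst_map__["Demo-add"] = lambda a, b: a + b
host.__callable_functions__.append({
    "name": "Demo-add",
    "description": "Add two numbers.",
    "parameters": {
        "type": "object",
        "properties": {"a": {"type": "number"}, "b": {"type": "number"}},
        "required": ["a", "b"],
    },
    "enabled": True,
})

print(funcmgr.get_func_schema_list())  # schemas handed to GPT ('enabled' stripped)
print(funcmgr.execute_function("Demo-add", {"a": 1, "b": 2}))  # -> 3
```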

View File

@ -5,7 +5,9 @@ import openai
import pkg.openai.keymgr
import pkg.utils.context
import pkg.audit.gatherer
from pkg.openai.modelmgr import ModelRequest, create_openai_model_request
from pkg.openai.modelmgr import select_request_cls
from pkg.openai.api.model import RequestBase
class OpenAIInteract:
@ -33,45 +35,24 @@ class OpenAIInteract:
pkg.utils.context.set_openai_manager(self)
# Request OpenAI completions
def request_completion(self, prompts) -> tuple[str, int]:
"""Request a reply from the completion API
Parameters:
prompts (str): prompt
Returns:
str: reply
def request_completion(self, messages: list):
"""Request a reply from the completion API
"""
# select the request class for the API
config = pkg.utils.context.get_config()
# choose the API according to the model
ai: ModelRequest = create_openai_model_request(
config.completion_api_params['model'],
'user',
config.openai_config["http_proxy"] if "http_proxy" in config.openai_config else None
)
ai.request(
prompts,
**config.completion_api_params
)
response = ai.get_response()
request: RequestBase
logging.debug("OpenAI response: %s", response)
model: str = config.completion_api_params['model']
# record usage
current_round_token = 0
if 'model' in config.completion_api_params:
self.audit_mgr.report_text_model_usage(config.completion_api_params['model'],
ai.get_total_tokens())
current_round_token = ai.get_total_tokens()
elif 'engine' in config.completion_api_params:
self.audit_mgr.report_text_model_usage(config.completion_api_params['engine'],
response['usage']['total_tokens'])
current_round_token = response['usage']['total_tokens']
cp_params = config.completion_api_params.copy()
del cp_params['model']
return ai.get_message(), current_round_token
request = select_request_cls(model, messages, cp_params)
# call the API
for resp in request:
yield resp
def request_image(self, prompt) -> dict:
"""Request a reply from the image API

View File

@ -8,6 +8,10 @@ Completion - models such as text-davinci-003
import openai, logging, threading, asyncio
import openai.error as aiE
from pkg.openai.api.model import RequestBase
from pkg.openai.api.completion import CompletionRequest
from pkg.openai.api.chat_completion import ChatCompletionRequest
COMPLETION_MODELS = {
'text-davinci-003',
'text-davinci-002',
@ -39,153 +43,9 @@ IMAGE_MODELS = {
}
class ModelRequest:
"""模型接口请求父类"""
can_chat = False
runtime: threading.Thread = None
ret = {}
proxy: str = None
request_ready = True
error_info: str = "If you see this message without any error having occurred, please report an issue with your config file attached"
def __init__(self, model_name, user_name, request_fun, http_proxy:str = None, time_out = None):
self.model_name = model_name
self.user_name = user_name
self.request_fun = request_fun
self.time_out = time_out
if http_proxy != None:
self.proxy = http_proxy
openai.proxy = self.proxy
self.request_ready = False
async def __a_request__(self, **kwargs):
"""Async request"""
try:
self.ret: dict = await self.request_fun(**kwargs)
self.request_ready = True
except aiE.APIConnectionError as e:
self.error_info = "{}\nPlease check that your network connection or proxy is working".format(e)
raise ConnectionError(self.error_info)
except ValueError as e:
self.error_info = "{}\nThis error may be caused by a malformed http_proxy setting".format(e)
except Exception as e:
self.error_info = "{}\nUnknown error caused by a request exception; please check the logs".format(e)
raise type(e)(self.error_info)
def request(self, **kwargs):
"""Send a request to the API"""
if self.proxy != None: # async request
self.request_ready = False
loop = asyncio.new_event_loop()
self.runtime = threading.Thread(
target=loop.run_until_complete,
args=(self.__a_request__(**kwargs),)
)
self.runtime.start()
else: # sync request
self.ret = self.request_fun(**kwargs)
def __msg_handle__(self, msg):
"""Convert the prompt dicts into the format the API expects"""
return msg
def ret_handle(self):
'''
Handle the API response.
If overridden, the subclass should check the async thread state, or call super() wherever the check is needed
'''
if self.runtime != None and isinstance(self.runtime, threading.Thread):
self.runtime.join(self.time_out)
if self.request_ready:
return
raise Exception(self.error_info)
def get_total_tokens(self):
try:
return self.ret['usage']['total_tokens']
except:
return 0
def get_message(self):
return self.message
def get_response(self):
return self.ret
class ChatCompletionModel(ModelRequest):
"""ChatCompletion接口的请求实现"""
Chat_role = ['system', 'user', 'assistant']
def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs):
if http_proxy == None:
request_fun = openai.ChatCompletion.create
else:
request_fun = openai.ChatCompletion.acreate
self.can_chat = True
super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs)
def request(self, prompts, **kwargs):
prompts = self.__msg_handle__(prompts)
kwargs['messages'] = prompts
super().request(**kwargs)
self.ret_handle()
def __msg_handle__(self, msgs):
temp_msgs = []
# copy msgs into temp_msgs
for msg in msgs:
temp_msgs.append(msg.copy())
return temp_msgs
def get_message(self):
return self.ret["choices"][0]["message"]['content'] #需要时直接加载加快请求速度,降低内存消耗
class CompletionModel(ModelRequest):
"""Completion接口的请求实现"""
def __init__(self, model_name, user_name, http_proxy:str = None, **kwargs):
if http_proxy == None:
request_fun = openai.Completion.create
else:
request_fun = openai.Completion.acreate
super().__init__(model_name, user_name, request_fun, http_proxy, **kwargs)
def request(self, prompts, **kwargs):
prompts = self.__msg_handle__(prompts)
kwargs['prompt'] = prompts
super().request(**kwargs)
self.ret_handle()
def __msg_handle__(self, msgs):
prompt = ''
for msg in msgs:
prompt = prompt + "{}: {}\n".format(msg['role'], msg['content'])
# for msg in msgs:
# if msg['role'] == 'assistant':
# prompt = prompt + "{}\n".format(msg['content'])
# else:
# prompt = prompt + "{}:{}\n".format(msg['role'] , msg['content'])
prompt = prompt + "assistant: "
return prompt
def get_message(self):
return self.ret["choices"][0]["text"]
def create_openai_model_request(model_name: str, user_name: str = 'user', http_proxy:str = None) -> ModelRequest:
"""使用给定的模型名称创建模型请求对象"""
def select_request_cls(model_name: str, messages: list, args: dict) -> RequestBase:
if model_name in CHAT_COMPLETION_MODELS:
model = ChatCompletionModel(model_name, user_name, http_proxy)
return ChatCompletionRequest(model_name, messages, **args)
elif model_name in COMPLETION_MODELS:
model = CompletionModel(model_name, user_name, http_proxy)
else :
log = "model [{}] not found, please check the config file".format(model_name)
logging.error(log)
raise IndexError(log)
logging.debug("using API [{}] to create model request [{}]".format(model.__class__.__name__, model_name))
return model
return CompletionRequest(model_name, messages, **args)
raise ValueError("unsupported model [{}], please check the config file".format(model_name))
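A quick sketch of the resulting routing (model names taken from the COMPLETION_MODELS/CHAT_COMPLETION_MODELS sets above; constructing a request object does not call the API yet):

```python
from pkg.openai.modelmgr import select_request_cls

messages = [{"role": "user", "content": "hi"}]

select_request_cls("gpt-3.5-turbo", messages, {})     # -> ChatCompletionRequest
select_request_cls("text-davinci-003", messages, {})  # -> CompletionRequest
select_request_cls("no-such-model", messages, {})     # raises ValueError
```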

79
pkg/openai/sess.py Normal file
View File

@ -0,0 +1,79 @@
import time
import threading
import logging
sessions = {}
class SessionOfflineStatus:
ON_GOING = "on_going"
EXPLICITLY_CLOSED = "explicitly_closed"
def reset_session_prompt(session_name, prompt):
pass
def load_sessions():
pass
def get_session(session_name: str) -> 'Session':
pass
def dump_session(session_name: str):
pass
class Session:
name: str = ''
default_prompt: list = []
"""Session system prompt"""
messages: list = []
"""Message history"""
token_counts: list = []
"""Token count for each round"""
create_ts: int = 0
"""Session creation timestamp"""
last_active_ts: int = 0
"""Timestamp of the session's last activity"""
just_switched_to_exist_session: bool = False
response_lock = None
def __init__(self, name: str):
self.name = name
self.default_prompt = self.get_runtime_default_prompt()
logging.debug("prompt is: {}".format(self.default_prompt))
self.messages = []
self.token_counts = []
self.create_ts = int(time.time())
self.last_active_ts = int(time.time())
self.response_lock = threading.Lock()
self.schedule()
def get_runtime_default_prompt(self, use_default: str = None) -> list:
"""从提示词管理器中获取所需提示词"""
import pkg.openai.dprompt as dprompt
if use_default is None:
use_default = dprompt.mode_inst().get_using_name()
current_default_prompt, _ = dprompt.mode_inst().get_prompt(use_default)
return current_default_prompt
def schedule(self):
"""定时会话过期检查任务"""
def expire_check_timer_loop(self):
"""会话过期检查任务"""

View File

@ -222,22 +222,67 @@ class Session:
for token_count in counts:
total_token_before_query += token_count
res_text = ""
pending_msgs = []
total_tokens = 0
for resp in pkg.utils.context.get_openai_manager().request_completion(prompts):
if resp['choices'][0]['message']['type'] == 'text': # plain reply
res_text += resp['choices'][0]['message']['content']
total_tokens += resp['usage']['total_tokens']
pending_msgs.append(
{
"role": "assistant",
"content": resp['choices'][0]['message']['content']
}
)
elif resp['choices'][0]['message']['type'] == 'function_call':
# self.prompt.append(
# {
# "role": "assistant",
# "content": "function call: "+json.dumps(resp['choices'][0]['message']['function_call'])
# }
# )
total_tokens += resp['usage']['total_tokens']
elif resp['choices'][0]['message']['type'] == 'function_return':
# self.prompt.append(
# {
# "role": "function",
# "name": resp['choices'][0]['message']['function_name'],
# "content": json.dumps(resp['choices'][0]['message']['content'])
# }
# )
# total_tokens += resp['usage']['total_tokens']
pass
# request a completion from the API
message, total_token = pkg.utils.context.get_openai_manager().request_completion(
prompts,
)
# message, total_token = pkg.utils.context.get_openai_manager().request_completion(
# prompts,
# )
# got a response; process the reply
res_test = message
res_ans = res_test.strip()
# res_test = message
res_ans = res_text.strip()
# append both sides of this exchange to the prompt
# self.prompt.append({'role': 'user', 'content': text})
# self.prompt.append({'role': 'assistant', 'content': res_ans})
self.prompt.append({'role': 'user', 'content': text})
self.prompt.append({'role': 'assistant', 'content': res_ans})
# append pending_msgs
self.prompt += pending_msgs
# record this round's token count in token_counts
self.token_counts.append(total_token-total_token_before_query)
logging.debug("tokens used this round: {}, session counts: {}".format(total_token-total_token_before_query, self.token_counts))
self.token_counts.append(total_tokens-total_token_before_query)
logging.debug("tokens used this round: {}, session counts: {}".format(total_tokens-total_token_before_query, self.token_counts))
if self.just_switched_to_exist_session:
self.just_switched_to_exist_session = False

View File

@ -16,6 +16,8 @@ import pkg.qqbot.adapter as msadapter
from mirai import Mirai
from CallingGPT.session.session import Session
__plugins__ = {}
"""插件列表
@ -42,6 +44,15 @@ __plugins__ = {}
__plugins_order__ = []
"""插件顺序"""
__enable_content_functions__ = True
"""Whether content functions are enabled"""
__callable_functions__ = []
"""Function schemas available for GPT to call"""
__function_inst_map__: dict[str, callable] = {}
"""Mapping of function name -> instance"""
def generate_plugin_order():
"""根据__plugin__生成插件初始顺序无视是否启用"""
@ -102,6 +113,10 @@ def load_plugins():
# load the plugin order
settings.load_settings()
# log the list of registered content functions
logging.debug("registered content functions: {}".format(__callable_functions__))
logging.debug("function instance map: {}".format(__function_inst_map__))
def initialize_plugins():
"""初始化插件"""
@ -300,7 +315,9 @@ class PluginHost:
"""插件宿主"""
def __init__(self):
"""初始化插件宿主"""
context.set_plugin_host(self)
self.calling_gpt_session = Session([])
def get_runtime_context(self) -> context:
"""获取运行时上下文pkg.utils.context模块的对象

View File

@ -133,12 +133,18 @@ KeySwitched = "key_switched"
"""
def on(event: str):
def on(*args, **kwargs):
"""注册事件监听器
:param
event: str 事件名称
"""
return Plugin.on(event)
return Plugin.on(*args, **kwargs)
def func(*args, **kwargs):
"""注册内容函数声明此函数为一个内容函数在对话中将发送此函数给GPT以供其调用
此函数可以具有任意的参数但必须按照[此文档](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format)
所述的格式编写函数的docstring
此功能仅支持在使用gpt-3.5或gpt-4系列模型时使用
"""
return Plugin.func(*args, **kwargs)
__current_registering_plugin__ = ""
@ -176,6 +182,34 @@ class Plugin:
return wrapper
@classmethod
def func(cls, name: str=None):
"""内容函数装饰器
"""
global __current_registering_plugin__
from CallingGPT.entities.namespace import get_func_schema
def wrapper(func):
function_schema = get_func_schema(func)
function_schema['name'] = __current_registering_plugin__ + '-' + (func.__name__ if name is None else name)
function_schema['enabled'] = True
host.__function_inst_map__[function_schema['name']] = function_schema['function']
del function_schema['function']
# logging.debug("registering content function: p='{}', f='{}', s={}".format(__current_registering_plugin__, func, function_schema))
host.__callable_functions__.append(
function_schema
)
return func
return wrapper
def register(name: str, description: str, version: str, author: str):
"""注册插件, 此函数作为装饰器使用

View File

@ -8,7 +8,10 @@ import logging
def wrapper_dict_from_runtime_context() -> dict:
"""从变量中包装settings.json的数据字典"""
settings = {
"order": []
"order": [],
"functions": {
"enabled": host.__enable_content_functions__
}
}
for plugin_name in host.__plugins_order__:
@ -22,6 +25,11 @@ def apply_settings(settings: dict):
if "order" in settings:
host.__plugins_order__ = settings["order"]
if "functions" in settings:
if "enabled" in settings["functions"]:
host.__enable_content_functions__ = settings["functions"]["enabled"]
# logging.debug("set content function enabled: {}".format(host.__enable_content_functions__))
def dump_settings():
"""保存settings.json数据"""
@ -78,6 +86,17 @@ def load_settings():
settings["order"].append(plugin_name)
settings_modified = True
if "functions" not in settings:
settings["functions"] = {
"enabled": host.__enable_content_functions__
}
settings_modified = True
elif "enabled" not in settings["functions"]:
settings["functions"]["enabled"] = host.__enable_content_functions__
settings_modified = True
logging.info("已全局{}内容函数。".format("启用" if settings["functions"]["enabled"] else "禁用"))
apply_settings(settings)
if settings_modified:

View File

@ -28,6 +28,11 @@ def apply_switch(switch: dict):
for plugin_name in switch:
host.__plugins__[plugin_name]["enabled"] = switch[plugin_name]["enabled"]
# find all of this plugin's content functions
for func in host.__callable_functions__:
if func['name'].startswith(plugin_name + '-'):
func['enabled'] = switch[plugin_name]["enabled"]
def dump_switch():
"""保存开关数据"""

View File

@ -0,0 +1,28 @@
from ..aamgr import AbstractCommandNode, Context
import logging
@AbstractCommandNode.register(
parent=None,
name="func",
description="管理内容函数",
usage="!func",
aliases=[],
privilege=1
)
class FuncCommand(AbstractCommandNode):
@classmethod
def process(cls, ctx: Context) -> tuple[bool, list]:
from pkg.plugin.models import host
reply = []
reply_str = "当前已加载的内容函数:\n\n"
index = 1
for func in host.__callable_functions__:
reply_str += "{}. {}{}:\n{}\n\n".format(index, ("(已禁用) " if not func['enabled'] else ""), func['name'], func['description'])
reply = [reply_str]
return True, reply

View File

@ -7,4 +7,5 @@ websockets
urllib3~=1.26.10
func_timeout~=4.3.5
Pillow
nakuru-project-idk
CallingGPT

View File

@ -1,6 +1,7 @@
{
"comment": "以下为命令权限请设置到cmdpriv.json中。关于此功能的说明请查看https://github.com/RockChinQ/QChatGPT/wiki/%E5%8A%9F%E8%83%BD%E4%BD%BF%E7%94%A8#%E5%91%BD%E4%BB%A4%E6%9D%83%E9%99%90%E6%8E%A7%E5%88%B6",
"draw": 1,
"func": 1,
"plugin": 2,
"plugin.get": 2,
"plugin.update": 2,

View File

@ -33,6 +33,8 @@ QChatGPT Plugin Usage Wiki
!plugin del <plugin name> Remove a plugin (admin only)
!plugin on <plugin name> Enable a plugin (admin only)
!plugin off <plugin name> Disable a plugin (admin only)
!func List all content functions
```
### Controlling plugin execution order
@ -42,4 +44,9 @@ QChatGPT 插件使用Wiki
### Enabling or disabling plugins
Plugins can be toggled without uninstalling them
Edit the `switch.json` file under the `plugins` directory and set the relevant plugin's `enabled` field to `true`/`false` (on/off); then restart the program or hot-reload to apply
### Global content-function switch
Content functions are implemented on top of [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling); they are functions embedded in the conversation that GPT calls automatically.
Each plugin can register its own content functions; set `enabled` under `functions` in `settings.json` (in the `plugins` directory) to `true` or `false` to enable or disable them globally.
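For reference, a sketch of the relevant fragment of `plugins/settings.json` (field names follow the settings code in this PR; the `order` entries are illustrative):

```json
{
    "order": ["Webwlkr", "Hello"],
    "functions": {
        "enabled": true
    }
}
```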

View File

@ -113,6 +113,182 @@ class HelloPlugin(Plugin):
- A directory may contain multiple Python files to separate the plugin's features and ease maintenance, but registering multiple plugins in one directory is not recommended
- Declare the plugin's dependencies in `requirements.txt` in the plugin directory; they are installed automatically when the program fetches the plugin from its repository
## 🪝 Content functions
`Content functions`, implemented via [GPT's Function Calling capability](https://platform.openai.com/docs/guides/gpt/function-calling), are functions embedded in the conversation that GPT calls automatically.
<details>
<summary>Example: a web-access plugin</summary>
Load [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin), a plugin whose content function can access the web, then ask the bot about online content
```
# console output
[2023-07-29 17:37:18.698] message.py (26) - [INFO] : [person_1010553892] sent a message: introduce this project: https://git...
[2023-07-29 17:37:21.292] util.py (67) - [INFO] : message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=1902 request_id=941afc13b2e1bba1e7877b92a970cdea response_code=200
[2023-07-29 17:37:21.293] chat_completion.py (159) - [INFO] : executing function call: name=Webwlkr-access_the_web, arguments={'url': 'https://github.com/RockChinQ/QChatGPT', 'brief_len': 512}
[2023-07-29 17:37:21.848] chat_completion.py (164) - [INFO] : function execution finished.
```
![Webwlkr plugin](https://github.com/RockChinQ/QChatGPT/blob/master/res/screenshots/webwlkr_plugin.png?raw=true)
</details>
### Steps to write a content function
1⃣ First build your plugin's basic structure as described above, then delete the `@on`-decorated methods from the plugin body (you may also keep them; deleting is just for brevity)
<details>
<summary>Structure after deletion</summary>
```python
from pkg.plugin.models import *
from pkg.plugin.host import EventContext, PluginHost
"""
When the message "hello" is received in a private or group chat, reply "hello, <sender id>!" or "hello, everyone!"
"""
# register the plugin
@register(name="Hello", description="hello world", version="0.1", author="RockChinQ")
class HelloPlugin(Plugin):
# triggered when the plugin is loaded
# plugin_host (pkg.plugin.host.PluginHost) provides methods for interacting with the main program; see its source for details
def __init__(self, plugin_host: PluginHost):
pass
# triggered when the plugin is unloaded
def __del__(self):
pass
```
</details>
2⃣ Now add the following function where the deleted methods used to be
```python
# the function to add
@func(name="access_the_web") # set the function name
def _(url: str):
"""Call this function to search about the question before you answer any questions.
- Do not search through baidu.com at any time.
- If you need to search something, visit https://www.google.com/search?q=xxx.
- If the user asks you to open a url (starting with http:// or https://), visit it directly.
- Summarize the plain content of the result yourself; DO NOT directly output anything from the result you got.
Args:
url(str): url to visit
Returns:
str: plain text content of the web page
"""
import re
import requests
from bs4 import BeautifulSoup
# install the dependency first:
# pip install beautifulsoup4
r = requests.get(
url,
timeout=10,
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.183"
}
)
soup = BeautifulSoup(r.text, 'html.parser')
s = soup.get_text()
# drop redundant blank lines and lines containing only tabs and spaces
s = re.sub(r'\n\s*\n', '\n', s)
if len(s) >= 512: # keep only the first 512 characters of the page's plain text
return s[:512]
return s
```
<details>
<summary>The file should now look like this</summary>
```python
from pkg.plugin.models import *
from pkg.plugin.host import EventContext, PluginHost
"""
When the message "hello" is received in a private or group chat, reply "hello, <sender id>!" or "hello, everyone!"
"""
# 注册插件
@register(name="Hello", description="hello world", version="0.1", author="RockChinQ")
class HelloPlugin(Plugin):
# triggered when the plugin is loaded
# plugin_host (pkg.plugin.host.PluginHost) provides methods for interacting with the main program; see its source for details
def __init__(self, plugin_host: PluginHost):
pass
@func(name="access_the_web")
def _(url: str):
"""Call this function to search about the question before you answer any questions.
- Do not search through baidu.com at any time.
- If you need to search something, visit https://www.google.com/search?q=xxx.
- If the user asks you to open a url (starting with http:// or https://), visit it directly.
- Summarize the plain content of the result yourself; DO NOT directly output anything from the result you got.
Args:
url(str): url to visit
Returns:
str: plain text content of the web page
"""
import re
import requests
from bs4 import BeautifulSoup
# install the dependency first:
# pip install beautifulsoup4
r = requests.get(
url,
timeout=10,
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.183"
}
)
soup = BeautifulSoup(r.text, 'html.parser')
s = soup.get_text()
# drop redundant blank lines and lines containing only tabs and spaces
s = re.sub(r'\n\s*\n', '\n', s)
if len(s) >= 512: # keep only the first 512 characters of the page's plain text
return s[:512]
return s
# triggered when the plugin is unloaded
def __del__(self):
pass
```
</details>
#### Notes:
- The function's docstring must be written strictly in the required format; see [this document](https://github.com/RockChinQ/CallingGPT/wiki/1.-Function-Format#function-format) for the specifics
- Content functions and `@on`-decorated behavior functions can coexist in the same plugin, and both are controlled by the plugin switches in `switch.json`
- Make sure the model you use supports function calling; you can change the model in `completion_api_params` in `config.py`; `gpt-3.5-turbo-16k` is recommended
3⃣ Your program now has web access: restart it, then ask the bot about online content or send it an article link and ask for a summary.
- This is only an example; if you need more capable web access for your plugin, see [WebwlkrPlugin](https://github.com/RockChinQ/WebwlkrPlugin)
## 📄 API reference
### Overview