fix: wenxin model name invalid when calling llm (#1248)

This commit is contained in:
takatost 2023-09-27 16:29:13 +08:00 committed by GitHub
parent 9dbb8acd4b
commit d38eac959b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 7 additions and 1 deletions

View File

@@ -18,6 +18,7 @@ class WenxinModel(BaseLLM):
provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
# TODO load price_config from configs(db)
return Wenxin(
model=self.name,
streaming=self.streaming,
callbacks=self.callbacks,
**self.credentials,

View File

@@ -61,13 +61,18 @@ class WenxinProvider(BaseModelProvider):
:param model_type:
:return:
"""
model_max_tokens = {
'ernie-bot': 4800,
'ernie-bot-turbo': 11200,
}
if model_name in ['ernie-bot', 'ernie-bot-turbo']:
return ModelKwargsRules(
temperature=KwargRule[float](min=0.01, max=1, default=0.95, precision=2),
top_p=KwargRule[float](min=0.01, max=1, default=0.8, precision=2),
presence_penalty=KwargRule[float](enabled=False),
frequency_penalty=KwargRule[float](enabled=False),
max_tokens=KwargRule[int](enabled=False),
max_tokens=KwargRule[int](enabled=False, max=model_max_tokens.get(model_name)),
)
else:
return ModelKwargsRules(