feat: add qwen2.5-72b and llama3.2 for openrouter (#8956)

This commit is contained in:
zhuhao 2024-10-01 10:55:51 +08:00 committed by GitHub
parent c2d606d587
commit 824a0dd63e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 215 additions and 0 deletions

View File

@ -14,6 +14,10 @@
- google/gemini-pro
- cohere/command-r-plus
- cohere/command-r
- meta-llama/llama-3.2-1b-instruct
- meta-llama/llama-3.2-3b-instruct
- meta-llama/llama-3.2-11b-vision-instruct
- meta-llama/llama-3.2-90b-vision-instruct
- meta-llama/llama-3.1-405b-instruct
- meta-llama/llama-3.1-70b-instruct
- meta-llama/llama-3.1-8b-instruct
@ -22,6 +26,7 @@
- mistralai/mixtral-8x22b-instruct
- mistralai/mixtral-8x7b-instruct
- mistralai/mistral-7b-instruct
- qwen/qwen-2.5-72b-instruct
- qwen/qwen-2-72b-instruct
- deepseek/deepseek-chat
- deepseek/deepseek-coder

View File

@ -0,0 +1,45 @@
# OpenRouter model schema for meta-llama/llama-3.2-11b-vision-instruct.
model: meta-llama/llama-3.2-11b-vision-instruct
label:
  zh_Hans: llama-3.2-11b-vision-instruct
  en_US: llama-3.2-11b-vision-instruct
model_type: llm
features:
  - agent-thought
  # Llama 3.2 11B Vision is a multimodal (image-input) model; declare the
  # vision capability so image attachments are enabled for it.
  # NOTE(review): confirm `vision` is the feature key used by sibling
  # vision-capable model files in this provider.
  - vision
model_properties:
  mode: chat
  # 128K-token context window.
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
  - name: max_tokens
    use_template: max_tokens
  - name: context_length_exceeded_behavior
    # "None" here is the plain string "None" (a member of `options` below),
    # not YAML null.
    default: None
    label:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    help:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    type: string
    options:
      - None
      - truncate
      - error
  - name: response_format
    use_template: response_format
pricing:
  # Quoted so prices stay strings; with unit 1e-6 this is USD per 1M tokens.
  input: '0.055'
  output: '0.055'
  unit: '0.000001'
  currency: USD

View File

@ -0,0 +1,45 @@
# OpenRouter model schema for meta-llama/llama-3.2-1b-instruct.
model: meta-llama/llama-3.2-1b-instruct
label:
  zh_Hans: llama-3.2-1b-instruct
  en_US: llama-3.2-1b-instruct
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  # 128K-token context window.
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
  - name: max_tokens
    use_template: max_tokens
  - name: context_length_exceeded_behavior
    # "None" here is the plain string "None" (a member of `options` below),
    # not YAML null.
    default: None
    label:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    help:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    type: string
    options:
      - None
      - truncate
      - error
  - name: response_format
    use_template: response_format
pricing:
  # Quoted so prices stay strings; with unit 1e-6 this is USD per 1M tokens.
  input: '0.01'
  output: '0.02'
  unit: '0.000001'
  currency: USD

View File

@ -0,0 +1,45 @@
# OpenRouter model schema for meta-llama/llama-3.2-3b-instruct.
model: meta-llama/llama-3.2-3b-instruct
label:
  zh_Hans: llama-3.2-3b-instruct
  en_US: llama-3.2-3b-instruct
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  # 128K-token context window.
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
  - name: max_tokens
    use_template: max_tokens
  - name: context_length_exceeded_behavior
    # "None" here is the plain string "None" (a member of `options` below),
    # not YAML null.
    default: None
    label:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    help:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    type: string
    options:
      - None
      - truncate
      - error
  - name: response_format
    use_template: response_format
pricing:
  # Quoted so prices stay strings; with unit 1e-6 this is USD per 1M tokens.
  input: '0.03'
  output: '0.05'
  unit: '0.000001'
  currency: USD

View File

@ -0,0 +1,45 @@
# OpenRouter model schema for meta-llama/llama-3.2-90b-vision-instruct.
model: meta-llama/llama-3.2-90b-vision-instruct
label:
  zh_Hans: llama-3.2-90b-vision-instruct
  en_US: llama-3.2-90b-vision-instruct
model_type: llm
features:
  - agent-thought
  # Llama 3.2 90B Vision is a multimodal (image-input) model; declare the
  # vision capability so image attachments are enabled for it.
  # NOTE(review): confirm `vision` is the feature key used by sibling
  # vision-capable model files in this provider.
  - vision
model_properties:
  mode: chat
  # 128K-token context window.
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
  - name: max_tokens
    use_template: max_tokens
  - name: context_length_exceeded_behavior
    # "None" here is the plain string "None" (a member of `options` below),
    # not YAML null.
    default: None
    label:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    help:
      zh_Hans: 上下文长度超出行为
      en_US: Context Length Exceeded Behavior
    type: string
    options:
      - None
      - truncate
      - error
  - name: response_format
    use_template: response_format
pricing:
  # Quoted so prices stay strings; with unit 1e-6 this is USD per 1M tokens.
  input: '0.35'
  output: '0.4'
  unit: '0.000001'
  currency: USD

View File

@ -0,0 +1,30 @@
# OpenRouter model schema for qwen/qwen-2.5-72b-instruct.
model: qwen/qwen-2.5-72b-instruct
label:
  # zh_Hans added for consistency: every other model file in this provider
  # declares both zh_Hans and en_US labels.
  zh_Hans: qwen-2.5-72b-instruct
  en_US: qwen-2.5-72b-instruct
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  # 128K-token context window.
  context_size: 131072
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: max_tokens
    use_template: max_tokens
    type: int
    default: 512
    min: 1
    max: 8192
    help:
      zh_Hans: 指定生成结果长度的上限。如果生成结果截断,可以调大该参数。
      en_US: Specifies the upper limit on the length of generated results. If the generated results are truncated, you can increase this parameter.
  - name: top_p
    use_template: top_p
  - name: frequency_penalty
    use_template: frequency_penalty
pricing:
  # Single-quoted to match the quoting style of the sibling model files;
  # with unit 1e-6 this is USD per 1M tokens.
  input: '0.35'
  output: '0.4'
  unit: '0.000001'
  currency: USD