add-llama3-for-nvidia-api-catalog (#3631)

This commit is contained in:
Joshua 2024-04-19 14:51:22 +08:00 committed by GitHub
parent a0c30702c1
commit 7545e5de6c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 111 additions and 7 deletions

View File

@@ -1,5 +1,7 @@
- google/gemma-7b
- google/codegemma-7b
- meta/llama2-70b
- meta/llama3-8b
- meta/llama3-70b
- mistralai/mixtral-8x7b-instruct-v0.1
- fuyu-8b

View File

@@ -11,13 +11,19 @@ model_properties:
parameter_rules:
- name: temperature
use_template: temperature
min: 0
max: 1
default: 0.5
- name: top_p
use_template: top_p
min: 0
max: 1
default: 1
- name: max_tokens
use_template: max_tokens
default: 1024
min: 1
max: 1024
default: 1024
- name: frequency_penalty
use_template: frequency_penalty
min: -2

View File

@@ -22,6 +22,6 @@ parameter_rules:
max: 1
- name: max_tokens
use_template: max_tokens
default: 512
default: 1024
min: 1
max: 1024

View File

@@ -11,13 +11,19 @@ model_properties:
parameter_rules:
- name: temperature
use_template: temperature
min: 0
max: 1
default: 0.5
- name: top_p
use_template: top_p
min: 0
max: 1
default: 1
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 1024
default: 1024
- name: frequency_penalty
use_template: frequency_penalty
min: -2

View File

@@ -7,17 +7,23 @@ features:
- agent-thought
model_properties:
mode: chat
context_size: 32768
context_size: 4096
parameter_rules:
- name: temperature
use_template: temperature
min: 0
max: 1
default: 0.5
- name: top_p
use_template: top_p
min: 0
max: 1
default: 1
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 1024
default: 1024
- name: frequency_penalty
use_template: frequency_penalty
min: -2

View File

@@ -0,0 +1,36 @@
model: meta/llama3-70b
label:
zh_Hans: meta/llama3-70b
en_US: meta/llama3-70b
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 8192
parameter_rules:
- name: temperature
use_template: temperature
min: 0
max: 1
default: 0.5
- name: top_p
use_template: top_p
min: 0
max: 1
default: 1
- name: max_tokens
use_template: max_tokens
min: 1
max: 1024
default: 1024
- name: frequency_penalty
use_template: frequency_penalty
min: -2
max: 2
default: 0
- name: presence_penalty
use_template: presence_penalty
min: -2
max: 2
default: 0

View File

@@ -0,0 +1,36 @@
model: meta/llama3-8b
label:
zh_Hans: meta/llama3-8b
en_US: meta/llama3-8b
model_type: llm
features:
- agent-thought
model_properties:
mode: chat
context_size: 8192
parameter_rules:
- name: temperature
use_template: temperature
min: 0
max: 1
default: 0.5
- name: top_p
use_template: top_p
min: 0
max: 1
default: 1
- name: max_tokens
use_template: max_tokens
min: 1
max: 1024
default: 1024
- name: frequency_penalty
use_template: frequency_penalty
min: -2
max: 2
default: 0
- name: presence_penalty
use_template: presence_penalty
min: -2
max: 2
default: 0

View File

@@ -25,7 +25,10 @@ class NVIDIALargeLanguageModel(OAIAPICompatLargeLanguageModel):
'mistralai/mixtral-8x7b-instruct-v0.1': '',
'google/gemma-7b': '',
'google/codegemma-7b': '',
'meta/llama2-70b': ''
'meta/llama2-70b': '',
'meta/llama3-8b': '',
'meta/llama3-70b': ''
}
def _invoke(self, model: str, credentials: dict,

View File

@@ -11,13 +11,19 @@ model_properties:
parameter_rules:
- name: temperature
use_template: temperature
min: 0
max: 1
default: 0.5
- name: top_p
use_template: top_p
min: 0
max: 1
default: 1
- name: max_tokens
use_template: max_tokens
default: 512
min: 1
max: 1024
default: 1024
- name: frequency_penalty
use_template: frequency_penalty
min: -2

View File

@@ -1,6 +1,9 @@
provider: nvidia
label:
en_US: API Catalog
description:
en_US: API Catalog
zh_Hans: API Catalog
icon_small:
en_US: icon_s_en.svg
icon_large: