From 7785dec0c49f4b500be27d027a01633988820c44 Mon Sep 17 00:00:00 2001
From: sigoden
Date: Wed, 13 Nov 2024 07:02:28 +0800
Subject: [PATCH] feat: remove supports for huggingface

---
 Argcfile.sh         |  1 -
 config.example.yaml |  6 ------
 models.yaml         | 24 ------------------------
 src/client/mod.rs   |  3 +--
 4 files changed, 1 insertion(+), 33 deletions(-)

diff --git a/Argcfile.sh b/Argcfile.sh
index 558d0565..2c8a830b 100755
--- a/Argcfile.sh
+++ b/Argcfile.sh
@@ -87,7 +87,6 @@ OPENAI_COMPATIBLE_PLATFORMS=( \
     fireworks,accounts/fireworks/models/llama-v3p1-8b-instruct,https://api.fireworks.ai/inference/v1 \
     github,gpt-4o-mini,https://models.inference.ai.azure.com \
     groq,llama3-8b-8192,https://api.groq.com/openai/v1 \
-    huggingface,meta-llama/Meta-Llama-3-8B-Instruct,https://api-inference.huggingface.co/v1 \
     hunyuan,hunyuan-large,https://api.hunyuan.cloud.tencent.com/v1 \
     lingyiwanwu,yi-large,https://api.lingyiwanwu.com/v1 \
     mistral,open-mistral-nemo,https://api.mistral.ai/v1 \
diff --git a/config.example.yaml b/config.example.yaml
index 706c8c6f..f46cddd1 100644
--- a/config.example.yaml
+++ b/config.example.yaml
@@ -244,12 +244,6 @@ clients:
     api_base: https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/v1
     api_key: xxx
 
-  # See https://huggingface.co/inference-api/serverless
-  - type: openai-compatible
-    name: huggingface
-    api_base: https://api-inference.huggingface.co/v1
-    api_key: xxx
-
   # See https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html
   - type: ernie
     api_key: xxx
diff --git a/models.yaml b/models.yaml
index 80e3fc7f..7c2b2526 100644
--- a/models.yaml
+++ b/models.yaml
@@ -685,30 +685,6 @@
       default_chunk_size: 1000
       max_batch_size: 100
 
-# Links:
-# - https://huggingface.co/models?other=text-generation-inference
-# - https://huggingface.co/docs/text-generation-inference/en/reference/api_reference
-- platform: huggingface
-  models:
-    - name: NousResearch/Hermes-3-Llama-3.1-8B
-      max_input_tokens: 8192
-      max_output_tokens: 4096
-      require_max_tokens: true
-      input_price: 0
-      output_price: 0
-    - name: mistralai/Mistral-Small-Instruct-2409
-      max_input_tokens: 128000
-      max_output_tokens: 4096
-      require_max_tokens: true
-      input_price: 0
-      output_price: 0
-    - name: mistralai/Mistral-Nemo-Instruct-2407
-      max_input_tokens: 128000
-      max_output_tokens: 4096
-      require_max_tokens: true
-      input_price: 0
-      output_price: 0
-
 # Links:
 # - https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
 # - https://cloud.baidu.com/doc/WENXINWORKSHOP/s/hlrk4akp7
diff --git a/src/client/mod.rs b/src/client/mod.rs
index 2430d94c..b22508b5 100644
--- a/src/client/mod.rs
+++ b/src/client/mod.rs
@@ -35,7 +35,7 @@ register_client!(
     (ernie, "ernie", ErnieConfig, ErnieClient),
 );
 
-pub const OPENAI_COMPATIBLE_PLATFORMS: [(&str, &str); 22] = [
+pub const OPENAI_COMPATIBLE_PLATFORMS: [(&str, &str); 21] = [
     ("ai21", "https://api.ai21.com/studio/v1"),
     ("cloudflare", ""),
     ("deepinfra", "https://api.deepinfra.com/v1/openai"),
@@ -43,7 +43,6 @@ pub const OPENAI_COMPATIBLE_PLATFORMS: [(&str, &str); 22] = [
     ("fireworks", "https://api.fireworks.ai/inference/v1"),
     ("github", "https://models.inference.ai.azure.com"),
     ("groq", "https://api.groq.com/openai/v1"),
-    ("huggingface", "https://api-inference.huggingface.co/v1"),
     ("hunyuan", "https://api.hunyuan.cloud.tencent.com/v1"),
     ("lingyiwanwu", "https://api.lingyiwanwu.com/v1"),
     ("mistral", "https://api.mistral.ai/v1"),