diff --git a/config.ini b/config.ini
index 5352d753..d1f08c0c 100644
--- a/config.ini
+++ b/config.ini
@@ -52,11 +52,11 @@ local_llm_bind_port = 8888
 # xi-api and alles-apin is chinese gpt proxy
 # for internlm, see https://internlm.intern-ai.org.cn/api/document
-remote_type = "kimi"
-remote_api_key = "YOUR-API-KEY-HERE"
+remote_type = "deepseek"
+remote_api_key = "sk-<YOUR-DEEPSEEK-API-KEY>"
 # max text length for remote LLM.
 # use 128000 for kimi, 192000 for gpt/xi-api, 16000 for deepseek, 128000 for zhipuai, 40000 for internlm2
-remote_llm_max_text_length = 128000
+remote_llm_max_text_length = 16000
 # openai API model type, support model list:
 # "auto" for kimi. To save money, we auto select model name by prompt length.
 # "auto" for step to save money, see https://platform.stepfun.com/
@@ -66,7 +66,7 @@ remote_llm_max_text_length = 128000
 # "gpt-4-1106-preview" for alles-apin or OpenAOE
 # "internlm2-latest" for internlm
 # for example "alibaba/Qwen1.5-110B-Chat", see https://siliconflow.readme.io/reference/chat-completions-1
-remote_llm_model = "auto"
+remote_llm_model = "deepseek-chat"
 # request per minute
 rpm = 500
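
For reference, a minimal sketch of what these settings amount to at request time, assuming DeepSeek's OpenAI-compatible chat endpoint and the openai Python package. Only the key names and values come from the diff above; the prompt text and the character-based truncation are illustrative assumptions, and the project's own client code may differ.

# Minimal sketch: drive an OpenAI-compatible chat request from the
# config values changed in this diff. Assumptions are marked inline.
from openai import OpenAI  # pip install openai

# Values mirroring the config keys in the diff.
remote_api_key = "sk-<YOUR-DEEPSEEK-API-KEY>"  # remote_api_key
remote_llm_model = "deepseek-chat"             # remote_llm_model
remote_llm_max_text_length = 16000             # limit the config comments recommend for deepseek

# DeepSeek exposes an OpenAI-compatible API at api.deepseek.com.
client = OpenAI(api_key=remote_api_key, base_url="https://api.deepseek.com")

prompt = "How do I configure the remote LLM backend?"  # illustrative prompt
response = client.chat.completions.create(
    model=remote_llm_model,
    # Clip the prompt to the configured max text length before sending
    # (character-based clipping is an assumption; the project may count differently).
    messages=[{"role": "user", "content": prompt[:remote_llm_max_text_length]}],
)
print(response.choices[0].message.content)

Note that the 16000 limit is far lower than kimi's 128000, so prompts built for the previous backend may need to be shortened or split before switching remote_type to "deepseek".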