Mirror of https://github.com/open-webui/open-webui (synced 2025-04-07 22:25:05 +00:00)
Add LLMs to liteLLM config 3
parent caea901724 · commit 34ced03d7c
@@ -67,7 +67,7 @@ class CompletionTable:
         return None
 
 def calculate_saved_time_in_seconds(last_message, response_message):
-    print(last_message + " ----- " + response_message)
+    # print(last_message + " ----- " + response_message)
 
     writing_speed_per_word = 600 / 500  # 500 words in 600 seconds = 1.2 sec per word
     reading_speed_per_word = 400 / 500  # 500 words in 400 seconds = 0.8 sec per word
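For context on the two constants in the hunk above: they encode an assumed writing speed of 500 words per 600 seconds (1.2 s per word) and a reading speed of 500 words per 400 seconds (0.8 s per word). The snippet below is only a back-of-the-envelope illustration of how such constants can translate into a "time saved" figure; the actual body of calculate_saved_time_in_seconds is not shown in this hunk, so the formula and the 100-word response length are assumptions, not the project's implementation.

# Illustrative sketch only; not taken from the commit.
writing_speed_per_word = 600 / 500  # 1.2 seconds to write one word
reading_speed_per_word = 400 / 500  # 0.8 seconds to read one word

response_words = 100  # hypothetical response length
writing_time = response_words * writing_speed_per_word  # 120.0 s to type 100 words
reading_time = response_words * reading_speed_per_word  # 80.0 s to read 100 words
print(writing_time - reading_time)  # 40.0 s "saved" under this illustrative formula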
@@ -66,29 +66,6 @@ model_list:
       arena_elo: 1231
       knowledge_cutoff: 'November 2023'
       context_window: 1_000_000
-  - model_name: 'Mistral Medium'
-    litellm_params:
-      model: mistral/mistral-medium
-    model_info:
-      description: 'Good balance between performance and capacity.'
-      arena_elo: 1148
-      knowledge_cutoff: 'Unknown'
-      context_window: 32_000
-  - model_name: 'Mistral Large'
-    litellm_params:
-      model: mistral/mistral-large-latest
-    model_info:
-      description: 'Ideal for specialized and complex text and code generation.'
-      arena_elo: 1156
-      knowledge_cutoff: 'Unknown'
-      context_window: 32_000
-  - model_name: 'Perplexity'
-    litellm_params:
-      model: 'perplexity/llama-3-sonar-large-32k-online'
-    model_info:
-      max_input_tokens: '32000'
-      input_cost_per_token: 0.000001
-      output_cost_per_token: 0.000001
-      description: 'Optimal for search queries and research.'
 
 
 general_settings: {}
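Each entry in this model_list follows the liteLLM proxy config schema: a model_name (the name exposed to clients) maps to litellm_params (the backing provider model) plus a model_info block of metadata (description, arena_elo, knowledge_cutoff, context_window). As a hedged usage sketch, not part of this commit: once the proxy is started with a config like this (e.g. litellm --config config.yaml), the model_name values become routable through its OpenAI-compatible API. The base_url, api_key, and model name below are placeholder assumptions.

from openai import OpenAI

# Placeholder values: the local proxy address, api_key, and model name are
# assumptions for illustration; the model must match a model_name entry that
# is actually present in the deployed model_list.
client = OpenAI(base_url="http://localhost:4000", api_key="sk-anything")

response = client.chat.completions.create(
    model="Mistral Medium",  # example model_name; use one defined in your config
    messages=[{"role": "user", "content": "Hello from the liteLLM proxy"}],
)
print(response.choices[0].message.content)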