mirror of
https://github.com/open-webui/open-webui
synced 2025-06-26 18:26:48 +00:00
feat: read max_tokens from model config with fallback to 1000 for title and tag generation
Improves title and tag generation by using the max_tokens value from the model configuration when available, with a fallback to the previous default of 1000. This change is necessary for models like Gemini Pro that generate longer responses and require a higher token limit to successfully generate titles or tags.
This commit is contained in:
parent
56740ab8d6
commit
eabdd4a140
@@ -192,15 +192,19 @@ async def generate_title(
         },
     )

+    max_tokens = (
+        models[task_model_id].get("info", {}).get("params", {}).get("max_tokens", 1000)
+    )
+
     payload = {
         "model": task_model_id,
         "messages": [{"role": "user", "content": content}],
         "stream": False,
         **(
-            {"max_tokens": 1000}
+            {"max_tokens": max_tokens}
             if models[task_model_id].get("owned_by") == "ollama"
             else {
-                "max_completion_tokens": 1000,
+                "max_completion_tokens": max_tokens,
             }
         ),
         "metadata": {
|
Loading…
Reference in New Issue
Block a user