diff --git a/backend/open_webui/routers/tasks.py b/backend/open_webui/routers/tasks.py
index 8b17c6c4b..0328cefe0 100644
--- a/backend/open_webui/routers/tasks.py
+++ b/backend/open_webui/routers/tasks.py
@@ -208,7 +208,7 @@ async def generate_title(
             "stream": False,
             **(
                 {"max_tokens": 1000}
-                if models[task_model_id]["owned_by"] == "ollama"
+                if models[task_model_id].get("owned_by") == "ollama"
                 else {
                     "max_completion_tokens": 1000,
                 }
@@ -571,7 +571,7 @@ async def generate_emoji(
             "stream": False,
             **(
                 {"max_tokens": 4}
-                if models[task_model_id]["owned_by"] == "ollama"
+                if models[task_model_id].get("owned_by") == "ollama"
                 else {
                     "max_completion_tokens": 4,
                 }
diff --git a/backend/open_webui/utils/chat.py b/backend/open_webui/utils/chat.py
index 73e4264bf..209fb02dc 100644
--- a/backend/open_webui/utils/chat.py
+++ b/backend/open_webui/utils/chat.py
@@ -200,7 +200,7 @@ async def generate_chat_completion(
     except Exception as e:
         raise e
 
-    if model["owned_by"] == "arena":
+    if model.get("owned_by") == "arena":
         model_ids = model.get("info", {}).get("meta", {}).get("model_ids")
         filter_mode = model.get("info", {}).get("meta", {}).get("filter_mode")
         if model_ids and filter_mode == "exclude":
@@ -253,7 +253,7 @@ async def generate_chat_completion(
         return await generate_function_chat_completion(
             request, form_data, user=user, models=models
         )
-    if model["owned_by"] == "ollama":
+    if model.get("owned_by") == "ollama":
         # Using /ollama/api/chat endpoint
         form_data = convert_payload_openai_to_ollama(form_data)
         response = await generate_ollama_chat_completion(
diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py
index ba55c095e..27e751e0b 100644
--- a/backend/open_webui/utils/middleware.py
+++ b/backend/open_webui/utils/middleware.py
@@ -800,7 +800,7 @@ async def process_chat_payload(request, form_data, metadata, user, model):
 
         # Workaround for Ollama 2.0+ system prompt issue
         # TODO: replace with add_or_update_system_message
-        if model["owned_by"] == "ollama":
+        if model.get("owned_by") == "ollama":
             form_data["messages"] = prepend_to_first_user_message_content(
                 rag_template(
                     request.app.state.config.RAG_TEMPLATE, context_string, prompt
diff --git a/backend/open_webui/utils/models.py b/backend/open_webui/utils/models.py
index 975f8cb09..a2c0eadca 100644
--- a/backend/open_webui/utils/models.py
+++ b/backend/open_webui/utils/models.py
@@ -142,7 +142,7 @@ async def get_all_models(request):
                     custom_model.base_model_id == model["id"]
                     or custom_model.base_model_id == model["id"].split(":")[0]
                 ):
-                    owned_by = model["owned_by"]
+                    owned_by = model.get("owned_by", "unknown owner")
                     if "pipe" in model:
                         pipe = model["pipe"]
                     break
diff --git a/backend/open_webui/utils/task.py b/backend/open_webui/utils/task.py
index 3d8c05d45..5663ce2ac 100644
--- a/backend/open_webui/utils/task.py
+++ b/backend/open_webui/utils/task.py
@@ -22,7 +22,7 @@ def get_task_model_id(
     # Set the task model
     task_model_id = default_model_id
     # Check if the user has a custom task model and use that model
-    if models[task_model_id]["owned_by"] == "ollama":
+    if models[task_model_id].get("owned_by") == "ollama":
        if task_model and task_model in models:
            task_model_id = task_model
        else: