diff --git a/backend/open_webui/retrieval/web/duckduckgo.py b/backend/open_webui/retrieval/web/duckduckgo.py
index 7c0c3f1c2..d95086671 100644
--- a/backend/open_webui/retrieval/web/duckduckgo.py
+++ b/backend/open_webui/retrieval/web/duckduckgo.py
@@ -32,19 +32,15 @@ def search_duckduckgo(
         # Convert the search results into a list
         search_results = [r for r in ddgs_gen]
 
-    # Create an empty list to store the SearchResult objects
-    results = []
-    # Iterate over each search result
-    for result in search_results:
-        # Create a SearchResult object and append it to the results list
-        results.append(
-            SearchResult(
-                link=result["href"],
-                title=result.get("title"),
-                snippet=result.get("body"),
-            )
-        )
     if filter_list:
-        results = get_filtered_results(results, filter_list)
+        search_results = get_filtered_results(search_results, filter_list)
+
     # Return the list of search results
-    return results
+    return [
+        SearchResult(
+            link=result["href"],
+            title=result.get("title"),
+            snippet=result.get("body"),
+        )
+        for result in search_results
+    ]
diff --git a/backend/open_webui/routers/ollama.py b/backend/open_webui/routers/ollama.py
index 64373c616..4fca10e1f 100644
--- a/backend/open_webui/routers/ollama.py
+++ b/backend/open_webui/routers/ollama.py
@@ -944,7 +944,7 @@ class ChatMessage(BaseModel):
 class GenerateChatCompletionForm(BaseModel):
     model: str
     messages: list[ChatMessage]
-    format: Optional[dict] = None
+    format: Optional[Union[dict, str]] = None
     options: Optional[dict] = None
     template: Optional[str] = None
     stream: Optional[bool] = True
diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py
index d1aaacb13..756c4f005 100644
--- a/backend/open_webui/utils/middleware.py
+++ b/backend/open_webui/utils/middleware.py
@@ -772,7 +772,7 @@ async def process_chat_payload(request, form_data, metadata, user, model):
 
             if "document" in source:
                 for doc_idx, doc_context in enumerate(source["document"]):
-                    context_string += f"{doc_idx}{doc_context}\n"
+                    context_string += f"{source_idx}{doc_context}\n"
 
         context_string = context_string.strip()
         prompt = get_last_user_message(form_data["messages"])
diff --git a/src/lib/apis/chats/index.ts b/src/lib/apis/chats/index.ts
index 7af504cc7..02bdd4eb3 100644
--- a/src/lib/apis/chats/index.ts
+++ b/src/lib/apis/chats/index.ts
@@ -459,7 +459,7 @@ export const getChatById = async (token: string, id: string) => {
 			return json;
 		})
 		.catch((err) => {
-			error = err;
+			error = err.detail;
 			console.log(err);
 
 			return null;
diff --git a/src/lib/components/admin/Settings/General.svelte b/src/lib/components/admin/Settings/General.svelte
index 7423a314f..7df2678eb 100644
--- a/src/lib/components/admin/Settings/General.svelte
+++ b/src/lib/components/admin/Settings/General.svelte
@@ -1,5 +1,5 @@
@@ -27,6 +42,6 @@
 	}}
 >
-	{attributes.title}
+	{formattedTitle(attributes.title)}
diff --git a/src/lib/components/chat/Messages/ResponseMessage.svelte b/src/lib/components/chat/Messages/ResponseMessage.svelte
index 249320c81..980d6bcc3 100644
--- a/src/lib/components/chat/Messages/ResponseMessage.svelte
+++ b/src/lib/components/chat/Messages/ResponseMessage.svelte
@@ -15,7 +15,7 @@
 	import { getChatById } from '$lib/apis/chats';
 	import { generateTags } from '$lib/apis';
 
-	import { config, models, settings, TTSWorker, user } from '$lib/stores';
+	import { config, models, settings, temporaryChatEnabled, TTSWorker, user } from '$lib/stores';
 	import { synthesizeOpenAISpeech } from '$lib/apis/audio';
 	import { imageGenerations } from '$lib/apis/images';
 	import {
@@ -1089,7 +1089,7 @@
 	{/if}
 
 	{#if !readOnly}
-		{#if $config?.features.enable_message_rating ?? true}
+		{#if !$temporaryChatEnabled && ($config?.features.enable_message_rating ?? true)}