diff --git a/CHANGELOG.md b/CHANGELOG.md
index d57ba400c..638417b98 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,14 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.1.112] - 2024-03-15
+
+### Fixed
+
+- 🗨️ Resolved chat malfunction after image generation.
+- 🎨 Fixed various RAG issues.
+- 🧪 Rectified experimental broken GGUF upload logic.
+
 ## [0.1.111] - 2024-03-10
 
 ### Added
diff --git a/backend/apps/images/main.py b/backend/apps/images/main.py
index 31bfc0f5d..e14b0f6a7 100644
--- a/backend/apps/images/main.py
+++ b/backend/apps/images/main.py
@@ -293,6 +293,7 @@ def generate_image(
                 "size": form_data.size if form_data.size else app.state.IMAGE_SIZE,
                 "response_format": "b64_json",
             }
+
             r = requests.post(
                 url=f"https://api.openai.com/v1/images/generations",
                 json=data,
@@ -300,7 +301,6 @@ def generate_image(
             )
 
             r.raise_for_status()
-
             res = r.json()
 
             images = []
@@ -356,7 +356,10 @@ def generate_image(
 
             return images
     except Exception as e:
-        print(e)
-        if r:
-            print(r.json())
-        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
+        error = e
+
+        if r != None:
+            data = r.json()
+            if "error" in data:
+                error = data["error"]["message"]
+        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(error))
diff --git a/backend/apps/ollama/main.py b/backend/apps/ollama/main.py
index 5ecbaa297..2e236f343 100644
--- a/backend/apps/ollama/main.py
+++ b/backend/apps/ollama/main.py
@@ -123,6 +123,7 @@ async def get_all_models():
             map(lambda response: response["models"], responses)
         )
     }
+
     app.state.MODELS = {model["model"]: model for model in models["models"]}
 
     return models
@@ -181,11 +182,17 @@ async def get_ollama_versions(url_idx: Optional[int] = None):
         responses = await asyncio.gather(*tasks)
         responses = list(filter(lambda x: x is not None, responses))
 
-        lowest_version = min(
-            responses, key=lambda x: tuple(map(int, x["version"].split(".")))
-        )
+        if len(responses) > 0:
+            lowest_version = min(
+                responses, key=lambda x: tuple(map(int, x["version"].split(".")))
+            )
 
-        return {"version": lowest_version["version"]}
+            return {"version": lowest_version["version"]}
+        else:
+            raise HTTPException(
+                status_code=500,
+                detail=ERROR_MESSAGES.OLLAMA_NOT_FOUND,
+            )
     else:
         url = app.state.OLLAMA_BASE_URLS[url_idx]
         try:
diff --git a/backend/apps/rag/utils.py b/backend/apps/rag/utils.py
index b2da7d90c..a3537d4d3 100644
--- a/backend/apps/rag/utils.py
+++ b/backend/apps/rag/utils.py
@@ -91,9 +91,8 @@ def query_collection(
 
 
 def rag_template(template: str, context: str, query: str):
-    template = re.sub(r"\[context\]", context, template)
-    template = re.sub(r"\[query\]", query, template)
-
+    template = template.replace("[context]", context)
+    template = template.replace("[query]", query)
     return template
 
 
diff --git a/backend/apps/web/routers/utils.py b/backend/apps/web/routers/utils.py
index fbb350cf2..0d34b0405 100644
--- a/backend/apps/web/routers/utils.py
+++ b/backend/apps/web/routers/utils.py
@@ -75,7 +75,7 @@ async def download_file_stream(url, file_path, file_name, chunk_size=1024 * 1024
             hashed = calculate_sha256(file)
             file.seek(0)
 
-            url = f"{OLLAMA_BASE_URLS[0]}/blobs/sha256:{hashed}"
+            url = f"{OLLAMA_BASE_URLS[0]}/api/blobs/sha256:{hashed}"
             response = requests.post(url, data=file)
 
             if response.ok:
diff --git a/backend/constants.py b/backend/constants.py
index eacf8a20f..05bdebc54 100644
--- a/backend/constants.py
+++ b/backend/constants.py
@@ -52,3 +52,4 @@ class ERROR_MESSAGES(str, Enum):
     MODEL_NOT_FOUND = lambda name="": f"Model '{name}' was not found"
 
     OPENAI_NOT_FOUND = lambda name="": f"OpenAI API was not found"
+    OLLAMA_NOT_FOUND = "WebUI could not connect to Ollama"
diff --git a/package.json b/package.json
index 572443a54..d7be99999 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.1.111",
+	"version": "0.1.112",
 	"private": true,
 	"scripts": {
 		"dev": "vite dev --host",
diff --git a/src/lib/components/chat/Settings/Images.svelte b/src/lib/components/chat/Settings/Images.svelte
index a6463691f..8f75c0c92 100644
--- a/src/lib/components/chat/Settings/Images.svelte
+++ b/src/lib/components/chat/Settings/Images.svelte
@@ -116,11 +116,13 @@
 	class="flex flex-col h-full justify-between space-y-3 text-sm"
 	on:submit|preventDefault={async () => {
 		loading = true;
-		await updateOpenAIKey(localStorage.token, OPENAI_API_KEY);
+
+		if (imageGenerationEngine === 'openai') {
+			await updateOpenAIKey(localStorage.token, OPENAI_API_KEY);
+		}
 
 		await updateDefaultImageGenerationModel(localStorage.token, selectedModel);
-		await updateDefaultImageGenerationModel(localStorage.token, selectedModel);
 
 		await updateImageSize(localStorage.token, imageSize).catch((error) => {
 			toast.error(error);
 			return null;
diff --git a/src/routes/(app)/+page.svelte b/src/routes/(app)/+page.svelte
index 07fcc1768..926681b14 100644
--- a/src/routes/(app)/+page.svelte
+++ b/src/routes/(app)/+page.svelte
@@ -140,7 +140,9 @@
 	};
 
 	const scrollToBottom = () => {
-		messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
+		if (messagesContainerElement) {
+			messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
+		}
 	};
 
 	//////////////////////////
@@ -308,7 +310,7 @@
 			.map((file) => file.url.slice(file.url.indexOf(',') + 1));
 
 		// Add images array only if it contains elements
-		if (imageUrls && imageUrls.length > 0) {
+		if (imageUrls && imageUrls.length > 0 && message.role === 'user') {
 			baseMessage.images = imageUrls;
 		}
 
@@ -532,7 +534,8 @@
 			.filter((message) => message)
 			.map((message, idx, arr) => ({
 				role: message.role,
-				...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
+				...((message.files?.filter((file) => file.type === 'image').length > 0 ?? false) &&
+					message.role === 'user'
 					? {
 							content: [
 								{
diff --git a/src/routes/(app)/c/[id]/+page.svelte b/src/routes/(app)/c/[id]/+page.svelte
index 4bc6acfa2..081dac761 100644
--- a/src/routes/(app)/c/[id]/+page.svelte
+++ b/src/routes/(app)/c/[id]/+page.svelte
@@ -160,7 +160,9 @@
 	};
 
 	const scrollToBottom = () => {
-		messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
+		if (messagesContainerElement) {
+			messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
+		}
 	};
 
 	//////////////////////////
@@ -321,7 +323,7 @@
 			.map((file) => file.url.slice(file.url.indexOf(',') + 1));
 
 		// Add images array only if it contains elements
-		if (imageUrls && imageUrls.length > 0) {
+		if (imageUrls && imageUrls.length > 0 && message.role === 'user') {
 			baseMessage.images = imageUrls;
 		}
 
@@ -545,7 +547,8 @@
 			.filter((message) => message)
 			.map((message, idx, arr) => ({
 				role: message.role,
-				...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
+				...((message.files?.filter((file) => file.type === 'image').length > 0 ?? false) &&
+					message.role === 'user'
 					? {
 							content: [
 								{
@@ -688,7 +691,12 @@
 
 		if (messages.length == 2) {
 			window.history.replaceState(history.state, '', `/c/${_chatId}`);
-			await setChatTitle(_chatId, userPrompt);
+
+			if ($settings?.titleAutoGenerateModel) {
+				await generateChatTitle(_chatId, userPrompt);
+			} else {
+				await setChatTitle(_chatId, userPrompt);
+			}
 		}
 	};
 
diff --git a/src/tailwind.css b/src/tailwind.css
index 60b6b04c4..10aca3482 100644
--- a/src/tailwind.css
+++ b/src/tailwind.css
@@ -3,16 +3,13 @@
 @tailwind utilities;
 
 @layer base {
-	html {
+	html, pre {
 		font-family: -apple-system, 'Arimo', ui-sans-serif, system-ui, 'Segoe UI', Roboto, Ubuntu,
 			Cantarell, 'Noto Sans', sans-serif, 'Helvetica Neue', Arial, 'Apple Color Emoji',
 			'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';
 	}
 
-	pre {
-		font-family: -apple-system, 'Arimo', ui-sans-serif, system-ui, 'Segoe UI', Roboto, Ubuntu,
-			Cantarell, 'Noto Sans', sans-serif, 'Helvetica Neue', Arial, 'Apple Color Emoji',
-			'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';
-		white-space: pre-wrap;
-	}
+	pre {
+		white-space: pre-wrap;
+	}
 }