Merge pull request #5565 from open-webui/dev

0.3.23
Timothy Jaeryang Baek 2024-09-21 04:55:29 +02:00 committed by GitHub
commit ff8a2da751
GPG Key ID: B5690EEEBB952194
17 changed files with 552 additions and 291 deletions

View File

@@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.3.23] - 2024-09-21
+
+### Added
+
+- **🚀 WebSocket Redis Support**: Enhanced load-balancing capabilities for multi-instance setups, improving performance and reliability in WebUI.
+- **🔧 Adjustable Chat Controls**: Introduced width-adjustable chat controls, enabling a more personalized and comfortable interface.
+- **🌎 i18n Updates**: Improved and updated the Chinese translations.
+
+### Fixed
+
+- **🌐 Task Model Unloading Issue**: Task handling now uses the Ollama /api/chat endpoint instead of the OpenAI-compatible endpoint, keeping models loaded with their custom parameters and minimizing delays in task execution.
+- **📝 Title Generation Fix for OpenAI-Compatible APIs**: Resolved an issue that prevented title generation, improving consistency and reliability when using multiple API providers.
+- **🗃️ RAG Duplicate Collection Issue**: Fixed a bug that caused the same uploaded file to be processed repeatedly; indexed files are now reused, preventing unnecessary duplication and reducing resource usage.
+- **🖼️ Image Generation Enhancement**: Refactored the OpenAI image generation endpoint to be asynchronous, preventing the WebUI from becoming unresponsive during processing.
+- **🔓 Downgrade Authlib**: Reverted Authlib to version 1.3.1 to resolve issues with OAuth functionality.
+
+### Changed
+
+- **🔍 Improved Message Interaction**: Enhanced the message node interface so focus can be redirected with a single click, streamlining user interaction.
+- **✨ Styling Refactor**: Updated WebUI styling for a cleaner, more modern look across the platform.
+
 ## [0.3.22] - 2024-09-19
 
 ### Added

View File

@@ -405,14 +405,19 @@ async def generate_chat_completion(
         "role": user.role,
     }
 
+    url = app.state.config.OPENAI_API_BASE_URLS[idx]
+    key = app.state.config.OPENAI_API_KEYS[idx]
+
+    # Change max_completion_tokens to max_tokens (Backward compatible)
+    if "api.openai.com" not in url and not payload["model"].lower().startswith("o1-"):
+        if "max_completion_tokens" in payload:
+            payload["max_tokens"] = payload.pop("max_completion_tokens")
+
     # Convert the modified body back to JSON
     payload = json.dumps(payload)
     log.debug(payload)
 
-    url = app.state.config.OPENAI_API_BASE_URLS[idx]
-    key = app.state.config.OPENAI_API_KEYS[idx]
-
     headers = {}
     headers["Authorization"] = f"Bearer {key}"
     headers["Content-Type"] = "application/json"

View File

@@ -1099,6 +1099,10 @@ def store_docs_in_vector_db(
                 log.info(f"deleting existing collection {collection_name}")
                 VECTOR_DB_CLIENT.delete_collection(collection_name=collection_name)
 
+        if VECTOR_DB_CLIENT.has_collection(collection_name=collection_name):
+            log.info(f"collection {collection_name} already exists")
+            return True
+        else:
             embedding_function = get_embedding_function(
                 app.state.config.RAG_EMBEDDING_ENGINE,
                 app.state.config.RAG_EMBEDDING_MODEL,
@@ -1123,11 +1127,7 @@ def store_docs_in_vector_db(
             return True
     except Exception as e:
-        if e.__class__.__name__ == "UniqueConstraintError":
-            return True
-
         log.exception(e)
         return False
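The added `has_collection` guard makes re-uploads of the same file effectively idempotent: if the target collection already exists, the function returns early instead of re-embedding everything, and the old reliance on catching `UniqueConstraintError` goes away. A minimal sketch of that check-before-insert pattern, against a hypothetical client interface (only the two methods visible in the diff are assumed; this is not the project's actual client API):

```python
from typing import Protocol


class VectorDBClient(Protocol):
    # Hypothetical minimal interface, for illustration only.
    def has_collection(self, collection_name: str) -> bool: ...
    def insert(self, collection_name: str, items: list) -> None: ...


def store_docs_once(client: VectorDBClient, collection_name: str, items: list) -> bool:
    """Sketch of the idempotent storage pattern introduced above."""
    if client.has_collection(collection_name=collection_name):
        # Same upload -> same collection name, so skip duplicate processing.
        return True
    client.insert(collection_name=collection_name, items=items)
    return True
```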

View File

@@ -2,16 +2,38 @@ import asyncio
 
 import socketio
 from open_webui.apps.webui.models.users import Users
-from open_webui.env import ENABLE_WEBSOCKET_SUPPORT
+from open_webui.env import (
+    ENABLE_WEBSOCKET_SUPPORT,
+    WEBSOCKET_MANAGER,
+    WEBSOCKET_REDIS_URL,
+)
 from open_webui.utils.utils import decode_token
 
-sio = socketio.AsyncServer(
-    cors_allowed_origins=[],
-    async_mode="asgi",
-    transports=(["polling", "websocket"] if ENABLE_WEBSOCKET_SUPPORT else ["polling"]),
-    allow_upgrades=ENABLE_WEBSOCKET_SUPPORT,
-    always_connect=True,
-)
+if WEBSOCKET_MANAGER == "redis":
+    mgr = socketio.AsyncRedisManager(WEBSOCKET_REDIS_URL)
+    sio = socketio.AsyncServer(
+        cors_allowed_origins=[],
+        async_mode="asgi",
+        transports=(
+            ["polling", "websocket"] if ENABLE_WEBSOCKET_SUPPORT else ["polling"]
+        ),
+        allow_upgrades=ENABLE_WEBSOCKET_SUPPORT,
+        always_connect=True,
+        client_manager=mgr,
+    )
+else:
+    sio = socketio.AsyncServer(
+        cors_allowed_origins=[],
+        async_mode="asgi",
+        transports=(
+            ["polling", "websocket"] if ENABLE_WEBSOCKET_SUPPORT else ["polling"]
+        ),
+        allow_upgrades=ENABLE_WEBSOCKET_SUPPORT,
+        always_connect=True,
+    )
 
 app = socketio.ASGIApp(sio, socketio_path="/ws/socket.io")
 
 # Dictionary to maintain the user pool
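For deployments running several Open WebUI instances behind a load balancer, the Redis-backed manager lets all instances share Socket.IO events. The following is a minimal sketch of the same idea, not the shipped module: it assumes python-socketio is installed and collapses the two branches above by building the keyword arguments once.

```python
import os
import socketio

# Mirrors the env.py additions in this commit.
ENABLE_WEBSOCKET_SUPPORT = os.environ.get("ENABLE_WEBSOCKET_SUPPORT", "True").lower() == "true"
WEBSOCKET_MANAGER = os.environ.get("WEBSOCKET_MANAGER", "")
WEBSOCKET_REDIS_URL = os.environ.get("WEBSOCKET_REDIS_URL", "redis://localhost:6379/0")

server_kwargs = dict(
    cors_allowed_origins=[],
    async_mode="asgi",
    transports=(["polling", "websocket"] if ENABLE_WEBSOCKET_SUPPORT else ["polling"]),
    allow_upgrades=ENABLE_WEBSOCKET_SUPPORT,
    always_connect=True,
)

if WEBSOCKET_MANAGER == "redis":
    # AsyncRedisManager relays emitted events through Redis pub/sub, so every
    # instance connected to the same Redis sees the same Socket.IO traffic.
    server_kwargs["client_manager"] = socketio.AsyncRedisManager(WEBSOCKET_REDIS_URL)

sio = socketio.AsyncServer(**server_kwargs)
app = socketio.ASGIApp(sio, socketio_path="/ws/socket.io")
```

To enable it, set `WEBSOCKET_MANAGER=redis` and point `WEBSOCKET_REDIS_URL` at a Redis instance reachable by every worker.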

View File

@@ -302,3 +302,7 @@ if WEBUI_AUTH and WEBUI_SECRET_KEY == "":
 ENABLE_WEBSOCKET_SUPPORT = (
     os.environ.get("ENABLE_WEBSOCKET_SUPPORT", "True").lower() == "true"
 )
+
+WEBSOCKET_MANAGER = os.environ.get("WEBSOCKET_MANAGER", "")
+
+WEBSOCKET_REDIS_URL = os.environ.get("WEBSOCKET_REDIS_URL", "redis://localhost:6379/0")

View File

@@ -19,7 +19,9 @@ from open_webui.apps.audio.main import app as audio_app
 from open_webui.apps.images.main import app as images_app
 from open_webui.apps.ollama.main import app as ollama_app
 from open_webui.apps.ollama.main import (
-    generate_openai_chat_completion as generate_ollama_chat_completion,
+    GenerateChatCompletionForm,
+    generate_chat_completion as generate_ollama_chat_completion,
+    generate_openai_chat_completion as generate_ollama_openai_chat_completion,
 )
 from open_webui.apps.ollama.main import get_all_models as get_ollama_models
 from open_webui.apps.openai.main import app as openai_app
@@ -135,6 +137,12 @@ from open_webui.utils.utils import (
 )
 from open_webui.utils.webhook import post_webhook
 
+from open_webui.utils.payload import convert_payload_openai_to_ollama
+from open_webui.utils.response import (
+    convert_response_ollama_to_openai,
+    convert_streaming_response_ollama_to_openai,
+)
+
 if SAFE_MODE:
     print("SAFE MODE ENABLED")
     Functions.deactivate_all_functions()
@@ -1048,7 +1056,18 @@ async def generate_chat_completions(form_data: dict, user=Depends(get_verified_u
     if model.get("pipe"):
         return await generate_function_chat_completion(form_data, user=user)
     if model["owned_by"] == "ollama":
-        return await generate_ollama_chat_completion(form_data, user=user)
+        # Using /ollama/api/chat endpoint
+        form_data = convert_payload_openai_to_ollama(form_data)
+        form_data = GenerateChatCompletionForm(**form_data)
+        response = await generate_ollama_chat_completion(form_data=form_data, user=user)
+        if form_data.stream:
+            response.headers["content-type"] = "text/event-stream"
+            return StreamingResponse(
+                convert_streaming_response_ollama_to_openai(response),
+                headers=dict(response.headers),
+            )
+        else:
+            return convert_response_ollama_to_openai(response)
     else:
         return await generate_openai_chat_completion(form_data, user=user)
@@ -1399,9 +1418,10 @@ async def generate_title(form_data: dict, user=Depends(get_verified_user)):
 
     # Check if the user has a custom task model
     # If the user has a custom task model, use that model
     task_model_id = get_task_model_id(model_id)
     print(task_model_id)
+    model = app.state.MODELS[task_model_id]
 
     if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
     else:
@@ -1440,9 +1460,9 @@ Prompt: {{prompt:middletruncate:8000}}"""
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.TITLE_GENERATION)},
     }
 
     log.debug(payload)
-
+    # Handle pipeline filters
     try:
         payload = filter_pipeline(payload, user)
     except Exception as e:
@@ -1456,7 +1476,6 @@ Prompt: {{prompt:middletruncate:8000}}"""
             status_code=status.HTTP_400_BAD_REQUEST,
             content={"detail": str(e)},
         )
-
     if "chat_id" in payload:
         del payload["chat_id"]
@@ -1484,6 +1503,8 @@ async def generate_search_query(form_data: dict, user=Depends(get_verified_user)
     task_model_id = get_task_model_id(model_id)
     print(task_model_id)
 
+    model = app.state.MODELS[task_model_id]
+
     if app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
     else:
@@ -1516,9 +1537,9 @@ Search Query:"""
         ),
         "metadata": {"task": str(TASKS.QUERY_GENERATION)},
     }
 
-    print(payload)
-
+    log.debug(payload)
+    # Handle pipeline filters
     try:
         payload = filter_pipeline(payload, user)
     except Exception as e:
@@ -1532,7 +1553,6 @@ Search Query:"""
             status_code=status.HTTP_400_BAD_REQUEST,
             content={"detail": str(e)},
         )
-
     if "chat_id" in payload:
         del payload["chat_id"]
@@ -1555,12 +1575,13 @@ async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
     task_model_id = get_task_model_id(model_id)
     print(task_model_id)
+    model = app.state.MODELS[task_model_id]
 
     template = '''
Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
Message: """{{prompt}}"""
'''
 
     content = title_generation_template(
         template,
         form_data["prompt"],
@@ -1584,9 +1605,9 @@ Message: """{{prompt}}"""
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.EMOJI_GENERATION)},
     }
 
     log.debug(payload)
-
+    # Handle pipeline filters
     try:
         payload = filter_pipeline(payload, user)
     except Exception as e:
@@ -1600,7 +1621,6 @@ Message: """{{prompt}}"""
             status_code=status.HTTP_400_BAD_REQUEST,
             content={"detail": str(e)},
        )
-
     if "chat_id" in payload:
         del payload["chat_id"]
@@ -1620,8 +1640,10 @@ async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)
 
     # Check if the user has a custom task model
     # If the user has a custom task model, use that model
-    model_id = get_task_model_id(model_id)
-    print(model_id)
+    task_model_id = get_task_model_id(model_id)
+    print(task_model_id)
+
+    model = app.state.MODELS[task_model_id]
 
     template = """You have been provided with a set of responses from various models to the latest user query: "{{prompt}}"
@@ -1636,13 +1658,12 @@ Responses from models: {{responses}}"""
     )
 
     payload = {
-        "model": model_id,
+        "model": task_model_id,
         "messages": [{"role": "user", "content": content}],
         "stream": form_data.get("stream", False),
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.MOA_RESPONSE_GENERATION)},
     }
-
     log.debug(payload)
 
     try:
@@ -1658,7 +1679,6 @@ Responses from models: {{responses}}"""
             status_code=status.HTTP_400_BAD_REQUEST,
             content={"detail": str(e)},
         )
-
     if "chat_id" in payload:
         del payload["chat_id"]

View File

@@ -105,16 +105,24 @@ def openai_chat_message_template(model: str):
     }
 
 
-def openai_chat_chunk_message_template(model: str, message: str) -> dict:
+def openai_chat_chunk_message_template(
+    model: str, message: Optional[str] = None
+) -> dict:
     template = openai_chat_message_template(model)
     template["object"] = "chat.completion.chunk"
-    template["choices"][0]["delta"] = {"content": message}
+    if message:
+        template["choices"][0]["delta"] = {"content": message}
+    else:
+        template["choices"][0]["finish_reason"] = "stop"
     return template
 
 
-def openai_chat_completion_message_template(model: str, message: str) -> dict:
+def openai_chat_completion_message_template(
+    model: str, message: Optional[str] = None
+) -> dict:
     template = openai_chat_message_template(model)
     template["object"] = "chat.completion"
-    template["choices"][0]["message"] = {"content": message, "role": "assistant"}
-    template["choices"][0]["finish_reason"] = "stop"
+    if message:
+        template["choices"][0]["message"] = {"content": message, "role": "assistant"}
+    template["choices"][0]["finish_reason"] = "stop"
     return template
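For orientation, a chunk produced by the updated template has roughly the following shape. The surrounding id/created fields come from `openai_chat_message_template`, so the exact values and any extra keys shown here are illustrative rather than taken from the source:

```python
# message="Hello" -> a normal content delta
chunk = {
    "id": "chatcmpl-123",          # illustrative
    "created": 1726900000,         # illustrative
    "model": "llama3.1",
    "object": "chat.completion.chunk",
    "choices": [
        {"index": 0, "delta": {"content": "Hello"}, "finish_reason": None}
    ],
}

# message=None -> no delta content is set; instead choices[0]["finish_reason"]
# is set to "stop", signalling the end of the stream.
```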

View File

@@ -86,3 +86,49 @@ def apply_model_params_to_body_ollama(params: dict, form_data: dict) -> dict:
             form_data[value] = param
 
     return form_data
+
+
+def convert_payload_openai_to_ollama(openai_payload: dict) -> dict:
+    """
+    Converts a payload formatted for OpenAI's API to be compatible with Ollama's API endpoint for chat completions.
+
+    Args:
+        openai_payload (dict): The payload originally designed for OpenAI API usage.
+
+    Returns:
+        dict: A modified payload compatible with the Ollama API.
+    """
+    ollama_payload = {}
+
+    # Mapping basic model and message details
+    ollama_payload["model"] = openai_payload.get("model")
+    ollama_payload["messages"] = openai_payload.get("messages")
+    ollama_payload["stream"] = openai_payload.get("stream", False)
+
+    # If there are advanced parameters in the payload, format them in Ollama's options field
+    ollama_options = {}
+
+    # Handle parameters which map directly
+    for param in ["temperature", "top_p", "seed"]:
+        if param in openai_payload:
+            ollama_options[param] = openai_payload[param]
+
+    # Mapping OpenAI's `max_tokens` -> Ollama's `num_predict`
+    if "max_completion_tokens" in openai_payload:
+        ollama_options["num_predict"] = openai_payload["max_completion_tokens"]
+    elif "max_tokens" in openai_payload:
+        ollama_options["num_predict"] = openai_payload["max_tokens"]
+
+    # Handle frequency / presence_penalty, which needs renaming and checking
+    if "frequency_penalty" in openai_payload:
+        ollama_options["repeat_penalty"] = openai_payload["frequency_penalty"]
+
+    if "presence_penalty" in openai_payload and "penalty" not in ollama_options:
+        # We are assuming presence penalty uses a similar concept in Ollama, which needs custom handling if exists.
+        ollama_options["new_topic_penalty"] = openai_payload["presence_penalty"]
+
+    # Add options to payload if any have been set
+    if ollama_options:
+        ollama_payload["options"] = ollama_options
+
+    return ollama_payload
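As a quick illustration of the mapping performed above (the payload values are made up, not from the source):

```python
openai_payload = {
    "model": "llama3.1",
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": True,
    "temperature": 0.7,
    "max_tokens": 256,
    "frequency_penalty": 1.1,
}

# convert_payload_openai_to_ollama(openai_payload) returns roughly:
# {
#     "model": "llama3.1",
#     "messages": [{"role": "user", "content": "Hello"}],
#     "stream": True,
#     "options": {
#         "temperature": 0.7,
#         "num_predict": 256,       # max_tokens -> num_predict
#         "repeat_penalty": 1.1,    # frequency_penalty -> repeat_penalty
#     },
# }
```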

View File

@@ -0,0 +1,32 @@
+import json
+
+from open_webui.utils.misc import (
+    openai_chat_chunk_message_template,
+    openai_chat_completion_message_template,
+)
+
+
+def convert_response_ollama_to_openai(ollama_response: dict) -> dict:
+    model = ollama_response.get("model", "ollama")
+    message_content = ollama_response.get("message", {}).get("content", "")
+
+    response = openai_chat_completion_message_template(model, message_content)
+    return response
+
+
+async def convert_streaming_response_ollama_to_openai(ollama_streaming_response):
+    async for data in ollama_streaming_response.body_iterator:
+        data = json.loads(data)
+
+        model = data.get("model", "ollama")
+        message_content = data.get("message", {}).get("content", "")
+        done = data.get("done", False)
+
+        data = openai_chat_chunk_message_template(
+            model, message_content if not done else None
+        )
+
+        line = f"data: {json.dumps(data)}\n\n"
+        if done:
+            line += "data: [DONE]\n\n"
+
+        yield line
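On the wire, each Ollama NDJSON chunk is therefore re-emitted as an OpenAI-style server-sent event. A hypothetical two-chunk reply would look roughly like the following (field values are illustrative, and the final chunk's exact delta depends on the base template):

```
data: {"object": "chat.completion.chunk", "model": "llama3.1", "choices": [{"index": 0, "delta": {"content": "Hel"}, "finish_reason": null}]}

data: {"object": "chat.completion.chunk", "model": "llama3.1", "choices": [{"index": 0, "delta": {"content": "lo"}, "finish_reason": null}]}

data: {"object": "chat.completion.chunk", "model": "llama3.1", "choices": [{"index": 0, "finish_reason": "stop"}]}

data: [DONE]
```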

package-lock.json generated
View File

@@ -1,12 +1,12 @@
 {
   "name": "open-webui",
-  "version": "0.3.22",
+  "version": "0.3.23",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "open-webui",
-      "version": "0.3.22",
+      "version": "0.3.23",
       "dependencies": {
         "@codemirror/lang-javascript": "^6.2.2",
         "@codemirror/lang-python": "^6.1.6",
@@ -32,6 +32,7 @@
         "katex": "^0.16.9",
         "marked": "^9.1.0",
         "mermaid": "^10.9.1",
+        "paneforge": "^0.0.6",
         "pyodide": "^0.26.1",
         "socket.io-client": "^4.2.0",
         "sortablejs": "^1.15.2",
@@ -6986,6 +6987,17 @@
         "url": "https://github.com/sponsors/sindresorhus"
       }
     },
+    "node_modules/paneforge": {
+      "version": "0.0.6",
+      "resolved": "https://registry.npmjs.org/paneforge/-/paneforge-0.0.6.tgz",
+      "integrity": "sha512-jYeN/wdREihja5c6nK3S5jritDQ+EbCqC5NrDo97qCZzZ9GkmEcN5C0ZCjF4nmhBwkDKr6tLIgz4QUKWxLXjAw==",
+      "dependencies": {
+        "nanoid": "^5.0.4"
+      },
+      "peerDependencies": {
+        "svelte": "^4.0.0 || ^5.0.0-next.1"
+      }
+    },
     "node_modules/parent-module": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",

View File

@@ -1,6 +1,6 @@
 {
   "name": "open-webui",
-  "version": "0.3.22",
+  "version": "0.3.23",
   "private": true,
   "scripts": {
     "dev": "npm run pyodide:fetch && vite dev --host",
@@ -72,6 +72,7 @@
     "katex": "^0.16.9",
     "marked": "^9.1.0",
     "mermaid": "^10.9.1",
+    "paneforge": "^0.0.6",
     "pyodide": "^0.26.1",
     "socket.io-client": "^4.2.0",
     "sortablejs": "^1.15.2",

View File

@@ -2,6 +2,7 @@
 	import { v4 as uuidv4 } from 'uuid';
 	import { toast } from 'svelte-sonner';
 	import mermaid from 'mermaid';
+	import { PaneGroup, Pane, PaneResizer } from 'paneforge';
 
 	import { getContext, onDestroy, onMount, tick } from 'svelte';
 	import { goto } from '$app/navigation';
@@ -26,7 +27,9 @@
 		showControls,
 		showCallOverlay,
 		currentChatPage,
-		temporaryChatEnabled
+		temporaryChatEnabled,
+		mobile,
+		showOverview
 	} from '$lib/stores';
 	import {
 		convertMessagesToHistory,
@@ -64,12 +67,14 @@
 	import Navbar from '$lib/components/layout/Navbar.svelte';
 	import ChatControls from './ChatControls.svelte';
 	import EventConfirmDialog from '../common/ConfirmDialog.svelte';
+	import EllipsisVertical from '../icons/EllipsisVertical.svelte';
 
 	const i18n: Writable<i18nType> = getContext('i18n');
 
 	export let chatIdProp = '';
 	let loaded = false;
 
 	const eventTarget = new EventTarget();
+	let controlPane;
 
 	let stopResponseFlag = false;
 	let autoScroll = true;
@@ -279,6 +284,29 @@
 			await goto('/');
 		}
 	}
 
+		showControls.subscribe(async (value) => {
+			if (controlPane && !$mobile) {
+				try {
+					if (value) {
+						controlPane.resize(
+							parseInt(localStorage.getItem('chat-controls-size') || '35')
+								? parseInt(localStorage.getItem('chat-controls-size') || '35')
+								: 35
+						);
+					} else {
+						controlPane.resize(0);
+					}
+				} catch (e) {
+					// ignore
+				}
+			}
+
+			if (!value) {
+				showCallOverlay.set(false);
+				showOverview.set(false);
+			}
+		});
 	});
 
 	onDestroy(() => {
@@ -1764,12 +1792,10 @@
 		{initNewChat}
 	/>
 
+	<PaneGroup direction="horizontal" class="w-full h-full">
+		<Pane defaultSize={50} class="h-full flex w-full relative">
 			{#if $banners.length > 0 && messages.length === 0 && !$chatId && selectedModels.length <= 1}
-				<div
-					class="absolute top-[4.25rem] w-full {$showSidebar
-						? 'md:max-w-[calc(100%-260px)]'
-						: ''} {$showControls ? 'lg:pr-[26rem]' : ''} z-20"
-				>
+				<div class="absolute top-3 left-0 right-0 w-full z-20">
 					<div class=" flex flex-col gap-1 w-full">
 						{#each $banners.filter( (b) => (b.dismissible ? !JSON.parse(localStorage.getItem('dismissedBannerIds') ?? '[]').includes(b.id) : true) ) as banner}
 							<Banner
@@ -1793,11 +1819,9 @@
 				</div>
 			{/if}
 
-			<div class="flex flex-col flex-auto z-10">
+			<div class="flex flex-col flex-auto z-10 w-full">
 				<div
-					class=" pb-2.5 flex flex-col justify-between w-full flex-auto overflow-auto h-0 max-w-full z-10 scrollbar-hidden {$showControls
-						? 'lg:pr-[26rem]'
-						: ''}"
+					class=" pb-2.5 flex flex-col justify-between w-full flex-auto overflow-auto h-0 max-w-full z-10 scrollbar-hidden"
 					id="messages-container"
 					bind:this={messagesContainerElement}
 					on:scroll={(e) => {
@@ -1826,7 +1850,7 @@
 					</div>
 				</div>
 
-				<div class={$showControls ? 'lg:pr-[26rem]' : ''}>
+				<div class="">
 					<MessageInput
 						bind:files
 						bind:prompt
@@ -1846,16 +1870,15 @@
 						{messages}
 						{submitPrompt}
 						{stopResponse}
-						on:call={() => {
-							showControls.set(true);
+						on:call={async () => {
+							await showControls.set(true);
 						}}
 					/>
 				</div>
 			</div>
-		</div>
-	{/if}
+		</Pane>
 
 	<ChatControls
 		models={selectedModelIds.reduce((a, e, i, arr) => {
 			const model = $models.find((m) => m.id === e);
 			if (model) {
@@ -1867,10 +1890,14 @@
 		bind:chatFiles
 		bind:params
 		bind:files
+		bind:pane={controlPane}
 		{submitPrompt}
 		{stopResponse}
 		{showMessage}
 		modelId={selectedModelIds?.at(0) ?? null}
 		chatId={$chatId}
 		{eventTarget}
 	/>
+	</PaneGroup>
+</div>
+{/if}

View File

@@ -10,6 +10,9 @@
 	import CallOverlay from './MessageInput/CallOverlay.svelte';
 	import Drawer from '../common/Drawer.svelte';
 	import Overview from './Overview.svelte';
+	import { Pane, PaneResizer } from 'paneforge';
+	import EllipsisVertical from '../icons/EllipsisVertical.svelte';
+	import { get } from 'svelte/store';
 
 	export let history;
 	export let models = [];
@@ -25,7 +28,9 @@
 	export let files;
 	export let modelId;
 
+	export let pane;
+
 	let largeScreen = false;
 
 	onMount(() => {
 		// listen to resize 1024px
 		const mediaQuery = window.matchMedia('(min-width: 1024px)');
@@ -35,6 +40,7 @@
 			largeScreen = true;
 		} else {
 			largeScreen = false;
+			pane = null;
 		}
 	};
 
@@ -58,10 +64,19 @@
 <SvelteFlowProvider>
 	{#if !largeScreen}
-		{#if $showCallOverlay}
-			<div class=" absolute w-full h-screen max-h-[100dvh] flex z-[999] overflow-hidden">
+		{#if $showControls}
+			<Drawer
+				show={$showControls}
+				on:close={() => {
+					showControls.set(false);
+				}}
+			>
 				<div
-					class="absolute w-full h-screen max-h-[100dvh] bg-white text-gray-700 dark:bg-black dark:text-gray-300 flex justify-center"
+					class=" {$showCallOverlay || $showOverview ? ' h-screen w-screen' : 'px-6 py-4'} h-full"
 				>
+					{#if $showCallOverlay}
+						<div
+							class=" h-full max-h-[100dvh] bg-white text-gray-700 dark:bg-black dark:text-gray-300 flex justify-center"
+						>
 							<CallOverlay
 								bind:files
@@ -75,16 +90,7 @@
 							}}
 						/>
 					</div>
-			</div>
-		{:else if $showControls}
-			<Drawer
-				show={$showControls}
-				on:close={() => {
-					showControls.set(false);
-				}}
-			>
-				<div class=" {$showOverview ? ' h-screen w-screen' : 'px-6 py-4'} h-full">
-					{#if $showOverview}
+				{:else if $showOverview}
 					<Overview
 						{history}
 						on:nodeclick={(e) => {
@@ -107,15 +113,40 @@
 				</div>
 			</Drawer>
 		{/if}
-	{:else if $showControls}
-		<div class=" absolute bottom-0 right-0 z-20 h-full pointer-events-none">
-			<div class="pr-4 pt-14 pb-8 w-[26rem] h-full" in:slide={{ duration: 200, axis: 'x' }}>
+	{:else}
+		<!-- if $showControls -->
+		<PaneResizer class="relative flex w-2 items-center justify-center bg-background">
+			<div class="z-10 flex h-7 w-5 items-center justify-center rounded-sm">
+				<EllipsisVertical />
+			</div>
+		</PaneResizer>
+		<Pane
+			bind:pane
+			defaultSize={$showControls
+				? parseInt(localStorage.getItem('chat-controls-size') || '35')
+					? parseInt(localStorage.getItem('chat-controls-size') || '35')
+					: 35
+				: 0}
+			onResize={(size) => {
+				if (size === 0) {
+					showControls.set(false);
+				} else {
+					if (!$showControls) {
+						showControls.set(true);
+					}
+					localStorage.setItem('chat-controls-size', size);
+				}
+			}}
+		>
+			{#if $showControls}
+				<div class="pr-4 pb-8 flex max-h-full min-h-full">
 					<div
-						class="w-full h-full {$showOverview && !$showCallOverlay
+						class="w-full {$showOverview && !$showCallOverlay
 							? ' '
 							: 'px-5 py-4 bg-white dark:shadow-lg dark:bg-gray-850 border border-gray-50 dark:border-gray-800'} rounded-lg z-50 pointer-events-auto overflow-y-auto scrollbar-hidden"
 					>
 						{#if $showCallOverlay}
+							<div class="w-full h-full flex justify-center">
 								<CallOverlay
 									bind:files
 									{submitPrompt}
@@ -127,6 +158,7 @@
 										showControls.set(false);
 									}}
 								/>
+							</div>
 						{:else if $showOverview}
 							<Overview
 								{history}
@@ -149,6 +181,7 @@
 						{/if}
 					</div>
 				</div>
-			</div>
+			{/if}
+		</Pane>
 	{/if}
 </SvelteFlowProvider>

View File

@@ -220,7 +220,9 @@
 	};
 
 	const startRecording = async () => {
+		if (!audioStream) {
 			audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
+		}
 		mediaRecorder = new MediaRecorder(audioStream);
 
 		mediaRecorder.onstart = () => {
@@ -236,7 +238,7 @@
 		};
 
 		mediaRecorder.onstop = (e) => {
-			console.log('Recording stopped', e);
+			console.log('Recording stopped', audioStream, e);
 			stopRecordingCallback();
 		};
 
@@ -244,10 +246,11 @@
 	};
 
 	const stopAudioStream = async () => {
-		if (audioStream) {
-			const tracks = audioStream.getTracks();
-			tracks.forEach((track) => track.stop());
-		}
+		if (!audioStream) return;
+
+		audioStream.getAudioTracks().forEach(function (track) {
+			track.stop();
+		});
+
 		audioStream = null;
 	};
 
@@ -525,39 +528,6 @@
 		console.log(`Audio monitoring and playing stopped for message ID ${id}`);
 	};
 
-	onMount(async () => {
-		const setWakeLock = async () => {
-			try {
-				wakeLock = await navigator.wakeLock.request('screen');
-			} catch (err) {
-				// The Wake Lock request has failed - usually system related, such as battery.
-				console.log(err);
-			}
-
-			if (wakeLock) {
-				// Add a listener to release the wake lock when the page is unloaded
-				wakeLock.addEventListener('release', () => {
-					// the wake lock has been released
-					console.log('Wake Lock released');
-				});
-			}
-		};
-
-		if ('wakeLock' in navigator) {
-			await setWakeLock();
-
-			document.addEventListener('visibilitychange', async () => {
-				// Re-request the wake lock if the document becomes visible
-				if (wakeLock !== null && document.visibilityState === 'visible') {
-					await setWakeLock();
-				}
-			});
-		}
-
-		model = $models.find((m) => m.id === modelId);
-
-		startRecording();
-
 	const chatStartHandler = async (e) => {
 		const { id } = e.detail;
@@ -612,11 +582,48 @@
 		chatStreaming = false;
 	};
 
+	onMount(async () => {
+		const setWakeLock = async () => {
+			try {
+				wakeLock = await navigator.wakeLock.request('screen');
+			} catch (err) {
+				// The Wake Lock request has failed - usually system related, such as battery.
+				console.log(err);
+			}
+
+			if (wakeLock) {
+				// Add a listener to release the wake lock when the page is unloaded
+				wakeLock.addEventListener('release', () => {
+					// the wake lock has been released
+					console.log('Wake Lock released');
+				});
+			}
+		};
+
+		if ('wakeLock' in navigator) {
+			await setWakeLock();
+
+			document.addEventListener('visibilitychange', async () => {
+				// Re-request the wake lock if the document becomes visible
+				if (wakeLock !== null && document.visibilityState === 'visible') {
+					await setWakeLock();
+				}
+			});
+		}
+
+		model = $models.find((m) => m.id === modelId);
+
+		startRecording();
+
 		eventTarget.addEventListener('chat:start', chatStartHandler);
 		eventTarget.addEventListener('chat', chatEventHandler);
 		eventTarget.addEventListener('chat:finish', chatFinishHandler);
 
 		return async () => {
+			await stopAllAudio();
+			stopAudioStream();
+
 			eventTarget.removeEventListener('chat:start', chatStartHandler);
 			eventTarget.removeEventListener('chat', chatEventHandler);
 			eventTarget.removeEventListener('chat:finish', chatFinishHandler);
@@ -633,6 +640,17 @@
 	onDestroy(async () => {
 		await stopAllAudio();
+		stopAudioStream();
+
+		eventTarget.removeEventListener('chat:start', chatStartHandler);
+		eventTarget.removeEventListener('chat', chatEventHandler);
+		eventTarget.removeEventListener('chat:finish', chatFinishHandler);
+
+		audioAbortController.abort();
+		await tick();
+
+		await stopAllAudio();
+
 		await stopRecordingCallback(false);
 		await stopCamera();
 	});
@@ -924,6 +942,10 @@
 							on:click={async () => {
 								await stopAudioStream();
 								await stopVideoStream();
+
+								console.log(audioStream);
+								console.log(cameraStream);
+
 								showCallOverlay.set(false);
 								dispatch('close');
 							}}

View File

@@ -44,6 +44,7 @@
 		return `${minutes}:${formattedSeconds}`;
 	};
 
+	let stream;
 	let speechRecognition;
 
 	let mediaRecorder;
@@ -159,7 +160,7 @@
 	const startRecording = async () => {
 		startDurationCounter();
-		const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
+		stream = await navigator.mediaDevices.getUserMedia({ audio: true });
 		mediaRecorder = new MediaRecorder(stream);
 		mediaRecorder.onstart = () => {
 			console.log('Recording started');
@@ -251,6 +252,13 @@
 		}
 		stopDurationCounter();
 		audioChunks = [];
+
+		if (stream) {
+			const tracks = stream.getTracks();
+			tracks.forEach((track) => track.stop());
+		}
+		stream = null;
 	};
 
 	const confirmRecording = async () => {
const confirmRecording = async () => { const confirmRecording = async () => {

View File

@@ -48,7 +48,7 @@
 	{#each _citations as citation, idx}
 		<div class="flex gap-1 text-xs font-semibold">
 			<button
-				class="flex dark:text-gray-300 py-1 px-1 bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-xl"
+				class="flex dark:text-gray-300 py-1 px-1 bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-xl max-w-96"
 				on:click={() => {
 					showCitationModal = true;
 					selectedCitation = citation;

View File

@@ -109,8 +109,8 @@
 			<Tooltip content={$i18n.t('Controls')}>
 				<button
 					class=" flex cursor-pointer px-2 py-2 rounded-xl hover:bg-gray-50 dark:hover:bg-gray-850 transition"
-					on:click={() => {
-						showControls.set(!$showControls);
+					on:click={async () => {
+						await showControls.set(!$showControls);
 					}}
 					aria-label="Controls"
 				>