diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2e02cd753..be0661a93 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,13 +5,24 @@ All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
-## [0.1.121] - 2024-04-22
-
-### Added
+## [0.1.122] - 2024-04-24
 
 - **🛠️ Improved Embedding Model Support**: You can now use any embedding model `sentence_transformers` supports.
 - **🌟 Enhanced RAG Pipeline**: Added `BM25` hybrid searching with reranking model support using `sentence_transformers`.
 
+## [0.1.121] - 2024-04-24
+
+### Fixed
+
+- **🔧 Translation Issues**: Addressed various translation discrepancies.
+- **🔒 LiteLLM Security Fix**: Updated LiteLLM version to resolve a security vulnerability.
+- **🖥️ HTML Tag Display**: Rectified the issue where the '< br >' tag wasn't displaying correctly.
+- **🔗 WebSocket Connection**: Resolved the failure of WebSocket connection under HTTPS security for ComfyUI server.
+- **📜 FileReader Optimization**: Implemented FileReader initialization per image in multi-file drag & drop to ensure reusability.
+- **🏷️ Tag Display**: Corrected tag display inconsistencies.
+- **📦 Archived Chat Styling**: Fixed styling issues in archived chat.
+- **🔖 Safari Copy Button Bug**: Addressed the bug where the copy button failed to copy links in Safari.
+
 ## [0.1.120] - 2024-04-20
 
 ### Added
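The `BM25` hybrid-search entry above is worth a concrete picture. Below is a hedged sketch of that retrieval pattern: lexical BM25 candidates merged with embedding-similarity candidates, then reordered by a cross-encoder reranker. The changelog names only BM25 and `sentence_transformers`; the `rank_bm25` package, the model names, and the merge heuristic here are illustrative assumptions, not the pipeline this release actually ships.

```python
from rank_bm25 import BM25Okapi  # assumed BM25 implementation, not named by the changelog
from sentence_transformers import CrossEncoder, SentenceTransformer, util

docs = [
    "Ollama models can be pulled from the model registry.",
    "LiteLLM proxies OpenAI-compatible API requests.",
    "Image generation supports AUTOMATIC1111 and ComfyUI backends.",
]
query = "How do I pull a model?"

# Lexical leg: BM25 over naively tokenized documents.
bm25 = BM25Okapi([d.lower().split() for d in docs])
bm25_scores = bm25.get_scores(query.lower().split())

# Semantic leg: any embedding model sentence_transformers supports.
embedder = SentenceTransformer("all-MiniLM-L6-v2")
sim_scores = util.cos_sim(embedder.encode(query), embedder.encode(docs))[0]

# Union the top candidates from both legs, then let a reranker decide.
candidates = {int(i) for i in bm25_scores.argsort()[-2:]}
candidates |= {int(i) for i in sim_scores.argsort(descending=True)[:2]}

reranker = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
order = sorted(candidates)
scores = reranker.predict([(query, docs[i]) for i in order])
for score, i in sorted(zip(scores, order), reverse=True):
    print(f"{score:.3f}  {docs[i]}")
```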
diff --git a/backend/apps/images/main.py b/backend/apps/images/main.py
index a3939d206..2059ac3c0 100644
--- a/backend/apps/images/main.py
+++ b/backend/apps/images/main.py
@@ -35,8 +35,8 @@ from config import (
     ENABLE_IMAGE_GENERATION,
     AUTOMATIC1111_BASE_URL,
     COMFYUI_BASE_URL,
-    OPENAI_API_BASE_URL,
-    OPENAI_API_KEY,
+    IMAGES_OPENAI_API_BASE_URL,
+    IMAGES_OPENAI_API_KEY,
 )
 
 
@@ -58,8 +58,8 @@ app.add_middleware(
 
 app.state.ENGINE = ""
 app.state.ENABLED = ENABLE_IMAGE_GENERATION
-app.state.OPENAI_API_BASE_URL = OPENAI_API_BASE_URL
-app.state.OPENAI_API_KEY = OPENAI_API_KEY
+app.state.OPENAI_API_BASE_URL = IMAGES_OPENAI_API_BASE_URL
+app.state.OPENAI_API_KEY = IMAGES_OPENAI_API_KEY
 
 app.state.MODEL = ""
 
@@ -135,27 +135,33 @@ async def update_engine_url(
     }
 
 
-class OpenAIKeyUpdateForm(BaseModel):
+class OpenAIConfigUpdateForm(BaseModel):
+    url: str
     key: str
 
 
-@app.get("/key")
-async def get_openai_key(user=Depends(get_admin_user)):
-    return {"OPENAI_API_KEY": app.state.OPENAI_API_KEY}
+@app.get("/openai/config")
+async def get_openai_config(user=Depends(get_admin_user)):
+    return {
+        "OPENAI_API_BASE_URL": app.state.OPENAI_API_BASE_URL,
+        "OPENAI_API_KEY": app.state.OPENAI_API_KEY,
+    }
 
 
-@app.post("/key/update")
-async def update_openai_key(
-    form_data: OpenAIKeyUpdateForm, user=Depends(get_admin_user)
+@app.post("/openai/config/update")
+async def update_openai_config(
+    form_data: OpenAIConfigUpdateForm, user=Depends(get_admin_user)
 ):
-
     if form_data.key == "":
         raise HTTPException(status_code=400, detail=ERROR_MESSAGES.API_KEY_NOT_FOUND)
 
+    app.state.OPENAI_API_BASE_URL = form_data.url
     app.state.OPENAI_API_KEY = form_data.key
+
     return {
-        "OPENAI_API_KEY": app.state.OPENAI_API_KEY,
         "status": True,
+        "OPENAI_API_BASE_URL": app.state.OPENAI_API_BASE_URL,
+        "OPENAI_API_KEY": app.state.OPENAI_API_KEY,
     }
diff --git a/backend/apps/litellm/main.py b/backend/apps/litellm/main.py
index 52e0c7002..119e9107e 100644
--- a/backend/apps/litellm/main.py
+++ b/backend/apps/litellm/main.py
@@ -1,3 +1,5 @@
+import sys
+
 from fastapi import FastAPI, Depends, HTTPException
 from fastapi.routing import APIRoute
 from fastapi.middleware.cors import CORSMiddleware
@@ -23,7 +25,13 @@
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["LITELLM"])
 
-from config import MODEL_FILTER_ENABLED, MODEL_FILTER_LIST, DATA_DIR
+from config import (
+    MODEL_FILTER_ENABLED,
+    MODEL_FILTER_LIST,
+    DATA_DIR,
+    LITELLM_PROXY_PORT,
+    LITELLM_PROXY_HOST,
+)
 
 from litellm.utils import get_llm_provider
 
@@ -64,7 +72,7 @@ async def run_background_process(command):
     log.info(f"Executing command: {command}")
     # Execute the command and create a subprocess
     process = await asyncio.create_subprocess_exec(
-        *command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
+        *command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
     )
     background_process = process
     log.info("Subprocess started successfully.")
@@ -90,9 +98,17 @@ async def run_background_process(command):
 
 async def start_litellm_background():
     log.info("start_litellm_background")
     # Command to run in the background
-    command = (
-        "litellm --port 14365 --telemetry False --config ./data/litellm/config.yaml"
-    )
+    command = [
+        "litellm",
+        "--port",
+        str(LITELLM_PROXY_PORT),
+        "--host",
+        LITELLM_PROXY_HOST,
+        "--telemetry",
+        "False",
+        "--config",
+        LITELLM_CONFIG_DIR,
+    ]
 
     await run_background_process(command)
 
@@ -109,7 +125,6 @@ async def shutdown_litellm_background():
 
 
 @app.on_event("startup")
 async def startup_event():
-    log.info("startup_event")
     # TODO: Check config.yaml file and create one
     asyncio.create_task(start_litellm_background())
 
@@ -186,7 +201,7 @@ async def get_models(user=Depends(get_current_user)):
     while not background_process:
         await asyncio.sleep(0.1)
 
-    url = "http://localhost:14365/v1"
+    url = f"http://localhost:{LITELLM_PROXY_PORT}/v1"
     r = None
     try:
         r = requests.request(method="GET", url=f"{url}/models")
@@ -289,7 +304,7 @@ async def delete_model_from_config(
 async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
     body = await request.body()
 
-    url = "http://localhost:14365"
+    url = f"http://localhost:{LITELLM_PROXY_PORT}"
 
     target_url = f"{url}/{path}"
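One detail in `backend/apps/litellm/main.py` deserves a note: `run_background_process` previously did `*command.split()`, which tears apart any argument containing whitespace — a config path with a space in it would reach the child as several argv entries. Passing a list through `asyncio.create_subprocess_exec` hands each element over verbatim. A minimal, self-contained demonstration (the path below is made up purely for illustration):

```python
import asyncio


async def run(command: list[str]) -> None:
    # Each list element becomes exactly one argv entry -- no shell-style parsing.
    process = await asyncio.create_subprocess_exec(
        *command, stdout=asyncio.subprocess.PIPE
    )
    stdout, _ = await process.communicate()
    print(stdout.decode().strip())


config_path = "./data dir with spaces/config.yaml"
# The old `command.split()` would have broken this path into four arguments.
asyncio.run(run(["python3", "-c", "import sys; print(sys.argv[1])", config_path]))
```

The same change is what lets the port and host arrive as separate, already-validated values instead of being interpolated into one string.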
"@sveltejs/adapter-node": "^1.3.1", "async": "^3.2.5", diff --git a/package.json b/package.json index 12afea0f4..777f0f07b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "open-webui", - "version": "0.1.120", + "version": "0.1.121", "private": true, "scripts": { "dev": "vite dev --host", diff --git a/src/lib/apis/images/index.ts b/src/lib/apis/images/index.ts index aadfafd14..3f624704e 100644 --- a/src/lib/apis/images/index.ts +++ b/src/lib/apis/images/index.ts @@ -72,10 +72,10 @@ export const updateImageGenerationConfig = async ( return res; }; -export const getOpenAIKey = async (token: string = '') => { +export const getOpenAIConfig = async (token: string = '') => { let error = null; - const res = await fetch(`${IMAGES_API_BASE_URL}/key`, { + const res = await fetch(`${IMAGES_API_BASE_URL}/openai/config`, { method: 'GET', headers: { Accept: 'application/json', @@ -101,13 +101,13 @@ export const getOpenAIKey = async (token: string = '') => { throw error; } - return res.OPENAI_API_KEY; + return res; }; -export const updateOpenAIKey = async (token: string = '', key: string) => { +export const updateOpenAIConfig = async (token: string = '', url: string, key: string) => { let error = null; - const res = await fetch(`${IMAGES_API_BASE_URL}/key/update`, { + const res = await fetch(`${IMAGES_API_BASE_URL}/openai/config/update`, { method: 'POST', headers: { Accept: 'application/json', @@ -115,6 +115,7 @@ export const updateOpenAIKey = async (token: string = '', key: string) => { ...(token && { authorization: `Bearer ${token}` }) }, body: JSON.stringify({ + url: url, key: key }) }) @@ -136,7 +137,7 @@ export const updateOpenAIKey = async (token: string = '', key: string) => { throw error; } - return res.OPENAI_API_KEY; + return res; }; export const getImageGenerationEngineUrls = async (token: string = '') => { diff --git a/src/lib/components/chat/Settings/Audio.svelte b/src/lib/components/chat/Settings/Audio.svelte index 32a71dc18..71fb7957e 100644 --- a/src/lib/components/chat/Settings/Audio.svelte +++ b/src/lib/components/chat/Settings/Audio.svelte @@ -75,14 +75,16 @@ }; const updateConfigHandler = async () => { - const res = await updateAudioConfig(localStorage.token, { - url: OpenAIUrl, - key: OpenAIKey - }); + if (TTSEngine === 'openai') { + const res = await updateAudioConfig(localStorage.token, { + url: OpenAIUrl, + key: OpenAIKey + }); - if (res) { - OpenAIUrl = res.OPENAI_API_BASE_URL; - OpenAIKey = res.OPENAI_API_KEY; + if (res) { + OpenAIUrl = res.OPENAI_API_BASE_URL; + OpenAIKey = res.OPENAI_API_KEY; + } } }; diff --git a/src/lib/components/chat/Settings/Images.svelte b/src/lib/components/chat/Settings/Images.svelte index 7282c184a..19e050ca8 100644 --- a/src/lib/components/chat/Settings/Images.svelte +++ b/src/lib/components/chat/Settings/Images.svelte @@ -15,8 +15,8 @@ updateImageSize, getImageSteps, updateImageSteps, - getOpenAIKey, - updateOpenAIKey + getOpenAIConfig, + updateOpenAIConfig } from '$lib/apis/images'; import { getBackendConfig } from '$lib/apis'; const dispatch = createEventDispatcher(); @@ -33,6 +33,7 @@ let AUTOMATIC1111_BASE_URL = ''; let COMFYUI_BASE_URL = ''; + let OPENAI_API_BASE_URL = ''; let OPENAI_API_KEY = ''; let selectedModel = ''; @@ -131,7 +132,10 @@ AUTOMATIC1111_BASE_URL = URLS.AUTOMATIC1111_BASE_URL; COMFYUI_BASE_URL = URLS.COMFYUI_BASE_URL; - OPENAI_API_KEY = await getOpenAIKey(localStorage.token); + const config = await getOpenAIConfig(localStorage.token); + + OPENAI_API_KEY = config.OPENAI_API_KEY; + OPENAI_API_BASE_URL = 
diff --git a/package-lock.json b/package-lock.json
index a310c609d..55b35dd58 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
 {
 	"name": "open-webui",
-	"version": "0.1.120",
+	"version": "0.1.121",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "open-webui",
-			"version": "0.1.120",
+			"version": "0.1.121",
 			"dependencies": {
 				"@sveltejs/adapter-node": "^1.3.1",
 				"async": "^3.2.5",
diff --git a/package.json b/package.json
index 12afea0f4..777f0f07b 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.1.120",
+	"version": "0.1.121",
 	"private": true,
 	"scripts": {
 		"dev": "vite dev --host",
diff --git a/src/lib/apis/images/index.ts b/src/lib/apis/images/index.ts
index aadfafd14..3f624704e 100644
--- a/src/lib/apis/images/index.ts
+++ b/src/lib/apis/images/index.ts
@@ -72,10 +72,10 @@ export const updateImageGenerationConfig = async (
 	return res;
 };
 
-export const getOpenAIKey = async (token: string = '') => {
+export const getOpenAIConfig = async (token: string = '') => {
 	let error = null;
 
-	const res = await fetch(`${IMAGES_API_BASE_URL}/key`, {
+	const res = await fetch(`${IMAGES_API_BASE_URL}/openai/config`, {
 		method: 'GET',
 		headers: {
 			Accept: 'application/json',
@@ -101,13 +101,13 @@ export const getOpenAIKey = async (token: string = '') => {
 		throw error;
 	}
 
-	return res.OPENAI_API_KEY;
+	return res;
 };
 
-export const updateOpenAIKey = async (token: string = '', key: string) => {
+export const updateOpenAIConfig = async (token: string = '', url: string, key: string) => {
 	let error = null;
 
-	const res = await fetch(`${IMAGES_API_BASE_URL}/key/update`, {
+	const res = await fetch(`${IMAGES_API_BASE_URL}/openai/config/update`, {
 		method: 'POST',
 		headers: {
 			Accept: 'application/json',
@@ -115,6 +115,7 @@ export const updateOpenAIKey = async (token: string = '', key: string) => {
 			...(token && { authorization: `Bearer ${token}` })
 		},
 		body: JSON.stringify({
+			url: url,
 			key: key
 		})
 	})
@@ -136,7 +137,7 @@ export const updateOpenAIKey = async (token: string = '', key: string) => {
 		throw error;
 	}
 
-	return res.OPENAI_API_KEY;
+	return res;
 };
 
 export const getImageGenerationEngineUrls = async (token: string = '') => {
diff --git a/src/lib/components/chat/Settings/Audio.svelte b/src/lib/components/chat/Settings/Audio.svelte
index 32a71dc18..71fb7957e 100644
--- a/src/lib/components/chat/Settings/Audio.svelte
+++ b/src/lib/components/chat/Settings/Audio.svelte
@@ -75,14 +75,16 @@
 	};
 
 	const updateConfigHandler = async () => {
-		const res = await updateAudioConfig(localStorage.token, {
-			url: OpenAIUrl,
-			key: OpenAIKey
-		});
+		if (TTSEngine === 'openai') {
+			const res = await updateAudioConfig(localStorage.token, {
+				url: OpenAIUrl,
+				key: OpenAIKey
+			});
 
-		if (res) {
-			OpenAIUrl = res.OPENAI_API_BASE_URL;
-			OpenAIKey = res.OPENAI_API_KEY;
+			if (res) {
+				OpenAIUrl = res.OPENAI_API_BASE_URL;
+				OpenAIKey = res.OPENAI_API_KEY;
+			}
 		}
 	};
 
diff --git a/src/lib/components/chat/Settings/Images.svelte b/src/lib/components/chat/Settings/Images.svelte
index 7282c184a..19e050ca8 100644
--- a/src/lib/components/chat/Settings/Images.svelte
+++ b/src/lib/components/chat/Settings/Images.svelte
@@ -15,8 +15,8 @@
 		updateImageSize,
 		getImageSteps,
 		updateImageSteps,
-		getOpenAIKey,
-		updateOpenAIKey
+		getOpenAIConfig,
+		updateOpenAIConfig
 	} from '$lib/apis/images';
 	import { getBackendConfig } from '$lib/apis';
 	const dispatch = createEventDispatcher();
@@ -33,6 +33,7 @@
 	let AUTOMATIC1111_BASE_URL = '';
 	let COMFYUI_BASE_URL = '';
 
+	let OPENAI_API_BASE_URL = '';
 	let OPENAI_API_KEY = '';
 
 	let selectedModel = '';
@@ -131,7 +132,10 @@
 			AUTOMATIC1111_BASE_URL = URLS.AUTOMATIC1111_BASE_URL;
 			COMFYUI_BASE_URL = URLS.COMFYUI_BASE_URL;
 
-			OPENAI_API_KEY = await getOpenAIKey(localStorage.token);
+			const config = await getOpenAIConfig(localStorage.token);
+
+			OPENAI_API_KEY = config.OPENAI_API_KEY;
+			OPENAI_API_BASE_URL = config.OPENAI_API_BASE_URL;
 
 			imageSize = await getImageSize(localStorage.token);
 			steps = await getImageSteps(localStorage.token);
@@ -149,7 +153,7 @@
 		loading = true;
 
 		if (imageGenerationEngine === 'openai') {
-			await updateOpenAIKey(localStorage.token, OPENAI_API_KEY);
+			await updateOpenAIConfig(localStorage.token, OPENAI_API_BASE_URL, OPENAI_API_KEY);
 		}
 
 		await updateDefaultImageGenerationModel(localStorage.token, selectedModel);
@@ -300,13 +304,22 @@
 
 	{:else if imageGenerationEngine === 'openai'}
-
{$i18n.t('OpenAI API Key')}
-
-
+
+
{$i18n.t('OpenAI API Config')}
+
+
+ +
@@ -319,19 +332,39 @@
{$i18n.t('Set Default Model')}
-
+		{#if imageGenerationEngine === 'openai' && !OPENAI_API_BASE_URL.includes('https://api.openai.com')}
+
+
+
+
+					{#each models ?? [] as model}
+
+					{/each}
+
+
+		{:else}
+
+		{/if}
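Together, the backend and frontend changes above replace the key-only `/key` routes with an `/openai/config` pair that moves the base URL and the key as one unit. A hedged round-trip sketch of those two endpoints; the mount path and the admin token are assumptions about a local deployment, not values taken from this patch:

```python
import os

import requests

BASE = "http://localhost:8080/images/api/v1"  # assumed images API mount point
HEADERS = {"Authorization": f"Bearer {os.environ['ADMIN_TOKEN']}"}  # hypothetical admin JWT

# The update now carries both fields; an empty key is rejected with a 400.
update = requests.post(
    f"{BASE}/openai/config/update",
    json={"url": "https://api.openai.com/v1", "key": "sk-placeholder"},
    headers=HEADERS,
)
update.raise_for_status()

# The read endpoint returns the same pair the Svelte settings page binds to.
print(requests.get(f"{BASE}/openai/config", headers=HEADERS).json())
```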
diff --git a/src/lib/components/chat/Settings/Models.svelte b/src/lib/components/chat/Settings/Models.svelte
index 688774d78..821d0fc4c 100644
--- a/src/lib/components/chat/Settings/Models.svelte
+++ b/src/lib/components/chat/Settings/Models.svelte
@@ -13,7 +13,7 @@
 		uploadModel
 	} from '$lib/apis/ollama';
 	import { WEBUI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants';
-	import { WEBUI_NAME, models, user } from '$lib/stores';
+	import { WEBUI_NAME, models, MODEL_DOWNLOAD_POOL, user } from '$lib/stores';
 	import { splitStream } from '$lib/utils';
 	import { onMount, getContext } from 'svelte';
 	import { addLiteLLMModel, deleteLiteLLMModel, getLiteLLMModelInfo } from '$lib/apis/litellm';
@@ -50,12 +50,6 @@
 	let showExperimentalOllama = false;
 	let ollamaVersion = '';
 	const MAX_PARALLEL_DOWNLOADS = 3;
-	const modelDownloadQueue = queue(
-		(task: { modelName: string }, cb) =>
-			pullModelHandlerProcessor({ modelName: task.modelName, callback: cb }),
-		MAX_PARALLEL_DOWNLOADS
-	);
-	let modelDownloadStatus: Record<string, any> = {};
 
 	let modelTransferring = false;
 	let modelTag = '';
@@ -140,7 +134,8 @@
 	const pullModelHandler = async () => {
 		const sanitizedModelTag = modelTag.trim().replace(/^ollama\s+(run|pull)\s+/, '');
 
-		if (modelDownloadStatus[sanitizedModelTag]) {
+		console.log($MODEL_DOWNLOAD_POOL);
+		if ($MODEL_DOWNLOAD_POOL[sanitizedModelTag]) {
 			toast.error(
 				$i18n.t(`Model '{{modelTag}}' is already in queue for downloading.`, {
 					modelTag: sanitizedModelTag
@@ -148,40 +143,117 @@
 			);
 			return;
 		}
-		if (Object.keys(modelDownloadStatus).length === 3) {
+		if (Object.keys($MODEL_DOWNLOAD_POOL).length === MAX_PARALLEL_DOWNLOADS) {
 			toast.error(
 				$i18n.t('Maximum of 3 models can be downloaded simultaneously. Please try again later.')
 			);
 			return;
 		}
-
 		modelTransferring = true;
 
+		const res = await pullModel(localStorage.token, sanitizedModelTag, '0').catch((error) => {
+			toast.error(error);
+			return null;
+		});
 
-		modelDownloadQueue.push(
-			{ modelName: sanitizedModelTag },
-			async (data: { modelName: string; success: boolean; error?: Error }) => {
-				const { modelName } = data;
-				// Remove the downloaded model
-				delete modelDownloadStatus[modelName];
+		if (res) {
+			const reader = res.body
+				.pipeThrough(new TextDecoderStream())
+				.pipeThrough(splitStream('\n'))
+				.getReader();
 
-				modelDownloadStatus = { ...modelDownloadStatus };
+			while (true) {
+				try {
+					const { value, done } = await reader.read();
+					if (done) break;
 
-				if (!data.success) {
-					toast.error(data.error);
-				} else {
-					toast.success(
-						$i18n.t(`Model '{{modelName}}' has been successfully downloaded.`, { modelName })
-					);
+					let lines = value.split('\n');
 
-					const notification = new Notification($WEBUI_NAME, {
-						body: $i18n.t(`Model '{{modelName}}' has been successfully downloaded.`, { modelName }),
-						icon: `${WEBUI_BASE_URL}/static/favicon.png`
-					});
+					for (const line of lines) {
+						if (line !== '') {
+							let data = JSON.parse(line);
+							console.log(data);
+							if (data.error) {
+								throw data.error;
+							}
+							if (data.detail) {
+								throw data.detail;
+							}
 
-					models.set(await getModels());
+							if (data.id) {
+								MODEL_DOWNLOAD_POOL.set({
+									...$MODEL_DOWNLOAD_POOL,
+									[sanitizedModelTag]: {
+										...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
+										requestId: data.id,
+										reader,
+										done: false
+									}
+								});
+								console.log(data);
+							}
+
+							if (data.status) {
+								if (data.digest) {
+									let downloadProgress = 0;
+									if (data.completed) {
+										downloadProgress = Math.round((data.completed / data.total) * 1000) / 10;
+									} else {
+										downloadProgress = 100;
+									}
+
+									MODEL_DOWNLOAD_POOL.set({
+										...$MODEL_DOWNLOAD_POOL,
+										[sanitizedModelTag]: {
+											...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
+											pullProgress: downloadProgress,
+											digest: data.digest
+										}
+									});
+								} else {
+									toast.success(data.status);
+
+									MODEL_DOWNLOAD_POOL.set({
+										...$MODEL_DOWNLOAD_POOL,
+										[sanitizedModelTag]: {
+											...$MODEL_DOWNLOAD_POOL[sanitizedModelTag],
+											done: data.status === 'success'
+										}
+									});
+								}
+							}
+						}
+					}
+				} catch (error) {
+					console.log(error);
+					if (typeof error !== 'string') {
+						error = error.message;
+					}
+
+					toast.error(error);
+					// opts.callback({ success: false, error, modelName: opts.modelName });
+				}
 			}
-		);
+
+			console.log($MODEL_DOWNLOAD_POOL[sanitizedModelTag]);
+
+			if ($MODEL_DOWNLOAD_POOL[sanitizedModelTag].done) {
+				toast.success(
+					$i18n.t(`Model '{{modelName}}' has been successfully downloaded.`, {
+						modelName: sanitizedModelTag
+					})
+				);
+
+				models.set(await getModels(localStorage.token));
+			} else {
+				toast.error('Download canceled');
+			}
+
+			delete $MODEL_DOWNLOAD_POOL[sanitizedModelTag];
+
+			MODEL_DOWNLOAD_POOL.set({
+				...$MODEL_DOWNLOAD_POOL
+			});
+		}
 
 		modelTag = '';
 		modelTransferring = false;
@@ -352,88 +424,18 @@
 		models.set(await getModels());
 	};
 
-	const pullModelHandlerProcessor = async (opts: { modelName: string; callback: Function }) => {
-		const res = await pullModel(localStorage.token, opts.modelName, selectedOllamaUrlIdx).catch(
-			(error) => {
-				opts.callback({ success: false, error, modelName: opts.modelName });
-				return null;
-			}
-		);
+	const cancelModelPullHandler = async (model: string) => {
+		const { reader, requestId } = $MODEL_DOWNLOAD_POOL[model];
+		if (reader) {
+			await reader.cancel();
+
-		if (res) {
-			const reader = res.body
-				.pipeThrough(new TextDecoderStream())
-				.pipeThrough(splitStream('\n'))
-				.getReader();
-
-			while (true) {
-				try {
-					const { value, done } = await reader.read();
-					if (done) break;
-
-					let lines = value.split('\n');
-
-					for (const line of lines) {
-						if (line !== '') {
-							let data = JSON.parse(line);
-							console.log(data);
-							if (data.error) {
-								throw data.error;
-							}
-							if (data.detail) {
-								throw data.detail;
-							}
-
-							if (data.id) {
-								modelDownloadStatus[opts.modelName] = {
-									...modelDownloadStatus[opts.modelName],
-									requestId: data.id,
-									reader,
-									done: false
-								};
-								console.log(data);
-							}
-
-							if (data.status) {
-								if (data.digest) {
-									let downloadProgress = 0;
-									if (data.completed) {
-										downloadProgress = Math.round((data.completed / data.total) * 1000) / 10;
-									} else {
-										downloadProgress = 100;
-									}
-									modelDownloadStatus[opts.modelName] = {
-										...modelDownloadStatus[opts.modelName],
-										pullProgress: downloadProgress,
-										digest: data.digest
-									};
-								} else {
-									toast.success(data.status);
-
-									modelDownloadStatus[opts.modelName] = {
-										...modelDownloadStatus[opts.modelName],
-										done: data.status === 'success'
-									};
-								}
-							}
-						}
-					}
-				} catch (error) {
-					console.log(error);
-					if (typeof error !== 'string') {
-						error = error.message;
-					}
-					opts.callback({ success: false, error, modelName: opts.modelName });
-				}
-			}
-
-			console.log(modelDownloadStatus[opts.modelName]);
-
-			if (modelDownloadStatus[opts.modelName].done) {
-				opts.callback({ success: true, modelName: opts.modelName });
-			} else {
-				opts.callback({ success: false, error: 'Download canceled', modelName: opts.modelName });
-			}
+			await cancelOllamaRequest(localStorage.token, requestId);
+			delete $MODEL_DOWNLOAD_POOL[model];
+			MODEL_DOWNLOAD_POOL.set({
+				...$MODEL_DOWNLOAD_POOL
+			});
+			await deleteModel(localStorage.token, model);
+			toast.success(`${model} download has been canceled`);
 		}
 	};
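The reader loop above consumes the newline-delimited JSON stream a model pull produces. For reference, here is the same protocol consumed from Python against a local Ollama (field names per Ollama's `/api/pull`: `status`, `digest`, `completed`, `total`); the direct localhost call is an illustration — the component goes through the Open WebUI backend rather than straight to Ollama:

```python
import json

import requests

with requests.post(
    "http://localhost:11434/api/pull",
    json={"name": "llama3", "stream": True},
    stream=True,
) as resp:
    for raw in resp.iter_lines():
        if not raw:
            continue
        data = json.loads(raw)
        if data.get("total") and data.get("completed"):
            # Same arithmetic as the Svelte code: percent with one decimal place.
            progress = round(data["completed"] / data["total"] * 1000) / 10
            print(f"{data.get('digest', '?')}: {progress}%")
        elif "status" in data:
            print(data["status"])
```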
@@ -503,18 +505,6 @@
 		ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
 		liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
 	});
-
-	const cancelModelPullHandler = async (model: string) => {
-		const { reader, requestId } = modelDownloadStatus[model];
-		if (reader) {
-			await reader.cancel();
-
-			await cancelOllamaRequest(localStorage.token, requestId);
-			delete modelDownloadStatus[model];
-			await deleteModel(localStorage.token, model);
-			toast.success(`${model} download has been canceled`);
-		}
-	};
@@ -643,9 +633,9 @@
 					>
-					{#if Object.keys(modelDownloadStatus).length > 0}
-						{#each Object.keys(modelDownloadStatus) as model}
-							{#if 'pullProgress' in modelDownloadStatus[model]}
+					{#if Object.keys($MODEL_DOWNLOAD_POOL).length > 0}
+						{#each Object.keys($MODEL_DOWNLOAD_POOL) as model}
+							{#if 'pullProgress' in $MODEL_DOWNLOAD_POOL[model]}
{model}
@@ -655,10 +645,10 @@
 											class="dark:bg-gray-600 bg-gray-500 text-xs font-medium text-gray-100 text-center p-0.5 leading-none rounded-full"
 											style="width: {Math.max(
 												15,
-												modelDownloadStatus[model].pullProgress ?? 0
+												$MODEL_DOWNLOAD_POOL[model].pullProgress ?? 0
 											)}%"
 										>
-											{modelDownloadStatus[model].pullProgress ?? 0}%
+											{$MODEL_DOWNLOAD_POOL[model].pullProgress ?? 0}%
@@ -689,9 +679,9 @@
-								{#if 'digest' in modelDownloadStatus[model]}
+								{#if 'digest' in $MODEL_DOWNLOAD_POOL[model]}
-										{modelDownloadStatus[model].digest}
+										{$MODEL_DOWNLOAD_POOL[model].digest}
 								{/if}
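To summarize the cancellation path added above: the handler stops consuming the stream (`reader.cancel()`), asks the backend to abort the in-flight Ollama request, drops the pool entry, and deletes the partial model. A loose Python analogue against a local Ollama — `requests` stands in here for the component's `cancelOllamaRequest`/`deleteModel` helpers, and the delete only succeeds if the model was already registered:

```python
import requests


def cancel_pull(response: requests.Response, model: str) -> None:
    # Closing the streamed response is the analogue of reader.cancel().
    response.close()
    # Remove whatever was written for the model, mirroring deleteModel().
    requests.delete("http://localhost:11434/api/delete", json={"name": model})
    print(f"{model} download has been canceled")
```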
diff --git a/src/lib/i18n/locales/nl-NL/translation.json b/src/lib/i18n/locales/nl-NL/translation.json
index 35f33ad0d..881ef59f8 100644
--- a/src/lib/i18n/locales/nl-NL/translation.json
+++ b/src/lib/i18n/locales/nl-NL/translation.json
@@ -62,7 +62,7 @@
 	"Click here to check other modelfiles.": "Klik hier om andere modelfiles te controleren.",
 	"Click here to select": "Klik hier om te selecteren",
 	"Click here to select documents.": "Klik hier om documenten te selecteren",
-	"click here.": "click here.",
+	"click here.": "klik hier.",
 	"Click on the user role button to change a user's role.": "Klik op de gebruikersrol knop om de rol van een gebruiker te wijzigen.",
 	"Close": "Sluiten",
 	"Collection": "Verzameling",
diff --git a/src/lib/i18n/locales/pl-pl/translation.json b/src/lib/i18n/locales/pl-PL/translation.json
similarity index 100%
rename from src/lib/i18n/locales/pl-pl/translation.json
rename to src/lib/i18n/locales/pl-PL/translation.json
diff --git a/src/lib/i18n/locales/ru-RU/translation.json b/src/lib/i18n/locales/ru-RU/translation.json
index 3b4c551a9..431d53766 100644
--- a/src/lib/i18n/locales/ru-RU/translation.json
+++ b/src/lib/i18n/locales/ru-RU/translation.json
@@ -2,39 +2,39 @@
 	"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' или '-1' для не истечение.",
 	"(Beta)": "(бета)",
 	"(e.g. `sh webui.sh --api`)": "(например: `sh webui.sh --api`)",
-	"(latest)": "(новый)",
-	"{{modelName}} is thinking...": "{{modelName}} это думает...",
+	"(latest)": "(последний)",
+	"{{modelName}} is thinking...": "{{modelName}} думает...",
 	"{{webUIName}} Backend Required": "{{webUIName}} бэкенд требуемый",
-	"a user": "юзер",
-	"About": "Относительно",
+	"a user": "пользователь",
+	"About": "Об",
 	"Account": "Аккаунт",
 	"Action": "Действие",
 	"Add a model": "Добавьте модель",
-	"Add a model tag name": "Добавьте тэг модели имя",
-	"Add a short description about what this modelfile does": "Добавьте краткое описание, что делает этот моделифайл",
-	"Add a short title for this prompt": "Добавьте краткое название для этого взаимодействия",
+	"Add a model tag name": "Добавьте имя тэга модели",
+	"Add a short description about what this modelfile does": "Добавьте краткое описание, что делает этот моделфайл",
+	"Add a short title for this prompt": "Добавьте краткий заголовок для этого ввода",
 	"Add a tag": "Добавьте тэг",
 	"Add Docs": "Добавьте документы",
 	"Add Files": "Добавьте файлы",
-	"Add message": "Добавьте message",
+	"Add message": "Добавьте сообщение",
 	"add tags": "Добавьте тэгы",
-	"Adjusting these settings will apply changes universally to all users.": "Регулирующий этих настроек приведет к изменениям для все юзеры.",
+	"Adjusting these settings will apply changes universally to all users.": "Регулирующий этих настроек приведет к изменениям для все пользователей.",
 	"admin": "админ",
 	"Admin Panel": "Панель админ",
 	"Admin Settings": "Настройки админ",
 	"Advanced Parameters": "Расширенные Параметры",
 	"all": "всё",
-	"All Users": "Всё юзеры",
-	"Allow": "Дозволять",
+	"All Users": "Все пользователи",
+	"Allow": "Разрешить",
 	"Allow Chat Deletion": "Дозволять удаление чат",
 	"alphanumeric characters and hyphens": "буквенно цифровые символы и дефисы",
-	"Already have an account?": "у вас есть аккаунт уже?",
+	"Already have an account?": "у вас уже есть аккаунт?",
 	"an assistant": "ассистент",
 	"and": "и",
 	"API Base URL": "Базовый адрес API",
 	"API Key": "Ключ API",
 	"API RPM": "API RPM",
-	"are allowed - Activate this command by typing": "разрешено - активируйте эту команду набором",
+	"are allowed - Activate this command by typing": "разрешено - активируйте эту команду вводом",
 	"Are you sure?": "Вы уверены?",
 	"Audio": "Аудио",
 	"Auto-playback response": "Автоматическое воспроизведение ответа",
diff --git a/static/manifest.json b/static/manifest.json
new file mode 100644
index 000000000..0967ef424
--- /dev/null
+++ b/static/manifest.json
@@ -0,0 +1 @@
+{}