Merge branch 'dev' into feat/model-config

This commit is contained in:
Timothy Jaeryang Baek 2024-05-21 21:37:04 -10:00 committed by GitHub
commit 8df0429c99
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
54 changed files with 2020 additions and 157 deletions

View File

@ -0,0 +1,59 @@
name: Deploy to HuggingFace Spaces
on:
push:
branches:
- dev
- main
workflow_dispatch:
jobs:
check-secret:
runs-on: ubuntu-latest
outputs:
token-set: ${{ steps.check-key.outputs.defined }}
steps:
- id: check-key
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
if: "${{ env.HF_TOKEN != '' }}"
run: echo "defined=true" >> $GITHUB_OUTPUT
deploy:
runs-on: ubuntu-latest
needs: [check-secret]
if: needs.check-secret.outputs.token-set == 'true'
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Remove git history
run: rm -rf .git
- name: Prepend YAML front matter to README.md
run: |
echo "---" > temp_readme.md
echo "title: Open WebUI" >> temp_readme.md
echo "emoji: 🐳" >> temp_readme.md
echo "colorFrom: purple" >> temp_readme.md
echo "colorTo: gray" >> temp_readme.md
echo "sdk: docker" >> temp_readme.md
echo "app_port: 8080" >> temp_readme.md
echo "---" >> temp_readme.md
cat README.md >> temp_readme.md
mv temp_readme.md README.md
- name: Configure git
run: |
git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
git config --global user.name "github-actions[bot]"
- name: Set up Git and push to Space
run: |
git init --initial-branch=main
git lfs track "*.ttf"
rm demo.gif
git add .
git commit -m "GitHub deploy: ${{ github.sha }}"
git push --force https://open-webui:${HF_TOKEN}@huggingface.co/spaces/open-webui/open-webui main

32
.github/workflows/release-pypi.yml vendored Normal file
View File

@ -0,0 +1,32 @@
name: Release to PyPI
on:
push:
branches:
- main # or whatever branch you want to use
- dev
jobs:
release:
runs-on: ubuntu-latest
environment:
name: pypi
url: https://pypi.org/p/open-webui
permissions:
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 18
- uses: actions/setup-python@v5
with:
python-version: 3.11
- name: Build
run: |
python -m pip install --upgrade pip
pip install build
python -m build .
- name: Publish package distributions to PyPI
uses: pypa/gh-action-pypi-publish@release/v1

View File

@ -132,7 +132,8 @@ RUN pip3 install uv && \
uv pip install --system -r requirements.txt --no-cache-dir && \ uv pip install --system -r requirements.txt --no-cache-dir && \
python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \ python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \ python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
fi fi; \
chown -R $UID:$GID /app/backend/data/

View File

@ -43,6 +43,7 @@ from utils.utils import (
from config import ( from config import (
SRC_LOG_LEVELS, SRC_LOG_LEVELS,
OLLAMA_BASE_URLS, OLLAMA_BASE_URLS,
ENABLE_OLLAMA_API,
ENABLE_MODEL_FILTER, ENABLE_MODEL_FILTER,
MODEL_FILTER_LIST, MODEL_FILTER_LIST,
UPLOAD_DIR, UPLOAD_DIR,
@ -68,6 +69,8 @@ app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.MODEL_CONFIG = Models.get_all_models() app.state.MODEL_CONFIG = Models.get_all_models()
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API
app.state.config.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS app.state.config.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS
app.state.MODELS = {} app.state.MODELS = {}
@ -97,6 +100,21 @@ async def get_status():
return {"status": True} return {"status": True}
@app.get("/config")
async def get_config(user=Depends(get_admin_user)):
return {"ENABLE_OLLAMA_API": app.state.config.ENABLE_OLLAMA_API}
class OllamaConfigForm(BaseModel):
enable_ollama_api: Optional[bool] = None
@app.post("/config/update")
async def update_config(form_data: OllamaConfigForm, user=Depends(get_admin_user)):
app.state.config.ENABLE_OLLAMA_API = form_data.enable_ollama_api
return {"ENABLE_OLLAMA_API": app.state.config.ENABLE_OLLAMA_API}
@app.get("/urls") @app.get("/urls")
async def get_ollama_api_urls(user=Depends(get_admin_user)): async def get_ollama_api_urls(user=Depends(get_admin_user)):
return {"OLLAMA_BASE_URLS": app.state.config.OLLAMA_BASE_URLS} return {"OLLAMA_BASE_URLS": app.state.config.OLLAMA_BASE_URLS}
@ -157,17 +175,24 @@ def merge_models_lists(model_lists):
async def get_all_models(): async def get_all_models():
log.info("get_all_models()") log.info("get_all_models()")
tasks = [fetch_url(f"{url}/api/tags") for url in app.state.config.OLLAMA_BASE_URLS]
if app.state.config.ENABLE_OLLAMA_API:
tasks = [
fetch_url(f"{url}/api/tags") for url in app.state.config.OLLAMA_BASE_URLS
]
responses = await asyncio.gather(*tasks) responses = await asyncio.gather(*tasks)
models = { models = {
"models": merge_models_lists( "models": merge_models_lists(
map( map(
lambda response: (response["models"] if response else None), lambda response: response["models"] if response else None, responses
responses,
) )
) )
} }
else:
models = {"models": []}
for model in models["models"]: for model in models["models"]:
add_custom_info_to_model(model) add_custom_info_to_model(model)

View File

@ -316,6 +316,7 @@ async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_use
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"]) @app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def proxy(path: str, request: Request, user=Depends(get_verified_user)): async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
idx = 0 idx = 0
pipeline = False
body = await request.body() body = await request.body()
# TODO: Remove below after gpt-4-vision fix from Open AI # TODO: Remove below after gpt-4-vision fix from Open AI
@ -324,7 +325,15 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
body = body.decode("utf-8") body = body.decode("utf-8")
body = json.loads(body) body = json.loads(body)
idx = app.state.MODELS[body.get("model")]["urlIdx"] model = app.state.MODELS[body.get("model")]
idx = model["urlIdx"]
if "pipeline" in model:
pipeline = model.get("pipeline")
if pipeline:
body["user"] = {"name": user.name, "id": user.id}
# Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 4000 # Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 4000
# This is a workaround until OpenAI fixes the issue with this model # This is a workaround until OpenAI fixes the issue with this model

View File

@ -3,7 +3,7 @@ import json
from peewee import * from peewee import *
from peewee_migrate import Router from peewee_migrate import Router
from playhouse.db_url import connect from playhouse.db_url import connect
from config import SRC_LOG_LEVELS, DATA_DIR, DATABASE_URL from config import SRC_LOG_LEVELS, DATA_DIR, DATABASE_URL, BACKEND_DIR
import os import os
import logging import logging
@ -30,6 +30,8 @@ else:
DB = connect(DATABASE_URL) DB = connect(DATABASE_URL)
log.info(f"Connected to a {DB.__class__.__name__} database.") log.info(f"Connected to a {DB.__class__.__name__} database.")
router = Router(DB, migrate_dir="apps/web/internal/migrations", logger=log) router = Router(
DB, migrate_dir=BACKEND_DIR / "apps" / "web" / "internal" / "migrations", logger=log
)
router.run() router.run()
DB.connect(reuse_if_open=True) DB.connect(reuse_if_open=True)

View File

@ -1,6 +1,8 @@
import os import os
import sys import sys
import logging import logging
import importlib.metadata
import pkgutil
import chromadb import chromadb
from chromadb import Settings from chromadb import Settings
from base64 import b64encode from base64 import b64encode
@ -22,10 +24,13 @@ from constants import ERROR_MESSAGES
# Load .env file # Load .env file
#################################### ####################################
BACKEND_DIR = Path(__file__).parent # the path containing this file
BASE_DIR = BACKEND_DIR.parent # the path containing the backend/
try: try:
from dotenv import load_dotenv, find_dotenv from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv("../.env")) load_dotenv(find_dotenv(str(BASE_DIR / ".env")))
except ImportError: except ImportError:
print("dotenv not installed, skipping...") print("dotenv not installed, skipping...")
@ -87,9 +92,11 @@ WEBUI_FAVICON_URL = "https://openwebui.com/favicon.png"
ENV = os.environ.get("ENV", "dev") ENV = os.environ.get("ENV", "dev")
try: try:
with open(f"../package.json", "r") as f: PACKAGE_DATA = json.loads((BASE_DIR / "package.json").read_text())
PACKAGE_DATA = json.load(f)
except: except:
try:
PACKAGE_DATA = {"version": importlib.metadata.version("open-webui")}
except importlib.metadata.PackageNotFoundError:
PACKAGE_DATA = {"version": "0.0.0"} PACKAGE_DATA = {"version": "0.0.0"}
VERSION = PACKAGE_DATA["version"] VERSION = PACKAGE_DATA["version"]
@ -115,10 +122,10 @@ def parse_section(section):
try: try:
with open("../CHANGELOG.md", "r") as file: changelog_content = (BASE_DIR / "CHANGELOG.md").read_text()
changelog_content = file.read()
except: except:
changelog_content = "" changelog_content = (pkgutil.get_data("open_webui", "CHANGELOG.md") or b"").decode()
# Convert markdown content to HTML # Convert markdown content to HTML
html_content = markdown.markdown(changelog_content) html_content = markdown.markdown(changelog_content)
@ -164,12 +171,11 @@ WEBUI_VERSION = os.environ.get("WEBUI_VERSION", "v1.0.0-alpha.100")
# DATA/FRONTEND BUILD DIR # DATA/FRONTEND BUILD DIR
#################################### ####################################
DATA_DIR = str(Path(os.getenv("DATA_DIR", "./data")).resolve()) DATA_DIR = Path(os.getenv("DATA_DIR", BACKEND_DIR / "data")).resolve()
FRONTEND_BUILD_DIR = str(Path(os.getenv("FRONTEND_BUILD_DIR", "../build"))) FRONTEND_BUILD_DIR = Path(os.getenv("FRONTEND_BUILD_DIR", BASE_DIR / "build")).resolve()
try: try:
with open(f"{DATA_DIR}/config.json", "r") as f: CONFIG_DATA = json.loads((DATA_DIR / "config.json").read_text())
CONFIG_DATA = json.load(f)
except: except:
CONFIG_DATA = {} CONFIG_DATA = {}
@ -279,11 +285,11 @@ JWT_EXPIRES_IN = PersistentConfig(
# Static DIR # Static DIR
#################################### ####################################
STATIC_DIR = str(Path(os.getenv("STATIC_DIR", "./static")).resolve()) STATIC_DIR = Path(os.getenv("STATIC_DIR", BACKEND_DIR / "static")).resolve()
frontend_favicon = f"{FRONTEND_BUILD_DIR}/favicon.png" frontend_favicon = FRONTEND_BUILD_DIR / "favicon.png"
if os.path.exists(frontend_favicon): if frontend_favicon.exists():
shutil.copyfile(frontend_favicon, f"{STATIC_DIR}/favicon.png") shutil.copyfile(frontend_favicon, STATIC_DIR / "favicon.png")
else: else:
logging.warning(f"Frontend favicon not found at {frontend_favicon}") logging.warning(f"Frontend favicon not found at {frontend_favicon}")
@ -378,6 +384,13 @@ if not os.path.exists(LITELLM_CONFIG_PATH):
# OLLAMA_BASE_URL # OLLAMA_BASE_URL
#################################### ####################################
ENABLE_OLLAMA_API = PersistentConfig(
"ENABLE_OLLAMA_API",
"ollama.enable",
os.environ.get("ENABLE_OLLAMA_API", "True").lower() == "true",
)
OLLAMA_API_BASE_URL = os.environ.get( OLLAMA_API_BASE_URL = os.environ.get(
"OLLAMA_API_BASE_URL", "http://localhost:11434/api" "OLLAMA_API_BASE_URL", "http://localhost:11434/api"
) )

View File

@ -8,6 +8,7 @@ import sys
import logging import logging
import aiohttp import aiohttp
import requests import requests
import mimetypes
from fastapi import FastAPI, Request, Depends, status from fastapi import FastAPI, Request, Depends, status
from fastapi.staticfiles import StaticFiles from fastapi.staticfiles import StaticFiles
@ -437,6 +438,7 @@ app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache") app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")
if os.path.exists(FRONTEND_BUILD_DIR): if os.path.exists(FRONTEND_BUILD_DIR):
mimetypes.add_type("text/javascript", ".js")
app.mount( app.mount(
"/", "/",
SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True), SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),

View File

@ -0,0 +1,60 @@
import base64
import os
import random
import secrets
from pathlib import Path

import typer
import uvicorn
app = typer.Typer()
KEY_FILE = Path.cwd() / ".webui_secret_key"
if (frontend_build_dir := Path(__file__).parent / "frontend").exists():
os.environ["FRONTEND_BUILD_DIR"] = str(frontend_build_dir)
@app.command()
def serve(
host: str = "0.0.0.0",
port: int = 8080,
):
if os.getenv("WEBUI_SECRET_KEY") is None:
typer.echo(
"Loading WEBUI_SECRET_KEY from file, not provided as an environment variable."
)
if not KEY_FILE.exists():
typer.echo(f"Generating a new secret key and saving it to {KEY_FILE}")
KEY_FILE.write_bytes(base64.b64encode(random.randbytes(12)))
typer.echo(f"Loading WEBUI_SECRET_KEY from {KEY_FILE}")
os.environ["WEBUI_SECRET_KEY"] = KEY_FILE.read_text()
if os.getenv("USE_CUDA_DOCKER", "false") == "true":
typer.echo(
"CUDA is enabled, appending LD_LIBRARY_PATH to include torch/cudnn & cublas libraries."
)
LD_LIBRARY_PATH = os.getenv("LD_LIBRARY_PATH", "").split(":")
os.environ["LD_LIBRARY_PATH"] = ":".join(
LD_LIBRARY_PATH
+ [
"/usr/local/lib/python3.11/site-packages/torch/lib",
"/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib",
]
)
import main # we need set environment variables before importing main
uvicorn.run(main.app, host=host, port=port, forwarded_allow_ips="*")
@app.command()
def dev(
host: str = "0.0.0.0",
port: int = 8080,
reload: bool = True,
):
uvicorn.run(
"main:app", host=host, port=port, reload=reload, forwarded_allow_ips="*"
)
if __name__ == "__main__":
app()

View File

@ -0,0 +1,43 @@
litellm_settings:
drop_params: true
model_list:
- model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.1'
litellm_params:
model: huggingface/mistralai/Mistral-7B-Instruct-v0.1
api_key: os.environ/HF_TOKEN
max_tokens: 1024
- model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.2'
litellm_params:
model: huggingface/mistralai/Mistral-7B-Instruct-v0.2
api_key: os.environ/HF_TOKEN
max_tokens: 1024
- model_name: 'HuggingFace: Meta: Llama 3 8B Instruct'
litellm_params:
model: huggingface/meta-llama/Meta-Llama-3-8B-Instruct
api_key: os.environ/HF_TOKEN
max_tokens: 2047
- model_name: 'HuggingFace: Mistral: Mixtral 8x7B Instruct v0.1'
litellm_params:
model: huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1
api_key: os.environ/HF_TOKEN
max_tokens: 8192
- model_name: 'HuggingFace: Microsoft: Phi-3 Mini-4K-Instruct'
litellm_params:
model: huggingface/microsoft/Phi-3-mini-4k-instruct
api_key: os.environ/HF_TOKEN
max_tokens: 1024
- model_name: 'HuggingFace: Google: Gemma 7B 1.1'
litellm_params:
model: huggingface/google/gemma-1.1-7b-it
api_key: os.environ/HF_TOKEN
max_tokens: 1024
- model_name: 'HuggingFace: Yi-1.5 34B Chat'
litellm_params:
model: huggingface/01-ai/Yi-1.5-34B-Chat
api_key: os.environ/HF_TOKEN
max_tokens: 1024
- model_name: 'HuggingFace: Nous Research: Nous Hermes 2 Mixtral 8x7B DPO'
litellm_params:
model: huggingface/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
api_key: os.environ/HF_TOKEN
max_tokens: 2048

View File

@ -30,4 +30,34 @@ if [ "$USE_CUDA_DOCKER" = "true" ]; then
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.11/site-packages/torch/lib:/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib" export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.11/site-packages/torch/lib:/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib"
fi fi
# Check if SPACE_ID is set, if so, configure for space
if [ -n "$SPACE_ID" ]; then
echo "Configuring for HuggingFace Space deployment"
# Copy litellm_config.yaml with specified ownership
echo "Copying litellm_config.yaml to the desired location with specified ownership..."
cp -f ./space/litellm_config.yaml ./data/litellm/config.yaml
if [ -n "$ADMIN_USER_EMAIL" ] && [ -n "$ADMIN_USER_PASSWORD" ]; then
echo "Admin user configured, creating"
WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*' &
webui_pid=$!
echo "Waiting for webui to start..."
while ! curl -s http://localhost:8080/health > /dev/null; do
sleep 1
done
echo "Creating admin user..."
curl \
-X POST "http://localhost:8080/api/v1/auths/signup" \
-H "accept: application/json" \
-H "Content-Type: application/json" \
-d "{ \"email\": \"${ADMIN_USER_EMAIL}\", \"password\": \"${ADMIN_USER_PASSWORD}\", \"name\": \"Admin\" }"
echo "Shutting down webui..."
kill $webui_pid
fi
export WEBUI_URL=${SPACE_HOST}
fi
WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" exec uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*' WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" exec uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*'

21
hatch_build.py Normal file
View File

@ -0,0 +1,21 @@
# noqa: INP001
import shutil
import subprocess
from sys import stderr
from hatchling.builders.hooks.plugin.interface import BuildHookInterface
class CustomBuildHook(BuildHookInterface):
def initialize(self, version, build_data):
super().initialize(version, build_data)
stderr.write(">>> Building Open Webui frontend\n")
npm = shutil.which("npm")
if npm is None:
raise RuntimeError(
"NodeJS `npm` is required for building Open Webui but it was not found"
)
stderr.write("### npm install\n")
subprocess.run([npm, "install"], check=True) # noqa: S603
stderr.write("\n### npm run build\n")
subprocess.run([npm, "run", "build"], check=True) # noqa: S603

4
package-lock.json generated
View File

@ -1,12 +1,12 @@
{ {
"name": "open-webui", "name": "open-webui",
"version": "0.1.125", "version": "0.2.0.dev1",
"lockfileVersion": 3, "lockfileVersion": 3,
"requires": true, "requires": true,
"packages": { "packages": {
"": { "": {
"name": "open-webui", "name": "open-webui",
"version": "0.1.125", "version": "0.2.0.dev1",
"dependencies": { "dependencies": {
"@pyscript/core": "^0.4.32", "@pyscript/core": "^0.4.32",
"@sveltejs/adapter-node": "^1.3.1", "@sveltejs/adapter-node": "^1.3.1",

View File

@ -1,6 +1,6 @@
{ {
"name": "open-webui", "name": "open-webui",
"version": "0.1.125", "version": "0.2.0.dev1",
"private": true, "private": true,
"scripts": { "scripts": {
"dev": "npm run pyodide:fetch && vite dev --host", "dev": "npm run pyodide:fetch && vite dev --host",
@ -13,7 +13,7 @@
"lint:types": "npm run check", "lint:types": "npm run check",
"lint:backend": "pylint backend/", "lint:backend": "pylint backend/",
"format": "prettier --plugin-search-dir --write \"**/*.{js,ts,svelte,css,md,html,json}\"", "format": "prettier --plugin-search-dir --write \"**/*.{js,ts,svelte,css,md,html,json}\"",
"format:backend": "black . --exclude \"/venv/\"", "format:backend": "black . --exclude \".venv/|/venv/\"",
"i18n:parse": "i18next --config i18next-parser.config.ts && prettier --write \"src/lib/i18n/**/*.{js,json}\"", "i18n:parse": "i18next --config i18next-parser.config.ts && prettier --write \"src/lib/i18n/**/*.{js,json}\"",
"cy:open": "cypress open", "cy:open": "cypress open",
"test:frontend": "vitest", "test:frontend": "vitest",

115
pyproject.toml Normal file
View File

@ -0,0 +1,115 @@
[project]
name = "open-webui"
description = "Open WebUI (Formerly Ollama WebUI)"
authors = [
{ name = "Timothy Jaeryang Baek", email = "tim@openwebui.com" }
]
license = { file = "LICENSE" }
dependencies = [
"fastapi==0.109.2",
"uvicorn[standard]==0.22.0",
"pydantic==2.7.1",
"python-multipart==0.0.9",
"Flask==3.0.3",
"Flask-Cors==4.0.0",
"python-socketio==5.11.2",
"python-jose==3.3.0",
"passlib[bcrypt]==1.7.4",
"requests==2.31.0",
"aiohttp==3.9.5",
"peewee==3.17.3",
"peewee-migrate==1.12.2",
"psycopg2-binary==2.9.9",
"PyMySQL==1.1.0",
"bcrypt==4.1.2",
"litellm[proxy]==1.35.28",
"boto3==1.34.95",
"argon2-cffi==23.1.0",
"APScheduler==3.10.4",
"google-generativeai==0.5.2",
"langchain==0.1.16",
"langchain-community==0.0.34",
"langchain-chroma==0.1.0",
"fake-useragent==1.5.1",
"chromadb==0.4.24",
"sentence-transformers==2.7.0",
"pypdf==4.2.0",
"docx2txt==0.8",
"unstructured==0.11.8",
"Markdown==3.6",
"pypandoc==1.13",
"pandas==2.2.2",
"openpyxl==3.1.2",
"pyxlsb==1.0.10",
"xlrd==2.0.1",
"validators==0.28.1",
"opencv-python-headless==4.9.0.80",
"rapidocr-onnxruntime==1.2.3",
"fpdf2==2.7.8",
"rank-bm25==0.2.2",
"faster-whisper==1.0.1",
"PyJWT[crypto]==2.8.0",
"black==24.4.2",
"langfuse==2.27.3",
"youtube-transcript-api==0.6.2",
"pytube",
]
readme = "README.md"
requires-python = ">= 3.11, < 3.12.0a1"
dynamic = ["version"]
classifiers = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.11",
"Topic :: Communications :: Chat",
"Topic :: Multimedia",
]
[project.scripts]
open-webui = "open_webui:app"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.rye]
managed = true
dev-dependencies = []
[tool.hatch.metadata]
allow-direct-references = true
[tool.hatch.version]
path = "package.json"
pattern = '"version":\s*"(?P<version>[^"]+)"'
[tool.hatch.build.hooks.custom] # keep this for reading hooks from `hatch_build.py`
[tool.hatch.build.targets.wheel]
sources = ["backend"]
exclude = [
".dockerignore",
".gitignore",
".webui_secret_key",
"dev.sh",
"requirements.txt",
"start.sh",
"start_windows.bat",
"webui.db",
"chroma.sqlite3",
]
force-include = { "CHANGELOG.md" = "open_webui/CHANGELOG.md", build = "open_webui/frontend" }

681
requirements-dev.lock Normal file
View File

@ -0,0 +1,681 @@
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
-e file:.
aiohttp==3.9.5
# via langchain
# via langchain-community
# via litellm
# via open-webui
aiosignal==1.3.1
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.3.0
# via httpx
# via openai
# via starlette
# via watchfiles
apscheduler==3.10.4
# via litellm
# via open-webui
argon2-cffi==23.1.0
# via open-webui
argon2-cffi-bindings==21.2.0
# via argon2-cffi
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
attrs==23.2.0
# via aiohttp
av==11.0.0
# via faster-whisper
backoff==2.2.1
# via langfuse
# via litellm
# via posthog
# via unstructured
bcrypt==4.1.2
# via chromadb
# via open-webui
# via passlib
beautifulsoup4==4.12.3
# via unstructured
bidict==0.23.1
# via python-socketio
black==24.4.2
# via open-webui
blinker==1.8.2
# via flask
boto3==1.34.95
# via open-webui
botocore==1.34.103
# via boto3
# via s3transfer
build==1.2.1
# via chromadb
cachetools==5.3.3
# via google-auth
certifi==2024.2.2
# via httpcore
# via httpx
# via kubernetes
# via pulsar-client
# via requests
# via unstructured-client
cffi==1.16.0
# via argon2-cffi-bindings
# via cryptography
chardet==5.2.0
# via unstructured
charset-normalizer==3.3.2
# via requests
# via unstructured-client
chroma-hnswlib==0.7.3
# via chromadb
chromadb==0.4.24
# via langchain-chroma
# via open-webui
click==8.1.7
# via black
# via flask
# via litellm
# via nltk
# via peewee-migrate
# via rq
# via typer
# via uvicorn
coloredlogs==15.0.1
# via onnxruntime
cryptography==42.0.7
# via litellm
# via pyjwt
ctranslate2==4.2.1
# via faster-whisper
dataclasses-json==0.6.6
# via langchain
# via langchain-community
# via unstructured
# via unstructured-client
deepdiff==7.0.1
# via unstructured-client
defusedxml==0.7.1
# via fpdf2
deprecated==1.2.14
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via openai
dnspython==2.6.1
# via email-validator
docx2txt==0.8
# via open-webui
ecdsa==0.19.0
# via python-jose
email-validator==2.1.1
# via pydantic
emoji==2.11.1
# via unstructured
et-xmlfile==1.1.0
# via openpyxl
fake-useragent==1.5.1
# via open-webui
fastapi==0.109.2
# via chromadb
# via fastapi-sso
# via langchain-chroma
# via litellm
# via open-webui
fastapi-sso==0.10.0
# via litellm
faster-whisper==1.0.1
# via open-webui
filelock==3.14.0
# via huggingface-hub
# via torch
# via transformers
filetype==1.2.0
# via unstructured
flask==3.0.3
# via flask-cors
# via open-webui
flask-cors==4.0.0
# via open-webui
flatbuffers==24.3.25
# via onnxruntime
fonttools==4.51.0
# via fpdf2
fpdf2==2.7.8
# via open-webui
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.3.1
# via huggingface-hub
# via torch
google-ai-generativelanguage==0.6.2
# via google-generativeai
google-api-core==2.19.0
# via google-ai-generativelanguage
# via google-api-python-client
# via google-generativeai
google-api-python-client==2.129.0
# via google-generativeai
google-auth==2.29.0
# via google-ai-generativelanguage
# via google-api-core
# via google-api-python-client
# via google-auth-httplib2
# via google-generativeai
# via kubernetes
google-auth-httplib2==0.2.0
# via google-api-python-client
google-generativeai==0.5.2
# via open-webui
googleapis-common-protos==1.63.0
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio==1.63.0
# via chromadb
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio-status==1.62.2
# via google-api-core
gunicorn==21.2.0
# via litellm
h11==0.14.0
# via httpcore
# via uvicorn
# via wsproto
httpcore==1.0.5
# via httpx
httplib2==0.22.0
# via google-api-python-client
# via google-auth-httplib2
httptools==0.6.1
# via uvicorn
httpx==0.27.0
# via fastapi-sso
# via langfuse
# via openai
huggingface-hub==0.23.0
# via faster-whisper
# via sentence-transformers
# via tokenizers
# via transformers
humanfriendly==10.0
# via coloredlogs
idna==3.7
# via anyio
# via email-validator
# via httpx
# via langfuse
# via requests
# via unstructured-client
# via yarl
importlib-metadata==7.0.0
# via litellm
# via opentelemetry-api
importlib-resources==6.4.0
# via chromadb
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via flask
# via litellm
# via torch
jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
# via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain
# via langchain-core
jsonpath-python==1.0.6
# via unstructured-client
jsonpointer==2.4
# via jsonpatch
kubernetes==29.0.0
# via chromadb
langchain==0.1.16
# via open-webui
langchain-chroma==0.1.0
# via open-webui
langchain-community==0.0.34
# via langchain
# via open-webui
langchain-core==0.1.52
# via langchain
# via langchain-chroma
# via langchain-community
# via langchain-text-splitters
langchain-text-splitters==0.0.1
# via langchain
langdetect==1.0.9
# via unstructured
langfuse==2.27.3
# via open-webui
langsmith==0.1.57
# via langchain
# via langchain-community
# via langchain-core
litellm==1.35.28
# via open-webui
lxml==5.2.2
# via unstructured
markdown==3.6
# via open-webui
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via jinja2
# via werkzeug
marshmallow==3.21.2
# via dataclasses-json
# via unstructured-client
mdurl==0.1.2
# via markdown-it-py
mmh3==4.1.0
# via chromadb
monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.0.5
# via aiohttp
# via yarl
mypy-extensions==1.0.0
# via black
# via typing-inspect
# via unstructured-client
networkx==3.3
# via torch
nltk==3.8.1
# via unstructured
numpy==1.26.4
# via chroma-hnswlib
# via chromadb
# via ctranslate2
# via langchain
# via langchain-chroma
# via langchain-community
# via onnxruntime
# via opencv-python
# via opencv-python-headless
# via pandas
# via rank-bm25
# via rapidocr-onnxruntime
# via scikit-learn
# via scipy
# via sentence-transformers
# via shapely
# via transformers
# via unstructured
oauthlib==3.2.2
# via fastapi-sso
# via kubernetes
# via requests-oauthlib
onnxruntime==1.17.3
# via chromadb
# via faster-whisper
# via rapidocr-onnxruntime
openai==1.28.1
# via litellm
opencv-python==4.9.0.80
# via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80
# via open-webui
openpyxl==3.1.2
# via open-webui
opentelemetry-api==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-instrumentation
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-exporter-otlp-proto-common==1.24.0
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-exporter-otlp-proto-grpc==1.24.0
# via chromadb
opentelemetry-instrumentation==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-asgi==0.45b0
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-fastapi==0.45b0
# via chromadb
opentelemetry-proto==1.24.0
# via opentelemetry-exporter-otlp-proto-common
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-sdk==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-semantic-conventions==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-util-http==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
ordered-set==4.1.0
# via deepdiff
orjson==3.10.3
# via chromadb
# via langsmith
# via litellm
overrides==7.7.0
# via chromadb
packaging==23.2
# via black
# via build
# via gunicorn
# via huggingface-hub
# via langchain-core
# via langfuse
# via marshmallow
# via onnxruntime
# via transformers
# via unstructured-client
pandas==2.2.2
# via open-webui
passlib==1.7.4
# via open-webui
pathspec==0.12.1
# via black
peewee==3.17.3
# via open-webui
# via peewee-migrate
peewee-migrate==1.12.2
# via open-webui
pillow==10.3.0
# via fpdf2
# via rapidocr-onnxruntime
# via sentence-transformers
platformdirs==4.2.1
# via black
posthog==3.5.0
# via chromadb
proto-plus==1.23.0
# via google-ai-generativelanguage
# via google-api-core
protobuf==4.25.3
# via google-ai-generativelanguage
# via google-api-core
# via google-generativeai
# via googleapis-common-protos
# via grpcio-status
# via onnxruntime
# via opentelemetry-proto
# via proto-plus
psycopg2-binary==2.9.9
# via open-webui
pulsar-client==3.5.0
# via chromadb
pyasn1==0.6.0
# via pyasn1-modules
# via python-jose
# via rsa
pyasn1-modules==0.4.0
# via google-auth
pyclipper==1.3.0.post5
# via rapidocr-onnxruntime
pycparser==2.22
# via cffi
pydantic==2.7.1
# via chromadb
# via fastapi
# via fastapi-sso
# via google-generativeai
# via langchain
# via langchain-core
# via langfuse
# via langsmith
# via open-webui
# via openai
pydantic-core==2.18.2
# via pydantic
pygments==2.18.0
# via rich
pyjwt==2.8.0
# via litellm
# via open-webui
pymysql==1.1.0
# via open-webui
pypandoc==1.13
# via open-webui
pyparsing==3.1.2
# via httplib2
pypdf==4.2.0
# via open-webui
# via unstructured-client
pypika==0.48.9
# via chromadb
pyproject-hooks==1.1.0
# via build
python-dateutil==2.9.0.post0
# via botocore
# via kubernetes
# via pandas
# via posthog
# via unstructured-client
python-dotenv==1.0.1
# via litellm
# via uvicorn
python-engineio==4.9.0
# via python-socketio
python-iso639==2024.4.27
# via unstructured
python-jose==3.3.0
# via open-webui
python-magic==0.4.27
# via unstructured
python-multipart==0.0.9
# via litellm
# via open-webui
python-socketio==5.11.2
# via open-webui
pytube==15.0.0
# via open-webui
pytz==2024.1
# via apscheduler
# via pandas
pyxlsb==1.0.10
# via open-webui
pyyaml==6.0.1
# via chromadb
# via ctranslate2
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langchain-core
# via litellm
# via rapidocr-onnxruntime
# via transformers
# via uvicorn
rank-bm25==0.2.2
# via open-webui
rapidfuzz==3.9.0
# via unstructured
rapidocr-onnxruntime==1.2.3
# via open-webui
redis==5.0.4
# via rq
regex==2024.5.10
# via nltk
# via tiktoken
# via transformers
requests==2.31.0
# via chromadb
# via google-api-core
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langsmith
# via litellm
# via open-webui
# via posthog
# via requests-oauthlib
# via tiktoken
# via transformers
# via unstructured
# via unstructured-client
# via youtube-transcript-api
requests-oauthlib==2.0.0
# via kubernetes
rich==13.7.1
# via typer
rq==1.16.2
# via litellm
rsa==4.9
# via google-auth
# via python-jose
s3transfer==0.10.1
# via boto3
safetensors==0.4.3
# via transformers
scikit-learn==1.4.2
# via sentence-transformers
scipy==1.13.0
# via scikit-learn
# via sentence-transformers
sentence-transformers==2.7.0
# via open-webui
setuptools==69.5.1
# via ctranslate2
# via opentelemetry-instrumentation
shapely==2.0.4
# via rapidocr-onnxruntime
shellingham==1.5.4
# via typer
simple-websocket==1.0.0
# via python-engineio
six==1.16.0
# via apscheduler
# via ecdsa
# via kubernetes
# via langdetect
# via posthog
# via python-dateutil
# via rapidocr-onnxruntime
# via unstructured-client
sniffio==1.3.1
# via anyio
# via httpx
# via openai
soupsieve==2.5
# via beautifulsoup4
sqlalchemy==2.0.30
# via langchain
# via langchain-community
starlette==0.36.3
# via fastapi
sympy==1.12
# via onnxruntime
# via torch
tabulate==0.9.0
# via unstructured
tenacity==8.3.0
# via chromadb
# via langchain
# via langchain-community
# via langchain-core
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.6.0
# via litellm
tokenizers==0.15.2
# via chromadb
# via faster-whisper
# via litellm
# via transformers
torch==2.3.0
# via sentence-transformers
tqdm==4.66.4
# via chromadb
# via google-generativeai
# via huggingface-hub
# via nltk
# via openai
# via sentence-transformers
# via transformers
transformers==4.39.3
# via sentence-transformers
typer==0.12.3
# via chromadb
typing-extensions==4.11.0
# via chromadb
# via fastapi
# via google-generativeai
# via huggingface-hub
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
# via sqlalchemy
# via torch
# via typer
# via typing-inspect
# via unstructured
# via unstructured-client
typing-inspect==0.9.0
# via dataclasses-json
# via unstructured-client
tzdata==2024.1
# via pandas
tzlocal==5.2
# via apscheduler
unstructured==0.11.8
# via open-webui
unstructured-client==0.22.0
# via unstructured
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.2.1
# via botocore
# via kubernetes
# via requests
# via unstructured-client
uvicorn==0.22.0
# via chromadb
# via litellm
# via open-webui
uvloop==0.19.0
# via uvicorn
validators==0.28.1
# via open-webui
watchfiles==0.21.0
# via uvicorn
websocket-client==1.8.0
# via kubernetes
websockets==12.0
# via uvicorn
werkzeug==3.0.3
# via flask
wrapt==1.16.0
# via deprecated
# via langfuse
# via opentelemetry-instrumentation
# via unstructured
wsproto==1.2.0
# via simple-websocket
xlrd==2.0.1
# via open-webui
yarl==1.9.4
# via aiohttp
youtube-transcript-api==0.6.2
# via open-webui
zipp==3.18.1
# via importlib-metadata

681
requirements.lock Normal file
View File

@ -0,0 +1,681 @@
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
-e file:.
aiohttp==3.9.5
# via langchain
# via langchain-community
# via litellm
# via open-webui
aiosignal==1.3.1
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.3.0
# via httpx
# via openai
# via starlette
# via watchfiles
apscheduler==3.10.4
# via litellm
# via open-webui
argon2-cffi==23.1.0
# via open-webui
argon2-cffi-bindings==21.2.0
# via argon2-cffi
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
attrs==23.2.0
# via aiohttp
av==11.0.0
# via faster-whisper
backoff==2.2.1
# via langfuse
# via litellm
# via posthog
# via unstructured
bcrypt==4.1.2
# via chromadb
# via open-webui
# via passlib
beautifulsoup4==4.12.3
# via unstructured
bidict==0.23.1
# via python-socketio
black==24.4.2
# via open-webui
blinker==1.8.2
# via flask
boto3==1.34.95
# via open-webui
botocore==1.34.103
# via boto3
# via s3transfer
build==1.2.1
# via chromadb
cachetools==5.3.3
# via google-auth
certifi==2024.2.2
# via httpcore
# via httpx
# via kubernetes
# via pulsar-client
# via requests
# via unstructured-client
cffi==1.16.0
# via argon2-cffi-bindings
# via cryptography
chardet==5.2.0
# via unstructured
charset-normalizer==3.3.2
# via requests
# via unstructured-client
chroma-hnswlib==0.7.3
# via chromadb
chromadb==0.4.24
# via langchain-chroma
# via open-webui
click==8.1.7
# via black
# via flask
# via litellm
# via nltk
# via peewee-migrate
# via rq
# via typer
# via uvicorn
coloredlogs==15.0.1
# via onnxruntime
cryptography==42.0.7
# via litellm
# via pyjwt
ctranslate2==4.2.1
# via faster-whisper
dataclasses-json==0.6.6
# via langchain
# via langchain-community
# via unstructured
# via unstructured-client
deepdiff==7.0.1
# via unstructured-client
defusedxml==0.7.1
# via fpdf2
deprecated==1.2.14
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via openai
dnspython==2.6.1
# via email-validator
docx2txt==0.8
# via open-webui
ecdsa==0.19.0
# via python-jose
email-validator==2.1.1
# via pydantic
emoji==2.11.1
# via unstructured
et-xmlfile==1.1.0
# via openpyxl
fake-useragent==1.5.1
# via open-webui
fastapi==0.109.2
# via chromadb
# via fastapi-sso
# via langchain-chroma
# via litellm
# via open-webui
fastapi-sso==0.10.0
# via litellm
faster-whisper==1.0.1
# via open-webui
filelock==3.14.0
# via huggingface-hub
# via torch
# via transformers
filetype==1.2.0
# via unstructured
flask==3.0.3
# via flask-cors
# via open-webui
flask-cors==4.0.0
# via open-webui
flatbuffers==24.3.25
# via onnxruntime
fonttools==4.51.0
# via fpdf2
fpdf2==2.7.8
# via open-webui
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.3.1
# via huggingface-hub
# via torch
google-ai-generativelanguage==0.6.2
# via google-generativeai
google-api-core==2.19.0
# via google-ai-generativelanguage
# via google-api-python-client
# via google-generativeai
google-api-python-client==2.129.0
# via google-generativeai
google-auth==2.29.0
# via google-ai-generativelanguage
# via google-api-core
# via google-api-python-client
# via google-auth-httplib2
# via google-generativeai
# via kubernetes
google-auth-httplib2==0.2.0
# via google-api-python-client
google-generativeai==0.5.2
# via open-webui
googleapis-common-protos==1.63.0
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio==1.63.0
# via chromadb
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio-status==1.62.2
# via google-api-core
gunicorn==21.2.0
# via litellm
h11==0.14.0
# via httpcore
# via uvicorn
# via wsproto
httpcore==1.0.5
# via httpx
httplib2==0.22.0
# via google-api-python-client
# via google-auth-httplib2
httptools==0.6.1
# via uvicorn
httpx==0.27.0
# via fastapi-sso
# via langfuse
# via openai
huggingface-hub==0.23.0
# via faster-whisper
# via sentence-transformers
# via tokenizers
# via transformers
humanfriendly==10.0
# via coloredlogs
idna==3.7
# via anyio
# via email-validator
# via httpx
# via langfuse
# via requests
# via unstructured-client
# via yarl
importlib-metadata==7.0.0
# via litellm
# via opentelemetry-api
importlib-resources==6.4.0
# via chromadb
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via flask
# via litellm
# via torch
jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
# via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain
# via langchain-core
jsonpath-python==1.0.6
# via unstructured-client
jsonpointer==2.4
# via jsonpatch
kubernetes==29.0.0
# via chromadb
langchain==0.1.16
# via open-webui
langchain-chroma==0.1.0
# via open-webui
langchain-community==0.0.34
# via langchain
# via open-webui
langchain-core==0.1.52
# via langchain
# via langchain-chroma
# via langchain-community
# via langchain-text-splitters
langchain-text-splitters==0.0.1
# via langchain
langdetect==1.0.9
# via unstructured
langfuse==2.27.3
# via open-webui
langsmith==0.1.57
# via langchain
# via langchain-community
# via langchain-core
litellm==1.35.28
# via open-webui
lxml==5.2.2
# via unstructured
markdown==3.6
# via open-webui
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via jinja2
# via werkzeug
marshmallow==3.21.2
# via dataclasses-json
# via unstructured-client
mdurl==0.1.2
# via markdown-it-py
mmh3==4.1.0
# via chromadb
monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.0.5
# via aiohttp
# via yarl
mypy-extensions==1.0.0
# via black
# via typing-inspect
# via unstructured-client
networkx==3.3
# via torch
nltk==3.8.1
# via unstructured
numpy==1.26.4
# via chroma-hnswlib
# via chromadb
# via ctranslate2
# via langchain
# via langchain-chroma
# via langchain-community
# via onnxruntime
# via opencv-python
# via opencv-python-headless
# via pandas
# via rank-bm25
# via rapidocr-onnxruntime
# via scikit-learn
# via scipy
# via sentence-transformers
# via shapely
# via transformers
# via unstructured
oauthlib==3.2.2
# via fastapi-sso
# via kubernetes
# via requests-oauthlib
onnxruntime==1.17.3
# via chromadb
# via faster-whisper
# via rapidocr-onnxruntime
openai==1.28.1
# via litellm
opencv-python==4.9.0.80
# via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80
# via open-webui
openpyxl==3.1.2
# via open-webui
opentelemetry-api==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-instrumentation
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-exporter-otlp-proto-common==1.24.0
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-exporter-otlp-proto-grpc==1.24.0
# via chromadb
opentelemetry-instrumentation==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-asgi==0.45b0
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-fastapi==0.45b0
# via chromadb
opentelemetry-proto==1.24.0
# via opentelemetry-exporter-otlp-proto-common
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-sdk==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-semantic-conventions==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-util-http==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
ordered-set==4.1.0
# via deepdiff
orjson==3.10.3
# via chromadb
# via langsmith
# via litellm
overrides==7.7.0
# via chromadb
packaging==23.2
# via black
# via build
# via gunicorn
# via huggingface-hub
# via langchain-core
# via langfuse
# via marshmallow
# via onnxruntime
# via transformers
# via unstructured-client
pandas==2.2.2
# via open-webui
passlib==1.7.4
# via open-webui
pathspec==0.12.1
# via black
peewee==3.17.3
# via open-webui
# via peewee-migrate
peewee-migrate==1.12.2
# via open-webui
pillow==10.3.0
# via fpdf2
# via rapidocr-onnxruntime
# via sentence-transformers
platformdirs==4.2.1
# via black
posthog==3.5.0
# via chromadb
proto-plus==1.23.0
# via google-ai-generativelanguage
# via google-api-core
protobuf==4.25.3
# via google-ai-generativelanguage
# via google-api-core
# via google-generativeai
# via googleapis-common-protos
# via grpcio-status
# via onnxruntime
# via opentelemetry-proto
# via proto-plus
psycopg2-binary==2.9.9
# via open-webui
pulsar-client==3.5.0
# via chromadb
pyasn1==0.6.0
# via pyasn1-modules
# via python-jose
# via rsa
pyasn1-modules==0.4.0
# via google-auth
pyclipper==1.3.0.post5
# via rapidocr-onnxruntime
pycparser==2.22
# via cffi
pydantic==2.7.1
# via chromadb
# via fastapi
# via fastapi-sso
# via google-generativeai
# via langchain
# via langchain-core
# via langfuse
# via langsmith
# via open-webui
# via openai
pydantic-core==2.18.2
# via pydantic
pygments==2.18.0
# via rich
pyjwt==2.8.0
# via litellm
# via open-webui
pymysql==1.1.0
# via open-webui
pypandoc==1.13
# via open-webui
pyparsing==3.1.2
# via httplib2
pypdf==4.2.0
# via open-webui
# via unstructured-client
pypika==0.48.9
# via chromadb
pyproject-hooks==1.1.0
# via build
python-dateutil==2.9.0.post0
# via botocore
# via kubernetes
# via pandas
# via posthog
# via unstructured-client
python-dotenv==1.0.1
# via litellm
# via uvicorn
python-engineio==4.9.0
# via python-socketio
python-iso639==2024.4.27
# via unstructured
python-jose==3.3.0
# via open-webui
python-magic==0.4.27
# via unstructured
python-multipart==0.0.9
# via litellm
# via open-webui
python-socketio==5.11.2
# via open-webui
pytube==15.0.0
# via open-webui
pytz==2024.1
# via apscheduler
# via pandas
pyxlsb==1.0.10
# via open-webui
pyyaml==6.0.1
# via chromadb
# via ctranslate2
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langchain-core
# via litellm
# via rapidocr-onnxruntime
# via transformers
# via uvicorn
rank-bm25==0.2.2
# via open-webui
rapidfuzz==3.9.0
# via unstructured
rapidocr-onnxruntime==1.2.3
# via open-webui
redis==5.0.4
# via rq
regex==2024.5.10
# via nltk
# via tiktoken
# via transformers
requests==2.31.0
# via chromadb
# via google-api-core
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langsmith
# via litellm
# via open-webui
# via posthog
# via requests-oauthlib
# via tiktoken
# via transformers
# via unstructured
# via unstructured-client
# via youtube-transcript-api
requests-oauthlib==2.0.0
# via kubernetes
rich==13.7.1
# via typer
rq==1.16.2
# via litellm
rsa==4.9
# via google-auth
# via python-jose
s3transfer==0.10.1
# via boto3
safetensors==0.4.3
# via transformers
scikit-learn==1.4.2
# via sentence-transformers
scipy==1.13.0
# via scikit-learn
# via sentence-transformers
sentence-transformers==2.7.0
# via open-webui
setuptools==69.5.1
# via ctranslate2
# via opentelemetry-instrumentation
shapely==2.0.4
# via rapidocr-onnxruntime
shellingham==1.5.4
# via typer
simple-websocket==1.0.0
# via python-engineio
six==1.16.0
# via apscheduler
# via ecdsa
# via kubernetes
# via langdetect
# via posthog
# via python-dateutil
# via rapidocr-onnxruntime
# via unstructured-client
sniffio==1.3.1
# via anyio
# via httpx
# via openai
soupsieve==2.5
# via beautifulsoup4
sqlalchemy==2.0.30
# via langchain
# via langchain-community
starlette==0.36.3
# via fastapi
sympy==1.12
# via onnxruntime
# via torch
tabulate==0.9.0
# via unstructured
tenacity==8.3.0
# via chromadb
# via langchain
# via langchain-community
# via langchain-core
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.6.0
# via litellm
tokenizers==0.15.2
# via chromadb
# via faster-whisper
# via litellm
# via transformers
torch==2.3.0
# via sentence-transformers
tqdm==4.66.4
# via chromadb
# via google-generativeai
# via huggingface-hub
# via nltk
# via openai
# via sentence-transformers
# via transformers
transformers==4.39.3
# via sentence-transformers
typer==0.12.3
# via chromadb
typing-extensions==4.11.0
# via chromadb
# via fastapi
# via google-generativeai
# via huggingface-hub
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
# via sqlalchemy
# via torch
# via typer
# via typing-inspect
# via unstructured
# via unstructured-client
typing-inspect==0.9.0
# via dataclasses-json
# via unstructured-client
tzdata==2024.1
# via pandas
tzlocal==5.2
# via apscheduler
unstructured==0.11.8
# via open-webui
unstructured-client==0.22.0
# via unstructured
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.2.1
# via botocore
# via kubernetes
# via requests
# via unstructured-client
uvicorn==0.22.0
# via chromadb
# via litellm
# via open-webui
uvloop==0.19.0
# via uvicorn
validators==0.28.1
# via open-webui
watchfiles==0.21.0
# via uvicorn
websocket-client==1.8.0
# via kubernetes
websockets==12.0
# via uvicorn
werkzeug==3.0.3
# via flask
wrapt==1.16.0
# via deprecated
# via langfuse
# via opentelemetry-instrumentation
# via unstructured
wsproto==1.2.0
# via simple-websocket
xlrd==2.0.1
# via open-webui
yarl==1.9.4
# via aiohttp
youtube-transcript-api==0.6.2
# via open-webui
zipp==3.18.1
# via importlib-metadata

View File

@ -1,6 +1,73 @@
import { OLLAMA_API_BASE_URL } from '$lib/constants'; import { OLLAMA_API_BASE_URL } from '$lib/constants';
import { promptTemplate } from '$lib/utils'; import { promptTemplate } from '$lib/utils';
// Fetch the Ollama API configuration (e.g. whether the Ollama API is enabled)
// from the backend. Throws a string error message on failure.
export const getOllamaConfig = async (token: string = '') => {
	let error = null;
	let config = null;

	try {
		const response = await fetch(`${OLLAMA_API_BASE_URL}/config`, {
			method: 'GET',
			headers: {
				Accept: 'application/json',
				'Content-Type': 'application/json',
				...(token && { authorization: `Bearer ${token}` })
			}
		});

		// Backend signals failure via non-2xx; its JSON body carries the detail.
		if (!response.ok) {
			throw await response.json();
		}

		config = await response.json();
	} catch (err: any) {
		console.log(err);
		if ('detail' in err) {
			error = err.detail;
		} else {
			error = 'Server connection failed';
		}
	}

	if (error) {
		throw error;
	}

	return config;
};
// Persist the Ollama API toggle on the backend. Returns the backend's JSON
// response; throws a string error message on failure.
export const updateOllamaConfig = async (token: string = '', enable_ollama_api: boolean) => {
	let error = null;
	let result = null;

	try {
		const response = await fetch(`${OLLAMA_API_BASE_URL}/config/update`, {
			method: 'POST',
			headers: {
				Accept: 'application/json',
				'Content-Type': 'application/json',
				...(token && { authorization: `Bearer ${token}` })
			},
			body: JSON.stringify({ enable_ollama_api })
		});

		// Backend signals failure via non-2xx; its JSON body carries the detail.
		if (!response.ok) {
			throw await response.json();
		}

		result = await response.json();
	} catch (err: any) {
		console.log(err);
		if ('detail' in err) {
			error = err.detail;
		} else {
			error = 'Server connection failed';
		}
	}

	if (error) {
		throw error;
	}

	return result;
};
export const getOllamaUrls = async (token: string = '') => { export const getOllamaUrls = async (token: string = '') => {
let error = null; let error = null;

View File

@ -123,7 +123,7 @@
} }
onMount(async () => { onMount(async () => {
if (!chatId) { if (!$chatId) {
await initNewChat(); await initNewChat();
} else { } else {
if (!($settings.saveChatHistory ?? true)) { if (!($settings.saveChatHistory ?? true)) {
@ -442,8 +442,7 @@
: undefined, : undefined,
...messages ...messages
] ]
.filter((message) => message) .filter((message) => message?.content?.trim())
.filter((message) => message.content != '')
.map((message, idx, arr) => { .map((message, idx, arr) => {
// Prepare the base message object // Prepare the base message object
const baseMessage = { const baseMessage = {
@ -703,7 +702,7 @@
: undefined, : undefined,
...messages ...messages
] ]
.filter((message) => message) .filter((message) => message?.content?.trim())
.map((message, idx, arr) => ({ .map((message, idx, arr) => ({
role: message.role, role: message.role,
...((message.files?.filter((file) => file.type === 'image').length > 0 ?? false) && ...((message.files?.filter((file) => file.type === 'image').length > 0 ?? false) &&

View File

@ -3,7 +3,13 @@
import { createEventDispatcher, onMount, getContext } from 'svelte'; import { createEventDispatcher, onMount, getContext } from 'svelte';
const dispatch = createEventDispatcher(); const dispatch = createEventDispatcher();
import { getOllamaUrls, getOllamaVersion, updateOllamaUrls } from '$lib/apis/ollama'; import {
getOllamaConfig,
getOllamaUrls,
getOllamaVersion,
updateOllamaConfig,
updateOllamaUrls
} from '$lib/apis/ollama';
import { import {
getOpenAIConfig, getOpenAIConfig,
getOpenAIKeys, getOpenAIKeys,
@ -26,6 +32,7 @@
let OPENAI_API_BASE_URLS = ['']; let OPENAI_API_BASE_URLS = [''];
let ENABLE_OPENAI_API = false; let ENABLE_OPENAI_API = false;
let ENABLE_OLLAMA_API = false;
const updateOpenAIHandler = async () => { const updateOpenAIHandler = async () => {
OPENAI_API_BASE_URLS = await updateOpenAIUrls(localStorage.token, OPENAI_API_BASE_URLS); OPENAI_API_BASE_URLS = await updateOpenAIUrls(localStorage.token, OPENAI_API_BASE_URLS);
@ -50,10 +57,13 @@
onMount(async () => { onMount(async () => {
if ($user.role === 'admin') { if ($user.role === 'admin') {
OLLAMA_BASE_URLS = await getOllamaUrls(localStorage.token); const ollamaConfig = await getOllamaConfig(localStorage.token);
const openaiConfig = await getOpenAIConfig(localStorage.token);
const config = await getOpenAIConfig(localStorage.token); ENABLE_OPENAI_API = openaiConfig.ENABLE_OPENAI_API;
ENABLE_OPENAI_API = config.ENABLE_OPENAI_API; ENABLE_OLLAMA_API = ollamaConfig.ENABLE_OLLAMA_API;
OLLAMA_BASE_URLS = await getOllamaUrls(localStorage.token);
OPENAI_API_BASE_URLS = await getOpenAIUrls(localStorage.token); OPENAI_API_BASE_URLS = await getOpenAIUrls(localStorage.token);
OPENAI_API_KEYS = await getOpenAIKeys(localStorage.token); OPENAI_API_KEYS = await getOpenAIKeys(localStorage.token);
@ -161,8 +171,20 @@
<hr class=" dark:border-gray-700" /> <hr class=" dark:border-gray-700" />
<div> <div class="pr-1.5 space-y-2">
<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Ollama Base URL')}</div> <div class="flex justify-between items-center text-sm">
<div class=" font-medium">{$i18n.t('Ollama API')}</div>
<div class="mt-1">
<Switch
bind:state={ENABLE_OLLAMA_API}
on:change={async () => {
updateOllamaConfig(localStorage.token, ENABLE_OLLAMA_API);
}}
/>
</div>
</div>
{#if ENABLE_OLLAMA_API}
<div class="flex w-full gap-1.5"> <div class="flex w-full gap-1.5">
<div class="flex-1 flex flex-col gap-2"> <div class="flex-1 flex flex-col gap-2">
{#each OLLAMA_BASE_URLS as url, idx} {#each OLLAMA_BASE_URLS as url, idx}
@ -216,9 +238,9 @@
{/each} {/each}
</div> </div>
<div class=""> <div class="flex">
<button <button
class="p-2.5 bg-gray-200 hover:bg-gray-300 dark:bg-gray-850 dark:hover:bg-gray-800 rounded-lg transition" class="self-center p-2 bg-gray-200 hover:bg-gray-300 dark:bg-gray-900 dark:hover:bg-gray-850 rounded-lg transition"
on:click={() => { on:click={() => {
updateOllamaUrlsHandler(); updateOllamaUrlsHandler();
}} }}
@ -250,6 +272,7 @@
{$i18n.t('Click here for help.')} {$i18n.t('Click here for help.')}
</a> </a>
</div> </div>
{/if}
</div> </div>
</div> </div>

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "حسنا دعنا نذهب!", "Okay, Let's Go!": "حسنا دعنا نذهب!",
"OLED Dark": "OLED داكن", "OLED Dark": "OLED داكن",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama الرابط الافتراضي", "Ollama API": "",
"Ollama Version": "Ollama الاصدار", "Ollama Version": "Ollama الاصدار",
"On": "تشغيل", "On": "تشغيل",
"Only": "فقط", "Only": "فقط",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "ОК, Нека започваме!", "Okay, Let's Go!": "ОК, Нека започваме!",
"OLED Dark": "OLED тъмно", "OLED Dark": "OLED тъмно",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama Базов URL", "Ollama API": "",
"Ollama Version": "Ollama Версия", "Ollama Version": "Ollama Версия",
"On": "Вкл.", "On": "Вкл.",
"Only": "Само", "Only": "Само",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "ঠিক আছে, চলুন যাই!", "Okay, Let's Go!": "ঠিক আছে, চলুন যাই!",
"OLED Dark": "OLED ডার্ক", "OLED Dark": "OLED ডার্ক",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama বেজ ইউআরএল", "Ollama API": "",
"Ollama Version": "Ollama ভার্সন", "Ollama Version": "Ollama ভার্সন",
"On": "চালু", "On": "চালু",
"Only": "শুধুমাত্র", "Only": "শুধুমাত্র",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "D'acord, Anem!", "Okay, Let's Go!": "D'acord, Anem!",
"OLED Dark": "OLED Fosc", "OLED Dark": "OLED Fosc",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "URL Base d'Ollama", "Ollama API": "",
"Ollama Version": "Versió d'Ollama", "Ollama Version": "Versió d'Ollama",
"On": "Activat", "On": "Activat",
"Only": "Només", "Only": "Només",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okay, los geht's!", "Okay, Let's Go!": "Okay, los geht's!",
"OLED Dark": "OLED Dunkel", "OLED Dark": "OLED Dunkel",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama Basis URL", "Ollama API": "",
"Ollama Version": "Ollama-Version", "Ollama Version": "Ollama-Version",
"On": "Ein", "On": "Ein",
"Only": "Nur", "Only": "Nur",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okay, Let's Go!", "Okay, Let's Go!": "Okay, Let's Go!",
"OLED Dark": "OLED Dark", "OLED Dark": "OLED Dark",
"Ollama": "", "Ollama": "",
"Ollama Base URL": "Ollama Base Bark", "Ollama API": "",
"Ollama Version": "Ollama Version", "Ollama Version": "Ollama Version",
"On": "On", "On": "On",
"Only": "Only", "Only": "Only",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "", "Okay, Let's Go!": "",
"OLED Dark": "", "OLED Dark": "",
"Ollama": "", "Ollama": "",
"Ollama Base URL": "", "Ollama API": "",
"Ollama Version": "", "Ollama Version": "",
"On": "", "On": "",
"Only": "", "Only": "",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "", "Okay, Let's Go!": "",
"OLED Dark": "", "OLED Dark": "",
"Ollama": "", "Ollama": "",
"Ollama Base URL": "", "Ollama API": "",
"Ollama Version": "", "Ollama Version": "",
"On": "", "On": "",
"Only": "", "Only": "",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Bien, ¡Vamos!", "Okay, Let's Go!": "Bien, ¡Vamos!",
"OLED Dark": "OLED oscuro", "OLED Dark": "OLED oscuro",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "URL base de Ollama", "Ollama API": "",
"Ollama Version": "Versión de Ollama", "Ollama Version": "Versión de Ollama",
"On": "Activado", "On": "Activado",
"Only": "Solamente", "Only": "Solamente",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "باشه، بزن بریم!", "Okay, Let's Go!": "باشه، بزن بریم!",
"OLED Dark": "OLED تیره", "OLED Dark": "OLED تیره",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "URL پایه اولاما", "Ollama API": "",
"Ollama Version": "نسخه اولاما", "Ollama Version": "نسخه اولاما",
"On": "روشن", "On": "روشن",
"Only": "فقط", "Only": "فقط",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Eikun menoksi!", "Okay, Let's Go!": "Eikun menoksi!",
"OLED Dark": "OLED-tumma", "OLED Dark": "OLED-tumma",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama-perus-URL", "Ollama API": "",
"Ollama Version": "Ollama-versio", "Ollama Version": "Ollama-versio",
"On": "Päällä", "On": "Päällä",
"Only": "Vain", "Only": "Vain",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okay, Allons-y !", "Okay, Let's Go!": "Okay, Allons-y !",
"OLED Dark": "OLED Sombre", "OLED Dark": "OLED Sombre",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "URL de Base Ollama", "Ollama API": "",
"Ollama Version": "Version Ollama", "Ollama Version": "Version Ollama",
"On": "Activé", "On": "Activé",
"Only": "Seulement", "Only": "Seulement",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "D'accord, allons-y !", "Okay, Let's Go!": "D'accord, allons-y !",
"OLED Dark": "OLED Sombre", "OLED Dark": "OLED Sombre",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "URL de Base Ollama", "Ollama API": "",
"Ollama Version": "Version Ollama", "Ollama Version": "Version Ollama",
"On": "Activé", "On": "Activé",
"Only": "Seulement", "Only": "Seulement",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "בסדר, בואו נתחיל!", "Okay, Let's Go!": "בסדר, בואו נתחיל!",
"OLED Dark": "OLED כהה", "OLED Dark": "OLED כהה",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "כתובת URL בסיסית של Ollama", "Ollama API": "",
"Ollama Version": "גרסת Ollama", "Ollama Version": "גרסת Ollama",
"On": "פועל", "On": "פועל",
"Only": "רק", "Only": "רק",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "ठीक है, चलिए चलते हैं!", "Okay, Let's Go!": "ठीक है, चलिए चलते हैं!",
"OLED Dark": "OLEDescuro", "OLED Dark": "OLEDescuro",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama URL", "Ollama API": "",
"Ollama Version": "Ollama Version", "Ollama Version": "Ollama Version",
"On": "चालू", "On": "चालू",
"Only": "केवल", "Only": "केवल",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "U redu, idemo!", "Okay, Let's Go!": "U redu, idemo!",
"OLED Dark": "OLED Tamno", "OLED Dark": "OLED Tamno",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Osnovni URL Ollama", "Ollama API": "",
"Ollama Version": "Verzija Ollama", "Ollama Version": "Verzija Ollama",
"On": "Uključeno", "On": "Uključeno",
"Only": "Samo", "Only": "Samo",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Ok, andiamo!", "Okay, Let's Go!": "Ok, andiamo!",
"OLED Dark": "OLED scuro", "OLED Dark": "OLED scuro",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "URL base Ollama", "Ollama API": "",
"Ollama Version": "Versione Ollama", "Ollama Version": "Versione Ollama",
"On": "Attivato", "On": "Attivato",
"Only": "Solo", "Only": "Solo",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "OK、始めましょう", "Okay, Let's Go!": "OK、始めましょう",
"OLED Dark": "OLED ダーク", "OLED Dark": "OLED ダーク",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama ベース URL", "Ollama API": "",
"Ollama Version": "Ollama バージョン", "Ollama Version": "Ollama バージョン",
"On": "オン", "On": "オン",
"Only": "のみ", "Only": "のみ",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "კარგი, წავედით!", "Okay, Let's Go!": "კარგი, წავედით!",
"OLED Dark": "OLED მუქი", "OLED Dark": "OLED მუქი",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama ბაზისური მისამართი", "Ollama API": "",
"Ollama Version": "Ollama ვერსია", "Ollama Version": "Ollama ვერსია",
"On": "ჩართვა", "On": "ჩართვა",
"Only": "მხოლოდ", "Only": "მხოლოდ",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "그렇습니다, 시작합시다!", "Okay, Let's Go!": "그렇습니다, 시작합시다!",
"OLED Dark": "OLED 어두운", "OLED Dark": "OLED 어두운",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama 기본 URL", "Ollama API": "",
"Ollama Version": "Ollama 버전", "Ollama Version": "Ollama 버전",
"On": "켜기", "On": "켜기",
"Only": "오직", "Only": "오직",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okay, Laten we gaan!", "Okay, Let's Go!": "Okay, Laten we gaan!",
"OLED Dark": "OLED Donker", "OLED Dark": "OLED Donker",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama Basis URL", "Ollama API": "",
"Ollama Version": "Ollama Versie", "Ollama Version": "Ollama Versie",
"On": "Aan", "On": "Aan",
"Only": "Alleen", "Only": "Alleen",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "ਠੀਕ ਹੈ, ਚੱਲੋ ਚੱਲੀਏ!", "Okay, Let's Go!": "ਠੀਕ ਹੈ, ਚੱਲੋ ਚੱਲੀਏ!",
"OLED Dark": "OLED ਗੂੜ੍ਹਾ", "OLED Dark": "OLED ਗੂੜ੍ਹਾ",
"Ollama": "ਓਲਾਮਾ", "Ollama": "ਓਲਾਮਾ",
"Ollama Base URL": "ਓਲਾਮਾ ਬੇਸ URL", "Ollama API": "",
"Ollama Version": "ਓਲਾਮਾ ਵਰਜਨ", "Ollama Version": "ਓਲਾਮਾ ਵਰਜਨ",
"On": "ਚਾਲੂ", "On": "ਚਾਲੂ",
"Only": "ਸਿਰਫ਼", "Only": "ਸਿਰਫ਼",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okej, zaczynamy!", "Okay, Let's Go!": "Okej, zaczynamy!",
"OLED Dark": "Ciemny OLED", "OLED Dark": "Ciemny OLED",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Adres bazowy URL Ollama", "Ollama API": "",
"Ollama Version": "Wersja Ollama", "Ollama Version": "Wersja Ollama",
"On": "Włączony", "On": "Włączony",
"Only": "Tylko", "Only": "Tylko",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Ok, Vamos Lá!", "Okay, Let's Go!": "Ok, Vamos Lá!",
"OLED Dark": "OLED Escuro", "OLED Dark": "OLED Escuro",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "URL Base do Ollama", "Ollama API": "",
"Ollama Version": "Versão do Ollama", "Ollama Version": "Versão do Ollama",
"On": "Ligado", "On": "Ligado",
"Only": "Somente", "Only": "Somente",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Ok, Vamos Lá!", "Okay, Let's Go!": "Ok, Vamos Lá!",
"OLED Dark": "OLED Escuro", "OLED Dark": "OLED Escuro",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "URL Base do Ollama", "Ollama API": "",
"Ollama Version": "Versão do Ollama", "Ollama Version": "Versão do Ollama",
"On": "Ligado", "On": "Ligado",
"Only": "Somente", "Only": "Somente",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Давайте начнём!", "Okay, Let's Go!": "Давайте начнём!",
"OLED Dark": "OLED темная", "OLED Dark": "OLED темная",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Базовый адрес URL Ollama", "Ollama API": "",
"Ollama Version": "Версия Ollama", "Ollama Version": "Версия Ollama",
"On": "Включено.", "On": "Включено.",
"Only": "Только", "Only": "Только",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "У реду, хајде да кренемо!", "Okay, Let's Go!": "У реду, хајде да кренемо!",
"OLED Dark": "OLED тамна", "OLED Dark": "OLED тамна",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Основна адреса Ollama-е", "Ollama API": "",
"Ollama Version": "Издање Ollama-е", "Ollama Version": "Издање Ollama-е",
"On": "Укључено", "On": "Укључено",
"Only": "Само", "Only": "Само",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okej, nu kör vi!", "Okay, Let's Go!": "Okej, nu kör vi!",
"OLED Dark": "OLED mörkt", "OLED Dark": "OLED mörkt",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama bas-URL", "Ollama API": "",
"Ollama Version": "Ollama-version", "Ollama Version": "Ollama-version",
"On": "På", "On": "På",
"Only": "Endast", "Only": "Endast",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Tamam, Hadi Başlayalım!", "Okay, Let's Go!": "Tamam, Hadi Başlayalım!",
"OLED Dark": "OLED Koyu", "OLED Dark": "OLED Koyu",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama Temel URL", "Ollama API": "",
"Ollama Version": "Ollama Sürümü", "Ollama Version": "Ollama Sürümü",
"On": "Açık", "On": "Açık",
"Only": "Yalnızca", "Only": "Yalnızca",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Гаразд, давайте почнемо!", "Okay, Let's Go!": "Гаразд, давайте почнемо!",
"OLED Dark": "Темний OLED", "OLED Dark": "Темний OLED",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "URL-адреса Ollama", "Ollama API": "",
"Ollama Version": "Версія Ollama", "Ollama Version": "Версія Ollama",
"On": "Увімк", "On": "Увімк",
"Only": "Тільки", "Only": "Тільки",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "Được rồi, Bắt đầu thôi!", "Okay, Let's Go!": "Được rồi, Bắt đầu thôi!",
"OLED Dark": "OLED Dark", "OLED Dark": "OLED Dark",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Đường dẫn tới API của Ollama (Ollama Base URL)", "Ollama API": "",
"Ollama Version": "Phiên bản Ollama", "Ollama Version": "Phiên bản Ollama",
"On": "Bật", "On": "Bật",
"Only": "Only", "Only": "Only",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "好的,我们开始吧!", "Okay, Let's Go!": "好的,我们开始吧!",
"OLED Dark": "暗黑色", "OLED Dark": "暗黑色",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama 基础 URL", "Ollama API": "",
"Ollama Version": "Ollama 版本", "Ollama Version": "Ollama 版本",
"On": "开", "On": "开",
"Only": "仅", "Only": "仅",

View File

@ -314,7 +314,7 @@
"Okay, Let's Go!": "好的,啟動吧!", "Okay, Let's Go!": "好的,啟動吧!",
"OLED Dark": "`", "OLED Dark": "`",
"Ollama": "Ollama", "Ollama": "Ollama",
"Ollama Base URL": "Ollama 基本 URL", "Ollama API": "",
"Ollama Version": "Ollama 版本", "Ollama Version": "Ollama 版本",
"On": "開啟", "On": "開啟",
"Only": "僅有", "Only": "僅有",