Merge branch 'dev' into feat/model-config

commit 8df0429c99
Timothy Jaeryang Baek, 2024-05-21 21:37:04 -10:00 (committed by GitHub)
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
54 changed files with 2020 additions and 157 deletions

@@ -0,0 +1,59 @@
name: Deploy to HuggingFace Spaces

on:
  push:
    branches:
      - dev
      - main
  workflow_dispatch:

jobs:
  check-secret:
    runs-on: ubuntu-latest
    outputs:
      token-set: ${{ steps.check-key.outputs.defined }}
    steps:
      - id: check-key
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        if: "${{ env.HF_TOKEN != '' }}"
        run: echo "defined=true" >> $GITHUB_OUTPUT

  deploy:
    runs-on: ubuntu-latest
    needs: [check-secret]
    if: needs.check-secret.outputs.token-set == 'true'
    env:
      HF_TOKEN: ${{ secrets.HF_TOKEN }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Remove git history
        run: rm -rf .git

      - name: Prepend YAML front matter to README.md
        run: |
          echo "---" > temp_readme.md
          echo "title: Open WebUI" >> temp_readme.md
          echo "emoji: 🐳" >> temp_readme.md
          echo "colorFrom: purple" >> temp_readme.md
          echo "colorTo: gray" >> temp_readme.md
          echo "sdk: docker" >> temp_readme.md
          echo "app_port: 8080" >> temp_readme.md
          echo "---" >> temp_readme.md
          cat README.md >> temp_readme.md
          mv temp_readme.md README.md

      - name: Configure git
        run: |
          git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git config --global user.name "github-actions[bot]"

      - name: Set up Git and push to Space
        run: |
          git init --initial-branch=main
          git lfs track "*.ttf"
          rm demo.gif
          git add .
          git commit -m "GitHub deploy: ${{ github.sha }}"
          git push --force https://open-webui:${HF_TOKEN}@huggingface.co/spaces/open-webui/open-webui main
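
The check-secret job gates deployment on the presence of the HF_TOKEN secret, so forks that don't define it skip the deploy job instead of failing. The front-matter step rewrites README.md so HuggingFace Spaces picks up the Docker SDK settings; a minimal Python equivalent of that step (a sketch, useful only for checking the result locally) would be:

from pathlib import Path

# Front matter copied verbatim from the workflow's echo commands.
FRONT_MATTER = """---
title: Open WebUI
emoji: 🐳
colorFrom: purple
colorTo: gray
sdk: docker
app_port: 8080
---
"""

readme = Path("README.md")
readme.write_text(FRONT_MATTER + readme.read_text())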

.github/workflows/release-pypi.yml (new file, +32)

@@ -0,0 +1,32 @@
name: Release to PyPI

on:
  push:
    branches:
      - main # or whatever branch you want to use
      - dev

jobs:
  release:
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/p/open-webui
    permissions:
      id-token: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 18
      - uses: actions/setup-python@v5
        with:
          python-version: 3.11
      - name: Build
        run: |
          python -m pip install --upgrade pip
          pip install build
          python -m build .
      - name: Publish package distributions to PyPI
        uses: pypa/gh-action-pypi-publish@release/v1
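
Note the id-token: write permission: pypa/gh-action-pypi-publish can authenticate to PyPI via OIDC ("trusted publishing"), so no API-token secret needs to be stored in the repository, assuming the project on PyPI has been configured to trust this repository and the pypi environment.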

@@ -132,7 +132,8 @@ RUN pip3 install uv && \
uv pip install --system -r requirements.txt --no-cache-dir && \
python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
fi
fi; \
chown -R $UID:$GID /app/backend/data/

@@ -43,6 +43,7 @@ from utils.utils import (
from config import (
SRC_LOG_LEVELS,
OLLAMA_BASE_URLS,
ENABLE_OLLAMA_API,
ENABLE_MODEL_FILTER,
MODEL_FILTER_LIST,
UPLOAD_DIR,
@@ -68,6 +69,8 @@ app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.MODEL_CONFIG = Models.get_all_models()
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API
app.state.config.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS
app.state.MODELS = {}
@@ -97,6 +100,21 @@ async def get_status():
return {"status": True}
@app.get("/config")
async def get_config(user=Depends(get_admin_user)):
return {"ENABLE_OLLAMA_API": app.state.config.ENABLE_OLLAMA_API}
class OllamaConfigForm(BaseModel):
enable_ollama_api: Optional[bool] = None
@app.post("/config/update")
async def update_config(form_data: OllamaConfigForm, user=Depends(get_admin_user)):
app.state.config.ENABLE_OLLAMA_API = form_data.enable_ollama_api
return {"ENABLE_OLLAMA_API": app.state.config.ENABLE_OLLAMA_API}
@app.get("/urls")
async def get_ollama_api_urls(user=Depends(get_admin_user)):
return {"OLLAMA_BASE_URLS": app.state.config.OLLAMA_BASE_URLS}
@@ -157,17 +175,24 @@ def merge_models_lists(model_lists):
async def get_all_models():
log.info("get_all_models()")
tasks = [fetch_url(f"{url}/api/tags") for url in app.state.config.OLLAMA_BASE_URLS]
responses = await asyncio.gather(*tasks)
models = {
"models": merge_models_lists(
map(
lambda response: (response["models"] if response else None),
responses,
if app.state.config.ENABLE_OLLAMA_API:
tasks = [
fetch_url(f"{url}/api/tags") for url in app.state.config.OLLAMA_BASE_URLS
]
responses = await asyncio.gather(*tasks)
models = {
"models": merge_models_lists(
map(
lambda response: response["models"] if response else None, responses
)
)
)
}
}
else:
models = {"models": []}
for model in models["models"]:
add_custom_info_to_model(model)

@@ -316,6 +316,7 @@ async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_user)):
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
idx = 0
pipeline = False
body = await request.body()
# TODO: Remove below after gpt-4-vision fix from Open AI
@@ -324,7 +325,15 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
body = body.decode("utf-8")
body = json.loads(body)
idx = app.state.MODELS[body.get("model")]["urlIdx"]
model = app.state.MODELS[body.get("model")]
idx = model["urlIdx"]
if "pipeline" in model:
pipeline = model.get("pipeline")
if pipeline:
body["user"] = {"name": user.name, "id": user.id}
# Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 4000
# This is a workaround until OpenAI fixes the issue with this model
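
For any model whose metadata carries a truthy pipeline flag, the proxy now attaches the requesting user before forwarding, so a downstream pipeline can apply per-user logic. A sketch of the forwarded body (field values are illustrative, not from the source):

import json

body = {
    "model": "my-pipeline-model",  # hypothetical pipeline-flagged model id
    "messages": [{"role": "user", "content": "Hello"}],
    # Injected by the proxy from the verified user:
    "user": {"name": "Ada", "id": "user-123"},
}
print(json.dumps(body, indent=2))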

@@ -3,7 +3,7 @@ import json
from peewee import *
from peewee_migrate import Router
from playhouse.db_url import connect
from config import SRC_LOG_LEVELS, DATA_DIR, DATABASE_URL
from config import SRC_LOG_LEVELS, DATA_DIR, DATABASE_URL, BACKEND_DIR
import os
import logging
@@ -30,6 +30,8 @@ else:
DB = connect(DATABASE_URL)
log.info(f"Connected to a {DB.__class__.__name__} database.")
router = Router(DB, migrate_dir="apps/web/internal/migrations", logger=log)
router = Router(
DB, migrate_dir=BACKEND_DIR / "apps" / "web" / "internal" / "migrations", logger=log
)
router.run()
DB.connect(reuse_if_open=True)

@@ -1,6 +1,8 @@
import os
import sys
import logging
import importlib.metadata
import pkgutil
import chromadb
from chromadb import Settings
from base64 import b64encode
@@ -22,10 +24,13 @@ from constants import ERROR_MESSAGES
# Load .env file
####################################
BACKEND_DIR = Path(__file__).parent # the path containing this file
BASE_DIR = BACKEND_DIR.parent # the path containing the backend/
try:
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv("../.env"))
load_dotenv(find_dotenv(str(BASE_DIR / ".env")))
except ImportError:
print("dotenv not installed, skipping...")
@@ -87,10 +92,12 @@ WEBUI_FAVICON_URL = "https://openwebui.com/favicon.png"
ENV = os.environ.get("ENV", "dev")
try:
with open(f"../package.json", "r") as f:
PACKAGE_DATA = json.load(f)
PACKAGE_DATA = json.loads((BASE_DIR / "package.json").read_text())
except:
PACKAGE_DATA = {"version": "0.0.0"}
try:
PACKAGE_DATA = {"version": importlib.metadata.version("open-webui")}
except importlib.metadata.PackageNotFoundError:
PACKAGE_DATA = {"version": "0.0.0"}
VERSION = PACKAGE_DATA["version"]
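
The interleaved old and new lines make this hunk hard to read; the resulting logic is effectively the following chain (BASE_DIR is defined earlier in this file): try package.json for source checkouts, then installed-package metadata for pip installs, then a placeholder:

import importlib.metadata
import json

try:
    # Source checkout: version comes from package.json.
    PACKAGE_DATA = json.loads((BASE_DIR / "package.json").read_text())
except Exception:
    try:
        # Installed wheel: fall back to the package metadata.
        PACKAGE_DATA = {"version": importlib.metadata.version("open-webui")}
    except importlib.metadata.PackageNotFoundError:
        PACKAGE_DATA = {"version": "0.0.0"}

VERSION = PACKAGE_DATA["version"]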
@@ -115,10 +122,10 @@ def parse_section(section):
try:
with open("../CHANGELOG.md", "r") as file:
changelog_content = file.read()
changelog_content = (BASE_DIR / "CHANGELOG.md").read_text()
except:
changelog_content = ""
changelog_content = (pkgutil.get_data("open_webui", "CHANGELOG.md") or b"").decode()
# Convert markdown content to HTML
html_content = markdown.markdown(changelog_content)
@@ -164,12 +171,11 @@ WEBUI_VERSION = os.environ.get("WEBUI_VERSION", "v1.0.0-alpha.100")
# DATA/FRONTEND BUILD DIR
####################################
DATA_DIR = str(Path(os.getenv("DATA_DIR", "./data")).resolve())
FRONTEND_BUILD_DIR = str(Path(os.getenv("FRONTEND_BUILD_DIR", "../build")))
DATA_DIR = Path(os.getenv("DATA_DIR", BACKEND_DIR / "data")).resolve()
FRONTEND_BUILD_DIR = Path(os.getenv("FRONTEND_BUILD_DIR", BASE_DIR / "build")).resolve()
try:
with open(f"{DATA_DIR}/config.json", "r") as f:
CONFIG_DATA = json.load(f)
CONFIG_DATA = json.loads((DATA_DIR / "config.json").read_text())
except:
CONFIG_DATA = {}
@@ -279,11 +285,11 @@ JWT_EXPIRES_IN = PersistentConfig(
# Static DIR
####################################
STATIC_DIR = str(Path(os.getenv("STATIC_DIR", "./static")).resolve())
STATIC_DIR = Path(os.getenv("STATIC_DIR", BACKEND_DIR / "static")).resolve()
frontend_favicon = f"{FRONTEND_BUILD_DIR}/favicon.png"
if os.path.exists(frontend_favicon):
shutil.copyfile(frontend_favicon, f"{STATIC_DIR}/favicon.png")
frontend_favicon = FRONTEND_BUILD_DIR / "favicon.png"
if frontend_favicon.exists():
shutil.copyfile(frontend_favicon, STATIC_DIR / "favicon.png")
else:
logging.warning(f"Frontend favicon not found at {frontend_favicon}")
@@ -378,6 +384,13 @@ if not os.path.exists(LITELLM_CONFIG_PATH):
# OLLAMA_BASE_URL
####################################
ENABLE_OLLAMA_API = PersistentConfig(
"ENABLE_OLLAMA_API",
"ollama.enable",
os.environ.get("ENABLE_OLLAMA_API", "True").lower() == "true",
)
OLLAMA_API_BASE_URL = os.environ.get(
"OLLAMA_API_BASE_URL", "http://localhost:11434/api"
)

@@ -8,6 +8,7 @@ import sys
import logging
import aiohttp
import requests
import mimetypes
from fastapi import FastAPI, Request, Depends, status
from fastapi.staticfiles import StaticFiles
@@ -437,6 +438,7 @@ app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")
if os.path.exists(FRONTEND_BUILD_DIR):
mimetypes.add_type("text/javascript", ".js")
app.mount(
"/",
SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
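
Registering the MIME type explicitly guards against hosts (notably some Windows setups) where the system registry maps .js to text/plain, which browsers refuse to execute as module scripts. The effect is easy to verify:

import mimetypes

mimetypes.add_type("text/javascript", ".js")
print(mimetypes.guess_type("app.js"))  # ('text/javascript', None)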

@@ -0,0 +1,60 @@
import base64
import os
import random
from pathlib import Path

import typer
import uvicorn

app = typer.Typer()

KEY_FILE = Path.cwd() / ".webui_secret_key"

if (frontend_build_dir := Path(__file__).parent / "frontend").exists():
    os.environ["FRONTEND_BUILD_DIR"] = str(frontend_build_dir)


@app.command()
def serve(
    host: str = "0.0.0.0",
    port: int = 8080,
):
    if os.getenv("WEBUI_SECRET_KEY") is None:
        typer.echo(
            "Loading WEBUI_SECRET_KEY from file, not provided as an environment variable."
        )
        if not KEY_FILE.exists():
            typer.echo(f"Generating a new secret key and saving it to {KEY_FILE}")
            KEY_FILE.write_bytes(base64.b64encode(random.randbytes(12)))
        typer.echo(f"Loading WEBUI_SECRET_KEY from {KEY_FILE}")
        os.environ["WEBUI_SECRET_KEY"] = KEY_FILE.read_text()

    if os.getenv("USE_CUDA_DOCKER", "false") == "true":
        typer.echo(
            "CUDA is enabled, appending LD_LIBRARY_PATH to include torch/cudnn & cublas libraries."
        )
        LD_LIBRARY_PATH = os.getenv("LD_LIBRARY_PATH", "").split(":")
        os.environ["LD_LIBRARY_PATH"] = ":".join(
            LD_LIBRARY_PATH
            + [
                "/usr/local/lib/python3.11/site-packages/torch/lib",
                "/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib",
            ]
        )

    import main  # we need to set environment variables before importing main

    uvicorn.run(main.app, host=host, port=port, forwarded_allow_ips="*")


@app.command()
def dev(
    host: str = "0.0.0.0",
    port: int = 8080,
    reload: bool = True,
):
    uvicorn.run(
        "main:app", host=host, port=port, reload=reload, forwarded_allow_ips="*"
    )


if __name__ == "__main__":
    app()
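
With the open-webui console script in pyproject.toml pointing at this Typer app, a pip install exposes `open-webui serve` (and `open-webui dev` for auto-reload), e.g. `open-webui serve --port 3000`. The late `import main` inside serve() is deliberate: WEBUI_SECRET_KEY and FRONTEND_BUILD_DIR must be in the environment before main (and its config module) is imported.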

@@ -0,0 +1,43 @@
litellm_settings:
  drop_params: true
model_list:
  - model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.1'
    litellm_params:
      model: huggingface/mistralai/Mistral-7B-Instruct-v0.1
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.2'
    litellm_params:
      model: huggingface/mistralai/Mistral-7B-Instruct-v0.2
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Meta: Llama 3 8B Instruct'
    litellm_params:
      model: huggingface/meta-llama/Meta-Llama-3-8B-Instruct
      api_key: os.environ/HF_TOKEN
      max_tokens: 2047
  - model_name: 'HuggingFace: Mistral: Mixtral 8x7B Instruct v0.1'
    litellm_params:
      model: huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1
      api_key: os.environ/HF_TOKEN
      max_tokens: 8192
  - model_name: 'HuggingFace: Microsoft: Phi-3 Mini-4K-Instruct'
    litellm_params:
      model: huggingface/microsoft/Phi-3-mini-4k-instruct
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Google: Gemma 7B 1.1'
    litellm_params:
      model: huggingface/google/gemma-1.1-7b-it
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Yi-1.5 34B Chat'
    litellm_params:
      model: huggingface/01-ai/Yi-1.5-34B-Chat
      api_key: os.environ/HF_TOKEN
      max_tokens: 1024
  - model_name: 'HuggingFace: Nous Research: Nous Hermes 2 Mixtral 8x7B DPO'
    litellm_params:
      model: huggingface/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
      api_key: os.environ/HF_TOKEN
      max_tokens: 2048

@@ -30,4 +30,34 @@ if [ "$USE_CUDA_DOCKER" = "true" ]; then
    export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.11/site-packages/torch/lib:/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib"
fi

# Check if SPACE_ID is set, if so, configure for space
if [ -n "$SPACE_ID" ]; then
    echo "Configuring for HuggingFace Space deployment"

    # Copy litellm_config.yaml with specified ownership
    echo "Copying litellm_config.yaml to the desired location with specified ownership..."
    cp -f ./space/litellm_config.yaml ./data/litellm/config.yaml

    if [ -n "$ADMIN_USER_EMAIL" ] && [ -n "$ADMIN_USER_PASSWORD" ]; then
        echo "Admin user configured, creating"
        WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*' &
        webui_pid=$!
        echo "Waiting for webui to start..."
        while ! curl -s http://localhost:8080/health > /dev/null; do
            sleep 1
        done
        echo "Creating admin user..."
        curl \
            -X POST "http://localhost:8080/api/v1/auths/signup" \
            -H "accept: application/json" \
            -H "Content-Type: application/json" \
            -d "{ \"email\": \"${ADMIN_USER_EMAIL}\", \"password\": \"${ADMIN_USER_PASSWORD}\", \"name\": \"Admin\" }"
        echo "Shutting down webui..."
        kill $webui_pid
    fi

    export WEBUI_URL=${SPACE_HOST}
fi

WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" exec uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*'

hatch_build.py (new file, +21)

@@ -0,0 +1,21 @@
# noqa: INP001
import shutil
import subprocess
from sys import stderr

from hatchling.builders.hooks.plugin.interface import BuildHookInterface


class CustomBuildHook(BuildHookInterface):
    def initialize(self, version, build_data):
        super().initialize(version, build_data)
        stderr.write(">>> Building Open Webui frontend\n")
        npm = shutil.which("npm")
        if npm is None:
            raise RuntimeError(
                "NodeJS `npm` is required for building Open Webui but it was not found"
            )
        stderr.write("### npm install\n")
        subprocess.run([npm, "install"], check=True)  # noqa: S603
        stderr.write("\n### npm run build\n")
        subprocess.run([npm, "run", "build"], check=True)  # noqa: S603

package-lock.json (generated, 4 lines changed)

@@ -1,12 +1,12 @@
{
"name": "open-webui",
"version": "0.1.125",
"version": "0.2.0.dev1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "open-webui",
"version": "0.1.125",
"version": "0.2.0.dev1",
"dependencies": {
"@pyscript/core": "^0.4.32",
"@sveltejs/adapter-node": "^1.3.1",

@@ -1,6 +1,6 @@
{
"name": "open-webui",
"version": "0.1.125",
"version": "0.2.0.dev1",
"private": true,
"scripts": {
"dev": "npm run pyodide:fetch && vite dev --host",
@@ -13,7 +13,7 @@
"lint:types": "npm run check",
"lint:backend": "pylint backend/",
"format": "prettier --plugin-search-dir --write \"**/*.{js,ts,svelte,css,md,html,json}\"",
"format:backend": "black . --exclude \"/venv/\"",
"format:backend": "black . --exclude \".venv/|/venv/\"",
"i18n:parse": "i18next --config i18next-parser.config.ts && prettier --write \"src/lib/i18n/**/*.{js,json}\"",
"cy:open": "cypress open",
"test:frontend": "vitest",

pyproject.toml (new file, +115)

@@ -0,0 +1,115 @@
[project]
name = "open-webui"
description = "Open WebUI (Formerly Ollama WebUI)"
authors = [
    { name = "Timothy Jaeryang Baek", email = "tim@openwebui.com" }
]
license = { file = "LICENSE" }
dependencies = [
    "fastapi==0.109.2",
    "uvicorn[standard]==0.22.0",
    "pydantic==2.7.1",
    "python-multipart==0.0.9",
    "Flask==3.0.3",
    "Flask-Cors==4.0.0",
    "python-socketio==5.11.2",
    "python-jose==3.3.0",
    "passlib[bcrypt]==1.7.4",
    "requests==2.31.0",
    "aiohttp==3.9.5",
    "peewee==3.17.3",
    "peewee-migrate==1.12.2",
    "psycopg2-binary==2.9.9",
    "PyMySQL==1.1.0",
    "bcrypt==4.1.2",
    "litellm[proxy]==1.35.28",
    "boto3==1.34.95",
    "argon2-cffi==23.1.0",
    "APScheduler==3.10.4",
    "google-generativeai==0.5.2",
    "langchain==0.1.16",
    "langchain-community==0.0.34",
    "langchain-chroma==0.1.0",
    "fake-useragent==1.5.1",
    "chromadb==0.4.24",
    "sentence-transformers==2.7.0",
    "pypdf==4.2.0",
    "docx2txt==0.8",
    "unstructured==0.11.8",
    "Markdown==3.6",
    "pypandoc==1.13",
    "pandas==2.2.2",
    "openpyxl==3.1.2",
    "pyxlsb==1.0.10",
    "xlrd==2.0.1",
    "validators==0.28.1",
    "opencv-python-headless==4.9.0.80",
    "rapidocr-onnxruntime==1.2.3",
    "fpdf2==2.7.8",
    "rank-bm25==0.2.2",
    "faster-whisper==1.0.1",
    "PyJWT[crypto]==2.8.0",
    "black==24.4.2",
    "langfuse==2.27.3",
    "youtube-transcript-api==0.6.2",
    "pytube",
]
readme = "README.md"
requires-python = ">= 3.11, < 3.12.0a1"
dynamic = ["version"]
classifiers = [
    "Development Status :: 4 - Beta",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.11",
    "Topic :: Communications :: Chat",
    "Topic :: Multimedia",
]

[project.scripts]
open-webui = "open_webui:app"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.rye]
managed = true
dev-dependencies = []

[tool.hatch.metadata]
allow-direct-references = true

[tool.hatch.version]
path = "package.json"
pattern = '"version":\s*"(?P<version>[^"]+)"'

[tool.hatch.build.hooks.custom]  # keep this for reading hooks from `hatch_build.py`

[tool.hatch.build.targets.wheel]
sources = ["backend"]
exclude = [
    ".dockerignore",
    ".gitignore",
    ".webui_secret_key",
    "dev.sh",
    "requirements.txt",
    "start.sh",
    "start_windows.bat",
    "webui.db",
    "chroma.sqlite3",
]
force-include = { "CHANGELOG.md" = "open_webui/CHANGELOG.md", build = "open_webui/frontend" }
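
The version is single-sourced: hatch extracts it from package.json with the pattern above, so bumping the frontend version bumps the Python package too. A quick sanity check of the pattern:

import re

pattern = r'"version":\s*"(?P<version>[^"]+)"'
text = '{ "name": "open-webui", "version": "0.2.0.dev1" }'
print(re.search(pattern, text).group("version"))  # 0.2.0.dev1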

requirements-dev.lock (new file, +681)

@@ -0,0 +1,681 @@
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
-e file:.
aiohttp==3.9.5
# via langchain
# via langchain-community
# via litellm
# via open-webui
aiosignal==1.3.1
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.3.0
# via httpx
# via openai
# via starlette
# via watchfiles
apscheduler==3.10.4
# via litellm
# via open-webui
argon2-cffi==23.1.0
# via open-webui
argon2-cffi-bindings==21.2.0
# via argon2-cffi
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
attrs==23.2.0
# via aiohttp
av==11.0.0
# via faster-whisper
backoff==2.2.1
# via langfuse
# via litellm
# via posthog
# via unstructured
bcrypt==4.1.2
# via chromadb
# via open-webui
# via passlib
beautifulsoup4==4.12.3
# via unstructured
bidict==0.23.1
# via python-socketio
black==24.4.2
# via open-webui
blinker==1.8.2
# via flask
boto3==1.34.95
# via open-webui
botocore==1.34.103
# via boto3
# via s3transfer
build==1.2.1
# via chromadb
cachetools==5.3.3
# via google-auth
certifi==2024.2.2
# via httpcore
# via httpx
# via kubernetes
# via pulsar-client
# via requests
# via unstructured-client
cffi==1.16.0
# via argon2-cffi-bindings
# via cryptography
chardet==5.2.0
# via unstructured
charset-normalizer==3.3.2
# via requests
# via unstructured-client
chroma-hnswlib==0.7.3
# via chromadb
chromadb==0.4.24
# via langchain-chroma
# via open-webui
click==8.1.7
# via black
# via flask
# via litellm
# via nltk
# via peewee-migrate
# via rq
# via typer
# via uvicorn
coloredlogs==15.0.1
# via onnxruntime
cryptography==42.0.7
# via litellm
# via pyjwt
ctranslate2==4.2.1
# via faster-whisper
dataclasses-json==0.6.6
# via langchain
# via langchain-community
# via unstructured
# via unstructured-client
deepdiff==7.0.1
# via unstructured-client
defusedxml==0.7.1
# via fpdf2
deprecated==1.2.14
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via openai
dnspython==2.6.1
# via email-validator
docx2txt==0.8
# via open-webui
ecdsa==0.19.0
# via python-jose
email-validator==2.1.1
# via pydantic
emoji==2.11.1
# via unstructured
et-xmlfile==1.1.0
# via openpyxl
fake-useragent==1.5.1
# via open-webui
fastapi==0.109.2
# via chromadb
# via fastapi-sso
# via langchain-chroma
# via litellm
# via open-webui
fastapi-sso==0.10.0
# via litellm
faster-whisper==1.0.1
# via open-webui
filelock==3.14.0
# via huggingface-hub
# via torch
# via transformers
filetype==1.2.0
# via unstructured
flask==3.0.3
# via flask-cors
# via open-webui
flask-cors==4.0.0
# via open-webui
flatbuffers==24.3.25
# via onnxruntime
fonttools==4.51.0
# via fpdf2
fpdf2==2.7.8
# via open-webui
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.3.1
# via huggingface-hub
# via torch
google-ai-generativelanguage==0.6.2
# via google-generativeai
google-api-core==2.19.0
# via google-ai-generativelanguage
# via google-api-python-client
# via google-generativeai
google-api-python-client==2.129.0
# via google-generativeai
google-auth==2.29.0
# via google-ai-generativelanguage
# via google-api-core
# via google-api-python-client
# via google-auth-httplib2
# via google-generativeai
# via kubernetes
google-auth-httplib2==0.2.0
# via google-api-python-client
google-generativeai==0.5.2
# via open-webui
googleapis-common-protos==1.63.0
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio==1.63.0
# via chromadb
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio-status==1.62.2
# via google-api-core
gunicorn==21.2.0
# via litellm
h11==0.14.0
# via httpcore
# via uvicorn
# via wsproto
httpcore==1.0.5
# via httpx
httplib2==0.22.0
# via google-api-python-client
# via google-auth-httplib2
httptools==0.6.1
# via uvicorn
httpx==0.27.0
# via fastapi-sso
# via langfuse
# via openai
huggingface-hub==0.23.0
# via faster-whisper
# via sentence-transformers
# via tokenizers
# via transformers
humanfriendly==10.0
# via coloredlogs
idna==3.7
# via anyio
# via email-validator
# via httpx
# via langfuse
# via requests
# via unstructured-client
# via yarl
importlib-metadata==7.0.0
# via litellm
# via opentelemetry-api
importlib-resources==6.4.0
# via chromadb
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via flask
# via litellm
# via torch
jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
# via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain
# via langchain-core
jsonpath-python==1.0.6
# via unstructured-client
jsonpointer==2.4
# via jsonpatch
kubernetes==29.0.0
# via chromadb
langchain==0.1.16
# via open-webui
langchain-chroma==0.1.0
# via open-webui
langchain-community==0.0.34
# via langchain
# via open-webui
langchain-core==0.1.52
# via langchain
# via langchain-chroma
# via langchain-community
# via langchain-text-splitters
langchain-text-splitters==0.0.1
# via langchain
langdetect==1.0.9
# via unstructured
langfuse==2.27.3
# via open-webui
langsmith==0.1.57
# via langchain
# via langchain-community
# via langchain-core
litellm==1.35.28
# via open-webui
lxml==5.2.2
# via unstructured
markdown==3.6
# via open-webui
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via jinja2
# via werkzeug
marshmallow==3.21.2
# via dataclasses-json
# via unstructured-client
mdurl==0.1.2
# via markdown-it-py
mmh3==4.1.0
# via chromadb
monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.0.5
# via aiohttp
# via yarl
mypy-extensions==1.0.0
# via black
# via typing-inspect
# via unstructured-client
networkx==3.3
# via torch
nltk==3.8.1
# via unstructured
numpy==1.26.4
# via chroma-hnswlib
# via chromadb
# via ctranslate2
# via langchain
# via langchain-chroma
# via langchain-community
# via onnxruntime
# via opencv-python
# via opencv-python-headless
# via pandas
# via rank-bm25
# via rapidocr-onnxruntime
# via scikit-learn
# via scipy
# via sentence-transformers
# via shapely
# via transformers
# via unstructured
oauthlib==3.2.2
# via fastapi-sso
# via kubernetes
# via requests-oauthlib
onnxruntime==1.17.3
# via chromadb
# via faster-whisper
# via rapidocr-onnxruntime
openai==1.28.1
# via litellm
opencv-python==4.9.0.80
# via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80
# via open-webui
openpyxl==3.1.2
# via open-webui
opentelemetry-api==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-instrumentation
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-exporter-otlp-proto-common==1.24.0
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-exporter-otlp-proto-grpc==1.24.0
# via chromadb
opentelemetry-instrumentation==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-asgi==0.45b0
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-fastapi==0.45b0
# via chromadb
opentelemetry-proto==1.24.0
# via opentelemetry-exporter-otlp-proto-common
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-sdk==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-semantic-conventions==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-util-http==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
ordered-set==4.1.0
# via deepdiff
orjson==3.10.3
# via chromadb
# via langsmith
# via litellm
overrides==7.7.0
# via chromadb
packaging==23.2
# via black
# via build
# via gunicorn
# via huggingface-hub
# via langchain-core
# via langfuse
# via marshmallow
# via onnxruntime
# via transformers
# via unstructured-client
pandas==2.2.2
# via open-webui
passlib==1.7.4
# via open-webui
pathspec==0.12.1
# via black
peewee==3.17.3
# via open-webui
# via peewee-migrate
peewee-migrate==1.12.2
# via open-webui
pillow==10.3.0
# via fpdf2
# via rapidocr-onnxruntime
# via sentence-transformers
platformdirs==4.2.1
# via black
posthog==3.5.0
# via chromadb
proto-plus==1.23.0
# via google-ai-generativelanguage
# via google-api-core
protobuf==4.25.3
# via google-ai-generativelanguage
# via google-api-core
# via google-generativeai
# via googleapis-common-protos
# via grpcio-status
# via onnxruntime
# via opentelemetry-proto
# via proto-plus
psycopg2-binary==2.9.9
# via open-webui
pulsar-client==3.5.0
# via chromadb
pyasn1==0.6.0
# via pyasn1-modules
# via python-jose
# via rsa
pyasn1-modules==0.4.0
# via google-auth
pyclipper==1.3.0.post5
# via rapidocr-onnxruntime
pycparser==2.22
# via cffi
pydantic==2.7.1
# via chromadb
# via fastapi
# via fastapi-sso
# via google-generativeai
# via langchain
# via langchain-core
# via langfuse
# via langsmith
# via open-webui
# via openai
pydantic-core==2.18.2
# via pydantic
pygments==2.18.0
# via rich
pyjwt==2.8.0
# via litellm
# via open-webui
pymysql==1.1.0
# via open-webui
pypandoc==1.13
# via open-webui
pyparsing==3.1.2
# via httplib2
pypdf==4.2.0
# via open-webui
# via unstructured-client
pypika==0.48.9
# via chromadb
pyproject-hooks==1.1.0
# via build
python-dateutil==2.9.0.post0
# via botocore
# via kubernetes
# via pandas
# via posthog
# via unstructured-client
python-dotenv==1.0.1
# via litellm
# via uvicorn
python-engineio==4.9.0
# via python-socketio
python-iso639==2024.4.27
# via unstructured
python-jose==3.3.0
# via open-webui
python-magic==0.4.27
# via unstructured
python-multipart==0.0.9
# via litellm
# via open-webui
python-socketio==5.11.2
# via open-webui
pytube==15.0.0
# via open-webui
pytz==2024.1
# via apscheduler
# via pandas
pyxlsb==1.0.10
# via open-webui
pyyaml==6.0.1
# via chromadb
# via ctranslate2
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langchain-core
# via litellm
# via rapidocr-onnxruntime
# via transformers
# via uvicorn
rank-bm25==0.2.2
# via open-webui
rapidfuzz==3.9.0
# via unstructured
rapidocr-onnxruntime==1.2.3
# via open-webui
redis==5.0.4
# via rq
regex==2024.5.10
# via nltk
# via tiktoken
# via transformers
requests==2.31.0
# via chromadb
# via google-api-core
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langsmith
# via litellm
# via open-webui
# via posthog
# via requests-oauthlib
# via tiktoken
# via transformers
# via unstructured
# via unstructured-client
# via youtube-transcript-api
requests-oauthlib==2.0.0
# via kubernetes
rich==13.7.1
# via typer
rq==1.16.2
# via litellm
rsa==4.9
# via google-auth
# via python-jose
s3transfer==0.10.1
# via boto3
safetensors==0.4.3
# via transformers
scikit-learn==1.4.2
# via sentence-transformers
scipy==1.13.0
# via scikit-learn
# via sentence-transformers
sentence-transformers==2.7.0
# via open-webui
setuptools==69.5.1
# via ctranslate2
# via opentelemetry-instrumentation
shapely==2.0.4
# via rapidocr-onnxruntime
shellingham==1.5.4
# via typer
simple-websocket==1.0.0
# via python-engineio
six==1.16.0
# via apscheduler
# via ecdsa
# via kubernetes
# via langdetect
# via posthog
# via python-dateutil
# via rapidocr-onnxruntime
# via unstructured-client
sniffio==1.3.1
# via anyio
# via httpx
# via openai
soupsieve==2.5
# via beautifulsoup4
sqlalchemy==2.0.30
# via langchain
# via langchain-community
starlette==0.36.3
# via fastapi
sympy==1.12
# via onnxruntime
# via torch
tabulate==0.9.0
# via unstructured
tenacity==8.3.0
# via chromadb
# via langchain
# via langchain-community
# via langchain-core
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.6.0
# via litellm
tokenizers==0.15.2
# via chromadb
# via faster-whisper
# via litellm
# via transformers
torch==2.3.0
# via sentence-transformers
tqdm==4.66.4
# via chromadb
# via google-generativeai
# via huggingface-hub
# via nltk
# via openai
# via sentence-transformers
# via transformers
transformers==4.39.3
# via sentence-transformers
typer==0.12.3
# via chromadb
typing-extensions==4.11.0
# via chromadb
# via fastapi
# via google-generativeai
# via huggingface-hub
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
# via sqlalchemy
# via torch
# via typer
# via typing-inspect
# via unstructured
# via unstructured-client
typing-inspect==0.9.0
# via dataclasses-json
# via unstructured-client
tzdata==2024.1
# via pandas
tzlocal==5.2
# via apscheduler
unstructured==0.11.8
# via open-webui
unstructured-client==0.22.0
# via unstructured
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.2.1
# via botocore
# via kubernetes
# via requests
# via unstructured-client
uvicorn==0.22.0
# via chromadb
# via litellm
# via open-webui
uvloop==0.19.0
# via uvicorn
validators==0.28.1
# via open-webui
watchfiles==0.21.0
# via uvicorn
websocket-client==1.8.0
# via kubernetes
websockets==12.0
# via uvicorn
werkzeug==3.0.3
# via flask
wrapt==1.16.0
# via deprecated
# via langfuse
# via opentelemetry-instrumentation
# via unstructured
wsproto==1.2.0
# via simple-websocket
xlrd==2.0.1
# via open-webui
yarl==1.9.4
# via aiohttp
youtube-transcript-api==0.6.2
# via open-webui
zipp==3.18.1
# via importlib-metadata

requirements.lock (new file, +681)

@@ -0,0 +1,681 @@
# generated by rye
# use `rye lock` or `rye sync` to update this lockfile
#
# last locked with the following flags:
# pre: false
# features: []
# all-features: false
# with-sources: false
# generate-hashes: false
-e file:.
aiohttp==3.9.5
# via langchain
# via langchain-community
# via litellm
# via open-webui
aiosignal==1.3.1
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.3.0
# via httpx
# via openai
# via starlette
# via watchfiles
apscheduler==3.10.4
# via litellm
# via open-webui
argon2-cffi==23.1.0
# via open-webui
argon2-cffi-bindings==21.2.0
# via argon2-cffi
asgiref==3.8.1
# via opentelemetry-instrumentation-asgi
attrs==23.2.0
# via aiohttp
av==11.0.0
# via faster-whisper
backoff==2.2.1
# via langfuse
# via litellm
# via posthog
# via unstructured
bcrypt==4.1.2
# via chromadb
# via open-webui
# via passlib
beautifulsoup4==4.12.3
# via unstructured
bidict==0.23.1
# via python-socketio
black==24.4.2
# via open-webui
blinker==1.8.2
# via flask
boto3==1.34.95
# via open-webui
botocore==1.34.103
# via boto3
# via s3transfer
build==1.2.1
# via chromadb
cachetools==5.3.3
# via google-auth
certifi==2024.2.2
# via httpcore
# via httpx
# via kubernetes
# via pulsar-client
# via requests
# via unstructured-client
cffi==1.16.0
# via argon2-cffi-bindings
# via cryptography
chardet==5.2.0
# via unstructured
charset-normalizer==3.3.2
# via requests
# via unstructured-client
chroma-hnswlib==0.7.3
# via chromadb
chromadb==0.4.24
# via langchain-chroma
# via open-webui
click==8.1.7
# via black
# via flask
# via litellm
# via nltk
# via peewee-migrate
# via rq
# via typer
# via uvicorn
coloredlogs==15.0.1
# via onnxruntime
cryptography==42.0.7
# via litellm
# via pyjwt
ctranslate2==4.2.1
# via faster-whisper
dataclasses-json==0.6.6
# via langchain
# via langchain-community
# via unstructured
# via unstructured-client
deepdiff==7.0.1
# via unstructured-client
defusedxml==0.7.1
# via fpdf2
deprecated==1.2.14
# via opentelemetry-api
# via opentelemetry-exporter-otlp-proto-grpc
distro==1.9.0
# via openai
dnspython==2.6.1
# via email-validator
docx2txt==0.8
# via open-webui
ecdsa==0.19.0
# via python-jose
email-validator==2.1.1
# via pydantic
emoji==2.11.1
# via unstructured
et-xmlfile==1.1.0
# via openpyxl
fake-useragent==1.5.1
# via open-webui
fastapi==0.109.2
# via chromadb
# via fastapi-sso
# via langchain-chroma
# via litellm
# via open-webui
fastapi-sso==0.10.0
# via litellm
faster-whisper==1.0.1
# via open-webui
filelock==3.14.0
# via huggingface-hub
# via torch
# via transformers
filetype==1.2.0
# via unstructured
flask==3.0.3
# via flask-cors
# via open-webui
flask-cors==4.0.0
# via open-webui
flatbuffers==24.3.25
# via onnxruntime
fonttools==4.51.0
# via fpdf2
fpdf2==2.7.8
# via open-webui
frozenlist==1.4.1
# via aiohttp
# via aiosignal
fsspec==2024.3.1
# via huggingface-hub
# via torch
google-ai-generativelanguage==0.6.2
# via google-generativeai
google-api-core==2.19.0
# via google-ai-generativelanguage
# via google-api-python-client
# via google-generativeai
google-api-python-client==2.129.0
# via google-generativeai
google-auth==2.29.0
# via google-ai-generativelanguage
# via google-api-core
# via google-api-python-client
# via google-auth-httplib2
# via google-generativeai
# via kubernetes
google-auth-httplib2==0.2.0
# via google-api-python-client
google-generativeai==0.5.2
# via open-webui
googleapis-common-protos==1.63.0
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio==1.63.0
# via chromadb
# via google-api-core
# via grpcio-status
# via opentelemetry-exporter-otlp-proto-grpc
grpcio-status==1.62.2
# via google-api-core
gunicorn==21.2.0
# via litellm
h11==0.14.0
# via httpcore
# via uvicorn
# via wsproto
httpcore==1.0.5
# via httpx
httplib2==0.22.0
# via google-api-python-client
# via google-auth-httplib2
httptools==0.6.1
# via uvicorn
httpx==0.27.0
# via fastapi-sso
# via langfuse
# via openai
huggingface-hub==0.23.0
# via faster-whisper
# via sentence-transformers
# via tokenizers
# via transformers
humanfriendly==10.0
# via coloredlogs
idna==3.7
# via anyio
# via email-validator
# via httpx
# via langfuse
# via requests
# via unstructured-client
# via yarl
importlib-metadata==7.0.0
# via litellm
# via opentelemetry-api
importlib-resources==6.4.0
# via chromadb
itsdangerous==2.2.0
# via flask
jinja2==3.1.4
# via flask
# via litellm
# via torch
jmespath==1.0.1
# via boto3
# via botocore
joblib==1.4.2
# via nltk
# via scikit-learn
jsonpatch==1.33
# via langchain
# via langchain-core
jsonpath-python==1.0.6
# via unstructured-client
jsonpointer==2.4
# via jsonpatch
kubernetes==29.0.0
# via chromadb
langchain==0.1.16
# via open-webui
langchain-chroma==0.1.0
# via open-webui
langchain-community==0.0.34
# via langchain
# via open-webui
langchain-core==0.1.52
# via langchain
# via langchain-chroma
# via langchain-community
# via langchain-text-splitters
langchain-text-splitters==0.0.1
# via langchain
langdetect==1.0.9
# via unstructured
langfuse==2.27.3
# via open-webui
langsmith==0.1.57
# via langchain
# via langchain-community
# via langchain-core
litellm==1.35.28
# via open-webui
lxml==5.2.2
# via unstructured
markdown==3.6
# via open-webui
markdown-it-py==3.0.0
# via rich
markupsafe==2.1.5
# via jinja2
# via werkzeug
marshmallow==3.21.2
# via dataclasses-json
# via unstructured-client
mdurl==0.1.2
# via markdown-it-py
mmh3==4.1.0
# via chromadb
monotonic==1.6
# via posthog
mpmath==1.3.0
# via sympy
multidict==6.0.5
# via aiohttp
# via yarl
mypy-extensions==1.0.0
# via black
# via typing-inspect
# via unstructured-client
networkx==3.3
# via torch
nltk==3.8.1
# via unstructured
numpy==1.26.4
# via chroma-hnswlib
# via chromadb
# via ctranslate2
# via langchain
# via langchain-chroma
# via langchain-community
# via onnxruntime
# via opencv-python
# via opencv-python-headless
# via pandas
# via rank-bm25
# via rapidocr-onnxruntime
# via scikit-learn
# via scipy
# via sentence-transformers
# via shapely
# via transformers
# via unstructured
oauthlib==3.2.2
# via fastapi-sso
# via kubernetes
# via requests-oauthlib
onnxruntime==1.17.3
# via chromadb
# via faster-whisper
# via rapidocr-onnxruntime
openai==1.28.1
# via litellm
opencv-python==4.9.0.80
# via rapidocr-onnxruntime
opencv-python-headless==4.9.0.80
# via open-webui
openpyxl==3.1.2
# via open-webui
opentelemetry-api==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
# via opentelemetry-instrumentation
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-exporter-otlp-proto-common==1.24.0
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-exporter-otlp-proto-grpc==1.24.0
# via chromadb
opentelemetry-instrumentation==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-asgi==0.45b0
# via opentelemetry-instrumentation-fastapi
opentelemetry-instrumentation-fastapi==0.45b0
# via chromadb
opentelemetry-proto==1.24.0
# via opentelemetry-exporter-otlp-proto-common
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-sdk==1.24.0
# via chromadb
# via opentelemetry-exporter-otlp-proto-grpc
opentelemetry-semantic-conventions==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
# via opentelemetry-sdk
opentelemetry-util-http==0.45b0
# via opentelemetry-instrumentation-asgi
# via opentelemetry-instrumentation-fastapi
ordered-set==4.1.0
# via deepdiff
orjson==3.10.3
# via chromadb
# via langsmith
# via litellm
overrides==7.7.0
# via chromadb
packaging==23.2
# via black
# via build
# via gunicorn
# via huggingface-hub
# via langchain-core
# via langfuse
# via marshmallow
# via onnxruntime
# via transformers
# via unstructured-client
pandas==2.2.2
# via open-webui
passlib==1.7.4
# via open-webui
pathspec==0.12.1
# via black
peewee==3.17.3
# via open-webui
# via peewee-migrate
peewee-migrate==1.12.2
# via open-webui
pillow==10.3.0
# via fpdf2
# via rapidocr-onnxruntime
# via sentence-transformers
platformdirs==4.2.1
# via black
posthog==3.5.0
# via chromadb
proto-plus==1.23.0
# via google-ai-generativelanguage
# via google-api-core
protobuf==4.25.3
# via google-ai-generativelanguage
# via google-api-core
# via google-generativeai
# via googleapis-common-protos
# via grpcio-status
# via onnxruntime
# via opentelemetry-proto
# via proto-plus
psycopg2-binary==2.9.9
# via open-webui
pulsar-client==3.5.0
# via chromadb
pyasn1==0.6.0
# via pyasn1-modules
# via python-jose
# via rsa
pyasn1-modules==0.4.0
# via google-auth
pyclipper==1.3.0.post5
# via rapidocr-onnxruntime
pycparser==2.22
# via cffi
pydantic==2.7.1
# via chromadb
# via fastapi
# via fastapi-sso
# via google-generativeai
# via langchain
# via langchain-core
# via langfuse
# via langsmith
# via open-webui
# via openai
pydantic-core==2.18.2
# via pydantic
pygments==2.18.0
# via rich
pyjwt==2.8.0
# via litellm
# via open-webui
pymysql==1.1.0
# via open-webui
pypandoc==1.13
# via open-webui
pyparsing==3.1.2
# via httplib2
pypdf==4.2.0
# via open-webui
# via unstructured-client
pypika==0.48.9
# via chromadb
pyproject-hooks==1.1.0
# via build
python-dateutil==2.9.0.post0
# via botocore
# via kubernetes
# via pandas
# via posthog
# via unstructured-client
python-dotenv==1.0.1
# via litellm
# via uvicorn
python-engineio==4.9.0
# via python-socketio
python-iso639==2024.4.27
# via unstructured
python-jose==3.3.0
# via open-webui
python-magic==0.4.27
# via unstructured
python-multipart==0.0.9
# via litellm
# via open-webui
python-socketio==5.11.2
# via open-webui
pytube==15.0.0
# via open-webui
pytz==2024.1
# via apscheduler
# via pandas
pyxlsb==1.0.10
# via open-webui
pyyaml==6.0.1
# via chromadb
# via ctranslate2
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langchain-core
# via litellm
# via rapidocr-onnxruntime
# via transformers
# via uvicorn
rank-bm25==0.2.2
# via open-webui
rapidfuzz==3.9.0
# via unstructured
rapidocr-onnxruntime==1.2.3
# via open-webui
redis==5.0.4
# via rq
regex==2024.5.10
# via nltk
# via tiktoken
# via transformers
requests==2.31.0
# via chromadb
# via google-api-core
# via huggingface-hub
# via kubernetes
# via langchain
# via langchain-community
# via langsmith
# via litellm
# via open-webui
# via posthog
# via requests-oauthlib
# via tiktoken
# via transformers
# via unstructured
# via unstructured-client
# via youtube-transcript-api
requests-oauthlib==2.0.0
# via kubernetes
rich==13.7.1
# via typer
rq==1.16.2
# via litellm
rsa==4.9
# via google-auth
# via python-jose
s3transfer==0.10.1
# via boto3
safetensors==0.4.3
# via transformers
scikit-learn==1.4.2
# via sentence-transformers
scipy==1.13.0
# via scikit-learn
# via sentence-transformers
sentence-transformers==2.7.0
# via open-webui
setuptools==69.5.1
# via ctranslate2
# via opentelemetry-instrumentation
shapely==2.0.4
# via rapidocr-onnxruntime
shellingham==1.5.4
# via typer
simple-websocket==1.0.0
# via python-engineio
six==1.16.0
# via apscheduler
# via ecdsa
# via kubernetes
# via langdetect
# via posthog
# via python-dateutil
# via rapidocr-onnxruntime
# via unstructured-client
sniffio==1.3.1
# via anyio
# via httpx
# via openai
soupsieve==2.5
# via beautifulsoup4
sqlalchemy==2.0.30
# via langchain
# via langchain-community
starlette==0.36.3
# via fastapi
sympy==1.12
# via onnxruntime
# via torch
tabulate==0.9.0
# via unstructured
tenacity==8.3.0
# via chromadb
# via langchain
# via langchain-community
# via langchain-core
threadpoolctl==3.5.0
# via scikit-learn
tiktoken==0.6.0
# via litellm
tokenizers==0.15.2
# via chromadb
# via faster-whisper
# via litellm
# via transformers
torch==2.3.0
# via sentence-transformers
tqdm==4.66.4
# via chromadb
# via google-generativeai
# via huggingface-hub
# via nltk
# via openai
# via sentence-transformers
# via transformers
transformers==4.39.3
# via sentence-transformers
typer==0.12.3
# via chromadb
typing-extensions==4.11.0
# via chromadb
# via fastapi
# via google-generativeai
# via huggingface-hub
# via openai
# via opentelemetry-sdk
# via pydantic
# via pydantic-core
# via sqlalchemy
# via torch
# via typer
# via typing-inspect
# via unstructured
# via unstructured-client
typing-inspect==0.9.0
# via dataclasses-json
# via unstructured-client
tzdata==2024.1
# via pandas
tzlocal==5.2
# via apscheduler
unstructured==0.11.8
# via open-webui
unstructured-client==0.22.0
# via unstructured
uritemplate==4.1.1
# via google-api-python-client
urllib3==2.2.1
# via botocore
# via kubernetes
# via requests
# via unstructured-client
uvicorn==0.22.0
# via chromadb
# via litellm
# via open-webui
uvloop==0.19.0
# via uvicorn
validators==0.28.1
# via open-webui
watchfiles==0.21.0
# via uvicorn
websocket-client==1.8.0
# via kubernetes
websockets==12.0
# via uvicorn
werkzeug==3.0.3
# via flask
wrapt==1.16.0
# via deprecated
# via langfuse
# via opentelemetry-instrumentation
# via unstructured
wsproto==1.2.0
# via simple-websocket
xlrd==2.0.1
# via open-webui
yarl==1.9.4
# via aiohttp
youtube-transcript-api==0.6.2
# via open-webui
zipp==3.18.1
# via importlib-metadata

@@ -1,6 +1,73 @@
import { OLLAMA_API_BASE_URL } from '$lib/constants';
import { promptTemplate } from '$lib/utils';

export const getOllamaConfig = async (token: string = '') => {
	let error = null;

	const res = await fetch(`${OLLAMA_API_BASE_URL}/config`, {
		method: 'GET',
		headers: {
			Accept: 'application/json',
			'Content-Type': 'application/json',
			...(token && { authorization: `Bearer ${token}` })
		}
	})
		.then(async (res) => {
			if (!res.ok) throw await res.json();
			return res.json();
		})
		.catch((err) => {
			console.log(err);
			if ('detail' in err) {
				error = err.detail;
			} else {
				error = 'Server connection failed';
			}
			return null;
		});

	if (error) {
		throw error;
	}

	return res;
};

export const updateOllamaConfig = async (token: string = '', enable_ollama_api: boolean) => {
	let error = null;

	const res = await fetch(`${OLLAMA_API_BASE_URL}/config/update`, {
		method: 'POST',
		headers: {
			Accept: 'application/json',
			'Content-Type': 'application/json',
			...(token && { authorization: `Bearer ${token}` })
		},
		body: JSON.stringify({
			enable_ollama_api: enable_ollama_api
		})
	})
		.then(async (res) => {
			if (!res.ok) throw await res.json();
			return res.json();
		})
		.catch((err) => {
			console.log(err);
			if ('detail' in err) {
				error = err.detail;
			} else {
				error = 'Server connection failed';
			}
			return null;
		});

	if (error) {
		throw error;
	}

	return res;
};

export const getOllamaUrls = async (token: string = '') => {
	let error = null;

@@ -123,7 +123,7 @@
}
onMount(async () => {
if (!chatId) {
if (!$chatId) {
await initNewChat();
} else {
if (!($settings.saveChatHistory ?? true)) {
@@ -442,8 +442,7 @@
: undefined,
...messages
]
.filter((message) => message)
.filter((message) => message.content != '')
.filter((message) => message?.content?.trim())
.map((message, idx, arr) => {
// Prepare the base message object
const baseMessage = {
@@ -703,7 +702,7 @@
: undefined,
...messages
]
.filter((message) => message)
.filter((message) => message?.content?.trim())
.map((message, idx, arr) => ({
role: message.role,
...((message.files?.filter((file) => file.type === 'image').length > 0 ?? false) &&

@@ -3,7 +3,13 @@
import { createEventDispatcher, onMount, getContext } from 'svelte';
const dispatch = createEventDispatcher();
import { getOllamaUrls, getOllamaVersion, updateOllamaUrls } from '$lib/apis/ollama';
import {
getOllamaConfig,
getOllamaUrls,
getOllamaVersion,
updateOllamaConfig,
updateOllamaUrls
} from '$lib/apis/ollama';
import {
getOpenAIConfig,
getOpenAIKeys,
@@ -26,6 +32,7 @@
let OPENAI_API_BASE_URLS = [''];
let ENABLE_OPENAI_API = false;
let ENABLE_OLLAMA_API = false;
const updateOpenAIHandler = async () => {
OPENAI_API_BASE_URLS = await updateOpenAIUrls(localStorage.token, OPENAI_API_BASE_URLS);
@@ -50,10 +57,13 @@
onMount(async () => {
if ($user.role === 'admin') {
OLLAMA_BASE_URLS = await getOllamaUrls(localStorage.token);
const ollamaConfig = await getOllamaConfig(localStorage.token);
const openaiConfig = await getOpenAIConfig(localStorage.token);
const config = await getOpenAIConfig(localStorage.token);
ENABLE_OPENAI_API = config.ENABLE_OPENAI_API;
ENABLE_OPENAI_API = openaiConfig.ENABLE_OPENAI_API;
ENABLE_OLLAMA_API = ollamaConfig.ENABLE_OLLAMA_API;
OLLAMA_BASE_URLS = await getOllamaUrls(localStorage.token);
OPENAI_API_BASE_URLS = await getOpenAIUrls(localStorage.token);
OPENAI_API_KEYS = await getOpenAIKeys(localStorage.token);
@@ -161,95 +171,108 @@
<hr class=" dark:border-gray-700" />
<div>
<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Ollama Base URL')}</div>
<div class="flex w-full gap-1.5">
<div class="flex-1 flex flex-col gap-2">
{#each OLLAMA_BASE_URLS as url, idx}
<div class="flex gap-1.5">
<input
class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder={$i18n.t('Enter URL (e.g. http://localhost:11434)')}
bind:value={url}
/>
<div class="pr-1.5 space-y-2">
<div class="flex justify-between items-center text-sm">
<div class=" font-medium">{$i18n.t('Ollama API')}</div>
<div class="self-center flex items-center">
{#if idx === 0}
<button
class="px-1"
on:click={() => {
OLLAMA_BASE_URLS = [...OLLAMA_BASE_URLS, ''];
}}
type="button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
d="M8.75 3.75a.75.75 0 0 0-1.5 0v3.5h-3.5a.75.75 0 0 0 0 1.5h3.5v3.5a.75.75 0 0 0 1.5 0v-3.5h3.5a.75.75 0 0 0 0-1.5h-3.5v-3.5Z"
/>
</svg>
</button>
{:else}
<button
class="px-1"
on:click={() => {
OLLAMA_BASE_URLS = OLLAMA_BASE_URLS.filter((url, urlIdx) => idx !== urlIdx);
}}
type="button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path d="M3.75 7.25a.75.75 0 0 0 0 1.5h8.5a.75.75 0 0 0 0-1.5h-8.5Z" />
</svg>
</button>
{/if}
</div>
</div>
{/each}
</div>
<div class="">
<button
class="p-2.5 bg-gray-200 hover:bg-gray-300 dark:bg-gray-850 dark:hover:bg-gray-800 rounded-lg transition"
on:click={() => {
updateOllamaUrlsHandler();
<div class="mt-1">
<Switch
bind:state={ENABLE_OLLAMA_API}
on:change={async () => {
updateOllamaConfig(localStorage.token, ENABLE_OLLAMA_API);
}}
type="button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 20 20"
fill="currentColor"
class="w-4 h-4"
>
<path
fill-rule="evenodd"
d="M15.312 11.424a5.5 5.5 0 01-9.201 2.466l-.312-.311h2.433a.75.75 0 000-1.5H3.989a.75.75 0 00-.75.75v4.242a.75.75 0 001.5 0v-2.43l.31.31a7 7 0 0011.712-3.138.75.75 0 00-1.449-.39zm1.23-3.723a.75.75 0 00.219-.53V2.929a.75.75 0 00-1.5 0V5.36l-.31-.31A7 7 0 003.239 8.188a.75.75 0 101.448.389A5.5 5.5 0 0113.89 6.11l.311.31h-2.432a.75.75 0 000 1.5h4.243a.75.75 0 00.53-.219z"
clip-rule="evenodd"
/>
</svg>
</button>
/>
</div>
</div>
{#if ENABLE_OLLAMA_API}
<div class="flex w-full gap-1.5">
<div class="flex-1 flex flex-col gap-2">
{#each OLLAMA_BASE_URLS as url, idx}
<div class="flex gap-1.5">
<input
class="w-full rounded-lg py-2 px-4 text-sm dark:text-gray-300 dark:bg-gray-850 outline-none"
placeholder={$i18n.t('Enter URL (e.g. http://localhost:11434)')}
bind:value={url}
/>
<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
{$i18n.t('Trouble accessing Ollama?')}
<a
class=" text-gray-300 font-medium underline"
href="https://github.com/open-webui/open-webui#troubleshooting"
target="_blank"
>
{$i18n.t('Click here for help.')}
</a>
</div>
<div class="self-center flex items-center">
{#if idx === 0}
<button
class="px-1"
on:click={() => {
OLLAMA_BASE_URLS = [...OLLAMA_BASE_URLS, ''];
}}
type="button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
d="M8.75 3.75a.75.75 0 0 0-1.5 0v3.5h-3.5a.75.75 0 0 0 0 1.5h3.5v3.5a.75.75 0 0 0 1.5 0v-3.5h3.5a.75.75 0 0 0 0-1.5h-3.5v-3.5Z"
/>
</svg>
</button>
{:else}
<button
class="px-1"
on:click={() => {
OLLAMA_BASE_URLS = OLLAMA_BASE_URLS.filter((url, urlIdx) => idx !== urlIdx);
}}
type="button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path d="M3.75 7.25a.75.75 0 0 0 0 1.5h8.5a.75.75 0 0 0 0-1.5h-8.5Z" />
</svg>
</button>
{/if}
</div>
</div>
{/each}
</div>
<div class="flex">
<button
class="self-center p-2 bg-gray-200 hover:bg-gray-300 dark:bg-gray-900 dark:hover:bg-gray-850 rounded-lg transition"
on:click={() => {
updateOllamaUrlsHandler();
}}
type="button"
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 20 20"
fill="currentColor"
class="w-4 h-4"
>
<path
fill-rule="evenodd"
d="M15.312 11.424a5.5 5.5 0 01-9.201 2.466l-.312-.311h2.433a.75.75 0 000-1.5H3.989a.75.75 0 00-.75.75v4.242a.75.75 0 001.5 0v-2.43l.31.31a7 7 0 0011.712-3.138.75.75 0 00-1.449-.39zm1.23-3.723a.75.75 0 00.219-.53V2.929a.75.75 0 00-1.5 0V5.36l-.31-.31A7 7 0 003.239 8.188a.75.75 0 101.448.389A5.5 5.5 0 0113.89 6.11l.311.31h-2.432a.75.75 0 000 1.5h4.243a.75.75 0 00.53-.219z"
clip-rule="evenodd"
/>
</svg>
</button>
</div>
</div>
<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
{$i18n.t('Trouble accessing Ollama?')}
<a
class=" text-gray-300 font-medium underline"
href="https://github.com/open-webui/open-webui#troubleshooting"
target="_blank"
>
{$i18n.t('Click here for help.')}
</a>
</div>
{/if}
</div>
</div>

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "حسنا دعنا نذهب!",
"OLED Dark": "OLED داكن",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama الرابط الافتراضي",
"Ollama API": "",
"Ollama Version": "Ollama الاصدار",
"On": "تشغيل",
"Only": "فقط",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "ОК, Нека започваме!",
"OLED Dark": "OLED тъмно",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama Базов URL",
"Ollama API": "",
"Ollama Version": "Ollama Версия",
"On": "Вкл.",
"Only": "Само",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "ঠিক আছে, চলুন যাই!",
"OLED Dark": "OLED ডার্ক",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama বেজ ইউআরএল",
"Ollama API": "",
"Ollama Version": "Ollama ভার্সন",
"On": "চালু",
"Only": "শুধুমাত্র",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "D'acord, Anem!",
"OLED Dark": "OLED Fosc",
"Ollama": "Ollama",
"Ollama Base URL": "URL Base d'Ollama",
"Ollama API": "",
"Ollama Version": "Versió d'Ollama",
"On": "Activat",
"Only": "Només",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okay, los geht's!",
"OLED Dark": "OLED Dunkel",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama Basis URL",
"Ollama API": "",
"Ollama Version": "Ollama-Version",
"On": "Ein",
"Only": "Nur",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okay, Let's Go!",
"OLED Dark": "OLED Dark",
"Ollama": "",
"Ollama Base URL": "Ollama Base Bark",
"Ollama API": "",
"Ollama Version": "Ollama Version",
"On": "On",
"Only": "Only",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "",
"OLED Dark": "",
"Ollama": "",
"Ollama Base URL": "",
"Ollama API": "",
"Ollama Version": "",
"On": "",
"Only": "",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "",
"OLED Dark": "",
"Ollama": "",
"Ollama Base URL": "",
"Ollama API": "",
"Ollama Version": "",
"On": "",
"Only": "",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Bien, ¡Vamos!",
"OLED Dark": "OLED oscuro",
"Ollama": "Ollama",
"Ollama Base URL": "URL base de Ollama",
"Ollama API": "",
"Ollama Version": "Versión de Ollama",
"On": "Activado",
"Only": "Solamente",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "باشه، بزن بریم!",
"OLED Dark": "OLED تیره",
"Ollama": "Ollama",
"Ollama Base URL": "URL پایه اولاما",
"Ollama API": "",
"Ollama Version": "نسخه اولاما",
"On": "روشن",
"Only": "فقط",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Eikun menoksi!",
"OLED Dark": "OLED-tumma",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama-perus-URL",
"Ollama API": "",
"Ollama Version": "Ollama-versio",
"On": "Päällä",
"Only": "Vain",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okay, Allons-y !",
"OLED Dark": "OLED Sombre",
"Ollama": "Ollama",
"Ollama Base URL": "URL de Base Ollama",
"Ollama API": "",
"Ollama Version": "Version Ollama",
"On": "Activé",
"Only": "Seulement",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "D'accord, allons-y !",
"OLED Dark": "OLED Sombre",
"Ollama": "Ollama",
"Ollama Base URL": "URL de Base Ollama",
"Ollama API": "",
"Ollama Version": "Version Ollama",
"On": "Activé",
"Only": "Seulement",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "בסדר, בואו נתחיל!",
"OLED Dark": "OLED כהה",
"Ollama": "Ollama",
"Ollama Base URL": "כתובת URL בסיסית של Ollama",
"Ollama API": "",
"Ollama Version": "גרסת Ollama",
"On": "פועל",
"Only": "רק",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "ठीक है, चलिए चलते हैं!",
"OLED Dark": "OLEDescuro",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama URL",
"Ollama API": "",
"Ollama Version": "Ollama Version",
"On": "चालू",
"Only": "केवल",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "U redu, idemo!",
"OLED Dark": "OLED Tamno",
"Ollama": "Ollama",
"Ollama Base URL": "Osnovni URL Ollama",
"Ollama API": "",
"Ollama Version": "Verzija Ollama",
"On": "Uključeno",
"Only": "Samo",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Ok, andiamo!",
"OLED Dark": "OLED scuro",
"Ollama": "Ollama",
"Ollama Base URL": "URL base Ollama",
"Ollama API": "",
"Ollama Version": "Versione Ollama",
"On": "Attivato",
"Only": "Solo",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "OK、始めましょう",
"OLED Dark": "OLED ダーク",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama ベース URL",
"Ollama API": "",
"Ollama Version": "Ollama バージョン",
"On": "オン",
"Only": "のみ",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "კარგი, წავედით!",
"OLED Dark": "OLED მუქი",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama ბაზისური მისამართი",
"Ollama API": "",
"Ollama Version": "Ollama ვერსია",
"On": "ჩართვა",
"Only": "მხოლოდ",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "그렇습니다, 시작합시다!",
"OLED Dark": "OLED 어두운",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama 기본 URL",
"Ollama API": "",
"Ollama Version": "Ollama 버전",
"On": "켜기",
"Only": "오직",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okay, Laten we gaan!",
"OLED Dark": "OLED Donker",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama Basis URL",
"Ollama API": "",
"Ollama Version": "Ollama Versie",
"On": "Aan",
"Only": "Alleen",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "ਠੀਕ ਹੈ, ਚੱਲੋ ਚੱਲੀਏ!",
"OLED Dark": "OLED ਗੂੜ੍ਹਾ",
"Ollama": "ਓਲਾਮਾ",
"Ollama Base URL": "ਓਲਾਮਾ ਬੇਸ URL",
"Ollama API": "",
"Ollama Version": "ਓਲਾਮਾ ਵਰਜਨ",
"On": "ਚਾਲੂ",
"Only": "ਸਿਰਫ਼",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okej, zaczynamy!",
"OLED Dark": "Ciemny OLED",
"Ollama": "Ollama",
"Ollama Base URL": "Adres bazowy URL Ollama",
"Ollama API": "",
"Ollama Version": "Wersja Ollama",
"On": "Włączony",
"Only": "Tylko",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Ok, Vamos Lá!",
"OLED Dark": "OLED Escuro",
"Ollama": "Ollama",
"Ollama Base URL": "URL Base do Ollama",
"Ollama API": "",
"Ollama Version": "Versão do Ollama",
"On": "Ligado",
"Only": "Somente",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Ok, Vamos Lá!",
"OLED Dark": "OLED Escuro",
"Ollama": "Ollama",
"Ollama Base URL": "URL Base do Ollama",
"Ollama API": "",
"Ollama Version": "Versão do Ollama",
"On": "Ligado",
"Only": "Somente",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Давайте начнём!",
"OLED Dark": "OLED темная",
"Ollama": "Ollama",
"Ollama Base URL": "Базовый адрес URL Ollama",
"Ollama API": "",
"Ollama Version": "Версия Ollama",
"On": "Включено.",
"Only": "Только",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "У реду, хајде да кренемо!",
"OLED Dark": "OLED тамна",
"Ollama": "Ollama",
"Ollama Base URL": "Основна адреса Ollama-е",
"Ollama API": "",
"Ollama Version": "Издање Ollama-е",
"On": "Укључено",
"Only": "Само",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Okej, nu kör vi!",
"OLED Dark": "OLED mörkt",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama bas-URL",
"Ollama API": "",
"Ollama Version": "Ollama-version",
"On": "På",
"Only": "Endast",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Tamam, Hadi Başlayalım!",
"OLED Dark": "OLED Koyu",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama Temel URL",
"Ollama API": "",
"Ollama Version": "Ollama Sürümü",
"On": "Açık",
"Only": "Yalnızca",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Гаразд, давайте почнемо!",
"OLED Dark": "Темний OLED",
"Ollama": "Ollama",
"Ollama Base URL": "URL-адреса Ollama",
"Ollama API": "",
"Ollama Version": "Версія Ollama",
"On": "Увімк",
"Only": "Тільки",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "Được rồi, Bắt đầu thôi!",
"OLED Dark": "OLED Dark",
"Ollama": "Ollama",
"Ollama Base URL": "Đường dẫn tới API của Ollama (Ollama Base URL)",
"Ollama API": "",
"Ollama Version": "Phiên bản Ollama",
"On": "Bật",
"Only": "Only",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "好的,我们开始吧!",
"OLED Dark": "暗黑色",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama 基础 URL",
"Ollama API": "",
"Ollama Version": "Ollama 版本",
"On": "开",
"Only": "仅",

@@ -314,7 +314,7 @@
"Okay, Let's Go!": "好的,啟動吧!",
"OLED Dark": "`",
"Ollama": "Ollama",
"Ollama Base URL": "Ollama 基本 URL",
"Ollama API": "",
"Ollama Version": "Ollama 版本",
"On": "開啟",
"Only": "僅有",