Merge branch 'dev' of https://github.com/open-webui/open-webui into Dev-Individual-RAG-Config

weberm1
2025-06-18 09:32:17 +02:00
173 changed files with 10133 additions and 4686 deletions

View File

@@ -1077,6 +1077,10 @@ USER_PERMISSIONS_CHAT_CONTROLS = (
os.environ.get("USER_PERMISSIONS_CHAT_CONTROLS", "True").lower() == "true"
)
USER_PERMISSIONS_CHAT_SYSTEM_PROMPT = (
os.environ.get("USER_PERMISSIONS_CHAT_SYSTEM_PROMPT", "True").lower() == "true"
)
USER_PERMISSIONS_CHAT_FILE_UPLOAD = (
os.environ.get("USER_PERMISSIONS_CHAT_FILE_UPLOAD", "True").lower() == "true"
)
@@ -1162,6 +1166,7 @@ DEFAULT_USER_PERMISSIONS = {
},
"chat": {
"controls": USER_PERMISSIONS_CHAT_CONTROLS,
"system_prompt": USER_PERMISSIONS_CHAT_SYSTEM_PROMPT,
"file_upload": USER_PERMISSIONS_CHAT_FILE_UPLOAD,
"delete": USER_PERMISSIONS_CHAT_DELETE,
"edit": USER_PERMISSIONS_CHAT_EDIT,
@@ -2102,6 +2107,27 @@ RAG_FILE_MAX_SIZE = PersistentConfig(
),
)
FILE_IMAGE_COMPRESSION_WIDTH = PersistentConfig(
"FILE_IMAGE_COMPRESSION_WIDTH",
"file.image_compression_width",
(
int(os.environ.get("FILE_IMAGE_COMPRESSION_WIDTH"))
if os.environ.get("FILE_IMAGE_COMPRESSION_WIDTH")
else None
),
)
FILE_IMAGE_COMPRESSION_HEIGHT = PersistentConfig(
"FILE_IMAGE_COMPRESSION_HEIGHT",
"file.image_compression_height",
(
int(os.environ.get("FILE_IMAGE_COMPRESSION_HEIGHT"))
if os.environ.get("FILE_IMAGE_COMPRESSION_HEIGHT")
else None
),
)
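Both image-compression settings follow the same optional-int pattern: the env var is parsed to an int when set and falls back to None otherwise. A minimal sketch of that pattern (the helper name is illustrative, not part of the codebase):

```python
import os

def optional_int_env(name: str):
    """Return the env var as an int, or None when unset or empty."""
    value = os.environ.get(name)
    return int(value) if value else None

# FILE_IMAGE_COMPRESSION_WIDTH=1024 -> 1024; unset or "" -> None
width = optional_int_env("FILE_IMAGE_COMPRESSION_WIDTH")
```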
RAG_ALLOWED_FILE_EXTENSIONS = PersistentConfig(
"RAG_ALLOWED_FILE_EXTENSIONS",
"rag.file.allowed_extensions",
@@ -2939,6 +2965,18 @@ AUDIO_STT_MODEL = PersistentConfig(
os.getenv("AUDIO_STT_MODEL", ""),
)
AUDIO_STT_SUPPORTED_CONTENT_TYPES = PersistentConfig(
"AUDIO_STT_SUPPORTED_CONTENT_TYPES",
"audio.stt.supported_content_types",
[
content_type.strip()
for content_type in os.environ.get(
"AUDIO_STT_SUPPORTED_CONTENT_TYPES", ""
).split(",")
if content_type.strip()
],
)
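The new STT content-type setting is parsed from a comma-separated env var, with whitespace trimmed and empty entries dropped. A quick sketch of the parsing behavior:

```python
import os

os.environ["AUDIO_STT_SUPPORTED_CONTENT_TYPES"] = "audio/*, video/webm, "
parsed = [
    content_type.strip()
    for content_type in os.environ["AUDIO_STT_SUPPORTED_CONTENT_TYPES"].split(",")
    if content_type.strip()
]
assert parsed == ["audio/*", "video/webm"]  # spaces and the trailing comma are ignored
```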
AUDIO_STT_AZURE_API_KEY = PersistentConfig(
"AUDIO_STT_AZURE_API_KEY",
"audio.stt.azure.api_key",
@@ -3113,3 +3151,22 @@ LDAP_VALIDATE_CERT = PersistentConfig(
LDAP_CIPHERS = PersistentConfig(
"LDAP_CIPHERS", "ldap.server.ciphers", os.environ.get("LDAP_CIPHERS", "ALL")
)
# For LDAP Group Management
ENABLE_LDAP_GROUP_MANAGEMENT = PersistentConfig(
"ENABLE_LDAP_GROUP_MANAGEMENT",
"ldap.group.enable_management",
os.environ.get("ENABLE_LDAP_GROUP_MANAGEMENT", "False").lower() == "true",
)
ENABLE_LDAP_GROUP_CREATION = PersistentConfig(
"ENABLE_LDAP_GROUP_CREATION",
"ldap.group.enable_creation",
os.environ.get("ENABLE_LDAP_GROUP_CREATION", "False").lower() == "true",
)
LDAP_ATTRIBUTE_FOR_GROUPS = PersistentConfig(
"LDAP_ATTRIBUTE_FOR_GROUPS",
"ldap.server.attribute_for_groups",
os.environ.get("LDAP_ATTRIBUTE_FOR_GROUPS", "memberOf"),
)

View File

@@ -539,6 +539,7 @@ AUDIT_EXCLUDED_PATHS = [path.lstrip("/") for path in AUDIT_EXCLUDED_PATHS]
####################################
ENABLE_OTEL = os.environ.get("ENABLE_OTEL", "False").lower() == "true"
ENABLE_OTEL_METRICS = os.environ.get("ENABLE_OTEL_METRICS", "False").lower() == "true"
OTEL_EXPORTER_OTLP_ENDPOINT = os.environ.get(
"OTEL_EXPORTER_OTLP_ENDPOINT", "http://localhost:4317"
)

View File

@@ -57,6 +57,8 @@ from open_webui.utils.logger import start_logger
from open_webui.socket.main import (
app as socket_app,
periodic_usage_pool_cleanup,
get_models_in_use,
get_active_user_ids,
)
from open_webui.routers import (
audio,
@@ -157,6 +159,7 @@ from open_webui.config import (
# Audio
AUDIO_STT_ENGINE,
AUDIO_STT_MODEL,
AUDIO_STT_SUPPORTED_CONTENT_TYPES,
AUDIO_STT_OPENAI_API_BASE_URL,
AUDIO_STT_OPENAI_API_KEY,
AUDIO_STT_AZURE_API_KEY,
@@ -208,6 +211,8 @@ from open_webui.config import (
RAG_ALLOWED_FILE_EXTENSIONS,
RAG_FILE_MAX_COUNT,
RAG_FILE_MAX_SIZE,
FILE_IMAGE_COMPRESSION_WIDTH,
FILE_IMAGE_COMPRESSION_HEIGHT,
RAG_OPENAI_API_BASE_URL,
RAG_OPENAI_API_KEY,
RAG_AZURE_OPENAI_BASE_URL,
@@ -354,6 +359,10 @@ from open_webui.config import (
LDAP_CA_CERT_FILE,
LDAP_VALIDATE_CERT,
LDAP_CIPHERS,
# LDAP Group Management
ENABLE_LDAP_GROUP_MANAGEMENT,
ENABLE_LDAP_GROUP_CREATION,
LDAP_ATTRIBUTE_FOR_GROUPS,
# Misc
ENV,
CACHE_DIR,
@@ -681,6 +690,11 @@ app.state.config.LDAP_CA_CERT_FILE = LDAP_CA_CERT_FILE
app.state.config.LDAP_VALIDATE_CERT = LDAP_VALIDATE_CERT
app.state.config.LDAP_CIPHERS = LDAP_CIPHERS
# For LDAP Group Management
app.state.config.ENABLE_LDAP_GROUP_MANAGEMENT = ENABLE_LDAP_GROUP_MANAGEMENT
app.state.config.ENABLE_LDAP_GROUP_CREATION = ENABLE_LDAP_GROUP_CREATION
app.state.config.LDAP_ATTRIBUTE_FOR_GROUPS = LDAP_ATTRIBUTE_FOR_GROUPS
app.state.AUTH_TRUSTED_EMAIL_HEADER = WEBUI_AUTH_TRUSTED_EMAIL_HEADER
app.state.AUTH_TRUSTED_NAME_HEADER = WEBUI_AUTH_TRUSTED_NAME_HEADER
@@ -706,9 +720,13 @@ app.state.config.TOP_K = RAG_TOP_K
app.state.config.TOP_K_RERANKER = RAG_TOP_K_RERANKER
app.state.config.RELEVANCE_THRESHOLD = RAG_RELEVANCE_THRESHOLD
app.state.config.HYBRID_BM25_WEIGHT = RAG_HYBRID_BM25_WEIGHT
app.state.config.ALLOWED_FILE_EXTENSIONS = RAG_ALLOWED_FILE_EXTENSIONS
app.state.config.FILE_MAX_SIZE = RAG_FILE_MAX_SIZE
app.state.config.FILE_MAX_COUNT = RAG_FILE_MAX_COUNT
app.state.config.FILE_IMAGE_COMPRESSION_WIDTH = FILE_IMAGE_COMPRESSION_WIDTH
app.state.config.FILE_IMAGE_COMPRESSION_HEIGHT = FILE_IMAGE_COMPRESSION_HEIGHT
app.state.config.RAG_FULL_CONTEXT = RAG_FULL_CONTEXT
@@ -957,10 +975,12 @@ app.state.config.IMAGE_STEPS = IMAGE_STEPS
#
########################################
app.state.config.STT_OPENAI_API_BASE_URL = AUDIO_STT_OPENAI_API_BASE_URL
app.state.config.STT_OPENAI_API_KEY = AUDIO_STT_OPENAI_API_KEY
app.state.config.STT_ENGINE = AUDIO_STT_ENGINE
app.state.config.STT_MODEL = AUDIO_STT_MODEL
app.state.config.STT_SUPPORTED_CONTENT_TYPES = AUDIO_STT_SUPPORTED_CONTENT_TYPES
app.state.config.STT_OPENAI_API_BASE_URL = AUDIO_STT_OPENAI_API_BASE_URL
app.state.config.STT_OPENAI_API_KEY = AUDIO_STT_OPENAI_API_KEY
app.state.config.WHISPER_MODEL = WHISPER_MODEL
app.state.config.WHISPER_VAD_FILTER = WHISPER_VAD_FILTER
@@ -1371,6 +1391,17 @@ async def chat_completion(
request, response, form_data, user, metadata, model, events, tasks
)
except Exception as e:
log.debug(f"Error in chat completion: {e}")
if metadata.get("chat_id") and metadata.get("message_id"):
# Update the chat message with the error
Chats.upsert_message_to_chat_by_id_and_message_id(
metadata["chat_id"],
metadata["message_id"],
{
"error": {"content": str(e)},
},
)
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=str(e),
@@ -1542,6 +1573,10 @@ async def get_app_config(request: Request):
"file": {
"max_size": app.state.config.FILE_MAX_SIZE,
"max_count": app.state.config.FILE_MAX_COUNT,
"image_compression": {
"width": app.state.config.FILE_IMAGE_COMPRESSION_WIDTH,
"height": app.state.config.FILE_IMAGE_COMPRESSION_HEIGHT,
},
},
"permissions": {**app.state.config.USER_PERMISSIONS},
"google_drive": {
@@ -1627,6 +1662,19 @@ async def get_app_changelog():
return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}
@app.get("/api/usage")
async def get_current_usage(user=Depends(get_verified_user)):
"""
Get current usage statistics for Open WebUI.
This is an experimental endpoint and subject to change.
"""
try:
return {"model_ids": get_models_in_use(), "user_ids": get_active_user_ids()}
except Exception as e:
log.error(f"Error getting usage statistics: {e}")
raise HTTPException(status_code=500, detail="Internal Server Error")
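A hedged client-side sketch of calling the new experimental endpoint (base URL, port, and token are placeholders for your deployment):

```python
import requests

resp = requests.get(
    "http://localhost:8080/api/usage",  # assumed default host/port
    headers={"Authorization": "Bearer YOUR_API_KEY"},  # any verified user's token
)
resp.raise_for_status()
usage = resp.json()
print(usage["model_ids"], usage["user_ids"])
```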
############################
# OAuth Login & Callback
############################

View File

@@ -207,9 +207,39 @@ class GroupTable:
except Exception:
return False
def sync_user_groups_by_group_names(
def create_groups_by_group_names(
self, user_id: str, group_names: list[str]
) -> bool:
) -> list[GroupModel]:
# check for existing groups
existing_groups = self.get_groups()
existing_group_names = {group.name for group in existing_groups}
new_groups = []
with get_db() as db:
for group_name in group_names:
if group_name not in existing_group_names:
new_group = GroupModel(
id=str(uuid.uuid4()),
user_id=user_id,
name=group_name,
description="",
created_at=int(time.time()),
updated_at=int(time.time()),
)
try:
result = Group(**new_group.model_dump())
db.add(result)
db.commit()
db.refresh(result)
new_groups.append(GroupModel.model_validate(result))
except Exception as e:
log.exception(e)
continue
return new_groups
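Together the two methods separate group provisioning from membership sync: create_groups_by_group_names only adds names that do not exist yet, and sync_groups_by_group_names then reconciles the user's membership. A hypothetical call sequence (the group names are illustrative):

```python
# Create any groups that don't exist yet, then sync the user's membership.
Groups.create_groups_by_group_names(user.id, ["engineering", "sales"])
Groups.sync_groups_by_group_names(user.id, ["engineering", "sales"])
```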
def sync_groups_by_group_names(self, user_id: str, group_names: list[str]) -> bool:
with get_db() as db:
try:
groups = db.query(Group).filter(Group.name.in_(group_names)).all()

View File

@@ -370,7 +370,7 @@ class UsersTable:
except Exception:
return False
def update_user_api_key_by_id(self, id: str, api_key: str) -> str:
def update_user_api_key_by_id(self, id: str, api_key: str) -> bool:
try:
with get_db() as db:
result = db.query(User).filter_by(id=id).update({"api_key": api_key})

View File

@@ -1,5 +1,5 @@
import requests
import logging
import logging, os
from typing import Iterator, List, Union
from langchain_core.document_loaders import BaseLoader
@@ -25,7 +25,7 @@ class ExternalDocumentLoader(BaseLoader):
self.file_path = file_path
self.mime_type = mime_type
def load(self) -> list[Document]:
def load(self) -> List[Document]:
with open(self.file_path, "rb") as f:
data = f.read()
@@ -36,23 +36,48 @@ class ExternalDocumentLoader(BaseLoader):
if self.api_key is not None:
headers["Authorization"] = f"Bearer {self.api_key}"
try:
headers["X-Filename"] = os.path.basename(self.file_path)
except:
pass
url = self.url
if url.endswith("/"):
url = url[:-1]
r = requests.put(f"{url}/process", data=data, headers=headers)
try:
response = requests.put(f"{url}/process", data=data, headers=headers)
except Exception as e:
log.error(f"Error connecting to endpoint: {e}")
raise Exception(f"Error connecting to endpoint: {e}")
if r.ok:
res = r.json()
if response.ok:
response_data = response.json()
if response_data:
if isinstance(response_data, dict):
return [
Document(
page_content=response_data.get("page_content"),
metadata=response_data.get("metadata"),
)
]
elif isinstance(response_data, list):
documents = []
for document in response_data:
documents.append(
Document(
page_content=document.get("page_content"),
metadata=document.get("metadata"),
)
)
return documents
else:
raise Exception("Error loading document: Unable to parse content")
if res:
return [
Document(
page_content=res.get("page_content"),
metadata=res.get("metadata"),
)
]
else:
raise Exception("Error loading document: No content returned")
else:
raise Exception(f"Error loading document: {r.status_code} {r.text}")
raise Exception(
f"Error loading document: {response.status_code} {response.text}"
)
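The rewritten loader now accepts either a single document object or a list of them from the external /process endpoint. A sketch of the two response shapes it parses (field names come from the diff; the payload values are illustrative):

```python
# Single-document response:
{"page_content": "Extracted text...", "metadata": {"source": "report.pdf"}}

# Multi-document response:
[
    {"page_content": "Page 1 text", "metadata": {"page": 1}},
    {"page_content": "Page 2 text", "metadata": {"page": 2}},
]
```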

View File

@@ -162,15 +162,15 @@ class DoclingLoader:
if picture_description_mode == "local" and self.params.get(
"picture_description_local", {}
):
params["picture_description_local"] = self.params.get(
"picture_description_local", {}
params["picture_description_local"] = json.dumps(
self.params.get("picture_description_local", {})
)
elif picture_description_mode == "api" and self.params.get(
"picture_description_api", {}
):
params["picture_description_api"] = self.params.get(
"picture_description_api", {}
params["picture_description_api"] = json.dumps(
self.params.get("picture_description_api", {})
)
if self.params.get("ocr_engine") and self.params.get("ocr_lang"):

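The change serializes the picture-description option dicts with json.dumps before they are sent as request parameters, so nested settings survive query/form encoding as a single string. A minimal sketch (the option fields are illustrative):

```python
import json

picture_description_local = {"repo_id": "some-org/some-vision-model"}  # illustrative
params = {"picture_description_local": json.dumps(picture_description_local)}
# params now carries a JSON string, not a nested dict
```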
View File

@@ -7,6 +7,7 @@ import hashlib
from concurrent.futures import ThreadPoolExecutor
import time
from urllib.parse import quote
from huggingface_hub import snapshot_download
from langchain.retrievers import ContextualCompressionRetriever, EnsembleRetriever
from langchain_community.retrievers import BM25Retriever
@@ -719,10 +720,10 @@ def generate_openai_batch_embeddings(
"Authorization": f"Bearer {key}",
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -768,10 +769,10 @@ def generate_azure_openai_batch_embeddings(
"api-key": key,
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -818,10 +819,10 @@ def generate_ollama_batch_embeddings(
"Authorization": f"Bearer {key}",
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS
else {}
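Wrapping the forwarded user-info values in quote() percent-encodes them, so names with spaces or non-ASCII characters remain valid HTTP header values. A quick sketch:

```python
from urllib.parse import quote, unquote

name = "Jürgen Müller"
header_value = quote(name)  # 'J%C3%BCrgen%20M%C3%BCller' -- plain ASCII, header-safe
assert unquote(header_value) == name  # the receiving side can decode it back
```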

View File

@@ -92,6 +92,15 @@ class PgvectorClient(VectorDBBase):
# Ensure the pgvector extension is available
self.session.execute(text("CREATE EXTENSION IF NOT EXISTS vector;"))
if PGVECTOR_PGCRYPTO:
# Ensure the pgcrypto extension is available for encryption
self.session.execute(text("CREATE EXTENSION IF NOT EXISTS pgcrypto;"))
if not PGVECTOR_PGCRYPTO_KEY:
raise ValueError(
"PGVECTOR_PGCRYPTO_KEY must be set when PGVECTOR_PGCRYPTO is enabled."
)
# Check vector length consistency
self.check_vector_length()

View File

@@ -10,11 +10,12 @@ from pydub.silence import split_on_silence
from concurrent.futures import ThreadPoolExecutor
from typing import Optional
from fnmatch import fnmatch
import aiohttp
import aiofiles
import requests
import mimetypes
from urllib.parse import quote
from fastapi import (
Depends,
@@ -168,6 +169,7 @@ class STTConfigForm(BaseModel):
OPENAI_API_KEY: str
ENGINE: str
MODEL: str
SUPPORTED_CONTENT_TYPES: list[str] = []
WHISPER_MODEL: str
DEEPGRAM_API_KEY: str
AZURE_API_KEY: str
@@ -202,6 +204,7 @@ async def get_audio_config(request: Request, user=Depends(get_admin_user)):
"OPENAI_API_KEY": request.app.state.config.STT_OPENAI_API_KEY,
"ENGINE": request.app.state.config.STT_ENGINE,
"MODEL": request.app.state.config.STT_MODEL,
"SUPPORTED_CONTENT_TYPES": request.app.state.config.STT_SUPPORTED_CONTENT_TYPES,
"WHISPER_MODEL": request.app.state.config.WHISPER_MODEL,
"DEEPGRAM_API_KEY": request.app.state.config.DEEPGRAM_API_KEY,
"AZURE_API_KEY": request.app.state.config.AUDIO_STT_AZURE_API_KEY,
@@ -236,6 +239,10 @@ async def update_audio_config(
request.app.state.config.STT_OPENAI_API_KEY = form_data.stt.OPENAI_API_KEY
request.app.state.config.STT_ENGINE = form_data.stt.ENGINE
request.app.state.config.STT_MODEL = form_data.stt.MODEL
request.app.state.config.STT_SUPPORTED_CONTENT_TYPES = (
form_data.stt.SUPPORTED_CONTENT_TYPES
)
request.app.state.config.WHISPER_MODEL = form_data.stt.WHISPER_MODEL
request.app.state.config.DEEPGRAM_API_KEY = form_data.stt.DEEPGRAM_API_KEY
request.app.state.config.AUDIO_STT_AZURE_API_KEY = form_data.stt.AZURE_API_KEY
@@ -250,6 +257,8 @@ async def update_audio_config(
request.app.state.faster_whisper_model = set_faster_whisper_model(
form_data.stt.WHISPER_MODEL, WHISPER_MODEL_AUTO_UPDATE
)
else:
request.app.state.faster_whisper_model = None
return {
"tts": {
@@ -269,6 +278,7 @@ async def update_audio_config(
"OPENAI_API_KEY": request.app.state.config.STT_OPENAI_API_KEY,
"ENGINE": request.app.state.config.STT_ENGINE,
"MODEL": request.app.state.config.STT_MODEL,
"SUPPORTED_CONTENT_TYPES": request.app.state.config.STT_SUPPORTED_CONTENT_TYPES,
"WHISPER_MODEL": request.app.state.config.WHISPER_MODEL,
"DEEPGRAM_API_KEY": request.app.state.config.DEEPGRAM_API_KEY,
"AZURE_API_KEY": request.app.state.config.AUDIO_STT_AZURE_API_KEY,
@@ -334,10 +344,10 @@ async def speech(request: Request, user=Depends(get_verified_user)):
"Authorization": f"Bearer {request.app.state.config.TTS_OPENAI_API_KEY}",
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS
else {}
@@ -628,7 +638,7 @@ def transcription_handler(request, file_path, metadata):
# Make request to Deepgram API
r = requests.post(
"https://api.deepgram.com/v1/listen",
"https://api.deepgram.com/v1/listen?smart_format=true",
headers=headers,
params=params,
data=file_data,
@@ -910,10 +920,14 @@ def transcription(
):
log.info(f"file.content_type: {file.content_type}")
SUPPORTED_CONTENT_TYPES = {"video/webm"} # Extend if you add more video types!
if not (
file.content_type.startswith("audio/")
or file.content_type in SUPPORTED_CONTENT_TYPES
supported_content_types = request.app.state.config.STT_SUPPORTED_CONTENT_TYPES or [
"audio/*",
"video/webm",
]
if not any(
fnmatch(file.content_type, content_type)
for content_type in supported_content_types
):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,

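The hardcoded content-type set is replaced by configurable patterns matched with fnmatch, so shell-style wildcards such as "audio/*" cover whole MIME families. A sketch of the matching behavior:

```python
from fnmatch import fnmatch

supported = ["audio/*", "video/webm"]  # the defaults used when nothing is configured
assert any(fnmatch("audio/mpeg", pattern) for pattern in supported)
assert any(fnmatch("video/webm", pattern) for pattern in supported)
assert not any(fnmatch("video/mp4", pattern) for pattern in supported)
```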
View File

@@ -55,9 +55,8 @@ from typing import Optional, List
from ssl import CERT_NONE, CERT_REQUIRED, PROTOCOL_TLS
if ENABLE_LDAP.value:
from ldap3 import Server, Connection, NONE, Tls
from ldap3.utils.conv import escape_filter_chars
from ldap3 import Server, Connection, NONE, Tls
from ldap3.utils.conv import escape_filter_chars
router = APIRouter()
@@ -229,14 +228,30 @@ async def ldap_auth(request: Request, response: Response, form_data: LdapForm):
if not connection_app.bind():
raise HTTPException(400, detail="Application account bind failed")
ENABLE_LDAP_GROUP_MANAGEMENT = (
request.app.state.config.ENABLE_LDAP_GROUP_MANAGEMENT
)
ENABLE_LDAP_GROUP_CREATION = request.app.state.config.ENABLE_LDAP_GROUP_CREATION
LDAP_ATTRIBUTE_FOR_GROUPS = request.app.state.config.LDAP_ATTRIBUTE_FOR_GROUPS
search_attributes = [
f"{LDAP_ATTRIBUTE_FOR_USERNAME}",
f"{LDAP_ATTRIBUTE_FOR_MAIL}",
"cn",
]
if ENABLE_LDAP_GROUP_MANAGEMENT:
search_attributes.append(f"{LDAP_ATTRIBUTE_FOR_GROUPS}")
log.info(
f"LDAP Group Management enabled. Adding {LDAP_ATTRIBUTE_FOR_GROUPS} to search attributes"
)
log.info(f"LDAP search attributes: {search_attributes}")
search_success = connection_app.search(
search_base=LDAP_SEARCH_BASE,
search_filter=f"(&({LDAP_ATTRIBUTE_FOR_USERNAME}={escape_filter_chars(form_data.user.lower())}){LDAP_SEARCH_FILTERS})",
attributes=[
f"{LDAP_ATTRIBUTE_FOR_USERNAME}",
f"{LDAP_ATTRIBUTE_FOR_MAIL}",
"cn",
],
attributes=search_attributes,
)
if not search_success or not connection_app.entries:
@@ -259,6 +274,69 @@ async def ldap_auth(request: Request, response: Response, form_data: LdapForm):
cn = str(entry["cn"])
user_dn = entry.entry_dn
user_groups = []
if ENABLE_LDAP_GROUP_MANAGEMENT and LDAP_ATTRIBUTE_FOR_GROUPS in entry:
group_dns = entry[LDAP_ATTRIBUTE_FOR_GROUPS]
log.info(f"LDAP raw group DNs for user {username}: {group_dns}")
if group_dns:
log.info(f"LDAP group_dns original: {group_dns}")
log.info(f"LDAP group_dns type: {type(group_dns)}")
log.info(f"LDAP group_dns length: {len(group_dns)}")
if hasattr(group_dns, "value"):
group_dns = group_dns.value
log.info(f"Extracted .value property: {group_dns}")
elif hasattr(group_dns, "__iter__") and not isinstance(
group_dns, (str, bytes)
):
group_dns = list(group_dns)
log.info(f"Converted to list: {group_dns}")
if isinstance(group_dns, list):
group_dns = [str(item) for item in group_dns]
else:
group_dns = [str(group_dns)]
log.info(
f"LDAP group_dns after processing - type: {type(group_dns)}, length: {len(group_dns)}"
)
for group_idx, group_dn in enumerate(group_dns):
group_dn = str(group_dn)
log.info(f"Processing group DN #{group_idx + 1}: {group_dn}")
try:
group_cn = None
for item in group_dn.split(","):
item = item.strip()
if item.upper().startswith("CN="):
group_cn = item[3:]
break
if group_cn:
user_groups.append(group_cn)
else:
log.warning(
f"Could not extract CN from group DN: {group_dn}"
)
except Exception as e:
log.warning(
f"Failed to extract group name from DN {group_dn}: {e}"
)
log.info(
f"LDAP groups for user {username}: {user_groups} (total: {len(user_groups)})"
)
else:
log.info(f"No groups found for user {username}")
elif ENABLE_LDAP_GROUP_MANAGEMENT:
log.warning(
f"LDAP Group Management enabled but {LDAP_ATTRIBUTE_FOR_GROUPS} attribute not found in user entry"
)
if username == form_data.user.lower():
connection_user = Connection(
server,
@@ -334,6 +412,22 @@ async def ldap_auth(request: Request, response: Response, form_data: LdapForm):
user.id, request.app.state.config.USER_PERMISSIONS
)
if (
user.role != "admin"
and ENABLE_LDAP_GROUP_MANAGEMENT
and user_groups
):
if ENABLE_LDAP_GROUP_CREATION:
Groups.create_groups_by_group_names(user.id, user_groups)
try:
Groups.sync_groups_by_group_names(user.id, user_groups)
log.info(
f"Successfully synced groups for user {user.id}: {user_groups}"
)
except Exception as e:
log.error(f"Failed to sync groups for user {user.id}: {e}")
return {
"token": token,
"token_type": "Bearer",
@@ -386,7 +480,7 @@ async def signin(request: Request, response: Response, form_data: SigninForm):
group_names = [name.strip() for name in group_names if name.strip()]
if group_names:
Groups.sync_user_groups_by_group_names(user.id, group_names)
Groups.sync_groups_by_group_names(user.id, group_names)
elif WEBUI_AUTH == False:
admin_email = "admin@localhost"

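The group names synced above are the CN components extracted from each group DN by splitting on commas. A standalone sketch of that parsing (the sample DN is illustrative; like the diff, it does not handle escaped commas inside DN components):

```python
def extract_group_cn(group_dn: str):
    """Return the CN component of an LDAP DN, or None if absent."""
    for item in group_dn.split(","):
        item = item.strip()
        if item.upper().startswith("CN="):
            return item[3:]
    return None

assert extract_group_cn("CN=engineering,OU=groups,DC=example,DC=org") == "engineering"
```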
View File

@@ -157,9 +157,18 @@ def upload_file(
if process:
try:
if file.content_type:
if file.content_type.startswith("audio/") or file.content_type in {
"video/webm"
}:
stt_supported_content_types = (
request.app.state.config.STT_SUPPORTED_CONTENT_TYPES
or [
"audio/*",
"video/webm",
]
)
if any(
fnmatch(file.content_type, content_type)
for content_type in stt_supported_content_types
):
file_path = Storage.get_file(file_path)
result = transcribe(request, file_path, file_metadata)

View File

@@ -8,6 +8,7 @@ import re
from pathlib import Path
from typing import Optional
from urllib.parse import quote
import requests
from fastapi import APIRouter, Depends, HTTPException, Request, UploadFile
from open_webui.config import CACHE_DIR
@@ -483,10 +484,10 @@ async def image_generations(
headers["Content-Type"] = "application/json"
if ENABLE_FORWARD_USER_INFO_HEADERS:
headers["X-OpenWebUI-User-Name"] = user.name
headers["X-OpenWebUI-User-Id"] = user.id
headers["X-OpenWebUI-User-Email"] = user.email
headers["X-OpenWebUI-User-Role"] = user.role
headers["X-OpenWebUI-User-Name"] = quote(user.name)
headers["X-OpenWebUI-User-Id"] = quote(user.id)
headers["X-OpenWebUI-User-Email"] = quote(user.email)
headers["X-OpenWebUI-User-Role"] = quote(user.role)
data = {
"model": (

View File

@@ -124,9 +124,9 @@ async def get_note_by_id(request: Request, id: str, user=Depends(get_verified_us
status_code=status.HTTP_404_NOT_FOUND, detail=ERROR_MESSAGES.NOT_FOUND
)
if user.role != "admin" or (
if user.role != "admin" and (
user.id != note.user_id
and not has_access(user.id, type="read", access_control=note.access_control)
and (not has_access(user.id, type="read", access_control=note.access_control))
):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.DEFAULT()
@@ -158,7 +158,7 @@ async def update_note_by_id(
status_code=status.HTTP_404_NOT_FOUND, detail=ERROR_MESSAGES.NOT_FOUND
)
if user.role != "admin" or (
if user.role != "admin" and (
user.id != note.user_id
and not has_access(user.id, type="write", access_control=note.access_control)
):
@@ -197,7 +197,7 @@ async def delete_note_by_id(request: Request, id: str, user=Depends(get_verified
status_code=status.HTTP_404_NOT_FOUND, detail=ERROR_MESSAGES.NOT_FOUND
)
if user.role != "admin" or (
if user.role != "admin" and (
user.id != note.user_id
and not has_access(user.id, type="write", access_control=note.access_control)
):

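The `or` to `and` fix matters because with `or`, the first clause alone (user.role != "admin") rejected every non-admin, including the note's owner. A tiny sketch of the two conditions for a non-admin reading their own note:

```python
role, owner_id, user_id = "user", "u1", "u1"  # non-admin accessing their own note
acl_grant = False  # no explicit access_control grant

old = role != "admin" or (user_id != owner_id and not acl_grant)   # True  -> 403 (bug)
new = role != "admin" and (user_id != owner_id and not acl_grant)  # False -> allowed
assert old and not new
```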
View File

@@ -16,6 +16,9 @@ from urllib.parse import urlparse
import aiohttp
from aiocache import cached
import requests
from urllib.parse import quote
from open_webui.models.chats import Chats
from open_webui.models.users import UserModel
from open_webui.env import (
@@ -85,10 +88,10 @@ async def send_get_request(url, key=None, user: UserModel = None):
**({"Authorization": f"Bearer {key}"} if key else {}),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -136,10 +139,10 @@ async def send_post_request(
**({"Authorization": f"Bearer {key}"} if key else {}),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -147,8 +150,23 @@ async def send_post_request(
},
ssl=AIOHTTP_CLIENT_SESSION_SSL,
)
r.raise_for_status()
if r.ok is False:
try:
res = await r.json()
await cleanup_response(r, session)
if "error" in res:
raise HTTPException(status_code=r.status, detail=res["error"])
except HTTPException as e:
raise e # Re-raise HTTPException to be handled by FastAPI
except Exception as e:
log.error(f"Failed to parse error response: {e}")
raise HTTPException(
status_code=r.status,
detail=f"Open WebUI: Server Connection Error",
)
r.raise_for_status() # Raises an error for bad responses (4xx, 5xx)
if stream:
response_headers = dict(r.headers)
@@ -168,20 +186,14 @@ async def send_post_request(
await cleanup_response(r, session)
return res
except HTTPException as e:
raise e # Re-raise HTTPException to be handled by FastAPI
except Exception as e:
detail = None
if r is not None:
try:
res = await r.json()
if "error" in res:
detail = f"Ollama: {res.get('error', 'Unknown error')}"
except Exception:
detail = f"Ollama: {e}"
detail = f"Ollama: {e}"
raise HTTPException(
status_code=r.status if r else 500,
detail=detail if detail else "Open WebUI: Server Connection Error",
detail=detail if e else "Open WebUI: Server Connection Error",
)
@@ -231,10 +243,10 @@ async def verify_connection(
**({"Authorization": f"Bearer {key}"} if key else {}),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -451,10 +463,10 @@ async def get_ollama_tags(
**({"Authorization": f"Bearer {key}"} if key else {}),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -813,10 +825,10 @@ async def copy_model(
**({"Authorization": f"Bearer {key}"} if key else {}),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -879,10 +891,10 @@ async def delete_model(
**({"Authorization": f"Bearer {key}"} if key else {}),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -938,10 +950,10 @@ async def show_model_info(
**({"Authorization": f"Bearer {key}"} if key else {}),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -1025,10 +1037,10 @@ async def embed(
**({"Authorization": f"Bearer {key}"} if key else {}),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -1112,10 +1124,10 @@ async def embeddings(
**({"Authorization": f"Bearer {key}"} if key else {}),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -1232,6 +1244,9 @@ class GenerateChatCompletionForm(BaseModel):
stream: Optional[bool] = True
keep_alive: Optional[Union[int, str]] = None
tools: Optional[list[dict]] = None
model_config = ConfigDict(
extra="allow",
)
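Setting extra="allow" on the form lets callers pass through fields the model does not declare (for example the new root-level think flag) instead of Pydantic rejecting them. A minimal sketch:

```python
from pydantic import BaseModel, ConfigDict

class Form(BaseModel):
    model_config = ConfigDict(extra="allow")
    model: str

form = Form(model="llama3", think=True)  # undeclared field is kept, not rejected
assert form.model_dump() == {"model": "llama3", "think": True}
```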
async def get_ollama_url(request: Request, model: str, url_idx: Optional[int] = None):
@@ -1269,7 +1284,9 @@ async def generate_chat_completion(
detail=str(e),
)
payload = {**form_data.model_dump(exclude_none=True)}
if isinstance(form_data, BaseModel):
payload = {**form_data.model_dump(exclude_none=True)}
if "metadata" in payload:
del payload["metadata"]
@@ -1285,11 +1302,7 @@ async def generate_chat_completion(
if params:
system = params.pop("system", None)
# Unlike OpenAI, Ollama does not support params directly in the body
payload["options"] = apply_model_params_to_body_ollama(
params, (payload.get("options", {}) or {})
)
payload = apply_model_params_to_body_ollama(params, payload)
payload = apply_model_system_prompt_to_body(system, payload, metadata, user)
# Check if user has access to the model
@@ -1323,7 +1336,7 @@ async def generate_chat_completion(
prefix_id = api_config.get("prefix_id", None)
if prefix_id:
payload["model"] = payload["model"].replace(f"{prefix_id}.", "")
# payload["keep_alive"] = -1 # keep alive forever
return await send_post_request(
url=f"{url}/api/chat",
payload=json.dumps(payload),

View File

@@ -8,7 +8,7 @@ from typing import Literal, Optional, overload
import aiohttp
from aiocache import cached
import requests
from urllib.parse import quote
from fastapi import Depends, FastAPI, HTTPException, Request, APIRouter
from fastapi.middleware.cors import CORSMiddleware
@@ -66,10 +66,10 @@ async def send_get_request(url, key=None, user: UserModel = None):
**({"Authorization": f"Bearer {key}"} if key else {}),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -225,10 +225,10 @@ async def speech(request: Request, user=Depends(get_verified_user)):
),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS
else {}
@@ -478,10 +478,10 @@ async def get_models(
"Content-Type": "application/json",
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS
else {}
@@ -573,10 +573,10 @@ async def verify_connection(
"Content-Type": "application/json",
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS
else {}
@@ -806,10 +806,10 @@ async def generate_chat_completion(
),
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS
else {}
@@ -924,10 +924,10 @@ async def embeddings(request: Request, form_data: dict, user):
"Content-Type": "application/json",
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS and user
else {}
@@ -996,10 +996,10 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
"Content-Type": "application/json",
**(
{
"X-OpenWebUI-User-Name": user.name,
"X-OpenWebUI-User-Id": user.id,
"X-OpenWebUI-User-Email": user.email,
"X-OpenWebUI-User-Role": user.role,
"X-OpenWebUI-User-Name": quote(user.name),
"X-OpenWebUI-User-Id": quote(user.id),
"X-OpenWebUI-User-Email": quote(user.email),
"X-OpenWebUI-User-Role": quote(user.role),
}
if ENABLE_FORWARD_USER_INFO_HEADERS
else {}

View File

@@ -618,6 +618,8 @@ async def get_rag_config(request: Request, collectionForm: CollectionForm, user=
# File upload settings
"FILE_MAX_SIZE": rag_config.get("FILE_MAX_SIZE", request.app.state.config.FILE_MAX_SIZE),
"FILE_MAX_COUNT": rag_config.get("FILE_MAX_COUNT", request.app.state.config.FILE_MAX_COUNT),
"FILE_IMAGE_COMPRESSION_WIDTH": rag_config.get("FILE_IMAGE_COMPRESSION_WIDTH", request.app.state.config.FILE_IMAGE_COMPRESSION_WIDTH),
"FILE_IMAGE_COMPRESSION_HEIGHT": rag_config.get("FILE_IMAGE_COMPRESSION_HEIGHT", request.app.state.config.FILE_IMAGE_COMPRESSION_HEIGHT),
"ALLOWED_FILE_EXTENSIONS": rag_config.get("ALLOWED_FILE_EXTENSIONS", request.app.state.config.ALLOWED_FILE_EXTENSIONS),
# Integration settings
"ENABLE_GOOGLE_DRIVE_INTEGRATION": rag_config.get("ENABLE_GOOGLE_DRIVE_INTEGRATION", request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION),
@@ -790,6 +792,8 @@ class ConfigForm(BaseModel):
# File upload settings
FILE_MAX_SIZE: Optional[int] = None
FILE_MAX_COUNT: Optional[int] = None
FILE_IMAGE_COMPRESSION_WIDTH: Optional[int] = None
FILE_IMAGE_COMPRESSION_HEIGHT: Optional[int] = None
ALLOWED_FILE_EXTENSIONS: Optional[List[str]] = None
# Integration settings
@@ -1169,15 +1173,13 @@ async def update_rag_config(
)
# File upload settings
request.app.state.config.FILE_MAX_SIZE = (
form_data.FILE_MAX_SIZE
if form_data.FILE_MAX_SIZE is not None
else request.app.state.config.FILE_MAX_SIZE
request.app.state.config.FILE_MAX_SIZE = form_data.FILE_MAX_SIZE
request.app.state.config.FILE_MAX_COUNT = form_data.FILE_MAX_COUNT
request.app.state.config.FILE_IMAGE_COMPRESSION_WIDTH = (
form_data.FILE_IMAGE_COMPRESSION_WIDTH
)
request.app.state.config.FILE_MAX_COUNT = (
form_data.FILE_MAX_COUNT
if form_data.FILE_MAX_COUNT is not None
else request.app.state.config.FILE_MAX_COUNT
request.app.state.config.FILE_IMAGE_COMPRESSION_HEIGHT = (
form_data.FILE_IMAGE_COMPRESSION_HEIGHT
)
request.app.state.config.ALLOWED_FILE_EXTENSIONS = (
form_data.ALLOWED_FILE_EXTENSIONS
@@ -1347,6 +1349,8 @@ async def update_rag_config(
# File upload settings
"FILE_MAX_SIZE": request.app.state.config.FILE_MAX_SIZE,
"FILE_MAX_COUNT": request.app.state.config.FILE_MAX_COUNT,
"FILE_IMAGE_COMPRESSION_WIDTH": request.app.state.config.FILE_IMAGE_COMPRESSION_WIDTH,
"FILE_IMAGE_COMPRESSION_HEIGHT": request.app.state.config.FILE_IMAGE_COMPRESSION_HEIGHT,
"ALLOWED_FILE_EXTENSIONS": request.app.state.config.ALLOWED_FILE_EXTENSIONS,
# Integration settings
"ENABLE_GOOGLE_DRIVE_INTEGRATION": request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION,
@@ -2303,6 +2307,10 @@ async def process_web_search(
try:
if request.app.state.config.BYPASS_WEB_SEARCH_WEB_LOADER:
search_results = [
item for result in search_results for item in result if result
]
docs = [
Document(
page_content=result.snippet,

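When the web-loader bypass is on, the per-engine result lists are flattened into one list before the snippets are wrapped in Documents. A sketch of the comprehension:

```python
search_results = [["r1", "r2"], [], ["r3"]]  # one sublist per search engine
flat = [item for result in search_results for item in result if result]
assert flat == ["r1", "r2", "r3"]
```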
View File

@@ -14,7 +14,11 @@ from open_webui.models.users import (
)
from open_webui.socket.main import get_active_status_by_user_id
from open_webui.socket.main import (
get_active_status_by_user_id,
get_active_user_ids,
get_user_active_status,
)
from open_webui.constants import ERROR_MESSAGES
from open_webui.env import SRC_LOG_LEVELS
from fastapi import APIRouter, Depends, HTTPException, Request, status
@@ -29,6 +33,24 @@ log.setLevel(SRC_LOG_LEVELS["MODELS"])
router = APIRouter()
############################
# GetActiveUsers
############################
@router.get("/active")
async def get_active_users(
user=Depends(get_verified_user),
):
"""
Get a list of active users.
"""
return {
"user_ids": get_active_user_ids(),
}
############################
# GetUsers
############################
@@ -111,6 +133,7 @@ class SharingPermissions(BaseModel):
class ChatPermissions(BaseModel):
controls: bool = True
system_prompt: bool = True
file_upload: bool = True
delete: bool = True
edit: bool = True
@@ -303,6 +326,18 @@ async def get_user_by_id(user_id: str, user=Depends(get_verified_user)):
)
############################
# GetUserActiveStatusById
############################
@router.get("/{user_id}/active", response_model=dict)
async def get_user_active_status_by_id(user_id: str, user=Depends(get_verified_user)):
return {
"active": get_user_active_status(user_id),
}
############################
# UpdateUserById
############################

View File

@@ -135,11 +135,6 @@ async def periodic_usage_pool_cleanup():
USAGE_POOL[model_id] = connections
send_usage = True
if send_usage:
# Emit updated usage information after cleaning
await sio.emit("usage", {"models": get_models_in_use()})
await asyncio.sleep(TIMEOUT_DURATION)
finally:
release_func()
@@ -157,6 +152,43 @@ def get_models_in_use():
return models_in_use
def get_active_user_ids():
"""Get the list of active user IDs."""
return list(USER_POOL.keys())
def get_user_active_status(user_id):
"""Check if a user is currently active."""
return user_id in USER_POOL
def get_user_id_from_session_pool(sid):
user = SESSION_POOL.get(sid)
if user:
return user["id"]
return None
def get_user_ids_from_room(room):
active_session_ids = sio.manager.get_participants(
namespace="/",
room=room,
)
active_user_ids = list(
set(
[SESSION_POOL.get(session_id[0])["id"] for session_id in active_session_ids]
)
)
return active_user_ids
def get_active_status_by_user_id(user_id):
if user_id in USER_POOL:
return True
return False
@sio.on("usage")
async def usage(sid, data):
if sid in SESSION_POOL:
@@ -170,9 +202,6 @@ async def usage(sid, data):
sid: {"updated_at": current_time},
}
# Broadcast the usage data to all clients
await sio.emit("usage", {"models": get_models_in_use()})
@sio.event
async def connect(sid, environ, auth):
@@ -190,10 +219,6 @@ async def connect(sid, environ, auth):
else:
USER_POOL[user.id] = [sid]
# print(f"user {user.name}({user.id}) connected with session ID {sid}")
await sio.emit("user-list", {"user_ids": list(USER_POOL.keys())})
await sio.emit("usage", {"models": get_models_in_use()})
@sio.on("user-join")
async def user_join(sid, data):
@@ -221,10 +246,6 @@ async def user_join(sid, data):
log.debug(f"{channels=}")
for channel in channels:
await sio.enter_room(sid, f"channel:{channel.id}")
# print(f"user {user.name}({user.id}) connected with session ID {sid}")
await sio.emit("user-list", {"user_ids": list(USER_POOL.keys())})
return {"id": user.id, "name": user.name}
@@ -277,12 +298,6 @@ async def channel_events(sid, data):
)
@sio.on("user-list")
async def user_list(sid):
if sid in SESSION_POOL:
await sio.emit("user-list", {"user_ids": list(USER_POOL.keys())})
@sio.event
async def disconnect(sid):
if sid in SESSION_POOL:
@@ -294,8 +309,6 @@ async def disconnect(sid):
if len(USER_POOL[user_id]) == 0:
del USER_POOL[user_id]
await sio.emit("user-list", {"user_ids": list(USER_POOL.keys())})
else:
pass
# print(f"Unknown session ID {sid} disconnected")
@@ -388,30 +401,3 @@ def get_event_call(request_info):
get_event_caller = get_event_call
def get_user_id_from_session_pool(sid):
user = SESSION_POOL.get(sid)
if user:
return user["id"]
return None
def get_user_ids_from_room(room):
active_session_ids = sio.manager.get_participants(
namespace="/",
room=room,
)
active_user_ids = list(
set(
[SESSION_POOL.get(session_id[0])["id"] for session_id in active_session_ids]
)
)
return active_user_ids
def get_active_status_by_user_id(user_id):
if user_id in USER_POOL:
return True
return False

Binary file not shown (image: 7.3 KiB before, 1.6 KiB after).

View File

@@ -60,7 +60,7 @@ def get_permissions(
# Combine permissions from all user groups
for group in user_groups:
group_permissions = group.permissions
group_permissions = group.permissions or {}
permissions = combine_permissions(permissions, group_permissions)
# Ensure all fields from default_permissions are present and filled in

View File

@@ -228,7 +228,9 @@ def get_current_user(
)
else:
if WEBUI_AUTH_TRUSTED_EMAIL_HEADER:
trusted_email = request.headers.get(WEBUI_AUTH_TRUSTED_EMAIL_HEADER)
trusted_email = request.headers.get(
WEBUI_AUTH_TRUSTED_EMAIL_HEADER, ""
).lower()
if trusted_email and user.email != trusted_email:
# Delete the token cookie
response.delete_cookie("token")

View File

@@ -419,7 +419,7 @@ async def chat_action(request: Request, action_id: str, form_data: dict, user: A
params[key] = value
if "__user__" in sig.parameters:
__user__ = (user.model_dump() if isinstance(user, UserModel) else {},)
__user__ = user.model_dump() if isinstance(user, UserModel) else {}
try:
if hasattr(function_module, "UserValves"):

View File

@@ -712,14 +712,9 @@ def apply_params_to_form_data(form_data, model):
# If custom_params are provided, merge them into params
params = deep_update(params, custom_params)
if model.get("ollama"):
if model.get("owned_by") == "ollama":
# Ollama specific parameters
form_data["options"] = params
if "format" in params:
form_data["format"] = params["format"]
if "keep_alive" in params:
form_data["keep_alive"] = params["keep_alive"]
else:
if isinstance(params, dict):
for key, value in params.items():
@@ -1102,6 +1097,7 @@ async def process_chat_response(
follow_ups = json.loads(follow_ups_string).get(
"follow_ups", []
)
Chats.upsert_message_to_chat_by_id_and_message_id(
metadata["chat_id"],
metadata["message_id"],
@@ -1122,7 +1118,12 @@ async def process_chat_response(
pass
if TASKS.TITLE_GENERATION in tasks:
user_message = get_last_user_message(messages)
if user_message and len(user_message) > 100:
user_message = user_message[:100] + "..."
if tasks[TASKS.TITLE_GENERATION]:
res = await generate_title(
request,
{
@@ -1138,7 +1139,9 @@ async def process_chat_response(
title_string = (
res.get("choices", [])[0]
.get("message", {})
.get("content", message.get("content", "New Chat"))
.get(
"content", message.get("content", user_message)
)
)
else:
title_string = ""
@@ -1149,13 +1152,13 @@ async def process_chat_response(
try:
title = json.loads(title_string).get(
"title", "New Chat"
"title", user_message
)
except Exception as e:
title = ""
if not title:
title = messages[0].get("content", "New Chat")
title = messages[0].get("content", user_message)
Chats.update_chat_title_by_id(metadata["chat_id"], title)
@@ -1166,14 +1169,14 @@ async def process_chat_response(
}
)
elif len(messages) == 2:
title = messages[0].get("content", "New Chat")
title = messages[0].get("content", user_message)
Chats.update_chat_title_by_id(metadata["chat_id"], title)
await event_emitter(
{
"type": "chat:title",
"data": message.get("content", "New Chat"),
"data": message.get("content", user_message),
}
)
@@ -1890,9 +1893,11 @@ async def process_chat_response(
value = delta.get("content")
reasoning_content = delta.get(
"reasoning_content"
) or delta.get("reasoning")
reasoning_content = (
delta.get("reasoning_content")
or delta.get("reasoning")
or delta.get("thinking")
)
if reasoning_content:
if (
not content_blocks
@@ -2075,28 +2080,38 @@ async def process_chat_response(
tools = metadata.get("tools", {})
results = []
for tool_call in response_tool_calls:
tool_call_id = tool_call.get("id", "")
tool_name = tool_call.get("function", {}).get("name", "")
tool_args = tool_call.get("function", {}).get("arguments", "{}")
tool_function_params = {}
try:
# json.loads cannot be used because some models do not produce valid JSON
tool_function_params = ast.literal_eval(
tool_call.get("function", {}).get("arguments", "{}")
)
tool_function_params = ast.literal_eval(tool_args)
except Exception as e:
log.debug(e)
# Fallback to JSON parsing
try:
tool_function_params = json.loads(
tool_call.get("function", {}).get("arguments", "{}")
)
tool_function_params = json.loads(tool_args)
except Exception as e:
log.debug(
f"Error parsing tool call arguments: {tool_call.get('function', {}).get('arguments', '{}')}"
log.error(
f"Error parsing tool call arguments: {tool_args}"
)
# Mutate the original tool call response params as they are passed
# back to the LLM via the content blocks. If they are in a json block and are invalid json,
# this can cause downstream LLM integrations to fail (e.g. bedrock gateway) where response
# params are not valid json.
# Main case so far is no args = "" = invalid json.
log.debug(
f"Parsed args from {tool_args} to {tool_function_params}"
)
tool_call.setdefault("function", {})["arguments"] = json.dumps(
tool_function_params
)
tool_result = None
if tool_name in tools:

View File

@@ -208,6 +208,7 @@ def openai_chat_message_template(model: str):
def openai_chat_chunk_message_template(
model: str,
content: Optional[str] = None,
reasoning_content: Optional[str] = None,
tool_calls: Optional[list[dict]] = None,
usage: Optional[dict] = None,
) -> dict:
@@ -220,6 +221,9 @@ def openai_chat_chunk_message_template(
if content:
template["choices"][0]["delta"]["content"] = content
if reasoning_content:
template["choices"][0]["delta"]["reasoning_content"] = reasoning_content
if tool_calls:
template["choices"][0]["delta"]["tool_calls"] = tool_calls
@@ -234,6 +238,7 @@ def openai_chat_chunk_message_template(
def openai_chat_completion_message_template(
model: str,
message: Optional[str] = None,
reasoning_content: Optional[str] = None,
tool_calls: Optional[list[dict]] = None,
usage: Optional[dict] = None,
) -> dict:
@@ -241,8 +246,9 @@ def openai_chat_completion_message_template(
template["object"] = "chat.completion"
if message is not None:
template["choices"][0]["message"] = {
"content": message,
"role": "assistant",
"content": message,
**({"reasoning_content": reasoning_content} if reasoning_content else {}),
**({"tool_calls": tool_calls} if tool_calls else {}),
}

View File

@@ -537,8 +537,8 @@ class OAuthManager:
)
# Redirect back to the frontend with the JWT token
redirect_base_url = request.app.state.config.WEBUI_URL or request.base_url
if isinstance(redirect_base_url, str) and redirect_base_url.endswith("/"):
redirect_base_url = str(request.app.state.config.WEBUI_URL or request.base_url)
if redirect_base_url.endswith("/"):
redirect_base_url = redirect_base_url[:-1]
redirect_url = f"{redirect_base_url}/auth#token={jwt_token}"

View File

@@ -175,16 +175,32 @@ def apply_model_params_to_body_ollama(params: dict, form_data: dict) -> dict:
"num_thread": int,
}
# Extract keep_alive from options if it exists
if "options" in form_data and "keep_alive" in form_data["options"]:
form_data["keep_alive"] = form_data["options"]["keep_alive"]
del form_data["options"]["keep_alive"]
def parse_json(value: str) -> dict:
"""
Parses a JSON string into a dictionary, handling potential JSONDecodeError.
"""
try:
return json.loads(value)
except Exception as e:
return value
if "options" in form_data and "format" in form_data["options"]:
form_data["format"] = form_data["options"]["format"]
del form_data["options"]["format"]
ollama_root_params = {
"format": lambda x: parse_json(x),
"keep_alive": lambda x: parse_json(x),
"think": bool,
}
return apply_model_params_to_body(params, form_data, mappings)
for key, value in ollama_root_params.items():
if (param := params.get(key, None)) is not None:
# Move the parameter to the root level, then delete it to prevent an Ollama warning about an invalid option
form_data[key] = value(param)
del params[key]
# Unlike OpenAI, Ollama does not support params directly in the body
form_data["options"] = apply_model_params_to_body(
params, (form_data.get("options", {}) or {}), mappings
)
return form_data
def convert_messages_openai_to_ollama(messages: list[dict]) -> list[dict]:
@@ -279,36 +295,48 @@ def convert_payload_openai_to_ollama(openai_payload: dict) -> dict:
openai_payload.get("messages")
)
ollama_payload["stream"] = openai_payload.get("stream", False)
if "tools" in openai_payload:
ollama_payload["tools"] = openai_payload["tools"]
if "format" in openai_payload:
ollama_payload["format"] = openai_payload["format"]
# If there are advanced parameters in the payload, format them in Ollama's options field
if openai_payload.get("options"):
ollama_payload["options"] = openai_payload["options"]
ollama_options = openai_payload["options"]
def parse_json(value: str) -> dict:
"""
Parses a JSON string into a dictionary, handling potential JSONDecodeError.
"""
try:
return json.loads(value)
except Exception as e:
return value
ollama_root_params = {
"format": lambda x: parse_json(x),
"keep_alive": lambda x: parse_json(x),
"think": bool,
}
# Ollama's options field can contain parameters that should be at the root level.
for key, value in ollama_root_params.items():
if (param := ollama_options.get(key, None)) is not None:
# Move the parameter to the root level, then delete it to prevent an Ollama warning about an invalid option
ollama_payload[key] = value(param)
del ollama_options[key]
# Re-Mapping OpenAI's `max_tokens` -> Ollama's `num_predict`
if "max_tokens" in ollama_options:
ollama_options["num_predict"] = ollama_options["max_tokens"]
del ollama_options[
"max_tokens"
] # To prevent Ollama warning of invalid option provided
del ollama_options["max_tokens"]
# Ollama lacks a "system" prompt option. It has to be provided as a direct parameter, so we copy it down.
# Comment: Not sure why this is needed, but we'll keep it for compatibility.
if "system" in ollama_options:
ollama_payload["system"] = ollama_options["system"]
del ollama_options[
"system"
] # To prevent Ollama warning of invalid option provided
del ollama_options["system"]
# Extract keep_alive from options if it exists
if "keep_alive" in ollama_options:
ollama_payload["keep_alive"] = ollama_options["keep_alive"]
del ollama_options["keep_alive"]
ollama_payload["options"] = ollama_options
# If there is the "stop" parameter in the openai_payload, remap it to the ollama_payload.options
if "stop" in openai_payload:

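Both hunks hoist the Ollama root-level parameters ("format", "keep_alive", "think") out of the options dict, parsing JSON strings where possible and passing other values through untouched. A sketch of parse_json's behavior:

```python
import json

def parse_json(value):
    """Parse a JSON string; return the raw value when it is not valid JSON."""
    try:
        return json.loads(value)
    except Exception:
        return value

assert parse_json('{"type": "object"}') == {"type": "object"}  # JSON schema for `format`
assert parse_json("5m") == "5m"  # a keep_alive duration string passes through unchanged
```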
View File

@@ -83,6 +83,7 @@ def convert_ollama_usage_to_openai(data: dict) -> dict:
def convert_response_ollama_to_openai(ollama_response: dict) -> dict:
model = ollama_response.get("model", "ollama")
message_content = ollama_response.get("message", {}).get("content", "")
reasoning_content = ollama_response.get("message", {}).get("thinking", None)
tool_calls = ollama_response.get("message", {}).get("tool_calls", None)
openai_tool_calls = None
@@ -94,7 +95,7 @@ def convert_response_ollama_to_openai(ollama_response: dict) -> dict:
usage = convert_ollama_usage_to_openai(data)
response = openai_chat_completion_message_template(
model, message_content, openai_tool_calls, usage
model, message_content, reasoning_content, openai_tool_calls, usage
)
return response
@@ -105,6 +106,7 @@ async def convert_streaming_response_ollama_to_openai(ollama_streaming_response)
model = data.get("model", "ollama")
message_content = data.get("message", {}).get("content", None)
reasoning_content = data.get("message", {}).get("thinking", None)
tool_calls = data.get("message", {}).get("tool_calls", None)
openai_tool_calls = None
@@ -118,7 +120,7 @@ async def convert_streaming_response_ollama_to_openai(ollama_streaming_response)
usage = convert_ollama_usage_to_openai(data)
data = openai_chat_chunk_message_template(
model, message_content, openai_tool_calls, usage
model, message_content, reasoning_content, openai_tool_calls, usage
)
line = f"data: {json.dumps(data)}\n\n"

View File

@@ -0,0 +1,110 @@
"""OpenTelemetry metrics bootstrap for Open WebUI.
This module initialises a MeterProvider that sends metrics to an OTLP
collector. The collector is responsible for exposing a Prometheus
`/metrics` endpoint; WebUI does **not** expose it directly.
Metrics collected:
* http.server.requests (counter)
* http.server.duration (histogram, milliseconds)
Attributes used: http.method, http.route, http.status_code
If you wish to add more attributes (e.g. user-agent) you can, but beware of
high-cardinality label sets.
"""
from __future__ import annotations
import time
from typing import Dict, List, Sequence, Any
from fastapi import FastAPI, Request
from opentelemetry import metrics
from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import (
OTLPMetricExporter,
)
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.view import View
from opentelemetry.sdk.metrics.export import (
PeriodicExportingMetricReader,
)
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from open_webui.env import OTEL_SERVICE_NAME, OTEL_EXPORTER_OTLP_ENDPOINT
_EXPORT_INTERVAL_MILLIS = 10_000 # 10 seconds
def _build_meter_provider() -> MeterProvider:
"""Return a configured MeterProvider."""
# Periodic reader pushes metrics over OTLP/gRPC to collector
readers: List[PeriodicExportingMetricReader] = [
PeriodicExportingMetricReader(
OTLPMetricExporter(endpoint=OTEL_EXPORTER_OTLP_ENDPOINT),
export_interval_millis=_EXPORT_INTERVAL_MILLIS,
)
]
# Optional view to limit cardinality: drop user-agent etc.
views: List[View] = [
View(
instrument_name="http.server.duration",
attribute_keys=["http.method", "http.route", "http.status_code"],
),
View(
instrument_name="http.server.requests",
attribute_keys=["http.method", "http.route", "http.status_code"],
),
]
provider = MeterProvider(
resource=Resource.create({SERVICE_NAME: OTEL_SERVICE_NAME}),
metric_readers=list(readers),
views=views,
)
return provider
def setup_metrics(app: FastAPI) -> None:
"""Attach OTel metrics middleware to *app* and initialise provider."""
metrics.set_meter_provider(_build_meter_provider())
meter = metrics.get_meter(__name__)
# Instruments
request_counter = meter.create_counter(
name="http.server.requests",
description="Total HTTP requests",
unit="1",
)
duration_histogram = meter.create_histogram(
name="http.server.duration",
description="HTTP request duration",
unit="ms",
)
# FastAPI middleware
@app.middleware("http")
async def _metrics_middleware(request: Request, call_next):
start_time = time.perf_counter()
response = await call_next(request)
elapsed_ms = (time.perf_counter() - start_time) * 1000.0
# Route template e.g. "/items/{item_id}" instead of real path.
route = request.scope.get("route")
route_path = getattr(route, "path", request.url.path)
attrs: Dict[str, str | int] = {
"http.method": request.method,
"http.route": route_path,
"http.status_code": response.status_code,
}
request_counter.add(1, attrs)
duration_histogram.record(elapsed_ms, attrs)
return response
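Per the setup hunk below, this module is wired in only when ENABLE_OTEL_METRICS is set. A minimal sketch of the call path:

```python
from fastapi import FastAPI
from open_webui.utils.telemetry.metrics import setup_metrics

app = FastAPI()
setup_metrics(app)  # registers the middleware and starts the periodic OTLP exporter
```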

View File

@@ -7,7 +7,12 @@ from sqlalchemy import Engine
from open_webui.utils.telemetry.exporters import LazyBatchSpanProcessor
from open_webui.utils.telemetry.instrumentors import Instrumentor
from open_webui.env import OTEL_SERVICE_NAME, OTEL_EXPORTER_OTLP_ENDPOINT
from open_webui.utils.telemetry.metrics import setup_metrics
from open_webui.env import (
OTEL_SERVICE_NAME,
OTEL_EXPORTER_OTLP_ENDPOINT,
ENABLE_OTEL_METRICS,
)
def setup(app: FastAPI, db_engine: Engine):
@@ -21,3 +26,7 @@ def setup(app: FastAPI, db_engine: Engine):
exporter = OTLPSpanExporter(endpoint=OTEL_EXPORTER_OTLP_ENDPOINT)
trace.get_tracer_provider().add_span_processor(LazyBatchSpanProcessor(exporter))
Instrumentor(app=app, db_engine=db_engine).instrument()
# set up metrics only if enabled
if ENABLE_OTEL_METRICS:
setup_metrics(app)

View File

@@ -479,7 +479,7 @@ async def get_tool_server_data(token: str, url: str) -> Dict[str, Any]:
"specs": convert_openapi_to_tool_payload(res),
}
log.info("Fetched data:", data)
log.info(f"Fetched data: {data}")
return data
@@ -644,5 +644,5 @@ async def execute_tool_server(
except Exception as err:
error = str(err)
log.exception("API Request Error:", error)
log.exception(f"API Request Error: {error}")
return {"error": error}

View File

@@ -7,14 +7,13 @@ python-socketio==5.13.0
python-jose==3.4.0
passlib[bcrypt]==1.7.4
requests==2.32.3
requests==2.32.4
aiohttp==3.11.11
async-timeout
aiocache
aiofiles
starlette-compress==1.6.0
sqlalchemy==2.0.38
alembic==1.14.0
peewee==3.18.1
@@ -67,7 +66,7 @@ pypdf==4.3.1
fpdf2==2.8.2
pymdown-extensions==10.14.2
docx2txt==0.8
python-pptx==1.0.0
python-pptx==1.0.2
unstructured==0.16.17
nltk==3.9.1
Markdown==3.7
@@ -96,7 +95,7 @@ authlib==1.4.1
black==25.1.0
langfuse==2.44.0
youtube-transcript-api==1.0.3
youtube-transcript-api==1.1.0
pytube==15.0.0
extract_msg