mirror of https://github.com/open-webui/open-webui
synced 2025-06-26 18:26:48 +00:00
feat: arena models
@@ -1,6 +1,7 @@
import inspect
import json
import logging
import time
from typing import AsyncGenerator, Generator, Iterator

from open_webui.apps.socket.main import get_event_call, get_event_emitter

@@ -17,6 +18,7 @@ from open_webui.apps.webui.routers import (
    models,
    knowledge,
    prompts,
    evaluations,
    tools,
    users,
    utils,

@@ -32,6 +34,9 @@ from open_webui.config import (
    ENABLE_LOGIN_FORM,
    ENABLE_MESSAGE_RATING,
    ENABLE_SIGNUP,
    ENABLE_EVALUATION_ARENA_MODELS,
    EVALUATION_ARENA_MODELS,
    DEFAULT_ARENA_MODEL,
    JWT_EXPIRES_IN,
    ENABLE_OAUTH_ROLE_MANAGEMENT,
    OAUTH_ROLES_CLAIM,

@@ -94,6 +99,9 @@ app.state.config.BANNERS = WEBUI_BANNERS
app.state.config.ENABLE_COMMUNITY_SHARING = ENABLE_COMMUNITY_SHARING
app.state.config.ENABLE_MESSAGE_RATING = ENABLE_MESSAGE_RATING

app.state.config.ENABLE_EVALUATION_ARENA_MODELS = ENABLE_EVALUATION_ARENA_MODELS
app.state.config.EVALUATION_ARENA_MODELS = EVALUATION_ARENA_MODELS

app.state.config.OAUTH_USERNAME_CLAIM = OAUTH_USERNAME_CLAIM
app.state.config.OAUTH_PICTURE_CLAIM = OAUTH_PICTURE_CLAIM
app.state.config.OAUTH_EMAIL_CLAIM = OAUTH_EMAIL_CLAIM

@@ -117,20 +125,24 @@ app.add_middleware(

app.include_router(configs.router, prefix="/configs", tags=["configs"])

app.include_router(auths.router, prefix="/auths", tags=["auths"])
app.include_router(users.router, prefix="/users", tags=["users"])

app.include_router(chats.router, prefix="/chats", tags=["chats"])
app.include_router(folders.router, prefix="/folders", tags=["folders"])

app.include_router(models.router, prefix="/models", tags=["models"])
app.include_router(knowledge.router, prefix="/knowledge", tags=["knowledge"])
app.include_router(prompts.router, prefix="/prompts", tags=["prompts"])

app.include_router(files.router, prefix="/files", tags=["files"])
app.include_router(tools.router, prefix="/tools", tags=["tools"])
app.include_router(functions.router, prefix="/functions", tags=["functions"])

app.include_router(memories.router, prefix="/memories", tags=["memories"])
app.include_router(evaluations.router, prefix="/evaluations", tags=["evaluations"])

app.include_router(folders.router, prefix="/folders", tags=["folders"])
app.include_router(files.router, prefix="/files", tags=["files"])

app.include_router(utils.router, prefix="/utils", tags=["utils"])

@@ -145,8 +157,44 @@ async def get_status():


async def get_all_models():
    models = []
    pipe_models = await get_pipe_models()
    models = models + pipe_models

    if app.state.config.ENABLE_EVALUATION_ARENA_MODELS:
        arena_models = []
        if len(app.state.config.EVALUATION_ARENA_MODELS) > 0:
            arena_models = [
                {
                    "id": model["id"],
                    "name": model["name"],
                    "info": {
                        "meta": model["meta"],
                    },
                    "object": "model",
                    "created": int(time.time()),
                    "owned_by": "arena",
                    "arena": True,
                }
                for model in app.state.config.EVALUATION_ARENA_MODELS
            ]
        else:
            # Add default arena model
            arena_models = [
                {
                    "id": DEFAULT_ARENA_MODEL["id"],
                    "name": DEFAULT_ARENA_MODEL["name"],
                    "info": {
                        "meta": DEFAULT_ARENA_MODEL["meta"],
                    },
                    "object": "model",
                    "created": int(time.time()),
                    "owned_by": "arena",
                    "arena": True,
                }
            ]
        models = models + arena_models

    return models


def get_function_module(pipe_id: str):

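For reference, each entry in EVALUATION_ARENA_MODELS is expected to carry the id, name, and meta fields that the comprehension above reads; a minimal, hypothetical entry (the ids are placeholders, not part of this commit) could look like:

example_arena_entry = {
    "id": "arena-coding",      # hypothetical arena id
    "name": "Coding Arena",    # display name surfaced in the model list
    "meta": {
        "profile_image_url": "/favicon.png",
        "description": "Blind comparison for coding questions.",
        # candidate models to sample from; None means "any visible, non-arena model"
        "model_ids": ["model-a", "model-b"],
    },
}
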
backend/open_webui/apps/webui/routers/evaluations.py (new file, 49 lines)
@@ -0,0 +1,49 @@
from typing import Optional
from fastapi import APIRouter, Depends, HTTPException, status, Request
from pydantic import BaseModel

from open_webui.constants import ERROR_MESSAGES
from open_webui.utils.utils import get_admin_user, get_verified_user

router = APIRouter()


############################
# GetConfig
############################


@router.get("/config")
async def get_config(request: Request, user=Depends(get_admin_user)):
    return {
        "ENABLE_EVALUATION_ARENA_MODELS": request.app.state.config.ENABLE_EVALUATION_ARENA_MODELS,
        "EVALUATION_ARENA_MODELS": request.app.state.config.EVALUATION_ARENA_MODELS,
    }


############################
# UpdateConfig
############################


class UpdateConfigForm(BaseModel):
    ENABLE_EVALUATION_ARENA_MODELS: Optional[bool] = None
    EVALUATION_ARENA_MODELS: Optional[list[dict]] = None


@router.post("/config")
async def update_config(
    request: Request,
    form_data: UpdateConfigForm,
    user=Depends(get_admin_user),
):
    config = request.app.state.config
    if form_data.ENABLE_EVALUATION_ARENA_MODELS is not None:
        config.ENABLE_EVALUATION_ARENA_MODELS = form_data.ENABLE_EVALUATION_ARENA_MODELS
    if form_data.EVALUATION_ARENA_MODELS is not None:
        config.EVALUATION_ARENA_MODELS = form_data.EVALUATION_ARENA_MODELS
    return {
        "ENABLE_EVALUATION_ARENA_MODELS": config.ENABLE_EVALUATION_ARENA_MODELS,
        "EVALUATION_ARENA_MODELS": config.EVALUATION_ARENA_MODELS,
    }

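A hedged client-side sketch of exercising these endpoints; the base URL, admin token, and model ids are placeholders, and it assumes the webui router is mounted under /api/v1 as elsewhere in the app:

import requests

BASE = "http://localhost:8080/api/v1/evaluations"
HEADERS = {"Authorization": "Bearer <ADMIN_TOKEN>"}

# Read the current arena configuration (admin only).
print(requests.get(f"{BASE}/config", headers=HEADERS).json())

# Enable arena models and register a single custom arena entry.
payload = {
    "ENABLE_EVALUATION_ARENA_MODELS": True,
    "EVALUATION_ARENA_MODELS": [
        {
            "id": "arena-coding",
            "name": "Coding Arena",
            "meta": {
                "profile_image_url": "/favicon.png",
                "description": "Blind comparison for coding questions.",
                "model_ids": ["model-a", "model-b"],
            },
        }
    ],
}
print(requests.post(f"{BASE}/config", headers=HEADERS, json=payload).json())
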
@@ -751,6 +751,28 @@ USER_PERMISSIONS = PersistentConfig(
    },
)


ENABLE_EVALUATION_ARENA_MODELS = PersistentConfig(
    "ENABLE_EVALUATION_ARENA_MODELS",
    "evaluation.arena.enable",
    os.environ.get("ENABLE_EVALUATION_ARENA_MODELS", "True").lower() == "true",
)
EVALUATION_ARENA_MODELS = PersistentConfig(
    "EVALUATION_ARENA_MODELS",
    "evaluation.arena.models",
    [],
)

DEFAULT_ARENA_MODEL = {
    "id": "arena-model",
    "name": "Arena Model",
    "meta": {
        "profile_image_url": "/favicon.png",
        "description": "Submit your questions to anonymous AI chatbots and vote on the best response.",
        "model_ids": None,
    },
}

ENABLE_MODEL_FILTER = PersistentConfig(
    "ENABLE_MODEL_FILTER",
    "model_filter.enable",

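As the os.environ.get call above shows, arena models default to enabled and the flag is read from the environment at startup; a standalone sketch of that boolean parsing (standard library only, independent of PersistentConfig):

import os

# Mirrors the parsing used for ENABLE_EVALUATION_ARENA_MODELS above:
# any value other than "true"/"True" disables the feature.
enabled = os.environ.get("ENABLE_EVALUATION_ARENA_MODELS", "True").lower() == "true"
print(enabled)
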
@@ -7,6 +7,7 @@ import os
import shutil
import sys
import time
import random
from contextlib import asynccontextmanager
from typing import Optional

@@ -23,7 +24,7 @@ from fastapi import (
    status,
)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from sqlalchemy import text

@@ -1093,6 +1094,23 @@ async def generate_chat_completions(form_data: dict, user=Depends(get_verified_u
        )

    model = app.state.MODELS[model_id]

    if model["owned_by"] == "arena":
        model_ids = model.get("info", {}).get("meta", {}).get("model_ids")
        model_id = None
        if isinstance(model_ids, list) and model_ids:
            model_id = random.choice(model_ids)
        else:
            model_ids = [
                model["id"]
                for model in await get_all_models()
                if model.get("owned_by") != "arena"
                and not model.get("info", {}).get("meta", {}).get("hidden", False)
            ]
            model_id = random.choice(model_ids)

        form_data["model"] = model_id
        return await generate_chat_completions(form_data, user)
    if model.get("pipe"):
        return await generate_function_chat_completion(form_data, user=user)
    if model["owned_by"] == "ollama":

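From a client's point of view the arena is just another model id: the server picks a concrete model at random and re-invokes generate_chat_completions with it. A hypothetical request sketch; the route path, URL, and token are assumptions based on the surrounding app, not part of this diff:

import requests

resp = requests.post(
    "http://localhost:8080/api/chat/completions",  # route assumed to wrap generate_chat_completions
    headers={"Authorization": "Bearer <TOKEN>"},
    json={
        "model": "arena-model",  # DEFAULT_ARENA_MODEL["id"]; re-dispatched to a random candidate
        "messages": [{"role": "user", "content": "Hello!"}],
    },
)
print(resp.json())
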
@@ -116,6 +116,9 @@ def convert_messages_openai_to_ollama(messages: list[dict]) -> list[dict]:
            elif item.get("type") == "image_url":
                img_url = item.get("image_url", {}).get("url", "")
                if img_url:
                    # If the image url starts with data:, it's a base64 image and should be trimmed
                    if img_url.startswith("data:"):
                        img_url = img_url.split(",")[-1]
                    images.append(img_url)

        # Add content text (if any)

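A quick illustration of the trimming rule above, since Ollama expects raw base64 image data rather than a full data: URL (the base64 string here is a truncated placeholder):

img_url = "data:image/png;base64,iVBORw0KGgoAAAANS"
if img_url.startswith("data:"):
    img_url = img_url.split(",")[-1]
print(img_url)  # -> "iVBORw0KGgoAAAANS"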