mirror of https://github.com/open-webui/open-webui (synced 2024-11-16 13:40:55 +00:00)
commit edeff20e1d
.github/workflows/format-backend.yaml (vendored, 2 changed lines)
@@ -33,7 +33,7 @@ jobs:
           pip install black

       - name: Format backend
-        run: black . --exclude "/venv/"
+        run: npm run format:backend

       - name: Check for changes after format
         run: git diff --exit-code
Makefile (2 changed lines)
@@ -8,6 +8,8 @@ remove:

 start:
 	@docker-compose start
+startAndBuild:
+	docker-compose up -d --build

 stop:
 	@docker-compose stop
@@ -325,7 +325,7 @@ def save_url_image(url):

         return image_id
     except Exception as e:
-        print(f"Error saving image: {e}")
+        log.exception(f"Error saving image: {e}")
         return None

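Several hunks in this commit replace a print() inside an except block with log.exception(...). A minimal, self-contained sketch of the difference follows; the function name, logger name, and basicConfig setup are placeholders, not the app's actual wiring:

import logging

logging.basicConfig(level=logging.INFO)  # placeholder setup, not the app's config
log = logging.getLogger("images")


def save_image(url):
    try:
        raise ValueError(f"no image at {url}")  # stand-in for a real failure
    except Exception as e:
        # log.exception records at ERROR level and appends the full traceback,
        # which print(f"... {e}") drops; output also goes through the configured
        # handlers instead of straight to stdout.
        log.exception(f"Error saving image: {e}")
        return None


save_image("http://example.invalid/cat.png")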
@@ -397,7 +397,7 @@ def generate_image(
                 user.id,
                 app.state.COMFYUI_BASE_URL,
             )
-            print(res)
+            log.debug(f"res: {res}")

             images = []

@@ -409,7 +409,7 @@ def generate_image(
             with open(file_body_path, "w") as f:
                 json.dump(data.model_dump(exclude_none=True), f)

-            print(images)
+            log.debug(f"images: {images}")
             return images
         else:
             if form_data.model:
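A general caveat about the new debug lines (not something this commit changes): an f-string passed to log.debug() is formatted even when DEBUG is disabled, while passing the value as an argument defers formatting until the record is actually emitted. A small sketch:

import logging

logging.basicConfig(level=logging.INFO)  # DEBUG deliberately disabled
log = logging.getLogger("images")

images = ["b64-image-1", "b64-image-2"]  # placeholder payload

log.debug(f"images: {images}")   # the f-string is rendered, then discarded
log.debug("images: %s", images)  # lazy form: formatted only if emitted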
@@ -4,6 +4,12 @@ import json
 import urllib.request
 import urllib.parse
 import random
+import logging
+
+from config import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["COMFYUI"])

 from pydantic import BaseModel

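This hunk (in the ComfyUI helper) and the webhook hunk further down both wire a module-level logger to a per-source level. A minimal sketch of the pattern, assuming SRC_LOG_LEVELS in config is a dict mapping source names such as "COMFYUI" to level names; the values shown here are placeholders:

import logging

logging.basicConfig()  # attach a root handler so records are actually printed

# Assumed shape of config.SRC_LOG_LEVELS: source name -> level name.
SRC_LOG_LEVELS = {"COMFYUI": "DEBUG", "WEBHOOK": "INFO"}

# Each module takes its own logger and pins it to the level configured for its
# source, so verbosity can be tuned per subsystem.
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["COMFYUI"])

log.info("queue_prompt")            # emitted
log.debug("payload: %s", {"x": 1})  # emitted only because COMFYUI is at DEBUG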
@@ -121,7 +127,7 @@ COMFYUI_DEFAULT_PROMPT = """


 def queue_prompt(prompt, client_id, base_url):
-    print("queue_prompt")
+    log.info("queue_prompt")
     p = {"prompt": prompt, "client_id": client_id}
     data = json.dumps(p).encode("utf-8")
     req = urllib.request.Request(f"{base_url}/prompt", data=data)
@@ -129,7 +135,7 @@ def queue_prompt(prompt, client_id, base_url):


 def get_image(filename, subfolder, folder_type, base_url):
-    print("get_image")
+    log.info("get_image")
     data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
     url_values = urllib.parse.urlencode(data)
     with urllib.request.urlopen(f"{base_url}/view?{url_values}") as response:
@@ -137,14 +143,14 @@ def get_image(filename, subfolder, folder_type, base_url):


 def get_image_url(filename, subfolder, folder_type, base_url):
-    print("get_image")
+    log.info("get_image")
     data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
     url_values = urllib.parse.urlencode(data)
     return f"{base_url}/view?{url_values}"


 def get_history(prompt_id, base_url):
-    print("get_history")
+    log.info("get_history")
     with urllib.request.urlopen(f"{base_url}/history/{prompt_id}") as response:
         return json.loads(response.read())

@@ -212,15 +218,15 @@ def comfyui_generate_image(
     try:
         ws = websocket.WebSocket()
         ws.connect(f"ws://{host}/ws?clientId={client_id}")
-        print("WebSocket connection established.")
+        log.info("WebSocket connection established.")
     except Exception as e:
-        print(f"Failed to connect to WebSocket server: {e}")
+        log.exception(f"Failed to connect to WebSocket server: {e}")
         return None

     try:
         images = get_images(ws, comfyui_prompt, client_id, base_url)
     except Exception as e:
-        print(f"Error while receiving images: {e}")
+        log.exception(f"Error while receiving images: {e}")
         images = None

     ws.close()
@@ -272,7 +272,7 @@ async def pull_model(
                     if request_id in REQUEST_POOL:
                         yield chunk
                     else:
-                        print("User: canceled request")
+                        log.warning("User: canceled request")
                         break
         finally:
             if hasattr(r, "close"):
@@ -670,7 +670,7 @@ async def generate_completion(
     else:
         raise HTTPException(
             status_code=400,
-            detail="error_detail",
+            detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
         )

     url = app.state.OLLAMA_BASE_URLS[url_idx]
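The generate_completion hunk swaps the placeholder string "error_detail" for a message built from ERROR_MESSAGES. The sketch below only illustrates the shape of that pattern (a constants holder whose members are callables that interpolate details); the real class and wording live in the project's constants module:

class ERROR_MESSAGES:  # hypothetical stand-in for the project's constants
    @staticmethod
    def MODEL_NOT_FOUND(name=""):
        return f"Model '{name}' was not found"


detail = ERROR_MESSAGES.MODEL_NOT_FOUND("mistral:7b")
print(detail)  # Model 'mistral:7b' was not found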
@@ -333,7 +333,7 @@ def store_docs_in_vector_db(docs, collection_name, overwrite: bool = False) -> b
         if overwrite:
             for collection in CHROMA_CLIENT.list_collections():
                 if collection_name == collection.name:
-                    print(f"deleting existing collection {collection_name}")
+                    log.info(f"deleting existing collection {collection_name}")
                     CHROMA_CLIENT.delete_collection(name=collection_name)

         collection = CHROMA_CLIENT.create_collection(
@@ -346,7 +346,7 @@ def store_docs_in_vector_db(docs, collection_name, overwrite: bool = False) -> b
         )
         return True
     except Exception as e:
-        print(e)
+        log.exception(e)
         if e.__class__.__name__ == "UniqueConstraintError":
             return True

@@ -575,7 +575,7 @@ def scan_docs_dir(user=Depends(get_admin_user)):
                     ),
                 )
             except Exception as e:
-                print(e)
+                log.exception(e)
                 pass

     except Exception as e:
@@ -156,6 +156,8 @@ def rag_messages(docs, messages, template, k, embedding_function):

         relevant_contexts.append(context)

+    log.debug(f"relevant_contexts: {relevant_contexts}")
+
     context_string = ""
     for context in relevant_contexts:
         if context:
@@ -119,6 +119,7 @@ log.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")

 log_sources = [
     "AUDIO",
+    "COMFYUI",
     "CONFIG",
     "DB",
     "IMAGES",
@@ -128,6 +129,7 @@ log_sources = [
     "OLLAMA",
     "OPENAI",
     "RAG",
+    "WEBHOOK",
 ]

 SRC_LOG_LEVELS = {}
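These two hunks register "COMFYUI" and "WEBHOOK" as log sources so the new module loggers have levels to read. How SRC_LOG_LEVELS gets populated is not part of this diff; a plausible sketch, assuming one environment variable per source falling back to a global default (the variable names are guesses):

import logging
import os

GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "INFO").upper()
if not hasattr(logging, GLOBAL_LOG_LEVEL):
    GLOBAL_LOG_LEVEL = "INFO"

# Sources visible in the two hunks above (the full list has more entries).
log_sources = ["AUDIO", "COMFYUI", "CONFIG", "DB", "IMAGES",
               "OLLAMA", "OPENAI", "RAG", "WEBHOOK"]

SRC_LOG_LEVELS = {}
for source in log_sources:
    # Guessed convention: e.g. COMFYUI_LOG_LEVEL overrides the global default.
    level = os.environ.get(f"{source}_LOG_LEVEL", GLOBAL_LOG_LEVEL).upper()
    if not hasattr(logging, level):  # ignore unknown level names
        level = GLOBAL_LOG_LEVEL
    SRC_LOG_LEVELS[source] = level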
@@ -164,15 +164,18 @@ app.mount("/rag/api/v1", rag_app)

 @app.get("/api/config")
 async def get_app_config():
+    # Checking and Handling the Absence of 'ui' in CONFIG_DATA
+
+    default_locale = "en-US"
+    if "ui" in CONFIG_DATA:
+        default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")
+
+    # The Rest of the Function Now Uses the Variables Defined Above
     return {
         "status": True,
         "name": WEBUI_NAME,
         "version": VERSION,
-        "default_locale": (
-            CONFIG_DATA["ui"]["default_locale"]
-            if "ui" in CONFIG_DATA and "default_locale" in CONFIG_DATA["ui"]
-            else "en-US"
-        ),
+        "default_locale": default_locale,
         "images": images_app.state.ENABLED,
         "default_models": webui_app.state.DEFAULT_MODELS,
         "default_prompt_suggestions": webui_app.state.DEFAULT_PROMPT_SUGGESTIONS,
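The get_app_config hunk replaces a nested conditional expression with an explicit lookup. A small self-contained check of the behaviour (the helper name is made up for the example); dict.get also covers the case where "ui" exists but has no "default_locale" key, so the fallback matches the old expression:

def resolve_default_locale(config_data):
    # Mirrors the refactored lookup in get_app_config.
    default_locale = "en-US"
    if "ui" in config_data:
        default_locale = config_data["ui"].get("default_locale", "en-US")
    return default_locale


print(resolve_default_locale({}))                                   # en-US
print(resolve_default_locale({"ui": {}}))                           # en-US
print(resolve_default_locale({"ui": {"default_locale": "de-DE"}}))  # de-DE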
@@ -1,6 +1,11 @@
 import json
 import requests
-from config import VERSION, WEBUI_FAVICON_URL, WEBUI_NAME
+import logging
+
+from config import SRC_LOG_LEVELS, VERSION, WEBUI_FAVICON_URL, WEBUI_NAME
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["WEBHOOK"])


 def post_webhook(url: str, message: str, event_data: dict) -> bool:
@@ -39,9 +44,11 @@ def post_webhook(url: str, message: str, event_data: dict) -> bool:
         else:
             payload = {**event_data}

+        log.debug(f"payload: {payload}")
         r = requests.post(url, json=payload)
         r.raise_for_status()
+        log.debug(f"r.text: {r.text}")
         return True
     except Exception as e:
-        print(e)
+        log.exception(e)
         return False
@@ -13,6 +13,7 @@
 		"lint:types": "npm run check",
 		"lint:backend": "pylint backend/",
 		"format": "prettier --plugin-search-dir --write '**/*.{js,ts,svelte,css,md,html,json}'",
+		"format:backend": "black . --exclude \"/venv/\"",
 		"i18n:parse": "i18next --config i18next-parser.config.ts && prettier --write 'src/lib/i18n/**/*.{js,json}'"
 	},
 	"devDependencies": {
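This script is what the updated CI step at the top now calls: the black invocation, including the /venv/ exclude, is the same command the workflow previously ran directly, so the formatting command is defined once and npm run format:backend behaves the same locally and in CI.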
@@ -78,3 +78,7 @@ select {
 	/* for Chrome */
 	-webkit-appearance: none;
 }
+
+.katex-mathml {
+	display: none;
+}
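KaTeX renders each formula twice, as visually hidden MathML (.katex-mathml, kept for assistive technology) plus positioned HTML spans; forcing display: none on the MathML copy presumably guards against it appearing as duplicated text where KaTeX's own stylesheet does not fully apply.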
@@ -520,11 +520,6 @@
 	const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
 		const responseMessage = history.messages[responseMessageId];

-		// Wait until history/message have been updated
-		await tick();
-
-		scrollToBottom();
-
 		const docs = messages
 			.filter((message) => message?.files ?? null)
 			.map((message) =>
@@ -593,6 +588,11 @@
 			: `${OPENAI_API_BASE_URL}`
 		);

+		// Wait until history/message have been updated
+		await tick();
+
+		scrollToBottom();
+
 		if (res && res.ok) {
 			const reader = res.body
 				.pipeThrough(new TextDecoderStream())
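The same change lands in both chat routes (this pair of hunks and the pair that follows): the tick()/scrollToBottom() calls move from the top of sendPromptOpenAI to just after the request whose result is checked as res, right before the response stream is read.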
@@ -536,11 +536,6 @@
 	const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
 		const responseMessage = history.messages[responseMessageId];

-		// Wait until history/message have been updated
-		await tick();
-
-		scrollToBottom();
-
 		const docs = messages
 			.filter((message) => message?.files ?? null)
 			.map((message) =>
@@ -607,6 +602,11 @@
 			: `${OPENAI_API_BASE_URL}`
 		);

+		// Wait until history/message have been updated
+		await tick();
+
+		scrollToBottom();
+
 		if (res && res.ok) {
 			const reader = res.body
 				.pipeThrough(new TextDecoderStream())