Merge pull request #7274 from DmitriyAlergant-T1A/fix/logging_cleanup
Some checks failed
Deploy to HuggingFace Spaces / check-secret (push) Successful in 21s
Create and publish Docker images with specific build args / build-main-image (linux/amd64) (push) Failing after 3m6s
Create and publish Docker images with specific build args / build-main-image (linux/arm64) (push) Failing after 3m36s
Create and publish Docker images with specific build args / merge-main-images (push) Has been skipped
Create and publish Docker images with specific build args / build-cuda-image (linux/amd64) (push) Failing after 4m2s
Create and publish Docker images with specific build args / build-cuda-image (linux/arm64) (push) Failing after 4m26s
Create and publish Docker images with specific build args / merge-cuda-images (push) Has been skipped
Create and publish Docker images with specific build args / build-ollama-image (linux/amd64) (push) Failing after 4m43s
Create and publish Docker images with specific build args / build-ollama-image (linux/arm64) (push) Failing after 3m59s
Create and publish Docker images with specific build args / merge-ollama-images (push) Has been skipped
Python CI / Format Backend (3.11) (push) Failing after 2m8s
Frontend Build / Format & Build Frontend (push) Failing after 2m1s
Frontend Build / Frontend Unit Tests (push) Failing after 1m38s
Integration Test / Run Cypress Integration Tests (push) Failing after 5m13s
Integration Test / Run Migration Tests (push) Failing after 4m19s
Deploy to HuggingFace Spaces / deploy (push) Has been skipped

Fix: logging cleanup
Timothy Jaeryang Baek 2024-11-22 20:16:47 -08:00 committed by GitHub
commit 8744a12abb
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 61 additions and 40 deletions
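The diff below follows one pattern throughout: bare print() calls are replaced with module-level loggers whose levels come from SRC_LOG_LEVELS in open_webui.env. A minimal, self-contained sketch of that pattern (the SRC_LOG_LEVELS dict here is a stand-in for the real, environment-driven config):

```python
import logging

# Stand-in for open_webui.env.SRC_LOG_LEVELS: a dict mapping subsystem
# names to log levels (the real one is populated from environment variables).
SRC_LOG_LEVELS = {"MAIN": logging.DEBUG}

# One logger per module, named after the module and capped to the
# subsystem's configured level.
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])

# Before: print(f"Error: {e}")  -- unconditional, unlevelled stdout noise.
# After: levelled output that can be filtered or silenced per subsystem.
try:
    raise ValueError("example failure")
except Exception as e:
    log.error(f"Error: {e}")
```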

View File

@@ -585,8 +585,6 @@ async def generate_chat_completion(
     # Convert the modified body back to JSON
     payload = json.dumps(payload)
-    log.debug(payload)
-
     headers = {}
     headers["Authorization"] = f"Bearer {key}"
     headers["Content-Type"] = "application/json"

View File

@@ -68,6 +68,7 @@ from open_webui.config import (
 )
 from open_webui.env import (
     ENV,
+    SRC_LOG_LEVELS,
     WEBUI_AUTH_TRUSTED_EMAIL_HEADER,
     WEBUI_AUTH_TRUSTED_NAME_HEADER,
 )
@@ -94,6 +95,7 @@ app = FastAPI(
 )

 log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])

 app.state.config = AppConfig()
@@ -270,7 +272,9 @@ async def get_pipe_models():
             log.exception(e)
             sub_pipes = []

-        print(sub_pipes)
+        log.debug(
+            f"get_pipe_models: function '{pipe.id}' is a manifold of {sub_pipes}"
+        )

         for p in sub_pipes:
             sub_pipe_id = f'{pipe.id}.{p["id"]}'
@@ -280,6 +284,7 @@ async def get_pipe_models():
             sub_pipe_name = f"{function_module.name}{sub_pipe_name}"

             pipe_flag = {"type": pipe.type}
+
             pipe_models.append(
                 {
                     "id": sub_pipe_id,
@@ -293,6 +298,10 @@ async def get_pipe_models():
     else:
         pipe_flag = {"type": "pipe"}
+        log.debug(
+            f"get_pipe_models: function '{pipe.id}' is a single pipe {{ 'id': {pipe.id}, 'name': {pipe.name} }}"
+        )
+
         pipe_models.append(
             {
                 "id": pipe.id,
@@ -346,7 +355,7 @@ def get_pipe_id(form_data: dict) -> str:
     pipe_id = form_data["model"]
     if "." in pipe_id:
         pipe_id, _ = pipe_id.split(".", 1)
-    print(pipe_id)
+
     return pipe_id
@@ -453,7 +462,7 @@ async def generate_function_chat_completion(form_data, user, models: dict = {}):
                     return
         except Exception as e:
-            print(f"Error: {e}")
+            log.error(f"Error: {e}")
             yield f"data: {json.dumps({'error': {'detail':str(e)}})}\n\n"
             return
@@ -483,7 +492,7 @@ async def generate_function_chat_completion(form_data, user, models: dict = {}):
         res = await execute_pipe(pipe, params)
     except Exception as e:
-        print(f"Error: {e}")
+        log.error(f"Error: {e}")
         return {"error": {"detail": str(e)}}

     if isinstance(res, StreamingResponse) or isinstance(res, dict):
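Both branches of generate_function_chat_completion now report failures through log.error while keeping their return behavior: the streaming branch still emits a final SSE error event, the non-streaming branch still returns an error dict. A rough sketch of the streaming shape (the generator and its names are illustrative, not the app's actual code):

```python
import json
import logging

log = logging.getLogger(__name__)

async def stream_pipe(pipe_iter):
    """Relay chunks as SSE 'data:' lines; on failure, log and emit one error event."""
    try:
        async for chunk in pipe_iter:
            yield f"data: {json.dumps(chunk)}\n\n"
    except Exception as e:
        # Logged server-side at ERROR level instead of print()...
        log.error(f"Error: {e}")
        # ...while the client still receives a terminal error payload.
        yield f"data: {json.dumps({'error': {'detail': str(e)}})}\n\n"
        return
```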

View File

@@ -5,10 +5,15 @@ import sys
 from importlib import util
 import types
 import tempfile
+import logging
+from open_webui.env import SRC_LOG_LEVELS

 from open_webui.apps.webui.models.functions import Functions
 from open_webui.apps.webui.models.tools import Tools
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])


 def extract_frontmatter(content):
     """
@@ -95,7 +100,7 @@ def load_tools_module_by_id(toolkit_id, content=None):
         # Executing the modified content in the created module's namespace
         exec(content, module.__dict__)
         frontmatter = extract_frontmatter(content)
-        print(f"Loaded module: {module.__name__}")
+        log.info(f"Loaded module: {module.__name__}")

         # Create and return the object if the class 'Tools' is found in the module
         if hasattr(module, "Tools"):
@@ -103,7 +108,7 @@ def load_tools_module_by_id(toolkit_id, content=None):
         else:
             raise Exception("No Tools class found in the module")
     except Exception as e:
-        print(f"Error loading module: {toolkit_id}: {e}")
+        log.error(f"Error loading module: {toolkit_id}: {e}")
         del sys.modules[module_name]  # Clean up
         raise e
     finally:
@@ -139,7 +144,7 @@ def load_function_module_by_id(function_id, content=None):
         # Execute the modified content in the created module's namespace
         exec(content, module.__dict__)
         frontmatter = extract_frontmatter(content)
-        print(f"Loaded module: {module.__name__}")
+        log.info(f"Loaded module: {module.__name__}")

         # Create appropriate object based on available class type in the module
         if hasattr(module, "Pipe"):
@@ -151,7 +156,7 @@ def load_function_module_by_id(function_id, content=None):
         else:
             raise Exception("No Function class found in the module")
     except Exception as e:
-        print(f"Error loading module: {function_id}: {e}")
+        log.error(f"Error loading module: {function_id}: {e}")
         del sys.modules[module_name]  # Cleanup by removing the module in case of error

         Functions.update_function_by_id(function_id, {"is_active": False})
@@ -164,7 +169,7 @@ def install_frontmatter_requirements(requirements):
     if requirements:
         req_list = [req.strip() for req in requirements.split(",")]
         for req in req_list:
-            print(f"Installing requirement: {req}")
+            log.info(f"Installing requirement: {req}")
             subprocess.check_call([sys.executable, "-m", "pip", "install", req])
     else:
-        print("No requirements found in frontmatter.")
+        log.info("No requirements found in frontmatter.")

View File

@@ -539,8 +539,6 @@ async def chat_completion_files_handler(
         if len(queries) == 0:
             queries = [get_last_user_message(body["messages"])]

-        print(f"{queries=}")
-
         sources = get_sources_from_files(
             files=files,
             queries=queries,
@@ -970,7 +968,7 @@ app.add_middleware(SecurityHeadersMiddleware)
 @app.middleware("http")
 async def commit_session_after_request(request: Request, call_next):
     response = await call_next(request)
-    log.debug("Commit session after request")
+    # log.debug("Commit session after request")
     Session.commit()
     return response
@@ -1177,6 +1175,8 @@ async def get_all_models():
             model["actions"].extend(
                 get_action_items_from_module(action_function, function_module)
             )
+
+    log.debug(f"get_all_models() returned {len(models)} models")

     return models
@@ -1214,6 +1214,10 @@ async def get_models(user=Depends(get_verified_user)):
             filtered_models.append(model)
         models = filtered_models

+    log.debug(
+        f"/api/models returned filtered models accessible to the user: {json.dumps([model['id'] for model in models])}"
+    )
+
     return {"data": models}
@@ -1704,7 +1708,6 @@ async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_u
 @app.post("/api/task/title/completions")
 async def generate_title(form_data: dict, user=Depends(get_verified_user)):
-    print("generate_title")

     model_list = await get_all_models()
     models = {model["id"]: model for model in model_list}
@@ -1725,9 +1728,9 @@ async def generate_title(form_data: dict, user=Depends(get_verified_user)):
         models,
     )

-    print(task_model_id)
-
     model = models[task_model_id]
+    log.debug(
+        f"generating chat title using model {task_model_id} for user {user.email} "
+    )

     if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
@@ -1766,10 +1769,12 @@ Artificial Intelligence in Healthcare
                 "max_completion_tokens": 50,
             }
         ),
-        "chat_id": form_data.get("chat_id", None),
-        "metadata": {"task": str(TASKS.TITLE_GENERATION), "task_body": form_data},
+        "metadata": {
+            "task": str(TASKS.TITLE_GENERATION),
+            "task_body": form_data,
+            "chat_id": form_data.get("chat_id", None),
+        },
     }
-    log.debug(payload)

     # Handle pipeline filters
     try:
@@ -1793,7 +1798,7 @@ Artificial Intelligence in Healthcare
 @app.post("/api/task/tags/completions")
 async def generate_chat_tags(form_data: dict, user=Depends(get_verified_user)):
-    print("generate_chat_tags")
+
     if not app.state.config.ENABLE_TAGS_GENERATION:
         return JSONResponse(
             status_code=status.HTTP_200_OK,
@@ -1818,7 +1823,10 @@ async def generate_chat_tags(form_data: dict, user=Depends(get_verified_user)):
         app.state.config.TASK_MODEL_EXTERNAL,
         models,
     )
-    print(task_model_id)
+
+    log.debug(
+        f"generating chat tags using model {task_model_id} for user {user.email} "
+    )

     if app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE
@@ -1849,9 +1857,12 @@ JSON format: { "tags": ["tag1", "tag2", "tag3"] }
         "model": task_model_id,
         "messages": [{"role": "user", "content": content}],
         "stream": False,
-        "metadata": {"task": str(TASKS.TAGS_GENERATION), "task_body": form_data},
+        "metadata": {
+            "task": str(TASKS.TAGS_GENERATION),
+            "task_body": form_data,
+            "chat_id": form_data.get("chat_id", None),
+        },
     }
-    log.debug(payload)

     # Handle pipeline filters
     try:
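Alongside the logging changes, the task endpoints stop sending chat_id as a top-level payload field and fold it into metadata next to task and task_body. A small sketch of the resulting payload shape (build_task_payload is an illustrative helper, not a function from the codebase):

```python
def build_task_payload(task: str, task_model_id: str, content: str, form_data: dict) -> dict:
    """Assemble a task-completion payload; chat_id now travels inside metadata."""
    return {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "metadata": {
            "task": task,            # e.g. str(TASKS.TAGS_GENERATION)
            "task_body": form_data,
            "chat_id": form_data.get("chat_id", None),
        },
    }
```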
@@ -1875,7 +1886,7 @@ JSON format: { "tags": ["tag1", "tag2", "tag3"] }
 @app.post("/api/task/queries/completions")
 async def generate_queries(form_data: dict, user=Depends(get_verified_user)):
-    print("generate_queries")
+
     type = form_data.get("type")
     if type == "web_search":
         if not app.state.config.ENABLE_SEARCH_QUERY_GENERATION:
@@ -1908,9 +1919,10 @@ async def generate_queries(form_data: dict, user=Depends(get_verified_user)):
         app.state.config.TASK_MODEL_EXTERNAL,
         models,
     )
-    print(task_model_id)
-
     model = models[task_model_id]
+    log.debug(
+        f"generating {type} queries using model {task_model_id} for user {user.email}"
+    )

     if app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE
@@ -1925,9 +1937,12 @@ async def generate_queries(form_data: dict, user=Depends(get_verified_user)):
         "model": task_model_id,
         "messages": [{"role": "user", "content": content}],
         "stream": False,
-        "metadata": {"task": str(TASKS.QUERY_GENERATION), "task_body": form_data},
+        "metadata": {
+            "task": str(TASKS.QUERY_GENERATION),
+            "task_body": form_data,
+            "chat_id": form_data.get("chat_id", None),
+        },
     }
-    log.debug(payload)

     # Handle pipeline filters
     try:
@@ -1951,7 +1966,6 @@ async def generate_queries(form_data: dict, user=Depends(get_verified_user)):
 @app.post("/api/task/emoji/completions")
 async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
-    print("generate_emoji")

     model_list = await get_all_models()
     models = {model["id"]: model for model in model_list}
@@ -1971,9 +1985,8 @@ async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
         app.state.config.TASK_MODEL_EXTERNAL,
         models,
     )
-    print(task_model_id)
-
     model = models[task_model_id]
+    log.debug(f"generating emoji using model {task_model_id} for user {user.email} ")

     template = '''
 Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
@@ -2003,7 +2016,6 @@ Message: """{{prompt}}"""
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.EMOJI_GENERATION), "task_body": form_data},
     }
-    log.debug(payload)

     # Handle pipeline filters
     try:
@@ -2027,7 +2039,6 @@ Message: """{{prompt}}"""
 @app.post("/api/task/moa/completions")
 async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)):
-    print("generate_moa_response")

     model_list = await get_all_models()
     models = {model["id"]: model for model in model_list}
@@ -2047,9 +2058,8 @@ async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)
         app.state.config.TASK_MODEL_EXTERNAL,
         models,
     )
-    print(task_model_id)
-
     model = models[task_model_id]
+    log.debug(f"generating MOA model {task_model_id} for user {user.email} ")

     template = """You have been provided with a set of responses from various models to the latest user query: "{{prompt}}"
@@ -2073,7 +2083,6 @@ Responses from models: {{responses}}"""
             "task_body": form_data,
         },
     }
-    log.debug(payload)

     try:
         payload = filter_pipeline(payload, user, models)
@@ -2108,7 +2117,7 @@ Responses from models: {{responses}}"""
 async def get_pipelines_list(user=Depends(get_admin_user)):
     responses = await get_openai_models_responses()

-    print(responses)
+    log.debug(f"get_pipelines_list: get_openai_models_responses returned {responses}")

     urlIdxs = [
         idx
         for idx, response in enumerate(responses)