diff --git a/backend/apps/litellm/main.py b/backend/apps/litellm/main.py
index 947456881..5a8b37f47 100644
--- a/backend/apps/litellm/main.py
+++ b/backend/apps/litellm/main.py
@@ -43,20 +43,29 @@ app.add_middleware(
 
 
 async def run_background_process(command):
+    # Start the process
     process = await asyncio.create_subprocess_exec(
         *command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
     )
-    return process
+    # Read output asynchronously
+    async for line in process.stdout:
+        print(line.decode().strip())  # Print stdout line by line
+
+    await process.wait()  # Wait for the subprocess to finish
 
 
 async def start_litellm_background():
+    print("start_litellm_background")
     # Command to run in the background
     command = "litellm --telemetry False --config ./data/litellm/config.yaml"
 
+    await run_background_process(command)
 
 
 @app.on_event("startup")
 async def startup_event():
+
+    print("startup_event")
     # TODO: Check config.yaml file and create one
     asyncio.create_task(start_litellm_background())
 
diff --git a/backend/main.py b/backend/main.py
index b5aa7e7d0..48e14f1dd 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -20,12 +20,13 @@ from starlette.middleware.base import BaseHTTPMiddleware
 
 from apps.ollama.main import app as ollama_app
 from apps.openai.main import app as openai_app
-from apps.litellm.main import app as litellm_app
+from apps.litellm.main import app as litellm_app, start_litellm_background
 from apps.audio.main import app as audio_app
 from apps.images.main import app as images_app
 from apps.rag.main import app as rag_app
 from apps.web.main import app as webui_app
 
+import asyncio
 
 from pydantic import BaseModel
 from typing import List
 
@@ -168,6 +169,11 @@ async def check_url(request: Request, call_next):
     return response
 
 
+@app.on_event("startup")
+async def on_startup():
+    asyncio.create_task(start_litellm_background())
+
+
 app.mount("/api/v1", webui_app)
 app.mount("/litellm/api", litellm_app)