diff --git a/backend/apps/litellm/main.py b/backend/apps/litellm/main.py
index a9922aad7..39f348141 100644
--- a/backend/apps/litellm/main.py
+++ b/backend/apps/litellm/main.py
@@ -1,8 +1,8 @@
+from fastapi import FastAPI, Depends
+from fastapi.routing import APIRoute
+from fastapi.middleware.cors import CORSMiddleware
+
 import logging
-
-from litellm.proxy.proxy_server import ProxyConfig, initialize
-from litellm.proxy.proxy_server import app
-
 from fastapi import FastAPI, Request, Depends, status, Response
 from fastapi.responses import JSONResponse
 
@@ -23,24 +23,39 @@ from config import (
 )
 
 
-proxy_config = ProxyConfig()
+import asyncio
+import subprocess
 
 
-async def config():
-    router, model_list, general_settings = await proxy_config.load_config(
-        router=None, config_file_path="./data/litellm/config.yaml"
+app = FastAPI()
+
+origins = ["*"]
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+
+async def run_background_process(command):
+    process = await asyncio.create_subprocess_exec(
+        *command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
     )
-
-    await initialize(config="./data/litellm/config.yaml", telemetry=False)
+    return process
 
 
-async def startup():
-    await config()
+async def start_litellm_background():
+    # Command to run in the background
+    command = "litellm --config ./data/litellm/config.yaml"
+    await run_background_process(command)
 
 
 @app.on_event("startup")
-async def on_startup():
-    await startup()
+async def startup_event():
+    asyncio.create_task(start_litellm_background())
 
 
 app.state.MODEL_FILTER_ENABLED = MODEL_FILTER_ENABLED
@@ -63,6 +78,11 @@ async def auth_middleware(request: Request, call_next):
     return response
 
 
+@app.get("/")
+async def get_status():
+    return {"status": True}
+
+
 class ModifyModelsResponseMiddleware(BaseHTTPMiddleware):
     async def dispatch(
         self, request: Request, call_next: RequestResponseEndpoint
@@ -98,3 +118,26 @@ class ModifyModelsResponseMiddleware(BaseHTTPMiddleware):
 
 
 app.add_middleware(ModifyModelsResponseMiddleware)
+
+
+# from litellm.proxy.proxy_server import ProxyConfig, initialize
+# from litellm.proxy.proxy_server import app
+
+# proxy_config = ProxyConfig()
+
+
+# async def config():
+#     router, model_list, general_settings = await proxy_config.load_config(
+#         router=None, config_file_path="./data/litellm/config.yaml"
+#     )
+
+#     await initialize(config="./data/litellm/config.yaml", telemetry=False)
+
+
+# async def startup():
+#     await config()
+
+
+# @app.on_event("startup")
+# async def on_startup():
+#     await startup()
diff --git a/backend/main.py b/backend/main.py
index 8b5fd76bc..b5aa7e7d0 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -20,7 +20,7 @@ from starlette.middleware.base import BaseHTTPMiddleware
 
 from apps.ollama.main import app as ollama_app
 from apps.openai.main import app as openai_app
-from apps.litellm.main import app as litellm_app, startup as litellm_app_startup
+from apps.litellm.main import app as litellm_app
 from apps.audio.main import app as audio_app
 from apps.images.main import app as images_app
 from apps.rag.main import app as rag_app
@@ -168,11 +168,6 @@ async def check_url(request: Request, call_next):
     return response
 
 
-@app.on_event("startup")
-async def on_startup():
-    await litellm_app_startup()
-
-
 app.mount("/api/v1", webui_app)
 app.mount("/litellm/api", litellm_app)
 
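
One thing worth flagging in the new `run_background_process` helper: it wires the child's stdout and stderr to `subprocess.PIPE`, but nothing ever reads from those pipes. Once litellm logs enough output to fill the OS pipe buffer, the child can block on a write and stall. A minimal sketch of one way to drain the pipes; the `drain_stream` helper and its log prefixes are illustrative assumptions, not part of this diff:

```python
import asyncio
import subprocess


async def drain_stream(stream: asyncio.StreamReader, prefix: str):
    # Read the child's output line by line so the pipe buffer never fills.
    while True:
        line = await stream.readline()
        if not line:
            break
        print(f"{prefix}: {line.decode(errors='replace').rstrip()}")


async def run_background_process(command: str):
    process = await asyncio.create_subprocess_exec(
        *command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    # Drain both pipes in the background instead of leaving them unread.
    asyncio.create_task(drain_stream(process.stdout, "litellm stdout"))
    asyncio.create_task(drain_stream(process.stderr, "litellm stderr"))
    return process
```

Redirecting both streams to `subprocess.DEVNULL` would avoid the stall as well, at the cost of discarding litellm's logs.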
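Relatedly, `start_litellm_background` discards the `Process` handle that `run_background_process` returns, so nothing terminates the litellm child when the server exits. A sketch of how the handle could be retained and cleaned up, reusing `run_background_process` from this diff; the attribute name `app.state.litellm_process` is an assumption, not something the PR defines:

```python
@app.on_event("startup")
async def startup_event():
    # Keep the subprocess handle on app.state so shutdown can reach it.
    app.state.litellm_process = await run_background_process(
        "litellm --config ./data/litellm/config.yaml"
    )


@app.on_event("shutdown")
async def shutdown_event():
    # Stop the litellm child so it doesn't outlive the web server.
    process = getattr(app.state, "litellm_process", None)
    if process is not None and process.returncode is None:
        process.terminate()
        await process.wait()
```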