diff --git a/backend/open_webui/__init__.py b/backend/open_webui/__init__.py
index 30e83b198..743656d9e 100644
--- a/backend/open_webui/__init__.py
+++ b/backend/open_webui/__init__.py
@@ -39,6 +39,18 @@ def serve(
                 "/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib",
             ]
         )
+        try:
+            import torch
+            assert torch.cuda.is_available(), "CUDA not available"
+            typer.echo("CUDA seems to be working")
+        except Exception as e:
+            typer.echo(
+                "Error when testing CUDA but USE_CUDA_DOCKER is true. "
+                "Resetting USE_CUDA_DOCKER to false and removing "
+                f"LD_LIBRARY_PATH modifications: {e}"
+            )
+            os.environ["USE_CUDA_DOCKER"] = "false"
+            os.environ["LD_LIBRARY_PATH"] = ":".join(LD_LIBRARY_PATH)
 
     import open_webui.main  # we need set environment variables before importing main
     uvicorn.run(open_webui.main.app, host=host, port=port, forwarded_allow_ips="*")
diff --git a/backend/open_webui/env.py b/backend/open_webui/env.py
index d99a80df4..df5597cbf 100644
--- a/backend/open_webui/env.py
+++ b/backend/open_webui/env.py
@@ -36,7 +36,18 @@ except ImportError:
 USE_CUDA = os.environ.get("USE_CUDA_DOCKER", "false")
 
 if USE_CUDA.lower() == "true":
-    DEVICE_TYPE = "cuda"
+    try:
+        import torch
+        assert torch.cuda.is_available(), "CUDA not available"
+        DEVICE_TYPE = "cuda"
+    except Exception as e:
+        cuda_error = (
+            "Error when testing CUDA but USE_CUDA_DOCKER is true. "
+            f"Resetting USE_CUDA_DOCKER to false: {e}"
+        )
+        os.environ["USE_CUDA_DOCKER"] = "false"
+        USE_CUDA = "false"
+        DEVICE_TYPE = "cpu"
 else:
     DEVICE_TYPE = "cpu"
 
@@ -56,6 +67,9 @@ else:
 
 log = logging.getLogger(__name__)
 log.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")
+if "cuda_error" in locals():
+    log.exception(cuda_error)
+
 log_sources = [
     "AUDIO",
     "COMFYUI",
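
For context, both hunks apply the same pattern: treat `USE_CUDA_DOCKER=true` as a request rather than a guarantee, probe CUDA via `torch.cuda.is_available()`, and fall back to CPU instead of crashing at startup. Below is a minimal standalone sketch of that pattern, not code from this diff; `pick_device_type` is a hypothetical helper name, and the sketch assumes only that `torch` may or may not be importable.

```python
import logging
import os

log = logging.getLogger(__name__)


def pick_device_type() -> str:
    """Return "cuda" only when USE_CUDA_DOCKER is set and CUDA actually works."""
    if os.environ.get("USE_CUDA_DOCKER", "false").lower() != "true":
        return "cpu"
    try:
        # Deferred import: torch may be absent or broken on CPU-only images.
        import torch

        assert torch.cuda.is_available(), "CUDA not available"
        return "cuda"
    except Exception as e:
        # Mirror the diff: flip the env var so downstream code sees the fallback.
        os.environ["USE_CUDA_DOCKER"] = "false"
        log.warning("CUDA requested but unusable, falling back to CPU: %s", e)
        return "cpu"


DEVICE_TYPE = pick_device_type()
```

Importing `torch` inside the `try` block, as both hunks do, is what lets the probe double as a guard against a missing or broken torch install rather than only against absent GPU hardware.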