diff --git a/TROUBLESHOOTING.md b/TROUBLESHOOTING.md
index 8e8f89da0..a126eafbc 100644
--- a/TROUBLESHOOTING.md
+++ b/TROUBLESHOOTING.md
@@ -18,6 +18,10 @@ If you're experiencing connection issues, it’s often due to the WebUI docker c
 docker run -d --network=host -v open-webui:/app/backend/data -e OLLAMA_BASE_URL=http://127.0.0.1:11434 --name open-webui --restart always ghcr.io/open-webui/open-webui:main
 ```
 
+### Error on Slow Responses for Ollama
+
+Open WebUI has a default timeout of 15 minutes for Ollama to finish generating the response. If needed, this can be adjusted via the environment variable AIOHTTP_CLIENT_TIMEOUT, which sets the timeout in seconds.
+
 ### General Connection Errors
 
 **Ensure Ollama Version is Up-to-Date**: Always start by checking that you have the latest version of Ollama. Visit [Ollama's official site](https://ollama.com/) for the latest updates.
diff --git a/backend/apps/ollama/main.py b/backend/apps/ollama/main.py
index 1ed034f68..118c688d3 100644
--- a/backend/apps/ollama/main.py
+++ b/backend/apps/ollama/main.py
@@ -46,6 +46,7 @@ from config import (
     SRC_LOG_LEVELS,
     OLLAMA_BASE_URLS,
     ENABLE_OLLAMA_API,
+    AIOHTTP_CLIENT_TIMEOUT,
     ENABLE_MODEL_FILTER,
     MODEL_FILTER_LIST,
     UPLOAD_DIR,
@@ -154,7 +155,9 @@ async def cleanup_response(
 async def post_streaming_url(url: str, payload: str):
     r = None
     try:
-        session = aiohttp.ClientSession(trust_env=True)
+        session = aiohttp.ClientSession(
+            trust_env=True, timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT)
+        )
         r = await session.post(url, data=payload)
         r.raise_for_status()
 
diff --git a/backend/config.py b/backend/config.py
index 30a23f29e..215c6f849 100644
--- a/backend/config.py
+++ b/backend/config.py
@@ -425,6 +425,7 @@ OLLAMA_API_BASE_URL = os.environ.get(
 )
 
 OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
+AIOHTTP_CLIENT_TIMEOUT = int(os.environ.get("AIOHTTP_CLIENT_TIMEOUT", "900"))
 
 K8S_FLAG = os.environ.get("K8S_FLAG", "")
 USE_OLLAMA_DOCKER = os.environ.get("USE_OLLAMA_DOCKER", "false")
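
Usage note: with this change, the timeout can be raised at container startup by extending the `docker run` command already shown in TROUBLESHOOTING.md. A minimal sketch; the value `1800` (30 minutes) is illustrative, not a recommended default:

```bash
# Same container invocation as in TROUBLESHOOTING.md, with the new
# AIOHTTP_CLIENT_TIMEOUT variable added; 1800 seconds is an illustrative value.
docker run -d --network=host \
  -v open-webui:/app/backend/data \
  -e OLLAMA_BASE_URL=http://127.0.0.1:11434 \
  -e AIOHTTP_CLIENT_TIMEOUT=1800 \
  --name open-webui --restart always \
  ghcr.io/open-webui/open-webui:main
```

Since `total` on `aiohttp.ClientTimeout` bounds the whole operation, including reading the streamed response body, generations that run past the limit are cut off; the `900`-second default in `config.py` matches the 15 minutes documented above.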