From 49e570d1f2165a63a57e12429184abe83740f6b5 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Wed, 30 Oct 2024 20:41:01 +0000
Subject: [PATCH 01/42] fix: Apply Prettier formatting to docusaurus.config.ts
 to fix CI

---
 docusaurus.config.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docusaurus.config.ts b/docusaurus.config.ts
index f1d2740..ad581fe 100644
--- a/docusaurus.config.ts
+++ b/docusaurus.config.ts
@@ -46,7 +46,7 @@ const config: Config = {
 					// Please change this to your repo.
 					// Remove this to remove the "edit this page" links.
 					editUrl: "https://github.com/open-webui/docs/blob/main",
-					exclude: ['**/tab-**/**'],
+					exclude: ["**/tab-**/**"],
 				},
 				// blog: false,
 				blog: {

From 99337223336bc255f78e481e31e3cf124c0376b9 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:48:06 +0000
Subject: [PATCH 02/42] Add Development.md changes from feature-nginx-combined

---
 .../advanced-topics/Development.md            | 201 ++++++++++++++++++
 1 file changed, 201 insertions(+)
 create mode 100644 docs/getting-started/advanced-topics/Development.md

diff --git a/docs/getting-started/advanced-topics/Development.md b/docs/getting-started/advanced-topics/Development.md
new file mode 100644
index 0000000..2ef5ff1
--- /dev/null
+++ b/docs/getting-started/advanced-topics/Development.md
@@ -0,0 +1,201 @@
+---
+sidebar_position: 5
+title: "🛠️ Development Guide"
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import { TopBanners } from "@site/src/components/TopBanners";
+
+<TopBanners />
+
+# 🛠️ Development Setup Guide
+
+Welcome to the **Open WebUI Development Setup Guide!** Whether you're a novice or an experienced developer, this guide will help you set up a **local development environment** for both the frontend and backend components. Let’s dive in! 🚀
+
+## System Requirements
+
+- **Operating System**: Linux (or WSL on Windows) or macOS  
+- **Python Version**: Python 3.11+  
+- **Node.js Version**: 20.10+
+
+## Development Methods
+
+<Tabs groupId="dev-setup">
+
+<TabItem value="local" label="Local Setup">
+
+### 🐧 Local Development Setup
+
+1. **Clone the Repository**:
+   ```bash
+   git clone https://github.com/open-webui/open-webui.git
+   cd open-webui
+   ```
+
+2. **Frontend Setup**:
+   - Create a `.env` file:
+     ```bash
+     cp -RPp .env.example .env
+     ```
+   - Install dependencies:
+     ```bash
+     npm install
+     ```
+   - Start the frontend server:
+     ```bash
+     npm run dev
+     ```
+     🌐 Available at: [http://localhost:5173](http://localhost:5173).
+
+3. **Backend Setup**:
+   - Navigate to the backend:
+     ```bash
+     cd backend
+     ```
+   - Use **Conda** for environment setup:
+     ```bash
+     conda create --name open-webui python=3.11
+     conda activate open-webui
+     ```
+   - Install dependencies:
+     ```bash
+     pip install -r requirements.txt -U
+     ```
+   - Start the backend:
+     ```bash
+     sh dev.sh
+     ```
+     📄 API docs available at: [http://localhost:8080/docs](http://localhost:8080/docs).
+
+</TabItem>
+
+<TabItem value="docker" label="Docker Setup">
+
+### 🐳 Docker-Based Development Setup
+
+1. **Create the Docker Compose File**:
+   ```yaml
+   name: open-webui-dev
+
+   services:
+     frontend:
+       build:
+         context: .
+         target: build
+       command: ["npm", "run", "dev"]
+       depends_on:
+         - backend
+       ports:
+         - "3000:5173"
+       extra_hosts:
+         - host.docker.internal:host-gateway
+       volumes:
+         - ./src:/app/src
+
+     backend:
+       build:
+         context: .
+         target: base
+       command: ["bash", "dev.sh"]
+       env_file: ".env"
+       environment:
+         - ENV=dev
+         - WEBUI_AUTH=False
+       ports:
+         - "8080:8080"
+       extra_hosts:
+         - host.docker.internal:host-gateway
+       volumes:
+         - ./backend:/app/backend
+         - data:/app/backend/data
+
+   volumes:
+     data: {}
+   ```
+
+2. **Start the Development Containers**:
+   ```bash
+   docker compose -f compose-dev.yaml up --watch
+   ```
+
+3. **Stop the Containers**:
+   ```bash
+   docker compose -f compose-dev.yaml down
+   ```
+
+</TabItem>
+<TabItem value="conda" label="Optional Conda Setup">
+
+### Conda Environment Setup
+
+If you prefer using **Conda** for isolation:
+
+1. **Create and Activate the Environment**:
+   ```bash
+   conda create --name open-webui-dev python=3.11
+   conda activate open-webui-dev
+   ```
+
+2. **Install Dependencies**:
+   ```bash
+   pip install -r requirements.txt
+   ```
+
+3. **Run the Servers**:
+   - Frontend:
+     ```bash
+     npm run dev
+     ```
+   - Backend:
+     ```bash
+     sh dev.sh
+     ```
+
+</TabItem>
+
+<TabItem value="troubleshooting" label="Troubleshooting">
+
+## 🐛 Troubleshooting
+
+### **FATAL ERROR: Reached Heap Limit**
+
+If you encounter memory-related errors during the build, increase the **Node.js heap size**:
+
+1. **Modify Dockerfile**:
+   ```dockerfile
+   ENV NODE_OPTIONS=--max-old-space-size=4096
+   ```
+
+2. **Allocate at least 4 GB of RAM** to Node.js.
+
+---
+
+### **Other Issues**
+
+- **Port Conflicts**:  
+   Ensure that no other processes are using **ports 8080 or 5173**.
+
+- **Hot Reload Not Working**:  
+   Verify that **watch mode** is enabled for both frontend and backend.
+
+</TabItem>
+
+</Tabs>
+
+## Contributing to Open WebUI
+
+### Local Workflow
+
+1. **Commit Changes Regularly** to track progress.
+2. **Sync with the Main Branch** to avoid conflicts:
+   ```bash
+   git pull origin main
+   ```
+
+3. **Run Tests Before Pushing**:
+   ```bash
+   npm run test
+   ```
+
+Happy coding! 🎉

From 24e2e61e55b2b2ed8ce7b9c63a3cd3611fdb0d16 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:50:37 +0000
Subject: [PATCH 03/42] Add EnvConfig.md changes from feature-nginx-combined

---
 .../advanced-topics/EnvConfig.md              | 1146 +++++++++++++++++
 1 file changed, 1146 insertions(+)
 create mode 100644 docs/getting-started/advanced-topics/EnvConfig.md

diff --git a/docs/getting-started/advanced-topics/EnvConfig.md b/docs/getting-started/advanced-topics/EnvConfig.md
new file mode 100644
index 0000000..e27bafe
--- /dev/null
+++ b/docs/getting-started/advanced-topics/EnvConfig.md
@@ -0,0 +1,1146 @@
+---
+sidebar_position: 4
+title: "🌍 Environment Variables"
+---
+
+## Overview
+
+Open WebUI provides a range of environment variables that allow you to customize and configure 
+various aspects of the application. This page serves as a comprehensive reference for all available 
+environment variables, including their types, default values, and descriptions.
+
+:::info
+Last updated: v0.3.20
+:::
+
+## App/Backend
+
+The following environment variables are used by `backend/config.py` to provide Open WebUI startup 
+configuration. Please note that some variables may have different default values depending on 
+whether you're running Open WebUI directly or via Docker. For more information on logging 
+environment variables, see our [logging documentation](Logging#appbackend).
+
+### General
+
+#### `ENV`
+
+- Type: `str` (enum: `dev`, `prod`)
+- Options:
+  - `dev` - Enables the FastAPI API docs on `/docs`
+  - `prod` - Automatically configures several environment variables
+- Default:
+  - **Backend Default**: `dev`
+  - **Docker Default**: `prod`
+- Description: Environment setting.
+
+#### `WEBUI_AUTH`
+
+- Type: `bool`
+- Default Setting: `True`
+- Description: This setting enables or disables authentication.
+
+:::danger
+If set to `False`, authentication will be disabled for your Open WebUI instance. However, it's 
+important to note that turning off authentication is only possible for fresh installations without 
+any existing users. If there are already users registered, you cannot disable authentication 
+directly. Ensure that no users are present in the database, if you intend to turn off `WEBUI_AUTH`.
+:::
+
+#### `WEBUI_NAME`
+
+- Type: `str`
+- Default: `Open WebUI`
+- Description: Sets the main WebUI name. Appends `(Open WebUI)` if overridden.
+
+#### `WEBUI_URL`
+
+- Type: `str`
+- Default: `http://localhost:3000`
+- Description: Specifies the URL where the Open WebUI is reachable. Currently used for search engine support.
+
+#### `AIOHTTP_CLIENT_TIMEOUT`
+
+- Type: `int`
+- Default: `300`
+- Description: Specifies the timeout duration in seconds for the aiohttp client.
+
+:::info
+This is the maximum amount of time the client will wait for a response before timing out.
+If set to an empty string (`''`), the timeout will be set to `None`, effectively disabling the timeout and 
+allowing the client to wait indefinitely.
+:::
+
+#### `DATA_DIR`
+
+- Type: `str`
+- Default: `./data`
+- Description: Specifies the base directory for data storage, including uploads, cache, vector database, etc.
+
+#### `FRONTEND_BUILD_DIR`
+
+- Type: `str`
+- Default: `../build`
+- Description: Specifies the location of the built frontend files.
+
+#### `STATIC_DIR`
+
+- Type: `str`
+- Default: `./static`
+- Description: Specifies the directory for static files, such as the favicon.
+
+#### `CUSTOM_NAME`
+
+- Type: `str`
+- Description: Sets `WEBUI_NAME` but polls **api.openwebui.com** for metadata.
+
+#### `ENABLE_SIGNUP`
+
+- Type: `bool`
+- Default: `True`
+- Description: Toggles user account creation.
+
+#### `ENABLE_LOGIN_FORM`
+
+- Type: `bool`
+- Default: `True`
+- Description: Toggles email, password, sign in and "or" (only when `ENABLE_OAUTH_SIGNUP` is set to True) elements.
+
+:::danger
+This should **only** ever be set to `False` when [ENABLE_OAUTH_SIGNUP](EnvConfig) 
+is also being used and set to `True`. Failure to do so will result in the inability to login.
+:::
+
+#### `ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION`
+
+- Type: `bool`
+- Default: `True`
+- Description: Bypass SSL Verification for RAG on Websites.
+
+#### `DEFAULT_MODELS`
+
+- Type: `str`
+- Description: Sets a default Language Model.
+
+#### `DEFAULT_USER_ROLE`
+
+- Type: `str` (enum: `pending`, `user`, `admin`)
+- Options:
+  - `pending` - New users are pending until their accounts are manually activated by an admin.
+  - `user` - New users are automatically activated with regular user permissions.
+  - `admin` - New users are automatically activated with administrator permissions.
+- Default: `pending`
+- Description: Sets the default role assigned to new users.
+
+#### `USER_PERMISSIONS_CHAT_DELETION`
+
+- Type: `bool`
+- Default: `True`
+- Description: Toggles user permission to delete chats.
+
+#### `USER_PERMISSIONS_CHAT_EDITING`
+
+- Type: `bool`
+- Default: `True`
+- Description: Toggles user permission to edit chats.
+
+#### `USER_PERMISSIONS_CHAT_TEMPORARY`
+
+- Type: `bool`
+- Default: `True`
+- Description: Toggles user permission to create temporary chats.
+
+#### `ENABLE_MODEL_FILTER`
+
+- Type: `bool`
+- Default: `False`
+- Description: Toggles Language Model filtering.
+
+#### `MODEL_FILTER_LIST`
+
+- Type: `str`
+- Description: Sets the Language Model filter list, semicolon-separated.
+- Example: `llama3.1:instruct;gemma2:latest`
+
+#### `WEBHOOK_URL`
+
+- Type: `str`
+- Description: Sets a webhook for integration with Slack/Microsoft Teams.
+
+#### `ENABLE_ADMIN_EXPORT`
+
+- Type: `bool`
+- Default: `True`
+- Description: Controls whether admin users can export data.
+
+#### `ENABLE_ADMIN_CHAT_ACCESS`
+
+- Type: `bool`
+- Default: `True`
+- Description: Enables admin users to access all chats.
+
+#### `ENABLE_COMMUNITY_SHARING`
+
+- Type: `bool`
+- Default: `True`
+- Description: Controls whether users are shown the share to community button.
+
+#### `ENABLE_MESSAGE_RATING`
+
+- Type: `bool`
+- Default: `True`
+- Description: Enables message rating feature.
+
+#### `WEBUI_BUILD_HASH`
+
+- Type: `str`
+- Default: `dev-build`
+- Description: Used for identifying the Git SHA of the build for releases.
+
+#### `WEBUI_BANNERS`
+
+- Type: `list` of `dict`
+- Default: `[]`
+- Description: List of banners to show to users. Format of banners are:
+
+```json
+[{"id": "string","type": "string [info, success, warning, error]","title": "string","content": "string","dismissible": False,"timestamp": 1000}]
+```
+
+#### `WEBUI_AUTH_TRUSTED_EMAIL_HEADER`
+
+- Type: `str`
+- Description: Defines the trusted request header for authentication. See [SSO docs](/tutorials/features/sso).
+
+#### `WEBUI_AUTH_TRUSTED_NAME_HEADER`
+
+- Type: `str`
+- Description: Defines the trusted request header for the username of anyone registering with the 
+`WEBUI_AUTH_TRUSTED_EMAIL_HEADER` header. See [SSO docs](/tutorials/features/sso).
+
+#### `WEBUI_SECRET_KEY`
+
+- Type: `str`
+- Default: `t0p-s3cr3t`
+- Docker Default: Randomly generated on first start
+- Description: Overrides the randomly generated string used for JSON Web Token.
+
+#### `JWT_EXPIRES_IN`
+
+- Type: `int`
+- Default: `-1`
+- Description: Sets the JWT expiration time in seconds. A value of -1 disables expiration.
+
+#### `USE_CUDA_DOCKER`
+
+- Type: `bool`
+- Default: `False`
+- Description: Builds the Docker image with NVIDIA CUDA support. Enables GPU acceleration 
+for local Whisper and embeddings.
+
+#### `DATABASE_URL`
+
+- Type: `str`
+- Default: `sqlite:///${DATA_DIR}/webui.db`
+- Description: Specifies the database URL to connect to.
+
+:::info
+Supports SQLite and Postgres. Changing the URL does not migrate data between databases.
+Documentation on URL scheme available [here](https://docs.sqlalchemy.org/en/20/core/engines.html#database-urls).
+:::
+
+#### `DATABASE_POOL_SIZE`
+
+- Type: `int`
+- Default: `0`
+- Description: Specifies the size of the database pool. A value of `0` disables pooling. 
+
+#### `DATABASE_POOL_MAX_OVERFLOW`
+
+- Type: `int`
+- Default: `0`
+- Description: Specifies the database pool max overflow.
+
+:::info
+More information about this setting can be found [here](https://docs.sqlalchemy.org/en/20/core/pooling.html#sqlalchemy.pool.QueuePool.params.max_overflow).
+:::
+
+#### `DATABASE_POOL_TIMEOUT`
+
+- Type: `int`
+- Default: `30`
+- Description: Specifies the database pool timeout in seconds to get a connection.
+
+:::info
+More information about this setting can be found [here](https://docs.sqlalchemy.org/en/20/core/pooling.html#sqlalchemy.pool.QueuePool.params.timeout).
+:::
+
+#### `DATABASE_POOL_RECYCLE`
+
+- Type: `int`
+- Default: `3600`
+- Description: Specifies the database pool recycle time in seconds.
+
+:::info
+More information about this setting can be found [here](https://docs.sqlalchemy.org/en/20/core/pooling.html#setting-pool-recycle).
+:::
+
+#### `PORT`
+
+- Type: `int`
+- Default: `8080`
+- Description: Sets the port to run Open WebUI from.
+
+#### `RESET_CONFIG_ON_START`
+
+- Type: `bool`
+- Default: `False`
+- Description: Resets the `config.json` file on startup.
+
+#### `DEFAULT_LOCALE`
+
+- Type: `str`
+- Default: `en`
+- Description: Sets the default locale for the application.
+
+#### `FUNCTIONS_DIR`
+
+- Type: `str`
+- Default: `./functions`
+- Description: Specifies the directory for custom functions.
+
+#### `SHOW_ADMIN_DETAILS`
+
+- Type: `bool`
+- Default: `True`
+- Description: Toggles whether to show admin user details in the interface.
+
+#### `ADMIN_EMAIL`
+
+- Type: `str`
+- Description: Sets the admin email shown by `SHOW_ADMIN_DETAILS`.
+
+#### `SAFE_MODE`
+
+- Type: `bool`
+- Default: `False`
+- Description: Enables safe mode, which disables potentially unsafe features.
+
+#### `WEBUI_SESSION_COOKIE_SAME_SITE`
+
+- Type: `str` (enum: `lax`, `strict`, `none`)
+- Options:
+  - `lax` - Sets the `SameSite` attribute to lax, allowing session cookies to be sent with 
+requests initiated by third-party websites.
+  - `strict` - Sets the `SameSite` attribute to strict, blocking session cookies from being sent 
+with requests initiated by third-party websites.
+  - `none` - Sets the `SameSite` attribute to none, allowing session cookies to be sent with 
+requests initiated by third-party websites, but only over HTTPS.
+- Default: `lax`
+- Description: Sets the `SameSite` attribute for session cookies.
+
+#### `WEBUI_SESSION_COOKIE_SECURE`
+
+- Type: `bool`
+- Default: `False`
+- Description: Sets the `Secure` attribute for session cookies if set to `True`.
+
+#### `AIOHTTP_CLIENT_TIMEOUT`
+
+- Type: `int`
+- Description: Sets the timeout in seconds for internal aiohttp connections. This impacts things 
+such as connections to Ollama and OpenAI endpoints.
+
+#### `FONTS_DIR`
+
+- Type: `str`
+- Description: Specifies the directory for fonts.
+
+### Ollama
+
+#### `ENABLE_OLLAMA_API`
+
+- Type: `bool`
+- Default: `True`
+- Description: Enables the use of Ollama APIs.
+
+#### `OLLAMA_BASE_URL`
+
+- Type: `str`
+- Default: `http://localhost:11434`
+- Docker Default:
+  - If `K8S_FLAG` is set: `http://ollama-service.open-webui.svc.cluster.local:11434`
+  - If `USE_OLLAMA_DOCKER=True`: `http://localhost:11434`
+  - Else `http://host.docker.internal:11434`
+- Description: Configures the Ollama backend URL.
+
+#### `OLLAMA_BASE_URLS`
+
+- Type: `str`
+- Description: Configures load-balanced Ollama backend hosts, separated by `;`. See 
+[`OLLAMA_BASE_URL`](#ollama_base_url). Takes precedence over [`OLLAMA_BASE_URL`](#ollama_base_url).
+
+#### `USE_OLLAMA_DOCKER`
+
+- Type: `bool`
+- Default: `False`
+- Description: Builds the Docker image with a bundled Ollama instance.
+
+#### `K8S_FLAG`
+
+- Type: `bool`
+- Description: If set, assumes Helm chart deployment and sets [`OLLAMA_BASE_URL`](#ollama_base_url) to `http://ollama-service.open-webui.svc.cluster.local:11434`
+
+### OpenAI
+
+#### `ENABLE_OPENAI_API`
+
+- Type: `bool`
+- Default: `True`
+- Description: Enables the use of OpenAI APIs.
+
+#### `OPENAI_API_BASE_URL`
+
+- Type: `str`
+- Default: `https://api.openai.com/v1`
+- Description: Configures the OpenAI base API URL.
+
+#### `OPENAI_API_BASE_URLS`
+
+- Type: `str`
+- Description: Supports balanced OpenAI base API URLs, semicolon-separated.
+- Example: `http://host-one:11434;http://host-two:11434`
+
+#### `OPENAI_API_KEY`
+
+- Type: `str`
+- Description: Sets the OpenAI API key.
+
+#### `OPENAI_API_KEYS`
+
+- Type: `str`
+- Description: Supports multiple OpenAI API keys, semicolon-separated.
+- Example: `sk-124781258123;sk-4389759834759834`
+
+### Tasks
+
+#### `TASK_MODEL`
+
+- Type: `str`
+- Description: The default model to use for tasks such as title and web search query generation 
+when using Ollama models.
+
+#### `TASK_MODEL_EXTERNAL`
+
+- Type: `str`
+- Description: The default model to use for tasks such as title and web search query generation 
+when using OpenAI-compatible endpoints.
+
+#### `TITLE_GENERATION_PROMPT_TEMPLATE`
+
+- Type: `str`
+- Description: Prompt to use when generating chat titles.
+- Default:
+
+```
+Create a concise, 3-5 word title with an emoji as a title for the prompt in the given language. Suitable Emojis for the summary can be used to enhance understanding but avoid quotation marks or special formatting. RESPOND ONLY WITH THE TITLE TEXT.
+
+Examples of titles:
+📉 Stock Market Trends
+🍪 Perfect Chocolate Chip Recipe
+Evolution of Music Streaming
+Remote Work Productivity Tips
+Artificial Intelligence in Healthcare
+🎮 Video Game Development Insights
+
+Prompt: {{prompt:middletruncate:8000}}
+```
+
+#### `SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE`
+
+- Type: `str`
+- Description: Prompt to use when generating search queries.
+- Default:
+
+```
+Assess the need for a web search based on the current question and prior interactions, but lean towards suggesting a Google search query if uncertain. Generate a Google search query even when the answer might be straightforward, as additional information may enhance comprehension or provide updated data. If absolutely certain that no further information is required, return an empty string. Default to a search query if unsure or in doubt. Today's date is {{CURRENT_DATE}}.
+
+Current Question:
+{{prompt:end:4000}}
+
+Interaction History:
+{{MESSAGES:END:6}}
+```
+
+#### `TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE`
+
+- Type: `str`
+- Description: Prompt to use when calling tools.
+- Default:
+
+```
+Available Tools: {{TOOLS}}\nReturn an empty string if no tools match the query. If a function tool matches, construct and return a JSON object in the format {\"name\": \"functionName\", \"parameters\": {\"requiredFunctionParamKey\": \"requiredFunctionParamValue\"}} using the appropriate tool and its parameters. Only return the object and limit the response to the JSON object without additional text.
+```
+
+#### `CORS_ALLOW_ORIGIN`
+
+- Type: `str`
+- Default: `*`
+- Description: Sets the allowed origins for Cross-Origin Resource Sharing (CORS).
+
+### RAG
+
+#### `DOCS_DIR`
+
+- Type: `str`
+- Default: `${DATA_DIR}/docs`
+- Description: Specifies the directory scanned for documents to add to the RAG database when triggered.
+
+#### `VECTOR_DB`
+
+- Type: `str`
+- Default: `chroma`
+- Description: Specifies which vector database system to use, either 'chroma' for ChromaDB or 'milvus' for Milvus. This setting determines which vector storage system will be used for managing embeddings.
+
+#### `CHROMA_TENANT`
+
+- Type: `str`
+- Default: `default_tenant`
+- Description: Sets the tenant for ChromaDB to use for RAG embeddings.
+
+#### `CHROMA_DATABASE`
+
+- Type: `str`
+- Default: `default_database`
+- Description: Sets the database in the ChromaDB tenant to use for RAG embeddings.
+
+#### `CHROMA_HTTP_HOST`
+
+- Type: `str`
+- Description: Specifies the hostname of a remote ChromaDB Server. Uses a local ChromaDB instance if not set.
+
+#### `CHROMA_HTTP_PORT`
+
+- Type: `int`
+- Default: `8000`
+- Description: Specifies the port of a remote ChromaDB Server.
+
+#### `CHROMA_HTTP_HEADERS`
+
+- Type: `str`
+- Description: Comma-separated list of HTTP headers to include with every ChromaDB request.
+- Example: `Authorization=Bearer heuhagfuahefj,User-Agent=OpenWebUI`.
+
+#### `CHROMA_HTTP_SSL`
+
+- Type: `bool`
+- Default: `False`
+- Description: Controls whether or not SSL is used for ChromaDB Server connections.
+
+#### `MILVUS_URI`
+
+- Type: `str`
+- Default: `${DATA_DIR}/vector_db/milvus.db`
+- Description: Specifies the URI for connecting to the Milvus vector database. This can point to a local or remote Milvus server based on the deployment configuration.
+
+#### `RAG_TOP_K`
+
+- Type: `int`
+- Default: `5`
+- Description: Sets the default number of results to consider when using RAG.
+
+#### `RAG_RELEVANCE_THRESHOLD`
+
+- Type: `float`
+- Default: `0.0`
+- Description: Sets the relevance threshold to consider for documents when used with reranking.
+
+#### `ENABLE_RAG_HYBRID_SEARCH`
+
+- Type: `bool`
+- Default: `False`
+- Description: Enables the use of ensemble search with `BM25` + `ChromaDB`, with reranking using 
+`sentence_transformers` models.
+
+#### `ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION`
+
+- Type: `bool`
+- Default: `True`
+- Description: Enables TLS certification verification when browsing web pages for RAG.
+
+#### `RAG_EMBEDDING_ENGINE`
+
+- Type: `str` (enum: `ollama`, `openai`)
+- Options:
+  - Leave empty for `Default (SentenceTransformers)` - Uses SentenceTransformers for embeddings.
+  - `ollama` - Uses the Ollama API for embeddings.
+  - `openai` - Uses the OpenAI API for embeddings.
+- Description: Selects an embedding engine to use for RAG.
+
+#### `PDF_EXTRACT_IMAGES`
+
+- Type: `bool`
+- Default: `False`
+- Description: Extracts images from PDFs using OCR when loading documents.
+
+#### `RAG_EMBEDDING_MODEL`
+
+- Type: `str`
+- Default: `sentence-transformers/all-MiniLM-L6-v2`
+- Description: Sets a model for embeddings. Locally, a Sentence-Transformer model is used.
+
+#### `RAG_EMBEDDING_MODEL_AUTO_UPDATE`
+
+- Type: `bool`
+- Default: `False`
+- Description: Toggles automatic update of the Sentence-Transformer model.
+
+#### `RAG_EMBEDDING_MODEL_TRUST_REMOTE_CODE`
+
+- Type: `bool`
+- Default: `False`
+- Description: Determines whether or not to allow custom models defined on the Hub in their own modeling files.
+
+#### `RAG_TEMPLATE`
+
+- Type: `str`
+- Default:
+
+```
+You are given a user query, some textual context and rules, all inside xml tags. You have to answer the query based on the context while respecting the rules.
+
+<context>
+[context]
+</context>
+
+<rules>
+- If you don't know, just say so.
+- If you are not sure, ask for clarification.
+- Answer in the same language as the user query.
+- If the context appears unreadable or of poor quality, tell the user then answer as best as you can.
+- If the answer is not in the context but you think you know the answer, explain that to the user then answer with your own knowledge.
+- Answer directly and without using xml tags.
+</rules>
+
+<user_query>
+[query]
+</user_query>
+```
+
+- Description: Template to use when injecting RAG documents into chat completion.
+
+#### `RAG_RERANKING_MODEL`
+
+- Type: `str`
+- Description: Sets a model for reranking results. Locally, a Sentence-Transformer model is used.
+
+#### `RAG_RERANKING_MODEL_AUTO_UPDATE`
+
+- Type: `bool`
+- Default: `False`
+- Description: Toggles automatic update of the reranking model.
+
+#### `RAG_RERANKING_MODEL_TRUST_REMOTE_CODE`
+
+- Type: `bool`
+- Default: `False`
+- Description: Determines whether or not to allow custom models defined on the Hub in their own 
+modeling files for reranking.
+
+#### `RAG_OPENAI_API_BASE_URL`
+
+- Type: `str`
+- Default: `${OPENAI_API_BASE_URL}`
+- Description: Sets the OpenAI base API URL to use for RAG embeddings.
+
+#### `RAG_OPENAI_API_KEY`
+
+- Type: `str`
+- Default: `${OPENAI_API_KEY}`
+- Description: Sets the OpenAI API key to use for RAG embeddings.
+
+#### `RAG_EMBEDDING_OPENAI_BATCH_SIZE`
+
+- Type: `int`
+- Default: `1`
+- Description: Sets the batch size for OpenAI embeddings.
+
+#### `ENABLE_RAG_LOCAL_WEB_FETCH`
+
+- Type: `bool`
+- Default: `False`
+- Description: Enables local web fetching for RAG. Enabling this allows Server Side Request 
+Forgery attacks against local network resources.
+
+#### `YOUTUBE_LOADER_LANGUAGE`
+
+- Type: `str`
+- Default: `en`
+- Description: Sets the language to use for YouTube video loading.
+
+#### `CHUNK_SIZE`
+
+- Type: `int`
+- Default: `1500`
+- Description: Sets the document chunk size for embeddings.
+
+#### `CHUNK_OVERLAP`
+
+- Type: `int`
+- Default: `100`
+- Description: Specifies how much overlap there should be between chunks.
+
+#### `CONTENT_EXTRACTION_ENGINE`
+
+- Type: `str` (`tika`)
+- Options:
+  - Leave empty to use default
+  - `tika` - Use a local Apache Tika server
+- Description: Sets the content extraction engine to use for document ingestion.
+
+#### `TIKA_SERVER_URL`
+
+- Type: `str`
+- Default: `http://localhost:9998`
+- Description: Sets the URL for the Apache Tika server.
+
+#### `RAG_FILE_MAX_COUNT`
+
+- Type: `int`
+- Default: `10`
+- Description: Sets the maximum number of files that can be uploaded at once for document ingestion.
+
+#### `RAG_FILE_MAX_SIZE`
+
+- Type: `int`
+- Default: `100` (100MB)
+- Description: Sets the maximum size of a file that can be uploaded for document ingestion.
+
+### Web Search
+
+#### `ENABLE_RAG_WEB_SEARCH`
+
+- Type: `bool`
+- Default: `False`
+- Description: Enable web search toggle
+
+#### `ENABLE_SEARCH_QUERY`
+
+- Type: `bool`
+- Default: `False`
+- Description: Enables the generation of search queries from prompts
+
+#### `RAG_WEB_SEARCH_ENGINE`
+
+- Type: `str` (enum: `searxng`, `google_pse`, `brave`, `serpstack`, `serper`, `serply`, `searchapi`, `duckduckgo`, `tavily`, `jina`)
+- Options:
+  - `searxng` - Uses the [SearXNG](https://github.com/searxng/searxng) search engine.
+  - `google_pse` - Uses the [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/).
+  - `brave` - Uses the [Brave search engine](https://brave.com/search/api/).
+  - `serpstack` - Uses the [Serpstack search engine](https://serpstack.com/).
+  - `serper` - Uses the [Serper search engine](https://serper.dev/).
+  - `serply` - Uses the [Serply search engine](https://serply.io/).
+  - `searchapi` - Uses the [SearchAPI search engine](https://www.searchapi.io/).
+  - `duckduckgo` - Uses the [DuckDuckGo search engine](https://duckduckgo.com/).
+  - `tavily` - Uses the [Tavily search engine](https://tavily.com/).
+  - `jina` - Uses the [Jina search engine](https://jina.ai/).
+- Description: Select engine for performing searches.
+
+#### `SEARXNG_QUERY_URL`
+
+- Type: `str`
+- Description: The [SearXNG search API](https://docs.searxng.org/dev/search_api.html) URL supporting JSON output. `<query>` is replaced with 
+the search query. Example: `http://searxng.local/search?q=<query>`
+
+#### `GOOGLE_PSE_API_KEY`
+
+- Type: `str`
+- Description: The API key for the Google Programmable Search Engine (PSE) service.
+
+#### `GOOGLE_PSE_ENGINE_ID`
+
+- Type: `str`
+- Description: The engine ID for the Google Programmable Search Engine (PSE) service.
+
+#### `BRAVE_SEARCH_API_KEY`
+
+- Type: `str`
+- Description: The API key for the Brave Search API.
+
+#### `SERPSTACK_API_KEY`
+
+- Type: `str`
+- Description: The API key for Serpstack search API.
+
+#### `SERPSTACK_HTTPS`
+
+- Type: `bool`
+- Default: `True`
+- Description: Configures the use of HTTPS for Serpstack requests. Free tier requests are restricted to HTTP only.
+
+#### `SERPER_API_KEY`
+
+- Type: `str`
+- Description: The API key for the Serper search API.
+
+#### `SERPLY_API_KEY`
+
+- Type: `str`
+- Description: The API key for the Serply search API.
+
+#### `TAVILY_API_KEY`
+
+- Type: `str`
+- Description: The API key for the Tavily search API.
+
+#### `RAG_WEB_SEARCH_RESULT_COUNT`
+
+- Type: `int`
+- Default: `3`
+- Description: Maximum number of search results to crawl.
+
+#### `RAG_WEB_SEARCH_CONCURRENT_REQUESTS`
+
+- Type: `int`
+- Default: `10`
+- Description: Number of concurrent requests to crawl web pages returned from search results.
+
+#### `SEARCHAPI_API_KEY`
+
+- Type: `str`
+- Description: Sets the SearchAPI API key.
+
+#### `SEARCHAPI_ENGINE`
+
+- Type: `str`
+- Description: Sets the SearchAPI engine.
+
+### Speech to Text
+
+#### `AUDIO_STT_ENGINE`
+
+- Type: `str` (enum: `openai`)
+- Options:
+  - Leave empty to use local Whisper engine for Speech-to-Text.
+  - `openai` - Uses OpenAI engine for Speech-to-Text.
+- Description: Specifies the Speech-to-Text engine to use.
+
+#### `AUDIO_STT_OPENAI_API_BASE_URL`
+
+- Type: `str`
+- Default: `${OPENAI_API_BASE_URL}`
+- Description: Sets the OpenAI-compatible base URL to use for Speech-to-Text.
+
+#### `AUDIO_STT_OPENAI_API_KEY`
+
+- Type: `str`
+- Default: `${OPENAI_API_KEY}`
+- Description: Sets the OpenAI API key to use for Speech-to-Text.
+
+#### `AUDIO_STT_MODEL`
+
+- Type: `str`
+- Default: `whisper-1`
+- Description: Specifies the Speech-to-Text model to use for OpenAI-compatible endpoints.
+
+#### `WHISPER_MODEL`
+
+- Type: `str`
+- Default: `base`
+- Description: Sets the Whisper model to use for Speech-to-Text. The backend used is faster_whisper with quantization to `int8`.
+
+#### `WHISPER_MODEL_DIR`
+
+- Type: `str`
+- Default: `${DATA_DIR}/cache/whisper/models`
+- Description: Specifies the directory to store Whisper model files.
+
+#### `WHISPER_MODEL_AUTO_UPDATE`
+
+- Type: `bool`
+- Default: `False`
+- Description: Toggles automatic update of the Whisper model.
+
+### Text to Speech
+
+#### `AUDIO_TTS_ENGINE`
+
+- Type: `str` (enum: `elevenlabs`, `openai`)
+- Options:
+  - Leave empty to use built-in WebAPI engine for Text-to-Speech.
+  - `elevenlabs` - Uses ElevenLabs engine for Text-to-Speech
+  - `openai` - Uses OpenAI engine for Text-to-Speech.
+- Description: Specifies the Text-to-Speech engine to use.
+
+#### `AUDIO_TTS_API_KEY`
+
+- Type: `str`
+- Description: Sets the API key for Text-to-Speech.
+
+#### `AUDIO_TTS_OPENAI_API_BASE_URL`
+
+- Type: `str`
+- Default: `${OPENAI_API_BASE_URL}`
+- Description: Sets the OpenAI-compatible base URL to use for text-to-speech.
+
+#### `AUDIO_TTS_OPENAI_API_KEY`
+
+- Type: `str`
+- Default: `${OPENAI_API_KEY}`
+- Description: Sets the API key to use for text-to-speech.
+
+#### `AUDIO_TTS_MODEL`
+
+- Type: `str`
+- Default: `tts-1`
+- Description: Specifies the OpenAI text-to-speech model to use.
+
+#### `AUDIO_TTS_VOICE`
+
+- Type: `str`
+- Default: `alloy`
+- Description: Sets the OpenAI text-to-speech voice to use.
+
+#### `AUDIO_TTS_SPLIT_ON`
+
+- Type: `str`
+- Default: `punctuation`
+- Description: Sets how text is split into chunks for OpenAI text-to-speech (e.g. on punctuation).
+
+### Image Generation
+
+#### `ENABLE_IMAGE_GENERATION`
+
+- Type: `bool`
+- Default: `False`
+- Description: Enables or disables image generation features.
+
+#### `IMAGE_GENERATION_ENGINE`
+
+- Type: `str` (enum: `openai`, `comfyui`, `automatic1111`)
+- Options:
+  - `openai` - Uses OpenAI DALL-E for image generation.
+  - `comfyui` - Uses ComfyUI engine for image generation.
+  - `automatic1111` - Uses Automatic1111 engine for image generation (default).
+- Default: `automatic1111`
+- Description: Specifies the engine to use for image generation.
+
+#### `AUTOMATIC1111_BASE_URL`
+
+- Type: `str`
+- Description: Specifies the URL to Automatic1111's Stable Diffusion API.
+
+#### `AUTOMATIC1111_API_AUTH`
+
+- Type: `str`
+- Description: Sets the Automatic1111 API authentication.
+
+#### `COMFYUI_BASE_URL`
+
+- Type: `str`
+- Description: Specifies the URL to the ComfyUI image generation API.
+
+#### `COMFYUI_WORKFLOW`
+
+- Type: `str`
+- Description: Sets the ComfyUI workflow.
+
+#### `IMAGES_OPENAI_API_BASE_URL`
+
+- Type: `str`
+- Default: `${OPENAI_API_BASE_URL}`
+- Description: Sets the OpenAI-compatible base URL to use for DALL-E image generation.
+
+
+#### `IMAGES_OPENAI_API_KEY`
+
+- Type: `str`
+- Default: `${OPENAI_API_KEY}`
+- Description: Sets the API key to use for DALL-E image generation.
+
+#### `IMAGE_GENERATION_MODEL`
+
+- Type: `str`
+- Description: Sets the default model to use for image generation.
+
+#### `IMAGE_SIZE`
+
+- Type: `str`
+- Default: `512x512`
+- Description: Sets the default image size to generate.
+
+#### `IMAGE_STEPS`
+
+- Type: `int`
+- Default: `50`
+- Description: Sets the default iteration steps for image generation. Used for ComfyUI and AUTOMATIC1111.
+
+### OAuth
+
+#### `ENABLE_OAUTH_SIGNUP`
+
+- Type: `bool`
+- Default: `False`
+- Description: Enables user account creation via OAuth.
+
+#### `OAUTH_MERGE_ACCOUNTS_BY_EMAIL`
+
+- Type: `bool`
+- Default: `False`
+- Description: If enabled, merges OAuth accounts with existing accounts using the same email 
+address. This is considered unsafe as providers may not verify email addresses and can lead to 
+account takeovers.
+
+#### `OAUTH_USERNAME_CLAIM`
+
+- Type: `str`
+- Default: `name`
+- Description: Set username claim for OpenID.
+
+#### `OAUTH_EMAIL_CLAIM`
+
+- Type: `str`
+- Default: `email`
+- Description: Set email claim for OpenID.
+
+#### `OAUTH_PICTURE_CLAIM`
+
+- Type: `str`
+- Default: `picture`
+- Description: Set picture (avatar) claim for OpenID.
+
+#### `OAUTH_CLIENT_ID`
+
+- Type: `str`
+- Description: Sets the client ID for OIDC
+
+#### `OAUTH_CLIENT_SECRET`
+
+- Type: `str`
+- Description: Sets the client secret for OIDC
+
+#### `OAUTH_SCOPES`
+
+- Type: `str`
+- Default: `openid email profile`
+- Description: Sets the scope for OIDC authentication. `openid` and `email` are required.
+
+#### `OAUTH_PROVIDER_NAME`
+
+- Type: `str`
+- Default: `SSO`
+- Description: Sets the name for the OIDC provider.
+
+#### `ENABLE_OAUTH_ROLE_MANAGEMENT`
+
+- Type: `bool`
+- Default: `False`
+- Description: Enables role management through OAuth delegation.
+
+#### `OAUTH_ROLES_CLAIM`
+
+- Type: `str`
+- Default: `roles`
+- Description: Sets the roles claim to look for in the OIDC token.
+
+#### `OAUTH_ALLOWED_ROLES`
+
+- Type: `str`
+- Default: `user,admin`
+- Description: Sets the roles that are allowed access to the platform.
+
+#### `OAUTH_ADMIN_ROLES`
+
+- Type: `str`
+- Default: `admin`
+- Description: Sets the roles that are considered administrators.
+
+#### `GOOGLE_CLIENT_ID`
+
+- Type: `str`
+- Description: Sets the client ID for Google OAuth
+
+#### `GOOGLE_CLIENT_SECRET`
+
+- Type: `str`
+- Description: Sets the client secret for Google OAuth
+
+#### `GOOGLE_OAUTH_SCOPE`
+
+- Type: `str`
+- Default: `openid email profile`
+- Description: Sets the scope for Google OAuth authentication.
+
+#### `GOOGLE_REDIRECT_URI`
+
+- Type: `str`
+- Description: Sets the redirect URI for Google OAuth
+
+#### `MICROSOFT_CLIENT_ID`
+
+- Type: `str`
+- Description: Sets the client ID for Microsoft OAuth
+
+#### `MICROSOFT_CLIENT_SECRET`
+
+- Type: `str`
+- Description: Sets the client secret for Microsoft OAuth
+
+#### `MICROSOFT_CLIENT_TENANT_ID`
+
+- Type: `str`
+- Description: Sets the tenant ID for Microsoft OAuth
+
+#### `MICROSOFT_OAUTH_SCOPE`
+
+- Type: `str`
+- Default: `openid email profile`
+- Description: Sets the scope for Microsoft OAuth authentication.
+
+#### `MICROSOFT_REDIRECT_URI`
+
+- Type: `str`
+- Description: Sets the redirect URI for Microsoft OAuth
+
+#### `OPENID_PROVIDER_URL`
+
+- Type: `str`
+- Description: Path to the `.well-known/openid-configuration` endpoint
+
+#### `OPENID_REDIRECT_URI`
+
+- Type: `str`
+- Description: Sets the redirect URI for OIDC
+
+### Tools
+
+#### `TOOLS_DIR`
+
+- Type: `str`
+- Default: `${DATA_DIR}/tools`
+- Description: Specifies the directory for custom tools.
+
+## Misc Environment Variables
+
+These variables are not specific to Open WebUI but can still be valuable in certain contexts.
+
+### Proxy Settings
+
+Open WebUI supports using proxies for HTTP and HTTPS retrievals. To specify proxy settings,
+Open WebUI uses the following environment variables:
+
+#### `http_proxy`
+
+- Type: `str`
+- Description: Sets the URL for the HTTP proxy.
+
+#### `https_proxy`
+
+- Type: `str`
+- Description: Sets the URL for the HTTPS proxy.
+
+#### `no_proxy`
+
+- Type: `str`
+- Description: Lists domain extensions (or IP addresses) for which the proxy should not be used,
+separated by commas. For example, setting no_proxy to '.mit.edu' ensures that the proxy is
+bypassed when accessing documents from MIT.

From 26d16362dd134d0083b805d1bdd0ac80abcfe4ec Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:51:08 +0000
Subject: [PATCH 04/42] Add HttpsEncryption.md changes from
 feature-nginx-combined

---
 .../advanced-topics/HttpsEncryption.md        | 27 +++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 docs/getting-started/advanced-topics/HttpsEncryption.md

diff --git a/docs/getting-started/advanced-topics/HttpsEncryption.md b/docs/getting-started/advanced-topics/HttpsEncryption.md
new file mode 100644
index 0000000..dafefc5
--- /dev/null
+++ b/docs/getting-started/advanced-topics/HttpsEncryption.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 6
+title: "๐Ÿ”’HTTPS Encryption"
+---
+
+## Overview
+
+While HTTPS encryption is **not required** to operate Open WebUI in most cases, certain featuresโ€”such as **Voice Calls**โ€”will be blocked by modern web browsers unless HTTPS is enabled. If you do not plan to use these features, you can skip this section.
+
+## Importance of HTTPS
+
+For deployments at high risk of traffic interception, such as those hosted on the internet, it is recommended to implement HTTPS encryption. This ensures that the username/password signup and authentication process remains secure, protecting sensitive user data from potential threats.
+
+## Choosing Your HTTPS Solution
+
+The choice of HTTPS encryption solution is up to the user and should align with the existing infrastructure. Here are some common scenarios:
+
+- **AWS Environments**: Utilizing an AWS Elastic Load Balancer is often a practical choice for managing HTTPS.
+- **Docker Container Environments**: Popular solutions include Nginx, Traefik, and Caddy.
+- **Cloudflare**: Offers easy HTTPS setup with minimal server-side configuration, suitable for a wide range of applications.
+- **Ngrok**: Provides a quick way to set up HTTPS for local development environments, particularly useful for testing and demos.
+
+## Further Guidance
+
+For detailed instructions and community-submitted tutorials on actual HTTPS encryption deployments, please refer to the [Deployment Tutorials](../../tutorials/deployment/).
+
+This documentation provides a starting point for understanding the options available for enabling HTTPS encryption in your environment.
\ No newline at end of file

From 837386c1ac81e52dd37a273f009146c0b7dfe035 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:51:15 +0000
Subject: [PATCH 05/42] Add Logging.md changes from feature-nginx-combined

---
 .../advanced-topics/Logging.md                | 63 +++++++++++++++++++
 1 file changed, 63 insertions(+)
 create mode 100644 docs/getting-started/advanced-topics/Logging.md

diff --git a/docs/getting-started/advanced-topics/Logging.md b/docs/getting-started/advanced-topics/Logging.md
new file mode 100644
index 0000000..ad91982
--- /dev/null
+++ b/docs/getting-started/advanced-topics/Logging.md
@@ -0,0 +1,63 @@
+---
+sidebar_position: 5
+title: "๐Ÿ“œ Open WebUI Logging"
+---
+
+## Browser Client Logging ##
+
+Client logging generally occurs via [JavaScript](https://developer.mozilla.org/en-US/docs/Web/API/console/log_static) `console.log()` and can be accessed using the built-in browser-specific developer tools:
+* Blink
+  * [Chrome/Chromium](https://developer.chrome.com/docs/devtools/)
+  * [Edge](https://learn.microsoft.com/en-us/microsoft-edge/devtools-guide-chromium/overview)
+* Gecko
+  * [Firefox](https://firefox-source-docs.mozilla.org/devtools-user/)
+* WebKit
+  * [Safari](https://developer.apple.com/safari/tools/)
+
+## Application Server/Backend Logging ##
+
+Logging is an ongoing work-in-progress but some level of control is available using environment variables. [Python Logging](https://docs.python.org/3/howto/logging.html) `log()` and `print()` statements send information to the console. The default level is `INFO`. Ideally, sensitive data will only be exposed with `DEBUG` level.
+
+### Logging Levels ###
+
+The following [logging levels](https://docs.python.org/3/howto/logging.html#logging-levels) values are supported:
+
+| Level      | Numeric value |
+| ---------- | ------------- |
+| `CRITICAL` | 50            |
+| `ERROR`    | 40            |
+| `WARNING`  | 30            |
+| `INFO`     | 20            |
+| `DEBUG`    | 10            |
+| `NOTSET`   | 0             |
+
+### Global ###
+
+The default global log level of `INFO` can be overridden with the `GLOBAL_LOG_LEVEL` environment variable. When set, this executes a [basicConfig](https://docs.python.org/3/library/logging.html#logging.basicConfig) statement with the `force` argument set to *True* within `config.py`. This results in reconfiguration of all attached loggers:
+> _If this keyword argument is specified as true, any existing handlers attached to the root logger are removed and closed, before carrying out the configuration as specified by the other arguments._
+
+The stream uses standard output (`sys.stdout`). In addition to all Open-WebUI `log()` statements, this also affects any imported Python modules that use the Python Logging module `basicConfig` mechanism including [urllib](https://docs.python.org/3/library/urllib.html).
+
+For example, to set `DEBUG` logging level as a Docker parameter use:
+```
+--env GLOBAL_LOG_LEVEL="DEBUG"
+```
+
+### App/Backend ###
+
+Some level of granularity is possible using any of the following combination of variables. Note that `basicConfig` `force` isn't presently used so these statements may only affect Open-WebUI logging and not 3rd party modules.
+
+| Environment Variable | App/Backend                                                       |
+| -------------------- | ----------------------------------------------------------------- |
+| `AUDIO_LOG_LEVEL`    | Audio transcription using faster-whisper, TTS etc.                |
+| `COMFYUI_LOG_LEVEL`  | ComfyUI integration handling                                      |
+| `CONFIG_LOG_LEVEL`   | Configuration handling                                            |
+| `DB_LOG_LEVEL`       | Internal Peewee Database                                          |
+| `IMAGES_LOG_LEVEL`   | AUTOMATIC1111 stable diffusion image generation                   |
+| `LITELLM_LOG_LEVEL`  | LiteLLM proxy                                                     |
+| `MAIN_LOG_LEVEL`     | Main (root) execution                                             |
+| `MODELS_LOG_LEVEL`   | LLM model interaction, authentication, etc.                       |
+| `OLLAMA_LOG_LEVEL`   | Ollama backend interaction                                        |
+| `OPENAI_LOG_LEVEL`   | OpenAI interaction                                                |
+| `RAG_LOG_LEVEL`      | Retrieval-Augmented Generation using Chroma/Sentence-Transformers |
+| `WEBHOOK_LOG_LEVEL`  | Authentication webhook extended logging                           |

From 3b784a3b3d5bb53f3b09625b5cc2ac97ab2516ea Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:51:20 +0000
Subject: [PATCH 06/42] Add advanced-topics/index.mdx changes from
 feature-nginx-combined

---
 .../getting-started/advanced-topics/index.mdx | 37 +++++++++++++++++++
 1 file changed, 37 insertions(+)
 create mode 100644 docs/getting-started/advanced-topics/index.mdx

diff --git a/docs/getting-started/advanced-topics/index.mdx b/docs/getting-started/advanced-topics/index.mdx
new file mode 100644
index 0000000..9c8639a
--- /dev/null
+++ b/docs/getting-started/advanced-topics/index.mdx
@@ -0,0 +1,37 @@
+---
+sidebar_position: 4
+title: "๐Ÿ“š Advanced Topics"
+---
+
+# ๐Ÿ“š Advanced Topics
+
+Explore deeper concepts and advanced configurations of Open WebUI to enhance your setup.
+
+---
+
+## ๐Ÿ”ง Environment Configuration  
+Understand how to set environment variables to customize your Open WebUI setup.  
+[Environment Configuration Guide](./EnvConfig)
+
+---
+
+## ๐Ÿ“Š Logging and Monitoring  
+Learn how to monitor, log, and troubleshoot your system effectively.  
+[Logging and Monitoring Guide](./Logging)
+
+---
+
+## ๐Ÿ› ๏ธ Development Guide  
+Dive into the development process and learn how to contribute to Open WebUI.  
+[Development Guide](./Development)
+
+---
+
+## ๐Ÿ”’ HTTPS Encryption  
+Ensure secure communication by implementing HTTPS encryption in your deployment.  
+[HTTPS Encryption Guide](./HttpsEncryption)
+
+---
+
+Looking for installation instructions? Head over to our [Quick Start Guide](../quick-start).  
+Need to explore core features? Check out [Using OpenWebUI](../using-openwebui).
\ No newline at end of file

From 3bfd4104270c12f98e8c0197fe8092790063515c Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:51:40 +0000
Subject: [PATCH 07/42] Add index.md changes from feature-nginx-combined

---
 docs/getting-started/index.md | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 docs/getting-started/index.md

diff --git a/docs/getting-started/index.md b/docs/getting-started/index.md
new file mode 100644
index 0000000..db76f08
--- /dev/null
+++ b/docs/getting-started/index.md
@@ -0,0 +1,27 @@
+---
+sidebar_position: 3
+title: "๐Ÿš€Getting Started"
+---
+
+# Getting Started with Open WebUI
+
+Welcome to the **Open WebUI Documentation Hub!** Below is a list of essential guides and resources to help you get started, manage, and develop with Open WebUI.
+
+---
+
+## โฑ๏ธ Quick Start  
+Get up and running quickly with our [Quick Start Guide](./quick-start).
+
+---
+
+## ๐Ÿ“š Using OpenWebUI  
+Learn the basics and explore key concepts in our [Using OpenWebUI Guide](./using-openwebui).
+
+---
+
+## ๐Ÿ› ๏ธ Advanced Topics  
+Take a deeper dive into configurations and development tips in our [Advanced Topics Guide](./advanced-topics).
+
+---
+
+Happy exploring! ๐ŸŽ‰ If you have questions, join our [community](https://discord.gg/5rJgQTnV4s) or raise an issue on [GitHub](https://github.com/open-webui/open-webui). 

From 73fc0651d24d09d4721460c8420b36cb2f4b9242 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:51:44 +0000
Subject: [PATCH 08/42] Add quick-start/index.mdx changes from
 feature-nginx-combined

---
 docs/getting-started/quick-start/index.mdx | 155 +++++++++++++++++++++
 1 file changed, 155 insertions(+)
 create mode 100644 docs/getting-started/quick-start/index.mdx

diff --git a/docs/getting-started/quick-start/index.mdx b/docs/getting-started/quick-start/index.mdx
new file mode 100644
index 0000000..1ba1bf1
--- /dev/null
+++ b/docs/getting-started/quick-start/index.mdx
@@ -0,0 +1,155 @@
+---
+sidebar_position: 2
+title: "โฑ๏ธ Quick Start"
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import { TopBanners } from "@site/src/components/TopBanners";
+
+import DockerCompose from './tab-docker/DockerCompose.md';
+import Podman from './tab-docker/Podman.md';
+import ManualDocker from './tab-docker/ManualDocker.md';
+import DockerSwarm from './tab-docker/DockerSwarm.md';
+import DataStorage from './tab-docker/DataStorage.md';
+import DockerUpdating from './tab-docker/DockerUpdating.md';
+import Helm from './tab-kubernetes/Helm.md';
+import Kustomize from './tab-kubernetes/Kustomize.md';
+import Venv from './tab-python/Venv.md';
+import CondaUnix from './tab-python/CondaUnix.md';
+import CondaWindows from './tab-python/CondaWindows.md';
+
+<TopBanners />
+
+## How to Install โฑ๏ธ
+
+:::info **Important Note on User Roles and Privacy:**
+
+- **Admin Creation:** The first account created on Open WebUI gains **Administrator privileges**, controlling user management and system settings.
+- **User Registrations:** Subsequent sign-ups start with **Pending** status, requiring Administrator approval for access.
+- **Privacy and Data Security:** **All your data**, including login details, is **locally stored** on your device. Open WebUI ensures **strict confidentiality** and **no external requests** for enhanced privacy and security.
+
+:::
+
+Choose your preferred installation method below:
+
+- **Docker:** Recommended for most users due to ease of setup and flexibility.
+- **Kubernetes:** Ideal for enterprise deployments that require scaling and orchestration.
+- **Python:** Suitable for low-resource environments or those wanting a manual setup.
+
+<Tabs>
+  <TabItem value="docker" label="Docker">
+
+    <Tabs>
+      <TabItem value="docker-compose" label="Docker Compose">
+        <DockerCompose />
+        <DataStorage />  
+        <DockerUpdating />
+      </TabItem>
+
+      <TabItem value="podman" label="Podman">
+        <Podman />
+      </TabItem>
+
+      <TabItem value="manual-docker" label="Manual Docker">
+        <ManualDocker />
+        <DataStorage />  
+        <DockerUpdating />
+      </TabItem>
+
+      <TabItem value="swarm" label="Docker Swarm">
+        <DockerSwarm />
+      </TabItem>
+
+    </Tabs>
+
+  </TabItem>
+
+  <TabItem value="kubernetes" label="Kubernetes">
+
+    <Tabs>
+
+      <TabItem value="helm" label="Helm">
+        <Helm />
+      </TabItem>
+
+      <TabItem value="kustomize" label="Kustomize">
+        <Kustomize />
+      </TabItem>
+
+    </Tabs>
+
+  </TabItem>
+
+  <TabItem value="python" label="Python">
+
+    <Tabs>
+
+      <TabItem value="venv" label="Venv">
+        <Venv />
+      </TabItem>
+
+      <TabItem value="conda" label="Conda">
+        <h3>Choose Your Platform</h3>
+        <Tabs groupId="platform-conda">
+          <TabItem value="unix-conda" label="Linux/macOS">
+            <CondaUnix />
+          </TabItem>
+
+          <TabItem value="windows-conda" label="Windows">
+            <CondaWindows />
+          </TabItem>
+        </Tabs>
+      </TabItem>
+
+      <TabItem value="development" label="Development">
+        <h3>Development Setup</h3>
+        <p>
+          For developers who want to contribute, check the Development Guide in <a href="../advanced-topics">Advanced Topics</a>.
+        </p>
+      </TabItem>
+    </Tabs>
+  </TabItem>
+
+  <TabItem value="third-party" label="Third Party">
+    <Tabs>
+      <TabItem value="pinokio-computer" label="Pinokio.computer">
+        ### Pinokio.computer Installation
+
+        For installation via Pinokio.computer, please visit their website:
+
+        [https://pinokio.computer/](https://pinokio.computer/)
+
+        Support for this installation method is provided through their website.
+      </TabItem>
+    </Tabs>
+
+    ### Additional Third-Party Integrations
+
+    *(Add information about third-party integrations as they become available.)*
+  </TabItem>
+</Tabs>
+
+## Next Steps
+
+After installing, visit:
+
+- [http://localhost:3000](http://localhost:3000) to access OpenWebUI.
+- or [http://localhost:8080/](http://localhost:8080/) when using a Python deployment.
+
+You are now ready to start **[Using OpenWebUI](../using-openwebui/index.mdx)**!
+
+## Join the Community
+
+Need help? Have questions? Join our community:
+
+- [Open WebUI Discord](https://discord.gg/5rJgQTnV4s)
+- [GitHub Issues](https://github.com/open-webui/open-webui/issues)
+
+Stay updated with the latest features, troubleshooting tips, and announcements!
+
+## Conclusion
+
+Thank you for choosing Open WebUI! We are committed to providing a powerful, privacy-focused interface for your LLM needs. If you encounter any issues, refer to the [Troubleshooting Guide](../../troubleshooting/index.mdx).
+
+Happy exploring! ๐ŸŽ‰

From 67715328798331646a2e2acea078e754c358c604 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:51:49 +0000
Subject: [PATCH 09/42] Add DataStorage.md changes from feature-nginx-combined

---
 .../quick-start/tab-docker/DataStorage.md            | 12 ++++++++++++
 1 file changed, 12 insertions(+)
 create mode 100644 docs/getting-started/quick-start/tab-docker/DataStorage.md

diff --git a/docs/getting-started/quick-start/tab-docker/DataStorage.md b/docs/getting-started/quick-start/tab-docker/DataStorage.md
new file mode 100644
index 0000000..6cc8907
--- /dev/null
+++ b/docs/getting-started/quick-start/tab-docker/DataStorage.md
@@ -0,0 +1,12 @@
+
+## Data Storage and Bind Mounts
+
+This project uses [Docker named volumes](https://docs.docker.com/storage/volumes/) to **persist data**. If needed, replace the volume name with a host directory:
+
+**Example**:
+
+```bash
+-v /path/to/folder:/app/backend/data
+```
+
+Ensure the host folder has the correct permissions.

From 002cb89edab4a51ba16888af9deb21307a27e625 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:51:55 +0000
Subject: [PATCH 10/42] Add DockerCompose.md changes from
 feature-nginx-combined

---
 .../quick-start/tab-docker/DockerCompose.md   | 54 +++++++++++++++++++
 1 file changed, 54 insertions(+)
 create mode 100644 docs/getting-started/quick-start/tab-docker/DockerCompose.md

diff --git a/docs/getting-started/quick-start/tab-docker/DockerCompose.md b/docs/getting-started/quick-start/tab-docker/DockerCompose.md
new file mode 100644
index 0000000..4b867ad
--- /dev/null
+++ b/docs/getting-started/quick-start/tab-docker/DockerCompose.md
@@ -0,0 +1,54 @@
+# Docker Compose Setup
+
+Using Docker Compose simplifies the management of multi-container Docker applications.
+
+If you don't have Docker installed, check out our [Docker installation tutorial](../../../tutorials/integrations/docker-install.md).
+
+Docker Compose requires an additional package, `docker-compose-v2`.
+
+**Warning:** Older Docker Compose tutorials may reference version 1 syntax, which uses commands like `docker-compose build`. Ensure you use version 2 syntax, which uses commands like `docker compose build` (note the space instead of a hyphen).
+
+## Example `docker-compose.yml`
+
+Here is an example configuration file for setting up Open WebUI with Docker Compose:
+
+```yaml
+version: '3'
+services:
+  openwebui:
+    image: ghcr.io/open-webui/open-webui:main
+    ports:
+      - "3000:8080"
+    volumes:
+      - open-webui:/app/backend/data
+volumes:
+  open-webui:
+```
+
+## Starting the Services
+
+To start your services, run the following command:
+
+```bash
+docker compose up -d
+```
+
+## Helper Script
+
+A useful helper script called `run-compose.sh` is included with the codebase. This script assists in choosing which Docker Compose files to include in your deployment, streamlining the setup process.
+
+---
+
+**Note:** For Nvidia GPU support, add the following to your service definition in the `docker-compose.yml` file:
+
+```yaml
+deploy:
+  resources:
+    reservations:
+      devices:
+        - driver: nvidia
+          count: all
+          capabilities: [gpu]
+```
+
+This setup ensures that your application can leverage GPU resources when available.
\ No newline at end of file

From 113212a61bc7984117f1fe79cb82194f7ab887a9 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:00 +0000
Subject: [PATCH 11/42] Add DockerSwarm.md changes from feature-nginx-combined

---
 .../quick-start/tab-docker/DockerSwarm.md     | 141 ++++++++++++++++++
 1 file changed, 141 insertions(+)
 create mode 100644 docs/getting-started/quick-start/tab-docker/DockerSwarm.md

diff --git a/docs/getting-started/quick-start/tab-docker/DockerSwarm.md b/docs/getting-started/quick-start/tab-docker/DockerSwarm.md
new file mode 100644
index 0000000..e0644de
--- /dev/null
+++ b/docs/getting-started/quick-start/tab-docker/DockerSwarm.md
@@ -0,0 +1,141 @@
+## Docker Swarm
+
+This installation method requires knowledge of Docker Swarm, as it utilizes a stack file to deploy 3 separate containers as services in a Docker Swarm.
+
+It includes isolated containers of ChromaDB, Ollama, and OpenWebUI. 
+Additionally, there are pre-filled [Environment Variables](/getting-started/env-configuration) to further illustrate the setup.
+
+Choose the appropriate command based on your hardware setup:
+
+- **Before Starting**:
+
+  Directories for your volumes need to be created on the host, or you can specify a custom location or volume.
+  
+  The current example utilizes an isolated dir `data`, which is within the same dir as the `docker-stack.yaml`.
+  
+      - **For example**:
+  
+        ```bash
+        mkdir -p data/open-webui data/chromadb data/ollama
+        ```
+
+- **With GPU Support**:
+
+    #### Docker-stack.yaml
+    ```yaml
+    version: '3.9'
+
+    services:
+      openWebUI:
+        image: ghcr.io/open-webui/open-webui:main
+        depends_on:
+            - chromadb
+            - ollama
+        volumes:
+          - ./data/open-webui:/app/backend/data
+        environment:
+          DATA_DIR: /app/backend/data 
+          OLLAMA_BASE_URLS: http://ollama:11434
+          CHROMA_HTTP_PORT: 8000
+          CHROMA_HTTP_HOST: chromadb
+          CHROMA_TENANT: default_tenant
+          VECTOR_DB: chroma
+          WEBUI_NAME: Awesome ChatBot
+          CORS_ALLOW_ORIGIN: "*" # This is the current Default, will need to change before going live
+          RAG_EMBEDDING_ENGINE: ollama
+          RAG_EMBEDDING_MODEL: nomic-embed-text-v1.5
+          RAG_EMBEDDING_MODEL_TRUST_REMOTE_CODE: "True"
+        ports:
+          - target: 8080
+            published: 8080
+            mode: overlay
+        deploy:
+          replicas: 1
+          restart_policy:
+            condition: any
+            delay: 5s
+            max_attempts: 3
+
+      chromadb:
+        hostname: chromadb
+        image: chromadb/chroma:0.5.15
+        volumes:
+          - ./data/chromadb:/chroma/chroma
+        environment:
+          - IS_PERSISTENT=TRUE
+          - ALLOW_RESET=TRUE
+          - PERSIST_DIRECTORY=/chroma/chroma
+        ports: 
+          - target: 8000
+            published: 8000
+            mode: overlay
+        deploy:
+          replicas: 1
+          restart_policy:
+            condition: any
+            delay: 5s
+            max_attempts: 3
+        healthcheck: 
+          test: ["CMD-SHELL", "curl localhost:8000/api/v1/heartbeat || exit 1"]
+          interval: 10s
+          retries: 2
+          start_period: 5s
+          timeout: 10s
+
+      ollama:
+        image: ollama/ollama:latest
+        hostname: ollama
+        ports:
+          - target: 11434
+            published: 11434
+            mode: overlay
+        deploy:
+          resources:
+            reservations:
+              generic_resources:
+                - discrete_resource_spec:
+                    kind: "NVIDIA-GPU"
+                    value: 0
+          replicas: 1
+          restart_policy:
+            condition: any
+            delay: 5s
+            max_attempts: 3
+        volumes:
+          - ./data/ollama:/root/.ollama
+
+    ```
+    - **Additional Requirements**:
+
+      1. Ensure CUDA is Enabled, follow your OS and GPU instructions for that.
+      2. Enable Docker GPU support, see [Nvidia Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html " on Nvidia's site.") 
+      3. Follow the [Guide here on configuring Docker Swarm to work with your GPUs](https://gist.github.com/tomlankhorst/33da3c4b9edbde5c83fc1244f010815c#configuring-docker-to-work-with-your-gpus) 
+        - Ensure _GPU Resource_ is enabled in `/etc/nvidia-container-runtime/config.toml` and enable GPU resource advertising by uncommenting the `swarm-resource = "DOCKER_RESOURCE_GPU"`. The docker daemon must be restarted after updating these files on each node.
+
+
+- **With CPU Support**:
+  
+    Modify the Ollama Service within `docker-stack.yaml` and remove the lines for `generic_resources:`
+    ```yaml
+        ollama:
+          image: ollama/ollama:latest
+          hostname: ollama
+          ports:
+            - target: 11434
+              published: 11434
+              mode: overlay
+          deploy:
+            replicas: 1
+            restart_policy:
+              condition: any
+              delay: 5s
+              max_attempts: 3
+          volumes:
+            - ./data/ollama:/root/.ollama
+    ```
+
+- **Deploy Docker Stack**:
+  
+  ```bash
+  docker stack deploy -c docker-stack.yaml -d super-awesome-ai
+  ```

From 3bb3bbdf69cfa174ab56599feacb12d470a02e99 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:06 +0000
Subject: [PATCH 12/42] Add DockerUpdating.md changes from
 feature-nginx-combined

---
 .../quick-start/tab-docker/DockerUpdating.md  | 42 +++++++++++++++++++
 1 file changed, 42 insertions(+)
 create mode 100644 docs/getting-started/quick-start/tab-docker/DockerUpdating.md

diff --git a/docs/getting-started/quick-start/tab-docker/DockerUpdating.md b/docs/getting-started/quick-start/tab-docker/DockerUpdating.md
new file mode 100644
index 0000000..5af6ba1
--- /dev/null
+++ b/docs/getting-started/quick-start/tab-docker/DockerUpdating.md
@@ -0,0 +1,42 @@
+
+
+# Docker Compose Setup
+
+Using Docker Compose simplifies the management of multi-container Docker applications.
+
+## Example `docker-compose.yml`
+
+```yaml
+version: '3'
+services:
+  openwebui:
+    image: ghcr.io/open-webui/open-webui:main
+    ports:
+      - "3000:8080"
+    volumes:
+      - open-webui:/app/backend/data
+volumes:
+  open-webui:
+```
+
+## Starting the Services
+
+To start your services, run:
+
+```bash
+docker compose up -d
+```
+
+---
+
+**Note:** For Nvidia GPU support, add the following to your service definition:
+
+```yaml
+deploy:
+  resources:
+    reservations:
+      devices:
+        - driver: nvidia
+          count: all
+          capabilities: [gpu]
+```

From e45c34d9a76b93070b9c4b66074d59b0654c0160 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:10 +0000
Subject: [PATCH 13/42] Add ManualDocker.md changes from feature-nginx-combined

---
 .../quick-start/tab-docker/ManualDocker.md    | 24 +++++++++++++++++++
 1 file changed, 24 insertions(+)
 create mode 100644 docs/getting-started/quick-start/tab-docker/ManualDocker.md

diff --git a/docs/getting-started/quick-start/tab-docker/ManualDocker.md b/docs/getting-started/quick-start/tab-docker/ManualDocker.md
new file mode 100644
index 0000000..d6702fc
--- /dev/null
+++ b/docs/getting-started/quick-start/tab-docker/ManualDocker.md
@@ -0,0 +1,24 @@
+
+# Manual Docker Setup
+
+If you prefer to set up Docker manually, follow these steps.
+
+## Step 1: Pull the Open WebUI Image
+
+```bash
+docker pull ghcr.io/open-webui/open-webui:main
+```
+
+## Step 2: Run the Container
+
+```bash
+docker run -d -p 3000:8080 -v open-webui:/app/backend/data --name open-webui ghcr.io/open-webui/open-webui:main
+```
+
+**Note:** For Nvidia GPU support, add `--gpus all` to the `docker run` command.
+
+## Access the WebUI
+
+After the container is running, access Open WebUI at:
+
+[http://localhost:3000](http://localhost:3000)

From 3f4999a2a3e513b8b5cfb583cbc8445e86b68e50 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:14 +0000
Subject: [PATCH 14/42] Add Podman.md changes from feature-nginx-combined

---
 .../quick-start/tab-docker/Podman.md          | 28 +++++++++++++++++++
 1 file changed, 28 insertions(+)
 create mode 100644 docs/getting-started/quick-start/tab-docker/Podman.md

diff --git a/docs/getting-started/quick-start/tab-docker/Podman.md b/docs/getting-started/quick-start/tab-docker/Podman.md
new file mode 100644
index 0000000..9cf11fd
--- /dev/null
+++ b/docs/getting-started/quick-start/tab-docker/Podman.md
@@ -0,0 +1,28 @@
+
+# Using Podman
+
+Podman is a daemonless container engine for developing, managing, and running OCI Containers.
+
+## Basic Commands
+
+- **Run a Container:**
+
+  ```bash
+  podman run -d --name openwebui -p 3000:8080 ghcr.io/open-webui/open-webui:main
+  ```
+
+- **List Running Containers:**
+
+  ```bash
+  podman ps
+  ```
+
+## Networking with Podman
+
+If networking issues arise, you may need to adjust your network settings:
+
+```bash
+--network=slirp4netns:allow_host_loopback=true
+```
+
+Refer to the Podman [documentation](https://podman.io/) for advanced configurations.

From a1aa82f9f0957555192e0e65afa4bf3548572f0b Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:19 +0000
Subject: [PATCH 15/42] Add Helm.md changes from feature-nginx-combined

---
 .../quick-start/tab-kubernetes/Helm.md        | 34 +++++++++++++++++++
 1 file changed, 34 insertions(+)
 create mode 100644 docs/getting-started/quick-start/tab-kubernetes/Helm.md

diff --git a/docs/getting-started/quick-start/tab-kubernetes/Helm.md b/docs/getting-started/quick-start/tab-kubernetes/Helm.md
new file mode 100644
index 0000000..61a0e98
--- /dev/null
+++ b/docs/getting-started/quick-start/tab-kubernetes/Helm.md
@@ -0,0 +1,34 @@
+
+# Helm Setup for Kubernetes
+
+Helm helps you manage Kubernetes applications.
+
+## Prerequisites
+
+- Kubernetes cluster is set up.
+- Helm is installed.
+
+## Steps
+
+1. **Add Open WebUI Helm Repository:**
+
+   ```bash
+   helm repo add open-webui https://open-webui.github.io/helm-charts
+   helm repo update
+   ```
+
+2. **Install Open WebUI Chart:**
+
+   ```bash
+   helm install openwebui open-webui/open-webui
+   ```
+
+3. **Verify Installation:**
+
+   ```bash
+   kubectl get pods
+   ```
+
+## Access the WebUI
+
+Set up port forwarding or load balancing to access Open WebUI from outside the cluster.

From ced3d97f78e2b73d486a1bef11ba3a31b57e2bcd Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:25 +0000
Subject: [PATCH 16/42] Add Kustomize.md changes from feature-nginx-combined

---
 .../quick-start/tab-kubernetes/Kustomize.md   | 35 +++++++++++++++++++
 1 file changed, 35 insertions(+)
 create mode 100644 docs/getting-started/quick-start/tab-kubernetes/Kustomize.md

diff --git a/docs/getting-started/quick-start/tab-kubernetes/Kustomize.md b/docs/getting-started/quick-start/tab-kubernetes/Kustomize.md
new file mode 100644
index 0000000..eeb4e72
--- /dev/null
+++ b/docs/getting-started/quick-start/tab-kubernetes/Kustomize.md
@@ -0,0 +1,35 @@
+
+
+# Kustomize Setup for Kubernetes
+
+Kustomize allows you to customize Kubernetes YAML configurations.
+
+## Prerequisites
+
+- Kubernetes cluster is set up.
+- Kustomize is installed.
+
+## Steps
+
+1. **Clone the Open WebUI Manifests:**
+
+   ```bash
+   git clone https://github.com/open-webui/k8s-manifests.git
+   cd k8s-manifests
+   ```
+
+2. **Apply the Manifests:**
+
+   ```bash
+   kubectl apply -k .
+   ```
+
+3. **Verify Installation:**
+
+   ```bash
+   kubectl get pods
+   ```
+
+## Access the WebUI
+
+Set up port forwarding or load balancing to access Open WebUI from outside the cluster.

From 373cf0ab1350e461d8224e0065dcef21e02b3b9f Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:34 +0000
Subject: [PATCH 17/42] Add CondaUnix.md changes from feature-nginx-combined

---
 .../quick-start/tab-python/CondaUnix.md       | 27 +++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 docs/getting-started/quick-start/tab-python/CondaUnix.md

diff --git a/docs/getting-started/quick-start/tab-python/CondaUnix.md b/docs/getting-started/quick-start/tab-python/CondaUnix.md
new file mode 100644
index 0000000..556f9a3
--- /dev/null
+++ b/docs/getting-started/quick-start/tab-python/CondaUnix.md
@@ -0,0 +1,27 @@
+
+
+# Install with Conda
+
+1. **Create a Conda Environment:**
+
+   ```bash
+   conda create -n open-webui python=3.9
+   ```
+
+2. **Activate the Environment:**
+
+   ```bash
+   conda activate open-webui
+   ```
+
+3. **Install Open WebUI:**
+
+   ```bash
+   pip install open-webui
+   ```
+
+4. **Start the Server:**
+
+   ```bash
+   open-webui serve
+   ```

From c5e7df03aaddf9e7929926ba66d1e888c9a695bb Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:39 +0000
Subject: [PATCH 18/42] Add CondaWindows.md changes from feature-nginx-combined

---
 .../quick-start/tab-python/CondaWindows.md    | 27 +++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 docs/getting-started/quick-start/tab-python/CondaWindows.md

diff --git a/docs/getting-started/quick-start/tab-python/CondaWindows.md b/docs/getting-started/quick-start/tab-python/CondaWindows.md
new file mode 100644
index 0000000..556f9a3
--- /dev/null
+++ b/docs/getting-started/quick-start/tab-python/CondaWindows.md
@@ -0,0 +1,27 @@
+
+
+# Install with Conda
+
+1. **Create a Conda Environment:**
+
+   ```bash
+   conda create -n open-webui python=3.9
+   ```
+
+2. **Activate the Environment:**
+
+   ```bash
+   conda activate open-webui
+   ```
+
+3. **Install Open WebUI:**
+
+   ```bash
+   pip install open-webui
+   ```
+
+4. **Start the Server:**
+
+   ```bash
+   open-webui serve
+   ```

From e44803147c532edd0c5049e5c2c4aa8d28c2fabb Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:44 +0000
Subject: [PATCH 19/42] Add Venv.md changes from feature-nginx-combined

---
 .../quick-start/tab-python/Venv.md            | 38 +++++++++++++++++++
 1 file changed, 38 insertions(+)
 create mode 100644 docs/getting-started/quick-start/tab-python/Venv.md

diff --git a/docs/getting-started/quick-start/tab-python/Venv.md b/docs/getting-started/quick-start/tab-python/Venv.md
new file mode 100644
index 0000000..0acaf1f
--- /dev/null
+++ b/docs/getting-started/quick-start/tab-python/Venv.md
@@ -0,0 +1,38 @@
+
+# Using Virtual Environments
+
+Create isolated Python environments using `venv`.
+
+## Steps
+
+1. **Create a Virtual Environment:**
+
+   ```bash
+   python3 -m venv venv
+   ```
+
+2. **Activate the Virtual Environment:**
+
+   - On Linux/macOS:
+
+     ```bash
+     source venv/bin/activate
+     ```
+
+   - On Windows:
+
+     ```bash
+     venv\Scripts\activate
+     ```
+
+3. **Install Open WebUI:**
+
+   ```bash
+   pip install open-webui
+   ```
+
+4. **Start the Server:**
+
+   ```bash
+   open-webui serve
+   ```

From 9a213b46b8478b23c8bbb2bb1af4615c6013d1d1 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:48 +0000
Subject: [PATCH 20/42] Add OllamaModels.mdx changes from
 feature-nginx-combined

---
 .../using-openwebui/OllamaModels.mdx          | 81 +++++++++++++++++++
 1 file changed, 81 insertions(+)
 create mode 100644 docs/getting-started/using-openwebui/OllamaModels.mdx

diff --git a/docs/getting-started/using-openwebui/OllamaModels.mdx b/docs/getting-started/using-openwebui/OllamaModels.mdx
new file mode 100644
index 0000000..34f0244
--- /dev/null
+++ b/docs/getting-started/using-openwebui/OllamaModels.mdx
@@ -0,0 +1,81 @@
+---
+title: "🤖 Ollama Models"
+---
+
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Ollama Models
+
+Explore how to download, load, and use models with Ollama, both via **Docker** and **Remote** setups.
+
+---
+
+<Tabs groupId="ollama-setup">
+  <TabItem value="docker-ollama" label="Ollama Inside Docker">
+    ## 🐳 Ollama Inside Docker
+
+    If **Ollama is deployed inside Docker** (e.g., using Docker Compose or Kubernetes), the service will be available:
+
+    - **Inside the container**: `http://127.0.0.1:11434`
+    - **From the host**: `http://localhost:11435` (if exposed via host network)
+
+    ### Step 1: Check Available Models
+    ```bash
+    docker exec -it openwebui curl http://ollama:11434/v1/models
+    ```
+
+    From the host (if exposed):
+    ```bash
+    curl http://localhost:11435/v1/models
+    ```
+
+    ### Step 2: Download Llama 3.2
+    ```bash
+    docker exec -it ollama ollama pull llama3.2
+    ```
+
+    You can also download a higher-quality version (8-bit) from Hugging Face:
+    ```bash
+    docker exec -it ollama ollama pull hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q8_0
+    ```
+
+  </TabItem>
+
+  <TabItem value="byo-ollama" label="BYO Ollama (External Ollama)">
+    ## 🛠️ Bring Your Own Ollama (BYO Ollama)
+
+    If Ollama is running on the **host machine** or another server on your network, follow these steps.
+
+    ### Step 1: Check Available Models
+    Local:
+    ```bash
+    curl http://localhost:11434/v1/models
+    ```
+
+    Remote:
+    ```bash
+    curl http://<remote-ip>:11434/v1/models
+    ```
+
+    ### Step 2: Set the OLLAMA_HOST
+    ```bash
+    export OLLAMA_HOST=<remote-ip>:11434
+    ```
+
+    ### Step 3: Download Llama 3.2
+    ```bash
+    ollama pull llama3.2
+    ```
+
+    Or download the 8-bit version from Hugging Face:
+    ```bash
+    ollama pull hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q8_0
+    ```
+
+  </TabItem>
+</Tabs>
+
+---
+
+You now have everything you need to download and run models with **Ollama**. Happy exploring!

From 5fd20acbac9aaa0d850ca750d00fe69ea23616ee Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:54 +0000
Subject: [PATCH 21/42] Add Terminology.mdx changes from feature-nginx-combined

---
 .../using-openwebui/Terminology.mdx           | 26 +++++++++++++++++++
 1 file changed, 26 insertions(+)
 create mode 100644 docs/getting-started/using-openwebui/Terminology.mdx

diff --git a/docs/getting-started/using-openwebui/Terminology.mdx b/docs/getting-started/using-openwebui/Terminology.mdx
new file mode 100644
index 0000000..9ae27de
--- /dev/null
+++ b/docs/getting-started/using-openwebui/Terminology.mdx
@@ -0,0 +1,26 @@
+---
+title: "📖 OpenWebUI Terminology"
+---
+
+# 📖 OpenWebUI Terminology
+
+Enhance your understanding of OpenWebUI with key concepts and components to improve your usage and configuration.
+
+---
+
+## Explore the Workspace  
+Begin by exploring the [Workspace](../../tutorials/features/workspace) to discover essential concepts such as Modelfiles, Knowledge, Prompts, Tools, and Functions.
+
+---
+
+## Interact with the Playground  
+Visit the Playground to directly engage with a Large Language Model. Here, you can experiment with different `System Prompts` to modify the model's behavior and persona.
+
+---
+
+## Personalize in Settings  
+Access the Settings to personalize your experience. Customize features like Memory, adjust Voice settings for both TTS (Text-to-Speech) and STT (Speech-to-Text), and toggle between Dark/Light mode for optimal viewing.
+
+---
+
+This terminology guide will help you navigate and configure OpenWebUI effectively!
\ No newline at end of file

From 7d71c2f5ca533d5fc15e78371d00db1824898f43 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:52:58 +0000
Subject: [PATCH 22/42] Add using-openwebui/index.mdx changes from
 feature-nginx-combined

---
 .../getting-started/using-openwebui/index.mdx | 32 +++++++++++++++++++
 1 file changed, 32 insertions(+)
 create mode 100644 docs/getting-started/using-openwebui/index.mdx

diff --git a/docs/getting-started/using-openwebui/index.mdx b/docs/getting-started/using-openwebui/index.mdx
new file mode 100644
index 0000000..7940aec
--- /dev/null
+++ b/docs/getting-started/using-openwebui/index.mdx
@@ -0,0 +1,32 @@
+---
+sidebar_position: 3
+title: "🧑‍💻 Using OpenWebUI"
+---
+
+# Using OpenWebUI
+
+Explore the essential concepts and features of Open WebUI, including models, knowledge, prompts, pipes, actions, and more.
+
+---
+
+## 📥 Ollama Models  
+Learn how to download, load, and use models effectively.  
+[Check out Ollama Models](./OllamaModels.mdx)
+
+---
+
+## 📚 Terminology  
+Understand key components: models, prompts, knowledge, functions, pipes, and actions.  
+[Read the Terminology Guide](./Terminology.mdx)
+
+---
+
+## 📖 Community Tutorials  
+If you like the documentation you are reading right now, then check out this tutorial on [Configuring RAG with OpenWebUI Documentation](../../tutorials/tips/rag-tutorial.md).
+Then go on to explore other community-submitted tutorials to enhance your OpenWebUI experience. 
+[Explore Community Tutorials](/category/-tutorials)  
+
+
+---
+
+Stay tuned for more updates as we continue to expand these sections!
\ No newline at end of file

From a5a3e77cd25188110c936a99fd5de4862140659b Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:53:02 +0000
Subject: [PATCH 23/42] Add OllamaDocker.md changes from feature-nginx-combined

---
 .../tab-ollama/OllamaDocker.md                | 43 +++++++++++++++++++
 1 file changed, 43 insertions(+)
 create mode 100644 docs/getting-started/using-openwebui/tab-ollama/OllamaDocker.md

diff --git a/docs/getting-started/using-openwebui/tab-ollama/OllamaDocker.md b/docs/getting-started/using-openwebui/tab-ollama/OllamaDocker.md
new file mode 100644
index 0000000..093ae6f
--- /dev/null
+++ b/docs/getting-started/using-openwebui/tab-ollama/OllamaDocker.md
@@ -0,0 +1,43 @@
+
+### 🐳 Ollama Inside Docker
+
+If **Ollama is deployed inside Docker** (e.g., using Docker Compose or Kubernetes), the service will be available:
+
+- **Inside the container**: `http://127.0.0.1:11434`
+- **From the host**: `http://localhost:11435` (if exposed via host network)
+
+#### Step 1: Check Available Models
+
+- Inside the container:
+
+  ```bash
+  docker exec -it openwebui curl http://ollama:11434/v1/models
+  ```
+
+- From the host (if exposed):
+
+  ```bash
+  curl http://localhost:11435/v1/models
+  ```
+
+This command lists all available models and confirms that Ollama is running.
+
+#### Step 2: Download Llama 3.2
+
+Run the following command:
+
+```bash
+docker exec -it ollama ollama pull llama3.2
+```
+
+**Tip:** You can download other models from Hugging Face by specifying the appropriate URL. For example, to download a higher-quality **8-bit version of Llama 3.2**:
+
+```bash
+ollama pull hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q8_0
+```
+
+#### Step 3: Access the WebUI
+
+Once everything is set up, access the WebUI at:
+
+[http://localhost:3000](http://localhost:3000)

From cc2a2f850e92291bacfee0c615a96433c143f32a Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 19:53:06 +0000
Subject: [PATCH 24/42] Add OllamaRemote.md changes from feature-nginx-combined

---
 .../tab-ollama/OllamaRemote.md                | 50 +++++++++++++++++++
 1 file changed, 50 insertions(+)
 create mode 100644 docs/getting-started/using-openwebui/tab-ollama/OllamaRemote.md

diff --git a/docs/getting-started/using-openwebui/tab-ollama/OllamaRemote.md b/docs/getting-started/using-openwebui/tab-ollama/OllamaRemote.md
new file mode 100644
index 0000000..c99af07
--- /dev/null
+++ b/docs/getting-started/using-openwebui/tab-ollama/OllamaRemote.md
@@ -0,0 +1,50 @@
+
+### 🛠️ Bring Your Own Ollama (BYO Ollama)
+
+If Ollama is running on the **host machine** or another server on your network, follow these steps.
+
+#### Step 1: Check Available Models
+
+- If Ollama is **local**, run:
+
+  ```bash
+  curl http://localhost:11434/v1/models
+  ```
+
+- If Ollama is **remote**, use:
+
+  ```bash
+  curl http://<remote-ip>:11434/v1/models
+  ```
+
+This confirms that Ollama is available and lists the available models.
+
+#### Step 2: Set the OLLAMA_HOST
+
+If Ollama is running **remotely** or on the host, set the following environment variable:
+
+```bash
+export OLLAMA_HOST=<remote-ip>:11434
+```
+
+This ensures the `ollama` CLI targets the remote Ollama instance (Open WebUI itself is configured via `OLLAMA_BASE_URL`).
+
+#### Step 3: Download Llama 3.2
+
+From your local or remote machine, run:
+
+```bash
+ollama pull llama3.2
+```
+
+**Tip:** Use this command to download the 8-bit version from Hugging Face:
+
+```bash
+ollama pull hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q8_0
+```
+
+#### Step 4: Access the WebUI
+
+You can now access the WebUI at:
+
+[http://localhost:3000](http://localhost:3000)

From 1fa2abc34d796c25b3b32fc021aed3347985aee1 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 20:19:52 +0000
Subject: [PATCH 25/42] Add docker-install.md to fix broken link in
 DockerCompose.md

---
 docs/tutorials/integrations/docker-install.md | 59 +++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 docs/tutorials/integrations/docker-install.md

diff --git a/docs/tutorials/integrations/docker-install.md b/docs/tutorials/integrations/docker-install.md
new file mode 100644
index 0000000..d459d6c
--- /dev/null
+++ b/docs/tutorials/integrations/docker-install.md
@@ -0,0 +1,59 @@
+
+# Installing Docker
+
+## For Windows and Mac Users
+
+- Download Docker Desktop from [Docker's official website](https://www.docker.com/products/docker-desktop).  
+- Follow the installation instructions on the website.  
+- After installation, **open Docker Desktop** to ensure it's running properly.
+
+---
+
+## For Ubuntu Users
+
+1. **Open your terminal.**
+
+2. **Set up Docker’s apt repository:**
+   ```bash
+   sudo apt-get update
+   sudo apt-get install ca-certificates curl
+   sudo install -m 0755 -d /etc/apt/keyrings
+   sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+   sudo chmod a+r /etc/apt/keyrings/docker.asc
+   echo \
+     "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+     $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+     sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+   ```
+
+:::note
+If using an **Ubuntu derivative** (e.g., Linux Mint), use `UBUNTU_CODENAME` instead of `VERSION_CODENAME`.
+:::
+
+3. **Install Docker Engine:**
+   ```bash
+   sudo apt-get update
+   sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+   ```
+
+4. **Verify Docker Installation:**
+   ```bash
+   sudo docker run hello-world
+   ```
+
+---
+
+## For Other Linux Distributions
+
+For other Linux distributions, refer to the [official Docker documentation](https://docs.docker.com/engine/install/).
+
+---
+
+## Install and Verify Ollama
+
+1. **Download Ollama** from [https://ollama.com/](https://ollama.com/).
+
+2. **Verify Ollama Installation:**
+   - Open a browser and navigate to:
+     [http://127.0.0.1:11434/](http://127.0.0.1:11434/).
+   - Note: The port may vary based on your installation.

From 71ce18ae2114663cb5fbd96a4cd8cfc0b5974cbd Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 20:20:24 +0000
Subject: [PATCH 26/42] Add docker-install.md from feature-nginx-combined to
 resolve link issue

---
 docs/tutorials/integrations/docker-install.md | 59 +++++++++++++++++++
 1 file changed, 59 insertions(+)
 create mode 100644 docs/tutorials/integrations/docker-install.md

diff --git a/docs/tutorials/integrations/docker-install.md b/docs/tutorials/integrations/docker-install.md
new file mode 100644
index 0000000..d459d6c
--- /dev/null
+++ b/docs/tutorials/integrations/docker-install.md
@@ -0,0 +1,59 @@
+
+# Installing Docker
+
+## For Windows and Mac Users
+
+- Download Docker Desktop from [Docker's official website](https://www.docker.com/products/docker-desktop).  
+- Follow the installation instructions on the website.  
+- After installation, **open Docker Desktop** to ensure it's running properly.
+
+---
+
+## For Ubuntu Users
+
+1. **Open your terminal.**
+
+2. **Set up Docker’s apt repository:**
+   ```bash
+   sudo apt-get update
+   sudo apt-get install ca-certificates curl
+   sudo install -m 0755 -d /etc/apt/keyrings
+   sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
+   sudo chmod a+r /etc/apt/keyrings/docker.asc
+   echo \
+     "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
+     $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
+     sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
+   ```
+
+:::note
+If using an **Ubuntu derivative** (e.g., Linux Mint), use `UBUNTU_CODENAME` instead of `VERSION_CODENAME`.
+:::
+
+3. **Install Docker Engine:**
+   ```bash
+   sudo apt-get update
+   sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
+   ```
+
+4. **Verify Docker Installation:**
+   ```bash
+   sudo docker run hello-world
+   ```
+
+---
+
+## For Other Linux Distributions
+
+For other Linux distributions, refer to the [official Docker documentation](https://docs.docker.com/engine/install/).
+
+---
+
+## Install and Verify Ollama
+
+1. **Download Ollama** from [https://ollama.com/](https://ollama.com/).
+
+2. **Verify Ollama Installation:**
+   - Open a browser and navigate to:
+     [http://127.0.0.1:11434/](http://127.0.0.1:11434/).
+   - Note: The port may vary based on your installation.

From 81d57993bcc1121c924baba90e0a06471d776194 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 20:20:51 +0000
Subject: [PATCH 27/42] Remove duplicate index.mdx file to resolve route
 conflict

---
 docs/getting-started/index.mdx | 585 ---------------------------------
 1 file changed, 585 deletions(-)
 delete mode 100644 docs/getting-started/index.mdx

diff --git a/docs/getting-started/index.mdx b/docs/getting-started/index.mdx
deleted file mode 100644
index 4bf763b..0000000
--- a/docs/getting-started/index.mdx
+++ /dev/null
@@ -1,585 +0,0 @@
----
-sidebar_position: 3
-title: "๐Ÿš€ Getting Started"
----
-
-import { TopBanners } from "@site/src/components/TopBanners";
-
-<TopBanners />
-
-## How to Install ๐Ÿš€
-
-:::info **Important Note on User Roles and Privacy:**
-
-- **Admin Creation:** The first account created on Open WebUI gains **Administrator privileges**, controlling user management and system settings.
-
-- **User Registrations:** Subsequent sign-ups start with **Pending** status, requiring Administrator approval for access.
-
-- **Privacy and Data Security:** **All your data**, including login details, is **locally stored** on your device. Open WebUI ensures **strict confidentiality** and **no external requests** for enhanced privacy and security.
-
-:::
-
-## Quick Start with Docker ๐Ÿณ (Recommended)
-
-:::tip
-
-#### Disabling Login for Single User
-
-If you want to disable login for a single-user setup, set [`WEBUI_AUTH`](/getting-started/env-configuration) to `False`. This will bypass the login page.
-
-:::warning
-You cannot switch between single-user mode and multi-account mode after this change.
-:::
-
-:::danger
-When using Docker to install Open WebUI, make sure to include the `-v open-webui:/app/backend/data` in your Docker command. This step is crucial as it ensures your database is properly mounted and prevents any loss of data.
-:::
-
-<details>
-<summary>Before You Begin</summary>
-#### Installing Docker
-
-#### For Windows and Mac Users:
-
-- Download Docker Desktop from [Docker's official website](https://www.docker.com/products/docker-desktop).
-- Follow the installation instructions provided on the website. After installation, open Docker Desktop to ensure it's running properly.
-
-#### For Ubuntu Users:
-
-1. **Open your terminal.**
-
-2. **Set up Docker's apt repository:**
-
-   - Update your package index:
-     ```bash
-     sudo apt-get update
-     ```
-   - Install packages to allow apt to use a repository over HTTPS:
-     ```bash
-     sudo apt-get install ca-certificates curl
-     ```
-   - Create a directory for the Docker apt keyring:
-     ```bash
-     sudo install -m 0755 -d /etc/apt/keyrings
-     ```
-   - Add Docker's official GPG key:
-     ```bash
-     sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
-     sudo chmod a+r /etc/apt/keyrings/docker.asc
-     ```
-   - Add the Docker repository to Apt sources:
-     ```bash
-     echo \
-       "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
-       $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \
-       sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
-     ```
-
-:::note
-If you're using an Ubuntu derivative distro, such as Linux Mint, you might need to use `UBUNTU_CODENAME` instead of `VERSION_CODENAME`.
-:::
-
-3. **Install Docker Engine:**
-
-   - Update your package index again:
-     ```bash
-     sudo apt-get update
-     ```
-   - Install Docker Engine, CLI, and containerd:
-     ```bash
-     sudo apt-get install docker-ce docker-ce-cli containerd.io docker-compose-plugin
-     ```
-
-4. **Verify the Docker installation:**
-   - Use the following command to run a test image:
-     ```bash
-     sudo docker run hello-world
-     ```
-     This command downloads a test image and runs it in a container. If successful, it prints an informational message confirming that Docker is installed and working correctly.
-
-#### Other Linux Distributions:
-
-- For other Linux distributions, please refer to the [official Docker documentation](https://docs.docker.com/engine/install/) for installation instructions specific to your distro.
-
-#### Ensure You Have the Latest Version of Ollama:
-
-- Download the latest version from [https://ollama.com/](https://ollama.com/).
-
-#### Verify Ollama Installation:
-
-- After installing Ollama, verify its functionality by accessing [http://127.0.0.1:11434/](http://127.0.0.1:11434/) in your web browser. Note that the port number might be different based on your installation.
-
-</details>
-
-<details> 
-<summary>Data Storage in Docker</summary>
-
-This tutorial uses [Docker named volumes](https://docs.docker.com/storage/volumes/) to guarantee the **persistance of your data**. This might make it difficult to know exactly where your data is stored in your machine if this is your first time using Docker. Alternatively, you can replace the volume name with a absolute path on your host machine to link your container data to a folder in your computer using a [bind mount](https://docs.docker.com/storage/bind-mounts/).
-
-**Example**: change `-v open-webui:/app/backend/data` to `-v /path/to/folder:/app/backend/data`
-
-Ensure you have the proper access rights to the folder on your host machine.
-
-Visit the [Docker documentation](https://docs.docker.com/storage/) to understand more about volumes and bind mounts.
-</details>
-
-### Installation with Default Configuration
-
-- **If Ollama is on your computer**, use this command:
-
-  ```bash
-  docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main
-  ```
-
-- **If Ollama is on a Different Server**, use this command:
-
-  To connect to Ollama on another server, change the `OLLAMA_BASE_URL` to the server's URL:
-
-  ```bash
-  docker run -d -p 3000:8080 -e OLLAMA_BASE_URL=https://example.com -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main
-  ```
-
-- **To run Open WebUI with Nvidia GPU support**, use this command:
-
-  ```bash
-  docker run -d -p 3000:8080 --gpus all --add-host=host.docker.internal:host-gateway -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:cuda
-  ```
-
-  This will result in a faster Bundled Ollama, faster Speech-To-Text and faster RAG embeddings if using SentenceTransformers.
-
-### Installation for OpenAI API Usage Only
-
-- **If you're only using OpenAI API**, use this command:
-
-  ```bash
-  docker run -d -p 3000:8080 -e OPENAI_API_KEY=your_secret_key -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main
-  ```
-
-### Installing Open WebUI with Bundled Ollama Support
-
-This installation method uses a single container image that bundles Open WebUI with Ollama, allowing for a streamlined setup via a single command. Choose the appropriate command based on your hardware setup:
-
-- **With GPU Support**:
-  Utilize GPU resources by running the following command:
-
-  ```bash
-  docker run -d -p 3000:8080 --gpus=all -v ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:ollama
-  ```
-
-- **For CPU Only**:
-  If you're not using a GPU, use this command instead:
-
-  ```bash
-  docker run -d -p 3000:8080 -v ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:ollama
-  ```
-
-Both commands facilitate a built-in, hassle-free installation of both Open WebUI and Ollama, ensuring that you can get everything up and running swiftly.
-
-After installation, you can access Open WebUI at [http://localhost:3000](http://localhost:3000). Enjoy! ๐Ÿ˜„
-
-## Manual Installation
-
-### Installation with `pip` (Beta)
-
-For users who prefer to use Python's package manager `pip`, Open WebUI offers a installation method. Python 3.11 is required for this method.
-
-1. **Install Open WebUI**:
-   Open your terminal and run the following command:
-
-   ```bash
-   pip install open-webui
-   ```
-
-2. **Start Open WebUI**:
-   Once installed, start the server using:
-
-   ```bash
-   open-webui serve
-   ```
-
-This method installs all necessary dependencies and starts Open WebUI, allowing for a simple and efficient setup. After installation, you can access Open WebUI at [http://localhost:8080](http://localhost:8080). Enjoy! ๐Ÿ˜„
-
-### Install from Open WebUI GitHub Repo
-
-:::info
-Open WebUI consists of two primary components: the frontend and the backend (which serves as a reverse proxy, handling static frontend files, and additional features). Both need to be running concurrently for the development environment.
-:::
-
-#### Requirements ๐Ÿ“ฆ
-
-- ๐Ÿฐ [Node.js](https://nodejs.org/en) >= 20.10
-- ๐Ÿ [Python](https://python.org) >= 3.11
-
-#### Build and Install ๐Ÿ› ๏ธ
-
-Run the following commands to install:
-
-For Linux/macOS:
-```sh
-git clone https://github.com/open-webui/open-webui.git
-cd open-webui/
-
-# Copying required .env file
-cp -RPp .env.example .env
-
-# Building Frontend Using Node
-npm install
-npm run build
-
-cd ./backend
-
-# Optional: To install using Conda as your development environment, follow these instructions:
-# Create and activate a Conda environment
-conda create --name open-webui-env python=3.11
-conda activate open-webui-env
-
-# Install dependencies
-pip install -r requirements.txt -U
-
-# Start the application
-bash start.sh
-```
-
-For Windows:
-```powershell
-git clone https://github.com/open-webui/open-webui.git
-cd open-webui
-
-copy .env.example .env
-
-npm install
-npm run build
-
-cd .\backend
-
-# Optional: To install using Conda as your development environment, follow these instructions:
-# Create and activate a Conda environment
-conda create --name open-webui-env python=3.11
-conda activate open-webui-env
-
-pip install -r requirements.txt -U
-
-start.bat
-```
-
-You should have Open WebUI up and running at http://localhost:8080/. Enjoy! ๐Ÿ˜„
-
-## Docker Compose
-
-#### Using Docker Compose
-
-- If you don't have Ollama yet, use Docker Compose for easy installation. Run this command:
-
-  ```bash
-  docker compose up -d --build
-  ```
-
-- **For Nvidia GPU Support:** Use an additional Docker Compose file:
-
-  ```bash
-  docker compose -f docker-compose.yaml -f docker-compose.gpu.yaml up -d --build
-  ```
-
-- **For AMD GPU Support:** Some AMD GPUs require setting an environment variable for proper functionality:
-
-  ```bash
-  HSA_OVERRIDE_GFX_VERSION=11.0.0 docker compose -f docker-compose.yaml -f docker-compose.amdgpu.yaml up -d --build
-  ```
-
-  <details>
-  <summary>AMD GPU Support with HSA_OVERRIDE_GFX_VERSION</summary>
-
-  For AMD GPU users encountering compatibility issues, setting the `HSA_OVERRIDE_GFX_VERSION` environment variable is crucial. This variable instructs the ROCm platform to emulate a specific GPU architecture, ensuring compatibility with various AMD GPUs not officially supported. Depending on your GPU model, adjust the `HSA_OVERRIDE_GFX_VERSION` as follows:
-
-  - **For RDNA1 & RDNA2 GPUs** (e.g., RX 6700, RX 680M): Use `HSA_OVERRIDE_GFX_VERSION=10.3.0`.
-  - **For RDNA3 GPUs**: Set `HSA_OVERRIDE_GFX_VERSION=11.0.0`.
-  - **For older GCN (Graphics Core Next) GPUs**: The version to use varies. GCN 4th gen and earlier might require different settings, such as `ROC_ENABLE_PRE_VEGA=1` for GCN4, or `HSA_OVERRIDE_GFX_VERSION=9.0.0` for Vega (GCN5.0) emulation.
-
-  Ensure to replace `<version>` with the appropriate version number based on your GPU model and the guidelines above. For a detailed list of compatible versions and more in-depth instructions, refer to the [ROCm documentation](https://rocm.docs.amd.com) and the [openSUSE Wiki on AMD GPGPU](https://en.opensuse.org/SDB:AMD_GPGPU).
-
-  Example command for RDNA1 & RDNA2 GPUs:
-
-  ```bash
-  HSA_OVERRIDE_GFX_VERSION=10.3.0 docker compose -f docker-compose.yaml -f docker-compose.amdgpu.yaml up -d --build
-  ```
-
-  </details>
-
-- **To Expose Ollama API:** Use another Docker Compose file:
-
-  ```bash
-  docker compose -f docker-compose.yaml -f docker-compose.api.yaml up -d --build
-  ```
-
-#### Using `run-compose.sh` Script (Linux or Docker-Enabled WSL2 on Windows)
-
-- Give execute permission to the script:
-
-  ```bash
-  chmod +x run-compose.sh
-  ```
-
-- For CPU-only container:
-
-  ```bash
-  ./run-compose.sh
-  ```
-
-- For GPU support (read the note about GPU compatibility):
-
-  ```bash
-  ./run-compose.sh --enable-gpu
-  ```
-
-- To build the latest local version, add `--build`:
-
-  ```bash
-  ./run-compose.sh --enable-gpu --build
-  ```
-
-
-
-## Docker Swarm
-
-This installation method requires knowledge on Docker Swarms, as it utilizes a stack file to deploy 3 seperate containers as services in a Docker Swarm.
-
-It includes isolated containers of ChromaDB, Ollama, and OpenWebUI. 
-Additionally, there are pre-filled [Environment Variables](/getting-started/env-configuration) to further illustrate the setup.
-
-Choose the appropriate command based on your hardware setup:
-
-- **Before Starting**:
-
-  Directories for your volumes need to be created on the host, or you can specify a custom location or volume.
-  
-  The current example utilizes an isolated dir `data`, which is within the same dir as the `docker-stack.yaml`.
-  
-      - **For example**:
-  
-        ```bash
-        mkdir -p data/open-webui data/chromadb data/ollama
-        ```
-
-- **With GPU Support**:
-
-    #### Docker-stack.yaml
-    ```yaml
-    version: '3.9'
-
-    services:
-      openWebUI:
-        image: ghcr.io/open-webui/open-webui:main
-        depends_on:
-            - chromadb
-            - ollama
-        volumes:
-          - ./data/open-webui:/app/backend/data
-        environment:
-          DATA_DIR: /app/backend/data 
-          OLLAMA_BASE_URLS: http://ollama:11434
-          CHROMA_HTTP_PORT: 8000
-          CHROMA_HTTP_HOST: chromadb
-          CHROMA_TENANT: default_tenant
-          VECTOR_DB: chroma
-          WEBUI_NAME: Awesome ChatBot
-          CORS_ALLOW_ORIGIN: "*" # This is the current Default, will need to change before going live
-          RAG_EMBEDDING_ENGINE: ollama
-          RAG_EMBEDDING_MODEL: nomic-embed-text-v1.5
-          RAG_EMBEDDING_MODEL_TRUST_REMOTE_CODE: "True"
-        ports:
-          - target: 8080
-            published: 8080
-            mode: overlay
-        deploy:
-          replicas: 1
-          restart_policy:
-            condition: any
-            delay: 5s
-            max_attempts: 3
-
-      chromadb:
-        hostname: chromadb
-        image: chromadb/chroma:0.5.15
-        volumes:
-          - ./data/chromadb:/chroma/chroma
-        environment:
-          - IS_PERSISTENT=TRUE
-          - ALLOW_RESET=TRUE
-          - PERSIST_DIRECTORY=/chroma/chroma
-        ports: 
-          - target: 8000
-            published: 8000
-            mode: overlay
-        deploy:
-          replicas: 1
-          restart_policy:
-            condition: any
-            delay: 5s
-            max_attempts: 3
-        healthcheck: 
-          test: ["CMD-SHELL", "curl localhost:8000/api/v1/heartbeat || exit 1"]
-          interval: 10s
-          retries: 2
-          start_period: 5s
-          timeout: 10s
-
-      ollama:
-        image: ollama/ollama:latest
-        hostname: ollama
-        ports:
-          - target: 11434
-            published: 11434
-            mode: overlay
-        deploy:
-          resources:
-            reservations:
-              generic_resources:
-                - discrete_resource_spec:
-                    kind: "NVIDIA-GPU"
-                    value: 0
-          replicas: 1
-          restart_policy:
-            condition: any
-            delay: 5s
-            max_attempts: 3
-        volumes:
-          - ./data/ollama:/root/.ollama
-
-    ```
-    - **Additional Requirements**:
-
-      1. Ensure CUDA is Enabled, follow your OS and GPU instructions for that.
-      2. Enable Docker GPU support, see [Nvidia Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html " on Nvidia's site.") 
-      3. Follow the [Guide here on configuring Docker Swarm to with with your GPU](https://gist.github.com/tomlankhorst/33da3c4b9edbde5c83fc1244f010815c#configuring-docker-to-work-with-your-gpus) 
-        - Ensure _GPU Resource_ is enabled in `/etc/nvidia-container-runtime/config.toml` and enable GPU resource advertising by uncommenting the `swarm-resource = "DOCKER_RESOURCE_GPU"`. The docker daemon must be restarted after updating these files on each node.
-
-
-- **With CPU Support**:
-  
-    Modify the Ollama Service within `docker-stack.yaml` and remove the lines for `generic_resources:`
-    ```yaml
-        ollama:
-      image: ollama/ollama:latest
-      hostname: ollama
-      ports:
-        - target: 11434
-          published: 11434
-          mode: overlay
-      deploy:
-        replicas: 1
-        restart_policy:
-          condition: any
-          delay: 5s
-          max_attempts: 3
-      volumes:
-        - ./data/ollama:/root/.ollama
-    ```
-
-- **Deploy Docker Stack**:
-  
-  ```bash
-  docker stack deploy -c docker-stack.yaml -d super-awesome-ai
-  ```
-
-
-## Installing with Podman
-
-<details>
-<summary>Rootless (Podman) local-only Open WebUI with Systemd service and auto-update</summary>
-
-:::note
-Consult the Docker documentation because much of the configuration and syntax is interchangeable with [Podman](https://github.com/containers/podman). See also [rootless_tutorial](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md). This example requires the [slirp4netns](https://github.com/rootless-containers/slirp4netns) network backend to facilitate server listen and Ollama communication over localhost only.
-:::
-
-:::warning
-Rootless container execution with Podman (and Docker/ContainerD) does **not** support [AppArmor confinment](https://github.com/containers/podman/pull/19303). This may increase the attack vector due to [requirement of user namespace](https://rootlesscontaine.rs/caveats). Caution should be exercised and judement (in contrast to the root daemon) rendered based on threat model.
-:::
-
-1. Pull the latest image:
-   ```bash
-   podman pull ghcr.io/open-webui/open-webui:main
-   ```
-2. Create a new container using desired configuration:
-
-   :::note
-   `-p 127.0.0.1:3000:8080` ensures that we listen only on localhost, `--network slirp4netns:allow_host_loopback=true` permits the container to access Ollama when it also listens strictly on localhost. `--add-host=ollama.local:10.0.2.2 --env 'OLLAMA_BASE_URL=http://ollama.local:11434'` adds a hosts record to the container and configures open-webui to use the friendly hostname. `10.0.2.2` is the default slirp4netns address used for localhost mapping. `--env 'ANONYMIZED_TELEMETRY=False'` isn't necessary since Chroma telemetry has been disabled in the code but is included as an example.
-   :::
-
-   ```bash
-   podman create -p 127.0.0.1:3000:8080 --network slirp4netns:allow_host_loopback=true --add-host=ollama.local:10.0.2.2 --env 'OLLAMA_BASE_URL=http://ollama.local:11434' --env 'ANONYMIZED_TELEMETRY=False' -v open-webui:/app/backend/data --label io.containers.autoupdate=registry --name open-webui ghcr.io/open-webui/open-webui:main
-   ```
-
-   :::note
-   [Podman 5.0](https://www.redhat.com/en/blog/podman-50-unveiled) has updated the default rootless network backend to use the more performant [pasta](https://passt.top/passt/about/). While `slirp4netns:allow_host_loopback=true` still achieves the same local-only intention, it's now recommended use a simple TCP forward instead like: `--network=pasta:-T,11434 --add-host=ollama.local:127.0.0.1`. Full example:
-   :::
-
-   ```bash
-   podman create -p 127.0.0.1:3000:8080 --network=pasta:-T,11434 --add-host=ollama.local:127.0.0.1 --env 'OLLAMA_BASE_URL=http://ollama.local:11434' --env 'ANONYMIZED_TELEMETRY=False' -v open-webui:/app/backend/data --label io.containers.autoupdate=registry --name open-webui ghcr.io/open-webui/open-webui:main
-   ```
-
-3. Prepare for systemd user service:
-   ```bash
-   mkdir -p ~/.config/systemd/user/
-   ```
-4. Generate user service with Podman:
-   ```bash
-   podman generate systemd --new open-webui > ~/.config/systemd/user/open-webui.service
-   ```
-5. Reload systemd configuration:
-   ```bash
-   systemctl --user daemon-reload
-   ```
-6. Enable and validate new service:
-   ```bash
-   systemctl --user enable open-webui.service
-   systemctl --user start open-webui.service
-   systemctl --user status open-webui.service
-   ```
-7. Enable and validate Podman auto-update:
-   ```bash
-   systemctl --user enable podman-auto-update.timer
-   systemctl --user enable podman-auto-update.service
-   systemctl --user status podman-auto-update.timer
-   ```
-   Dry run with the following command (omit `--dry-run` to force an update):
-   ```bash
-   podman auto-update --dry-run
-   ```
-
-:::tip
-This process is compatible with Windows 11 WSL deployments when using Ollama within the WSL environment or using the Ollama Windows Preview. When using the native Ollama Windows Preview version, one additional step is required: enable [mirrored networking mode](https://learn.microsoft.com/en-us/windows/wsl/networking#mirrored-mode-networking).
-:::
-
-### Enabling Windows 11 mirrored networking
-
-1. Populate `%UserProfile%\.wslconfig` with:
-   ```
-   [wsl2]
-   networkingMode=mirrored
-   ```
-2. Restart WSL:
-   ```
-   wsl --shutdown
-   ```
-
-</details>
-
-### Alternative Installation Methods
-
-For other ways to install, like using Kustomize or Helm, check out [INSTALLATION](/getting-started/installation). Join our [Open WebUI Discord community](https://discord.gg/5rJgQTnV4s) for more help and information.
-
-### Updating your Docker Installation
-
-For detailed instructions on manually updating your local Docker installation of Open WebUI, including steps for those not using Watchtower and updates via Docker Compose, please refer to our dedicated guide: [UPDATING](/getting-started/updating).
-
-For a quick update with Watchtower, use the command below. Remember to replace `open-webui` with your actual container name if it differs.
-
-```bash
-docker run --rm --volume /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower --run-once open-webui
-```
-
-In the last part of the command, replace `open-webui` with your container name if it is different.
-
-:::info
-After updating Open WebUI, you might need to refresh your browser cache to see the changes.
-:::

From 87246216543fed223d39ba3d54dce64e3e61c047 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 20:23:41 +0000
Subject: [PATCH 28/42] Fix broken link in ipex_llm.md to updated index.md file

---
 docs/tutorials/integrations/ipex_llm.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/tutorials/integrations/ipex_llm.md b/docs/tutorials/integrations/ipex_llm.md
index f59f90a..116185f 100644
--- a/docs/tutorials/integrations/ipex_llm.md
+++ b/docs/tutorials/integrations/ipex_llm.md
@@ -4,7 +4,7 @@ title: "Local LLM Setup with IPEX-LLM on Intel GPU"
 ---
 
 :::note
-This guide is verified with Open WebUI setup through [Manual Installation](/getting-started/index.mdx#manual-installation).
+This guide is verified with Open WebUI setup through [Manual Installation](/getting-started/index.md#manual-installation).
 :::
 
 # Local LLM Setup with IPEX-LLM on Intel GPU

From f9967cec634d3d7b9577c0fe033f45b999b0d91d Mon Sep 17 00:00:00 2001
From: mhand <matthewhandau@gmail.com>
Date: Wed, 6 Nov 2024 07:27:58 +1100
Subject: [PATCH 29/42] Update gh-pages.yml

fix: BASE_URL and SITE_URL
---
 .github/workflows/gh-pages.yml | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml
index e7580e2..cee3ed7 100644
--- a/.github/workflows/gh-pages.yml
+++ b/.github/workflows/gh-pages.yml
@@ -36,6 +36,9 @@
           - name: Install dependencies
             run: npm ci
           - name: Build
+            env:
+              BASE_URL: ${{ vars.BASE_URL }}
+              SITE_URL: ${{ vars.SITE_URL }}
             run: npm run build
           - name: Upload artifact
             uses: actions/upload-pages-artifact@v3
@@ -52,4 +55,4 @@
         steps:
           - name: Deploy to GitHub Pages
             id: deployment
-            uses: actions/deploy-pages@v4
\ No newline at end of file
+            uses: actions/deploy-pages@v4

From 641a5f1f8f0565ae1a5a3151085d11e865e02647 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 21:45:16 +0000
Subject: [PATCH 30/42] Update docusaurus.config.ts to use BASE_URL and
 SITE_URL env vars with defaults

---
 docusaurus.config.ts | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docusaurus.config.ts b/docusaurus.config.ts
index ad581fe..5d670fb 100644
--- a/docusaurus.config.ts
+++ b/docusaurus.config.ts
@@ -8,11 +8,11 @@ const config: Config = {
 	tagline: "ChatGPT-Style WebUI for LLMs (Formerly Ollama WebUI)",
 	favicon: "img/favicon.png",
 
-	// Set the production url of your site here
-	url: "https://openwebui.com",
-	// Set the /<baseUrl>/ pathname under which your site is served
-	// For GitHub pages deployment, it is often '/<projectName>/'
-	baseUrl: "/",
+	// Set the production url of your site here, defaulting to current value
+	url: process.env.SITE_URL || "https://openwebui.com",
+	// Set the /<baseUrl>/ pathname under which your site is served, defaulting to current value
+	// For GitHub pages deployment, it is often '/<projectName>/'
+	baseUrl: process.env.BASE_URL || "/",
 
 	// GitHub pages deployment config.
 	// If you aren't using GitHub pages, you don't need these.

From 54c621f503ac201b870a52ab19dad27e15c362b9 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 21:56:27 +0000
Subject: [PATCH 31/42] Add API Endpoints guide and update Advanced Topics
 index

---
 .../advanced-topics/api-endpoints.md          | 202 ++++++++++++++++++
 .../getting-started/advanced-topics/index.mdx |  16 +-
 2 files changed, 213 insertions(+), 5 deletions(-)
 create mode 100644 docs/getting-started/advanced-topics/api-endpoints.md

diff --git a/docs/getting-started/advanced-topics/api-endpoints.md b/docs/getting-started/advanced-topics/api-endpoints.md
new file mode 100644
index 0000000..37dd185
--- /dev/null
+++ b/docs/getting-started/advanced-topics/api-endpoints.md
@@ -0,0 +1,202 @@
+---
+sidebar_position: 400
+title: "๐Ÿ”— API Endpoints"
+---
+
+This guide provides essential information on how to interact with the API endpoints effectively to achieve seamless integration and automation using our models. Please note that this is an experimental setup and may undergo future updates for enhancement.
+
+## Authentication
+To ensure secure access to the API, authentication is required ๐Ÿ›ก๏ธ. You can authenticate your API requests using the Bearer Token mechanism. Obtain your API key from **Settings > Account** in the Open WebUI, or alternatively, use a JWT (JSON Web Token) for authentication.
+
+## Notable API Endpoints
+
+### ๐Ÿ“œ Retrieve All Models
+- **Endpoint**: `GET /api/models`
+- **Description**: Fetches all models created or added via Open WebUI.
+- **Example**:
+  ```bash
+  curl -H "Authorization: Bearer YOUR_API_KEY" http://localhost:3000/api/models
+  ```
+
+### ๐Ÿ’ฌ Chat Completions
+- **Endpoint**: `POST /api/chat/completions`
+- **Description**: Serves as an OpenAI API-compatible chat completion endpoint for models on Open WebUI including Ollama models, OpenAI models, and Open WebUI Function models.
+- **Example**:
+  ```bash
+  curl -X POST http://localhost:3000/api/chat/completions \
+  -H "Authorization: Bearer YOUR_API_KEY" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "model": "llama3.1",
+        "messages": [
+          {
+            "role": "user",
+            "content": "Why is the sky blue?"
+          }
+        ]
+      }'
+  ```
+
+### ๐Ÿงฉ Retrieval Augmented Generation (RAG)
+
+The Retrieval Augmented Generation (RAG) feature allows you to enhance responses by incorporating data from external sources. Below, you will find the methods for managing files and knowledge collections via the API, and how to use them in chat completions effectively.
+
+#### Uploading Files
+
+To utilize external data in RAG responses, you first need to upload the files. The content of the uploaded file is automatically extracted and stored in a vector database.
+
+- **Endpoint**: `POST /api/v1/files/`
+- **Curl Example**:
+  ```bash
+  curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Accept: application/json" \
+  -F "file=@/path/to/your/file" http://localhost:3000/api/v1/files/
+  ```
+- **Python Example**:
+  ```python
+  import requests
+  
+  def upload_file(token, file_path):
+      url = 'http://localhost:3000/api/v1/files/'
+      headers = {
+          'Authorization': f'Bearer {token}',
+          'Accept': 'application/json'
+      }
+      files = {'file': open(file_path, 'rb')}
+      response = requests.post(url, headers=headers, files=files)
+      return response.json()
+  ```
+
+#### Adding Files to Knowledge Collections
+
+After uploading, you can group files into a knowledge collection or reference them individually in chats.
+
+- **Endpoint**: `POST /api/v1/knowledge/{id}/file/add`
+- **Curl Example**:
+  ```bash
+  curl -X POST http://localhost:3000/api/v1/knowledge/{knowledge_id}/file/add \
+  -H "Authorization: Bearer YOUR_API_KEY" \
+  -H "Content-Type: application/json" \
+  -d '{"file_id": "your-file-id-here"}'
+  ```
+- **Python Example**:
+  ```python
+  import requests
+
+  def add_file_to_knowledge(token, knowledge_id, file_id):
+      url = f'http://localhost:3000/api/v1/knowledge/{knowledge_id}/file/add'
+      headers = {
+          'Authorization': f'Bearer {token}',
+          'Content-Type': 'application/json'
+      }
+      data = {'file_id': file_id}
+      response = requests.post(url, headers=headers, json=data)
+      return response.json()
+  ```
+
+#### Using Files and Collections in Chat Completions
+
+You can reference either individual files or entire collections in your RAG queries for enriched responses.
+
+##### Using an Individual File in Chat Completions
+
+This method is beneficial when you want to focus the chat model's response on the content of a specific file.
+
+- **Endpoint**: `POST /api/chat/completions`
+- **Curl Example**:
+  ```bash
+  curl -X POST http://localhost:3000/api/chat/completions \
+  -H "Authorization: Bearer YOUR_API_KEY" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "model": "gpt-4-turbo",
+        "messages": [
+          {"role": "user", "content": "Explain the concepts in this document."}
+        ],
+        "files": [
+          {"type": "file", "id": "your-file-id-here"}
+        ]
+      }'
+  ```
+
+- **Python Example**:
+  ```python
+  import requests
+
+  def chat_with_file(token, model, query, file_id):
+      url = 'http://localhost:3000/api/chat/completions'
+      headers = {
+          'Authorization': f'Bearer {token}',
+          'Content-Type': 'application/json'
+      }
+      payload = {
+          'model': model,
+          'messages': [{'role': 'user', 'content': query}],
+          'files': [{'type': 'file', 'id': file_id}]
+      }
+      response = requests.post(url, headers=headers, json=payload)
+      return response.json()
+  ```
+
+##### Using a Knowledge Collection in Chat Completions
+
+Leverage a knowledge collection to enhance the response when the inquiry may benefit from a broader context or multiple documents.
+
+- **Endpoint**: `POST /api/chat/completions`
+- **Curl Example**:
+  ```bash
+  curl -X POST http://localhost:3000/api/chat/completions \
+  -H "Authorization: Bearer YOUR_API_KEY" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "model": "gpt-4-turbo",
+        "messages": [
+          {"role": "user", "content": "Provide insights on the historical perspectives covered in the collection."}
+        ],
+        "files": [
+          {"type": "collection", "id": "your-collection-id-here"}
+        ]
+      }'
+  ```
+
+- **Python Example**:
+  ```python
+  import requests
+  
+  def chat_with_collection(token, model, query, collection_id):
+      url = 'http://localhost:3000/api/chat/completions'
+      headers = {
+          'Authorization': f'Bearer {token}',
+          'Content-Type': 'application/json'
+      }
+      payload = {
+          'model': model,
+          'messages': [{'role': 'user', 'content': query}],
+          'files': [{'type': 'collection', 'id': collection_id}]
+      }
+      response = requests.post(url, headers=headers, json=payload)
+      return response.json()
+  ```
+
+These methods enable effective utilization of external knowledge via uploaded files and curated knowledge collections, enhancing chat applications' capabilities using the Open WebUI API. Whether using files individually or within collections, you can customize the integration based on your specific needs.
+
+## Advantages of Using Open WebUI as a Unified LLM Provider
+Open WebUI offers a myriad of benefits, making it an essential tool for developers and businesses alike:
+- **Unified Interface**: Simplify your interactions with different LLMs through a single, integrated platform.
+- **Ease of Implementation**: Quick start integration with comprehensive documentation and community support.
+
+## Swagger Documentation Links
+Access detailed API documentation for different services provided by Open WebUI:
+
+| Application | Documentation Path      |
+|-------------|-------------------------|
+| Main        | `/docs`                 |
+| WebUI       | `/api/v1/docs`          |
+| Ollama      | `/ollama/docs`          |
+| OpenAI      | `/openai/docs`          |
+| Images      | `/images/api/v1/docs`   |
+| Audio       | `/audio/api/v1/docs`    |
+| RAG         | `/retrieval/api/v1/docs`|
+
+Each documentation portal offers interactive examples, schema descriptions, and testing capabilities to enhance your understanding and ease of use.
+
+By following these guidelines, you can swiftly integrate and begin utilizing the Open WebUI API. Should you encounter any issues or have questions, feel free to reach out through our Discord Community or consult the FAQs. Happy coding! ๐ŸŒŸ
diff --git a/docs/getting-started/advanced-topics/index.mdx b/docs/getting-started/advanced-topics/index.mdx
index 9c8639a..f826d89 100644
--- a/docs/getting-started/advanced-topics/index.mdx
+++ b/docs/getting-started/advanced-topics/index.mdx
@@ -11,27 +11,33 @@ Explore deeper concepts and advanced configurations of Open WebUI to enhance you
 
 ## ๐Ÿ”ง Environment Configuration  
 Understand how to set environment variables to customize your Open WebUI setup.  
-[Environment Configuration Guide](./EnvConfig)
+[Environment Configuration Guide](./env-configuration)
 
 ---
 
 ## ๐Ÿ“Š Logging and Monitoring  
 Learn how to monitor, log, and troubleshoot your system effectively.  
-[Logging and Monitoring Guide](./Logging)
+[Logging and Monitoring Guide](./logging)
 
 ---
 
 ## ๐Ÿ› ๏ธ Development Guide  
 Dive into the development process and learn how to contribute to Open WebUI.  
-[Development Guide](./Development)
+[Development Guide](./development)
 
 ---
 
 ## ๐Ÿ”’ HTTPS Encryption  
 Ensure secure communication by implementing HTTPS encryption in your deployment.  
-[HTTPS Encryption Guide](./HttpsEncryption)
+[HTTPS Encryption Guide](./https-encryption)
+
+---
+
+## ๐Ÿ”— API Endpoints  
+Get essential information for API integration and automation using our models.  
+[API Endpoints Guide](./api-endpoints)
 
 ---
 
 Looking for installation instructions? Head over to our [Quick Start Guide](../quick-start).  
-Need to explore core features? Check out [Using OpenWebUI](../using-openwebui).
\ No newline at end of file
+Need to explore core features? Check out [Using OpenWebUI](../using-openwebui).

From 4d0e58ea798cdc888fe9ce638d0bf06aaa2888d3 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 22:08:15 +0000
Subject: [PATCH 32/42] Update links to advanced-topics/env-configuration and
 finalize pending link updates

---
 docs/api/index.md                             |  202 ---
 .../advanced-topics/EnvConfig.md              | 1146 -----------------
 .../{Development.md => development.md}        |    0
 .../env-configuration.md                      |    0
 ...HttpsEncryption.md => https-encryption.md} |    0
 .../{Logging.md => logging.md}                |    0
 docs/getting-started/development.mdx          |  199 ---
 docs/getting-started/installation.md          |   40 -
 docs/getting-started/logging.md               |   63 -
 docs/getting-started/updating.md              |  123 --
 .../getting-started/using-openwebui/index.mdx |    8 +-
 .../{OllamaModels.mdx => ollama-models.mdx}   |    0
 .../using-openwebui/resources.mdx             |   37 +
 .../{Terminology.mdx => terminology.mdx}      |    0
 14 files changed, 43 insertions(+), 1775 deletions(-)
 delete mode 100644 docs/api/index.md
 delete mode 100644 docs/getting-started/advanced-topics/EnvConfig.md
 rename docs/getting-started/advanced-topics/{Development.md => development.md} (100%)
 rename docs/getting-started/{ => advanced-topics}/env-configuration.md (100%)
 rename docs/getting-started/advanced-topics/{HttpsEncryption.md => https-encryption.md} (100%)
 rename docs/getting-started/advanced-topics/{Logging.md => logging.md} (100%)
 delete mode 100644 docs/getting-started/development.mdx
 delete mode 100644 docs/getting-started/installation.md
 delete mode 100644 docs/getting-started/logging.md
 delete mode 100644 docs/getting-started/updating.md
 rename docs/getting-started/using-openwebui/{OllamaModels.mdx => ollama-models.mdx} (100%)
 create mode 100644 docs/getting-started/using-openwebui/resources.mdx
 rename docs/getting-started/using-openwebui/{Terminology.mdx => terminology.mdx} (100%)

diff --git a/docs/api/index.md b/docs/api/index.md
deleted file mode 100644
index 37dd185..0000000
--- a/docs/api/index.md
+++ /dev/null
@@ -1,202 +0,0 @@
----
-sidebar_position: 400
-title: "๐Ÿ”— API Endpoints"
----
-
-This guide provides essential information on how to interact with the API endpoints effectively to achieve seamless integration and automation using our models. Please note that this is an experimental setup and may undergo future updates for enhancement.
-
-## Authentication
-To ensure secure access to the API, authentication is required ๐Ÿ›ก๏ธ. You can authenticate your API requests using the Bearer Token mechanism. Obtain your API key from **Settings > Account** in the Open WebUI, or alternatively, use a JWT (JSON Web Token) for authentication.
-
-## Notable API Endpoints
-
-### ๐Ÿ“œ Retrieve All Models
-- **Endpoint**: `GET /api/models`
-- **Description**: Fetches all models created or added via Open WebUI.
-- **Example**:
-  ```bash
-  curl -H "Authorization: Bearer YOUR_API_KEY" http://localhost:3000/api/models
-  ```
-
-### ๐Ÿ’ฌ Chat Completions
-- **Endpoint**: `POST /api/chat/completions`
-- **Description**: Serves as an OpenAI API compatible chat completion endpoint for models on Open WebUI including Ollama models, OpenAI models, and Open WebUI Function models.
-- **Example**:
-  ```bash
-  curl -X POST http://localhost:3000/api/chat/completions \
-  -H "Authorization: Bearer YOUR_API_KEY" \
-  -H "Content-Type: application/json" \
-  -d '{
-        "model": "llama3.1",
-        "messages": [
-          {
-            "role": "user",
-            "content": "Why is the sky blue?"
-          }
-        ]
-      }'
-  ```
-
-### ๐Ÿงฉ Retrieval Augmented Generation (RAG)
-
-The Retrieval Augmented Generation (RAG) feature allows you to enhance responses by incorporating data from external sources. Below, you will find the methods for managing files and knowledge collections via the API, and how to use them in chat completions effectively.
-
-#### Uploading Files
-
-To utilize external data in RAG responses, you first need to upload the files. The content of the uploaded file is automatically extracted and stored in a vector database.
-
-- **Endpoint**: `POST /api/v1/files/`
-- **Curl Example**:
-  ```bash
-  curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Accept: application/json" \
-  -F "file=@/path/to/your/file" http://localhost:3000/api/v1/files/
-  ```
-- **Python Example**:
-  ```python
-  import requests
-  
-  def upload_file(token, file_path):
-      url = 'http://localhost:3000/api/v1/files/'
-      headers = {
-          'Authorization': f'Bearer {token}',
-          'Accept': 'application/json'
-      }
-      files = {'file': open(file_path, 'rb')}
-      response = requests.post(url, headers=headers, files=files)
-      return response.json()
-  ```
-
-#### Adding Files to Knowledge Collections
-
-After uploading, you can group files into a knowledge collection or reference them individually in chats.
-
-- **Endpoint**: `POST /api/v1/knowledge/{id}/file/add`
-- **Curl Example**:
-  ```bash
-  curl -X POST http://localhost:3000/api/v1/knowledge/{knowledge_id}/file/add \
-  -H "Authorization: Bearer YOUR_API_KEY" \
-  -H "Content-Type: application/json" \
-  -d '{"file_id": "your-file-id-here"}'
-  ```
-- **Python Example**:
-  ```python
-  import requests
-
-  def add_file_to_knowledge(token, knowledge_id, file_id):
-      url = f'http://localhost:3000/api/v1/knowledge/{knowledge_id}/file/add'
-      headers = {
-          'Authorization': f'Bearer {token}',
-          'Content-Type': 'application/json'
-      }
-      data = {'file_id': file_id}
-      response = requests.post(url, headers=headers, json=data)
-      return response.json()
-  ```
-
-#### Using Files and Collections in Chat Completions
-
-You can reference both individual files or entire collections in your RAG queries for enriched responses.
-
-##### Using an Individual File in Chat Completions
-
-This method is beneficial when you want to focus the chat model's response on the content of a specific file.
-
-- **Endpoint**: `POST /api/chat/completions`
-- **Curl Example**:
-  ```bash
-  curl -X POST http://localhost:3000/api/chat/completions \
-  -H "Authorization: Bearer YOUR_API_KEY" \
-  -H "Content-Type: application/json" \
-  -d '{
-        "model": "gpt-4-turbo",
-        "messages": [
-          {"role": "user", "content": "Explain the concepts in this document."}
-        ],
-        "files": [
-          {"type": "file", "id": "your-file-id-here"}
-        ]
-      }'
-  ```
-
-- **Python Example**:
-  ```python
-  import requests
-
-  def chat_with_file(token, model, query, file_id):
-      url = 'http://localhost:3000/api/chat/completions'
-      headers = {
-          'Authorization': f'Bearer {token}',
-          'Content-Type': 'application/json'
-      }
-      payload = {
-          'model': model,
-          'messages': [{'role': 'user', 'content': query}],
-          'files': [{'type': 'file', 'id': file_id}]
-      }
-      response = requests.post(url, headers=headers, json=payload)
-      return response.json()
-  ```
-
-##### Using a Knowledge Collection in Chat Completions
-
-Leverage a knowledge collection to enhance the response when the inquiry may benefit from a broader context or multiple documents.
-
-- **Endpoint**: `POST /api/chat/completions`
-- **Curl Example**:
-  ```bash
-  curl -X POST http://localhost:3000/api/chat/completions \
-  -H "Authorization: Bearer YOUR_API_KEY" \
-  -H "Content-Type: application/json" \
-  -d '{
-        "model": "gpt-4-turbo",
-        "messages": [
-          {"role": "user", "content": "Provide insights on the historical perspectives covered in the collection."}
-        ],
-        "files": [
-          {"type": "collection", "id": "your-collection-id-here"}
-        ]
-      }'
-  ```
-
-- **Python Example**:
-  ```python
-  import requests
-  
-  def chat_with_collection(token, model, query, collection_id):
-      url = 'http://localhost:3000/api/chat/completions'
-      headers = {
-          'Authorization': f'Bearer {token}',
-          'Content-Type': 'application/json'
-      }
-      payload = {
-          'model': model,
-          'messages': [{'role': 'user', 'content': query}],
-          'files': [{'type': 'collection', 'id': collection_id}]
-      }
-      response = requests.post(url, headers=headers, json=payload)
-      return response.json()
-  ```
-
-These methods enable effective utilization of external knowledge via uploaded files and curated knowledge collections, enhancing chat applications' capabilities using the Open WebUI API. Whether using files individually or within collections, you can customize the integration based on your specific needs.
-
-## Advantages of Using Open WebUI as a Unified LLM Provider
-Open WebUI offers a myriad of benefits, making it an essential tool for developers and businesses alike:
-- **Unified Interface**: Simplify your interactions with different LLMs through a single, integrated platform.
-- **Ease of Implementation**: Quick start integration with comprehensive documentation and community support.
-
-## Swagger Documentation Links
-Access detailed API documentation for different services provided by Open WebUI:
-
-| Application | Documentation Path      |
-|-------------|-------------------------|
-| Main        | `/docs`                 |
-| WebUI       | `/api/v1/docs`          |
-| Ollama      | `/ollama/docs`          |
-| OpenAI      | `/openai/docs`          |
-| Images      | `/images/api/v1/docs`   |
-| Audio       | `/audio/api/v1/docs`    |
-| RAG         | `/retrieval/api/v1/docs`|
-
-Each documentation portal offers interactive examples, schema descriptions, and testing capabilities to enhance your understanding and ease of use.
-
-By following these guidelines, you can swiftly integrate and begin utilizing the Open WebUI API. Should you encounter any issues or have questions, feel free to reach out through our Discord Community or consult the FAQs. Happy coding! ๐ŸŒŸ
diff --git a/docs/getting-started/advanced-topics/EnvConfig.md b/docs/getting-started/advanced-topics/EnvConfig.md
deleted file mode 100644
index e27bafe..0000000
--- a/docs/getting-started/advanced-topics/EnvConfig.md
+++ /dev/null
@@ -1,1146 +0,0 @@
----
-sidebar_position: 4
-title: "๐ŸŒ Environment Variables"
----
-
-## Overview
-
-Open WebUI provides a range of environment variables that allow you to customize and configure 
-various aspects of the application. This page serves as a comprehensive reference for all available 
-environment variables, including their types, default values, and descriptions.
-
-:::info
-Last updated: v0.3.20
-:::
-
-## App/Backend
-
-The following environment variables are used by `backend/config.py` to provide Open WebUI startup 
-configuration. Please note that some variables may have different default values depending on 
-whether you're running Open WebUI directly or via Docker. For more information on logging 
-environment variables, see our [logging documentation](Logging#appbackend).
-
-### General
-
-#### `ENV`
-
-- Type: `str` (enum: `dev`, `prod`)
-- Options:
-  - `dev` - Enables the FastAPI API docs on `/docs`
-  - `prod` - Automatically configures several environment variables
-- Default:
-  - **Backend Default**: `dev`
-  - **Docker Default**: `prod`
-- Description: Environment setting.
-
-#### `WEBUI_AUTH`
-
-- Type: `bool`
-- Default Setting: `True`
-- Description: This setting enables or disables authentication.
-
-:::danger
-If set to `False`, authentication will be disabled for your Open WebUI instance. However, it's 
-important to note that turning off authentication is only possible for fresh installations without 
-any existing users. If there are already users registered, you cannot disable authentication 
-directly. Ensure that no users are present in the database, if you intend to turn off `WEBUI_AUTH`.
-:::
-
-#### `WEBUI_NAME`
-
-- Type: `str`
-- Default: `Open WebUI`
-- Description: Sets the main WebUI name. Appends `(Open WebUI)` if overridden.
-
-#### `WEBUI_URL`
-
-- Type: `str`
-- Default: `http://localhost:3000`
-- Description: Specifies the URL where the Open WebUI is reachable. Currently used for search engine support.
-
-#### `AIOHTTP_CLIENT_TIMEOUT`
-
-- Type: `int`
-- Default: `300`
-- Description: Specifies the timeout duration in seconds for the aiohttp client.
-
-:::info
-This is the maximum amount of time the client will wait for a response before timing out.
-If set to an empty string (' '), the timeout will be set to `None`, effectively disabling the timeout and 
-allowing the client to wait indefinitely.
-:::
-
-#### `DATA_DIR`
-
-- Type: `str`
-- Default: `./data`
-- Description: Specifies the base directory for data storage, including uploads, cache, vector database, etc.
-
-#### `FRONTEND_BUILD_DIR`
-
-- Type: `str`
-- Default: `../build`
-- Description: Specifies the location of the built frontend files.
-
-#### `STATIC_DIR`
-
-- Type: `str`
-- Default: `./static`
-- Description: Specifies the directory for static files, such as the favicon.
-
-#### `CUSTOM_NAME`
-
-- Type: `str`
-- Description: Sets `WEBUI_NAME` but polls **api.openwebui.com** for metadata.
-
-#### `ENABLE_SIGNUP`
-
-- Type: `bool`
-- Default: `True`
-- Description: Toggles user account creation.
-
-#### `ENABLE_LOGIN_FORM`
-
-- Type: `bool`
-- Default: `True`
-- Description: Toggles email, password, sign in and "or" (only when `ENABLE_OAUTH_SIGNUP` is set to True) elements.
-
-:::danger
-This should **only** ever be set to `False` when [ENABLE_OAUTH_SIGNUP](EnvConfig) 
-is also being used and set to `True`. Failure to do so will result in the inability to login.
-:::
-
-#### `ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION`
-
-- Type: `bool`
-- Default: `True`
-- Description: Bypass SSL Verification for RAG on Websites.
-
-#### `DEFAULT_MODELS`
-
-- Type: `str`
-- Description: Sets a default Language Model.
-
-#### `DEFAULT_USER_ROLE`
-
-- Type: `str` (enum: `pending`, `user`, `admin`)
-- Options:
-  - `pending` - New users are pending until their accounts are manually activated by an admin.
-  - `user` - New users are automatically activated with regular user permissions.
-  - `admin` - New users are automatically activated with administrator permissions.
-- Default: `pending`
-- Description: Sets the default role assigned to new users.
-
-#### `USER_PERMISSIONS_CHAT_DELETION`
-
-- Type: `bool`
-- Default: `True`
-- Description: Toggles user permission to delete chats.
-
-#### `USER_PERMISSIONS_CHAT_EDITING`
-
-- Type: `bool`
-- Default: `True`
-- Description: Toggles user permission to edit chats.
-
-#### `USER_PERMISSIONS_CHAT_TEMPORARY`
-
-- Type: `bool`
-- Default: `True`
-- Description: Toggles user permission to create temporary chats.
-
-#### `ENABLE_MODEL_FILTER`
-
-- Type: `bool`
-- Default: `False`
-- Description: Toggles Language Model filtering.
-
-#### `MODEL_FILTER_LIST`
-
-- Type: `str`
-- Description: Sets the Language Model filter list, semicolon-separated
-- Example: `llama3.1:instruct;gemma2:latest`
-
-#### `WEBHOOK_URL`
-
-- Type: `str`
-- Description: Sets a webhook for integration with Slack/Microsoft Teams.
-
-#### `ENABLE_ADMIN_EXPORT`
-
-- Type: `bool`
-- Default: `True`
-- Description: Controls whether admin users can export data.
-
-#### `ENABLE_ADMIN_CHAT_ACCESS`
-
-- Type: `bool`
-- Default: `True`
-- Description: Enables admin users to access all chats.
-
-#### `ENABLE_COMMUNITY_SHARING`
-
-- Type: `bool`
-- Default: `True`
-- Description: Controls whether users are shown the share to community button.
-
-#### `ENABLE_MESSAGE_RATING`
-
-- Type: `bool`
-- Default: `True`
-- Description: Enables message rating feature.
-
-#### `WEBUI_BUILD_HASH`
-
-- Type: `str`
-- Default: `dev-build`
-- Description: Used for identifying the Git SHA of the build for releases.
-
-#### `WEBUI_BANNERS`
-
-- Type: `list` of `dict`
-- Default: `[]`
-- Description: List of banners to show to users. Format of banners are:
-
-```json
-[{"id": "string","type": "string [info, success, warning, error]","title": "string","content": "string","dismissible": False,"timestamp": 1000}]
-```
-
-#### `WEBUI_AUTH_TRUSTED_EMAIL_HEADER`
-
-- Type: `str`
-- Description: Defines the trusted request header for authentication. See [SSO docs](/tutorials/features/sso).
-
-#### `WEBUI_AUTH_TRUSTED_NAME_HEADER`
-
-- Type: `str`
-- Description: Defines the trusted request header for the username of anyone registering with the 
-`WEBUI_AUTH_TRUSTED_EMAIL_HEADER` header. See [SSO docs](/tutorials/features/sso).
-
-#### `WEBUI_SECRET_KEY`
-
-- Type: `str`
-- Default: `t0p-s3cr3t`
-- Docker Default: Randomly generated on first start
-- Description: Overrides the randomly generated string used for JSON Web Token.
-
-#### `JWT_EXPIRES_IN`
-
-- Type: `int`
-- Default: `-1`
-- Description: Sets the JWT expiration time in seconds. A value of -1 disables expiration.
-
-#### `USE_CUDA_DOCKER`
-
-- Type: `bool`
-- Default: `False`
-- Description: Builds the Docker image with NVIDIA CUDA support. Enables GPU acceleration 
-for local Whisper and embeddings.
-
-#### `DATABASE_URL`
-
-- Type: `str`
-- Default: `sqlite:///${DATA_DIR}/webui.db`
-- Description: Specifies the database URL to connect to.
-
-:::info
-Supports SQLite and Postgres. Changing the URL does not migrate data between databases.
-Documentation on URL scheme available [here](https://docs.sqlalchemy.org/en/20/core/engines.html#database-urls).
-:::
-
-#### `DATABASE_POOL_SIZE`
-
-- Type: `int`
-- Default: `0`
-- Description: Specifies the size of the database pool. A value of `0` disables pooling. 
-
-#### `DATABASE_POOL_MAX_OVERFLOW`
-
-- Type: `int`
-- Default: `0`
-- Description: Specifies the database pool max overflow.
-
-:::info
-More information about this setting can be found [here](https://docs.sqlalchemy.org/en/20/core/pooling.html#sqlalchemy.pool.QueuePool.params.max_overflow).
-:::
-
-#### `DATABASE_POOL_TIMEOUT`
-
-- Type: `int`
-- Default: `30`
-- Description: Specifies the database pool timeout in seconds to get a connection.
-
-:::info
-More information about this setting can be found [here](https://docs.sqlalchemy.org/en/20/core/pooling.html#sqlalchemy.pool.QueuePool.params.timeout).
-:::
-
-#### `DATABASE_POOL_RECYCLE`
-
-- Type: `int`
-- Default: `3600`
-- Description: Specifies the database pool recycle time in seconds.
-
-:::info
-More information about this setting can be found [here](https://docs.sqlalchemy.org/en/20/core/pooling.html#setting-pool-recycle).
-:::
-
-#### `PORT`
-
-- Type: `int`
-- Default: `8080`
-- Description: Sets the port to run Open WebUI from.
-
-#### `RESET_CONFIG_ON_START`
-
-- Type: `bool`
-- Default: `False`
-- Description: Resets the `config.json` file on startup.
-
-#### `DEFAULT_LOCALE`
-
-- Type: `str`
-- Default: `en`
-- Description: Sets the default locale for the application.
-
-#### `FUNCTIONS_DIR`
-
-- Type: `str`
-- Default: `./functions`
-- Description: Specifies the directory for custom functions.
-
-#### `SHOW_ADMIN_DETAILS`
-
-- Type: `bool`
-- Default: `True`
-- Description: Toggles whether to show admin user details in the interface.
-
-#### `ADMIN_EMAIL`
-
-- Type: `str`
-- Description: Sets the admin email shown by `SHOW_ADMIN_DETAILS`
-
-#### `SAFE_MODE`
-
-- Type: `bool`
-- Default: `False`
-- Description: Enables safe mode, which disables potentially unsafe features.
-
-#### `WEBUI_SESSION_COOKIE_SAME_SITE`
-
-- Type: `str` (enum: `lax`, `strict`, `none`)
-- Options:
-  - `lax` - Sets the `SameSite` attribute to lax, allowing session cookies to be sent with 
-requests initiated by third-party websites.
-  - `strict` - Sets the `SameSite` attribute to strict, blocking session cookies from being sent 
-with requests initiated by third-party websites.
-  - `none` - Sets the `SameSite` attribute to none, allowing session cookies to be sent with 
-requests initiated by third-party websites, but only over HTTPS.
-- Default: `lax`
-- Description: Sets the `SameSite` attribute for session cookies.
-
-#### `WEBUI_SESSION_COOKIE_SECURE`
-
-- Type: `bool`
-- Default: `False`
-- Description: Sets the `Secure` attribute for session cookies if set to `True`.
-
-#### `AIOHTTP_CLIENT_TIMEOUT`
-
-- Type: `int`
-- Description: Sets the timeout in seconds for internal aiohttp connections. This impacts things 
-such as connections to Ollama and OpenAI endpoints.
-
-#### `FONTS_DIR`
-
-- Type: `str`
-- Description: Specifies the directory for fonts.
-
-### Ollama
-
-#### `ENABLE_OLLAMA_API`
-
-- Type: `bool`
-- Default: `True`
-- Description: Enables the use of Ollama APIs.
-
-#### `OLLAMA_BASE_URL`
-
-- Type: `str`
-- Default: `http://localhost:11434`
-- Docker Default:
-  - If `K8S_FLAG` is set: `http://ollama-service.open-webui.svc.cluster.local:11434`
-  - If `USE_OLLAMA_DOCKER=True`: `http://localhost:11434`
-  - Else `http://host.docker.internal:11434`
-- Description: Configures the Ollama backend URL.
-
-#### `OLLAMA_BASE_URLS`
-
-- Type: `str`
-- Description: Configures load-balanced Ollama backend hosts, separated by `;`. See 
-[`OLLAMA_BASE_URL`](#ollama_base_url). Takes precedence over[`OLLAMA_BASE_URL`](#ollama_base_url).
-
-#### `USE_OLLAMA_DOCKER`
-
-- Type: `bool`
-- Default: `False`
-- Description: Builds the Docker image with a bundled Ollama instance.
-
-#### `K8S_FLAG`
-
-- Type: `bool`
-- Description: If set, assumes Helm chart deployment and sets [`OLLAMA_BASE_URL`](#ollama_base_url) to `http://ollama-service.open-webui.svc.cluster.local:11434`
-
-### OpenAI
-
-#### `ENABLE_OPENAI_API`
-
-- Type: `bool`
-- Default: `True`
-- Description: Enables the use of OpenAI APIs.
-
-#### `OPENAI_API_BASE_URL`
-
-- Type: `str`
-- Default: `https://api.openai.com/v1`
-- Description: Configures the OpenAI base API URL.
-
-#### `OPENAI_API_BASE_URLS`
-
-- Type: `str`
-- Description: Supports balanced OpenAI base API URLs, semicolon-separated.
-- Example: `http://host-one:11434;http://host-two:11434`
-
-#### `OPENAI_API_KEY`
-
-- Type: `str`
-- Description: Sets the OpenAI API key.
-
-#### `OPENAI_API_KEYS`
-
-- Type: `str`
-- Description: Supports multiple OpenAI API keys, semicolon-separated.
-- Example: `sk-124781258123;sk-4389759834759834`
-
-### Tasks
-
-#### `TASK_MODEL`
-
-- Type: `str`
-- Description: The default model to use for tasks such as title and web search query generation 
-when using Ollama models.
-
-#### `TASK_MODEL_EXTERNAL`
-
-- Type: `str`
-- Description: The default model to use for tasks such as title and web search query generation 
-when using OpenAI-compatible endpoints.
-
-#### `TITLE_GENERATION_PROMPT_TEMPLATE`
-
-- Type: `str`
-- Description: Prompt to use when generating chat titles.
-- Default:
-
-```
-Create a concise, 3-5 word title with an emoji as a title for the prompt in the given language. Suitable Emojis for the summary can be used to enhance understanding but avoid quotation marks or special formatting. RESPOND ONLY WITH THE TITLE TEXT.
-
-Examples of titles:
-๐Ÿ“‰ Stock Market Trends
-๐Ÿช Perfect Chocolate Chip Recipe
-Evolution of Music Streaming
-Remote Work Productivity Tips
-Artificial Intelligence in Healthcare
-๐ŸŽฎ Video Game Development Insights
-
-Prompt: {{prompt:middletruncate:8000}}
-```
-
-#### `SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE`
-
-- Type: `str`
-- Description: Prompt to use when generating search queries.
-- Default:
-
-```
-Assess the need for a web search based on the current question and prior interactions, but lean towards suggesting a Google search query if uncertain. Generate a Google search query even when the answer might be straightforward, as additional information may enhance comprehension or provide updated data. If absolutely certain that no further information is required, return an empty string. Default to a search query if unsure or in doubt. Today's date is {{CURRENT_DATE}}.
-
-Current Question:
-{{prompt:end:4000}}
-
-Interaction History:
-{{MESSAGES:END:6}}
-```
-
-#### `TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE`
-
-- Type: `str`
-- Description: Prompt to use when calling tools.
-- Default:
-
-```
-Available Tools: {{TOOLS}}\nReturn an empty string if no tools match the query. If a function tool matches, construct and return a JSON object in the format {\"name\": \"functionName\", \"parameters\": {\"requiredFunctionParamKey\": \"requiredFunctionParamValue\"}} using the appropriate tool and its parameters. Only return the object and limit the response to the JSON object without additional text.
-```
-
-#### `CORS_ALLOW_ORIGIN`
-
-- Type: `str`
-- Default: `*`
-- Description: Sets the allowed origins for Cross-Origin Resource Sharing (CORS).
-
-### RAG
-
-#### `DOCS_DIR`
-
-- Type: `str`
-- Default: `${DATA_DIR}/docs`
-- Description: Specifies the directory scanned for documents to add to the RAG database when triggered.
-
-#### `VECTOR_DB`
-
-- Type: `str`
-- Default: `chroma`
-- Description: Specifies which vector database system to use, either 'chroma' for ChromaDB or 'milvus' for Milvus. This setting determines which vector storage system will be used for managing embeddings.
-
-#### `CHROMA_TENANT`
-
-- Type: `str`
-- Default: `default_tenant`
-- Description: Sets the tenant for ChromaDB to use for RAG embeddings.
-
-#### `CHROMA_DATABASE`
-
-- Type: `str`
-- Default: `default_database`
-- Description: Sets the database in the ChromaDB tenant to use for RAG embeddings.
-
-#### `CHROMA_HTTP_HOST`
-
-- Type: `str`
-- Description: Specifies the hostname of a remote ChromaDB Server. Uses a local ChromaDB instance if not set.
-
-#### `CHROMA_HTTP_PORT`
-
-- Type: `int`
-- Default: `8000`
-- Description: Specifies the port of a remote ChromaDB Server.
-
-#### `CHROMA_HTTP_HEADERS`
-
-- Type: `str`
-- Description: Comma-separated list of HTTP headers to include with every ChromaDB request.
-- Example: `Authorization=Bearer heuhagfuahefj,User-Agent=OpenWebUI`.
-
-#### `CHROMA_HTTP_SSL`
-
-- Type: `bool`
-- Default: `False`
-- Description: Controls whether or not SSL is used for ChromaDB Server connections.
-
-#### `MILVUS_URI`
-
-- Type: `str`
-- Default: `${DATA_DIR}/vector_db/milvus.db`
-- Description: Specifies the URI for connecting to the Milvus vector database. This can point to a local or remote Milvus server based on the deployment configuration.
-
-#### `RAG_TOP_K`
-
-- Type: `int`
-- Default: `5`
-- Description: Sets the default number of results to consider when using RAG.
-
-#### `RAG_RELEVANCE_THRESHOLD`
-
-- Type: `float`
-- Default: `0.0`
-- Description: Sets the relevance threshold to consider for documents when used with reranking.
-
-#### `ENABLE_RAG_HYBRID_SEARCH`
-
-- Type: `bool`
-- Default: `False`
-- Description: Enables the use of ensemble search with `BM25` + `ChromaDB`, with reranking using 
-`sentence_transformers` models.
-
-#### `ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION`
-
-- Type: `bool`
-- Default: `True`
-- Description: Enables TLS certification verification when browsing web pages for RAG.
-
-#### `RAG_EMBEDDING_ENGINE`
-
-- Type: `str` (enum: `ollama`, `openai`)
-- Options:
-  - Leave empty for `Default (SentenceTransformers)` - Uses SentenceTransformers for embeddings.
-  - `ollama` - Uses the Ollama API for embeddings.
-  - `openai` - Uses the OpenAI API for embeddings.
-- Description: Selects an embedding engine to use for RAG.
-
-#### `PDF_EXTRACT_IMAGES`
-
-- Type: `bool`
-- Default: `False`
-- Description: Extracts images from PDFs using OCR when loading documents.
-
-#### `RAG_EMBEDDING_MODEL`
-
-- Type: `str`
-- Default: `sentence-transformers/all-MiniLM-L6-v2`
-- Description: Sets a model for embeddings. Locally, a Sentence-Transformer model is used.
-
-#### `RAG_EMBEDDING_MODEL_AUTO_UPDATE`
-
-- Type: `bool`
-- Default: `False`
-- Description: Toggles automatic update of the Sentence-Transformer model.
-
-#### `RAG_EMBEDDING_MODEL_TRUST_REMOTE_CODE`
-
-- Type: `bool`
-- Default: `False`
-- Description: Determines whether or not to allow custom models defined on the Hub in their own modeling files.
-
-#### `RAG_TEMPLATE`
-
-- Type: `str`
-- Default:
-
-```
-You are given a user query, some textual context and rules, all inside xml tags. You have to answer the query based on the context while respecting the rules.
-
-<context>
-[context]
-</context>
-
-<rules>
-- If you don't know, just say so.
-- If you are not sure, ask for clarification.
-- Answer in the same language as the user query.
-- If the context appears unreadable or of poor quality, tell the user then answer as best as you can.
-- If the answer is not in the context but you think you know the answer, explain that to the user then answer with your own knowledge.
-- Answer directly and without using xml tags.
-</rules>
-
-<user_query>
-[query]
-</user_query>
-```
-
-- Description: Template to use when injecting RAG documents into chat completion
-
-#### `RAG_RERANKING_MODEL`
-
-- Type: `str`
-- Description: Sets a model for reranking results. Locally, a Sentence-Transformer model is used.
-
-#### `RAG_RERANKING_MODEL_AUTO_UPDATE`
-
-- Type: `bool`
-- Default: `False`
-- Description: Toggles automatic update of the reranking model.
-
-#### `RAG_RERANKING_MODEL_TRUST_REMOTE_CODE`
-
-- Type: `bool`
-- Default: `False`
-- Description: Determines whether or not to allow custom models defined on the Hub in their own 
-modeling files for reranking.
-
-#### `RAG_OPENAI_API_BASE_URL`
-
-- Type: `str`
-- Default: `${OPENAI_API_BASE_URL}`
-- Description: Sets the OpenAI base API URL to use for RAG embeddings.
-
-#### `RAG_OPENAI_API_KEY`
-
-- Type: `str`
-- Default: `${OPENAI_API_KEY}`
-- Description: Sets the OpenAI API key to use for RAG embeddings.
-
-#### `RAG_EMBEDDING_OPENAI_BATCH_SIZE`
-
-- Type: `int`
-- Default: `1`
-- Description: Sets the batch size for OpenAI embeddings.
-
-#### `ENABLE_RAG_LOCAL_WEB_FETCH`
-
-- Type: `bool`
-- Default: `False`
-- Description: Enables local web fetching for RAG. Enabling this allows Server Side Request 
-Forgery attacks against local network resources.
-
-#### `YOUTUBE_LOADER_LANGUAGE`
-
-- Type: `str`
-- Default: `en`
-- Description: Sets the language to use for YouTube video loading.
-
-#### `CHUNK_SIZE`
-
-- Type: `int`
-- Default: `1500`
-- Description: Sets the document chunk size for embeddings.
-
-#### `CHUNK_OVERLAP`
-
-- Type: `int`
-- Default: `100`
-- Description: Specifies how much overlap there should be between chunks.
-
-#### `CONTENT_EXTRACTION_ENGINE`
-
-- Type: `str` (`tika`)
-- Options:
-  - Leave empty to use default
-  - `tika` - Use a local Apache Tika server
-- Description: Sets the content extraction engine to use for document ingestion.
-
-#### `TIKA_SERVER_URL`
-
-- Type: `str`
-- Default: `http://localhost:9998`
-- Description: Sets the URL for the Apache Tika server.
-
-#### `RAG_FILE_MAX_COUNT`
-
-- Type: `int`
-- Default: `10`
-- Description: Sets the maximum number of files that can be uploaded at once for document ingestion.
-
-#### `RAG_FILE_MAX_SIZE`
-
-- Type: `int`
-- Default: `100` (100MB)
-- Description: Sets the maximum size of a file that can be uploaded for document ingestion.
-
-### Web Search
-
-#### `ENABLE_RAG_WEB_SEARCH`
-
-- Type: `bool`
-- Default: `False`
-- Description: Enable web search toggle
-
-#### `ENABLE_SEARCH_QUERY`
-
-- Type: `bool`
-- Default: `False`
-- Description: Enables the generation of search queries from prompts
-
-#### `RAG_WEB_SEARCH_ENGINE`
-
-- Type: `str` (enum: `searxng`, `google_pse`, `brave`, `serpstack`, `serper`, `serply`, `searchapi`, `duckduckgo`, `tavily`, `jina`)
-- Options:
-  - `searxng` - Uses the [SearXNG](https://github.com/searxng/searxng) search engine.
-  - `google_pse` - Uses the [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/).
-  - `brave` - Uses the [Brave search engine](https://brave.com/search/api/).
-  - `serpstack` - Uses the [Serpstack search engine](https://serpstack.com/).
-  - `serper` - Uses the [Serper search engine](https://serper.dev/).
-  - `serply` - Uses the [Serply search engine](https://serply.io/).
-  - `searchapi` - Uses the [SearchAPI search engine](https://www.searchapi.io/).
-  - `duckduckgo` - Uses the [DuckDuckGo search engine](https://duckduckgo.com/).
-  - `tavily` - Uses the [Tavily search engine](https://tavily.com/).
-  - `jina` - Uses the [Jina search engine](https://jina.ai/).
-- Description: Select engine for performing searches
-
-#### `SEARXNG_QUERY_URL`
-
-- Type: `str`
-- Description: The [SearXNG search API](https://docs.searxng.org/dev/search_api.html) URL supporting JSON output. `<query>` is replaced with 
-the search query. Example: `http://searxng.local/search?q=<query>`
-
-#### `GOOGLE_PSE_API_KEY`
-
-- Type: `str`
-- Description: The API key for the Google Programmable Search Engine (PSE) service.
-
-#### `GOOGLE_PSE_ENGINE_ID`
-
-- Type: `str`
-- Description: The engine ID for the Google Programmable Search Engine (PSE) service.
-
-#### `BRAVE_SEARCH_API_KEY`
-
-- Type: `str`
-- Description: The API key for the Brave Search API.
-
-#### `SERPSTACK_API_KEY`
-
-- Type: `str`
-- Description: The API key for Serpstack search API.
-
-#### `SERPSTACK_HTTPS`
-
-- Type: `bool`
-- Default: `True`
-- Description: Configures the use of HTTPS for Serpstack requests. Free tier requests are restricted to HTTP only.
-
-#### `SERPER_API_KEY`
-
-- Type: `str`
-- Description: The API key for the Serper search API.
-
-#### `SERPLY_API_KEY`
-
-- Type: `str`
-- Description: The API key for the Serply search API.
-
-#### `TAVILY_API_KEY`
-
-- Type: `str`
-- Description: The API key for the Tavily search API.
-
-#### `RAG_WEB_SEARCH_RESULT_COUNT`
-
-- Type: `int`
-- Default: `3`
-- Description: Maximum number of search results to crawl.
-
-#### `RAG_WEB_SEARCH_CONCURRENT_REQUESTS`
-
-- Type: `int`
-- Default: `10`
-- Description: Number of concurrent requests to crawl web pages returned from search results.
-
-#### `SEARCHAPI_API_KEY`
-
-- Type: `str`
-- Description: Sets the SearchAPI API key.
-
-#### `SEARCHAPI_ENGINE`
-
-- Type: `str`
-- Description: Sets the SearchAPI engine.
-
-### Speech to Text
-
-#### `AUDIO_STT_ENGINE`
-
-- Type: `str` (enum: `openai`)
-- Options:
-  - Leave empty to use local Whisper engine for Speech-to-Text.
-  - `openai` - Uses OpenAI engine for Speech-to-Text.
-- Description: Specifies the Speech-to-Text engine to use.
-
-#### `AUDIO_STT_OPENAI_API_BASE_URL`
-
-- Type: `str`
-- Default: `${OPENAI_API_BASE_URL}`
-- Description: Sets the OpenAI-compatible base URL to use for Speech-to-Text.
-
-#### `AUDIO_STT_OPENAI_API_KEY`
-
-- Type: `str`
-- Default: `${OPENAI_API_KEY}`
-- Description: Sets the OpenAI API key to use for Speech-to-Text.
-
-#### `AUDIO_STT_MODEL`
-
-- Type: `str`
-- Default: `whisper-1`
-- Description: Specifies the Speech-to-Text model to use for OpenAI-compatible endpoints.
-
-#### `WHISPER_MODEL`
-
-- Type: `str`
-- Default: `base`
-- Description: Sets the Whisper model to use for Speech-to-Text. The backend used is faster_whisper with quantization to `int8`.
-
-#### `WHISPER_MODEL_DIR`
-
-- Type: `str`
-- Default: `${DATA_DIR}/cache/whisper/models`
-- Description: Specifies the directory to store Whisper model files.
-
-#### `WHISPER_MODEL_AUTO_UPDATE`
-
-- Type: `bool`
-- Default: `False`
-- Description: Toggles automatic update of the Whisper model.
-
-### Text to Speech
-
-#### `AUDIO_TTS_ENGINE`
-
-- Type: `str` (enum: `elevenlabs`, `openai`)
-- Options:
-  - Leave empty to use built-in WebAPI engine for Text-to-Speech.
-  - `elevenlabs` - Uses ElevenLabs engine for Text-to-Speech
-  - `openai` - Uses OpenAI engine for Text-to-Speech.
-- Description: Specifies the Text-to-Speech engine to use.
-
-#### `AUDIO_TTS_API_KEY`
-
-- Type: `str`
-- Description: Sets the API key for Text-to-Speech.
-
-#### `AUDIO_TTS_OPENAI_API_BASE_URL`
-
-- Type: `str`
-- Default: `${OPENAI_API_BASE_URL}`
-- Description: Sets the OpenAI-compatible base URL to use for text-to-speech.
-
-#### `AUDIO_TTS_OPENAI_API_KEY`
-
-- Type: `str`
-- Default: `${OPENAI_API_KEY}`
-- Description: Sets the API key to use for text-to-speech.
-
-#### `AUDIO_TTS_MODEL`
-
-- Type: `str`
-- Default: `tts-1`
-- Description: Specifies the OpenAI text-to-speech model to use.
-
-#### `AUDIO_TTS_VOICE`
-
-- Type: `str`
-- Default: `alloy`
-- Description: Sets the OpenAI text-to-speech voice to use.
-
-#### `AUDIO_TTS_SPLIT_ON`
-
-- Type: `str`
-- Default: `punctuation`
-- Description: Sets the OpenAI text-to-speech split on to use.
-
-### Image Generation
-
-#### `ENABLE_IMAGE_GENERATION`
-
-- Type: `bool`
-- Default: `False`
-- Description: Enables or disables image generation features.
-
-#### `IMAGE_GENERATION_ENGINE`
-
-- Type: `str` (enum: `openai`, `comfyui`, `automatic1111`)
-- Options:
-  - `openai` - Uses OpenAI DALL-E for image generation.
-  - `comfyui` - Uses ComfyUI engine for image generation.
-  - `automatic1111` - Uses Automatic1111 engine for image generation (default).
-- Default: `automatic1111`
-- Description: Specifies the engine to use for image generation.
-
-#### `AUTOMATIC1111_BASE_URL`
-
-- Type: `str`
-- Description: Specifies the URL to Automatic1111's Stable Diffusion API.
-
-#### `AUTOMATIC1111_API_AUTH`
-
-- Type: `str`
-- Description: Sets the Automatic1111 API authentication.
-
-#### `COMFYUI_BASE_URL`
-
-- Type: `str`
-- Description: Specifies the URL to the ComfyUI image generation API.
-
-#### `COMFYUI_WORKFLOW`
-
-- Type: `str`
-- Description: Sets the ComfyUI workflow.
-
-#### `IMAGES_OPENAI_API_BASE_URL`
-
-- Type: `str`
-- Default: `${OPENAI_API_BASE_URL}`
-- Description: Sets the OpenAI-compatible base URL to use for DALL-E image generation.
-
-
-#### `IMAGES_OPENAI_API_KEY`
-
-- Type: `str`
-- Default: `${OPENAI_API_KEY}`
-- Description: Sets the API key to use for DALL-E image generation.
-
-#### `IMAGE_GENERATION_MODEL`
-
-- Type: `str`
-- Description: Default model to use for image generation
-
-#### `IMAGE_SIZE`
-
-- Type: `str`
-- Default: `512x512`
-- Description: Sets the default image size to generate.
-
-#### `IMAGE_STEPS`
-
-- Type: `int`
-- Default: `50`
-- Description: Sets the default iteration steps for image generation. Used for ComfyUI and AUTOMATIC1111.
-
-### OAuth
-
-#### `ENABLE_OAUTH_SIGNUP`
-
-- Type: `bool`
-- Default: `False`
-- Description: Enables user account creation via OAuth.
-
-#### `OAUTH_MERGE_ACCOUNTS_BY_EMAIL`
-
-- Type: `bool`
-- Default: `False`
-- Description: If enabled, merges OAuth accounts with existing accounts using the same email 
-address. This is considered unsafe because not all providers verify email addresses, which can 
-lead to account takeovers.
-
-#### `OAUTH_USERNAME_CLAIM`
-
-- Type: `str`
-- Default: `name`
-- Description: Set username claim for OpenID.
-
-#### `OAUTH_EMAIL_CLAIM`
-
-- Type: `str`
-- Default: `email`
-- Description: Set email claim for OpenID.
-
-#### `OAUTH_PICTURE_CLAIM`
-
-- Type: `str`
-- Default: `picture`
-- Description: Set picture (avatar) claim for OpenID.
-
-#### `OAUTH_CLIENT_ID`
-
-- Type: `str`
-- Description: Sets the client ID for OIDC
-
-#### `OAUTH_CLIENT_SECRET`
-
-- Type: `str`
-- Description: Sets the client secret for OIDC
-
-#### `OAUTH_SCOPES`
-
-- Type: `str`
-- Default: `openid email profile`
-- Description: Sets the scope for OIDC authentication. `openid` and `email` are required.
-
-#### `OAUTH_PROVIDER_NAME`
-
-- Type: `str`
-- Default: `SSO`
-- Description: Sets the name for the OIDC provider.
-
-#### `ENABLE_OAUTH_ROLE_MANAGEMENT`
-
-- Type: `bool`
-- Default: `False`
-- Description: Enables role management via OAuth delegation.
-
-#### `OAUTH_ROLES_CLAIM`
-
-- Type: `str`
-- Default: `roles`
-- Description: Sets the roles claim to look for in the OIDC token.
-
-#### `OAUTH_ALLOWED_ROLES`
-
-- Type: `str`
-- Default: `user,admin`
-- Description: Sets the roles that are allowed access to the platform.
-
-#### `OAUTH_ADMIN_ROLES`
-
-- Type: `str`
-- Default: `admin`
-- Description: Sets the roles that are considered administrators.
-
-#### `GOOGLE_CLIENT_ID`
-
-- Type: `str`
-- Description: Sets the client ID for Google OAuth
-
-#### `GOOGLE_CLIENT_SECRET`
-
-- Type: `str`
-- Description: Sets the client secret for Google OAuth
-
-#### `GOOGLE_OAUTH_SCOPE`
-
-- Type: `str`
-- Default: `openid email profile`
-- Description: Sets the scope for Google OAuth authentication.
-
-#### `GOOGLE_REDIRECT_URI`
-
-- Type: `str`
-- Description: Sets the redirect URI for Google OAuth
-
-#### `MICROSOFT_CLIENT_ID`
-
-- Type: `str`
-- Description: Sets the client ID for Microsoft OAuth
-
-#### `MICROSOFT_CLIENT_SECRET`
-
-- Type: `str`
-- Description: Sets the client secret for Microsoft OAuth
-
-#### `MICROSOFT_CLIENT_TENANT_ID`
-
-- Type: `str`
-- Description: Sets the tenant ID for Microsoft OAuth
-
-#### `MICROSOFT_OAUTH_SCOPE`
-
-- Type: `str`
-- Default: `openid email profile`
-- Description: Sets the scope for Microsoft OAuth authentication.
-
-#### `MICROSOFT_REDIRECT_URI`
-
-- Type: `str`
-- Description: Sets the redirect URI for Microsoft OAuth
-
-#### `OPENID_PROVIDER_URL`
-
-- Type: `str`
-- Description: Path to the `.well-known/openid-configuration` endpoint
-
-#### `OPENID_REDIRECT_URI`
-
-- Type: `str`
-- Description: Sets the redirect URI for OIDC
-
-### Tools
-
-#### `TOOLS_DIR`
-
-- Type: `str`
-- Default: `${DATA_DIR}/tools`
-- Description: Specifies the directory for custom tools.
-
-## Misc Environment Variables
-
-These variables are not specific to Open WebUI but can still be valuable in certain contexts.
-
-### Proxy Settings
-
-Open WebUI supports using proxies for HTTP and HTTPS retrievals. To specify proxy settings,
-Open WebUI uses the following environment variables:
-
-#### `http_proxy`
-
-- Type: `str`
-- Description: Sets the URL for the HTTP proxy.
-
-#### `https_proxy`
-
-- Type: `str`
-- Description: Sets the URL for the HTTPS proxy.
-
-#### `no_proxy`
-
-- Type: `str`
-- Description: Lists domain extensions (or IP addresses) for which the proxy should not be used,
-separated by commas. For example, setting no_proxy to '.mit.edu' ensures that the proxy is
-bypassed when accessing documents from MIT.
diff --git a/docs/getting-started/advanced-topics/Development.md b/docs/getting-started/advanced-topics/development.md
similarity index 100%
rename from docs/getting-started/advanced-topics/Development.md
rename to docs/getting-started/advanced-topics/development.md
diff --git a/docs/getting-started/env-configuration.md b/docs/getting-started/advanced-topics/env-configuration.md
similarity index 100%
rename from docs/getting-started/env-configuration.md
rename to docs/getting-started/advanced-topics/env-configuration.md
diff --git a/docs/getting-started/advanced-topics/HttpsEncryption.md b/docs/getting-started/advanced-topics/https-encryption.md
similarity index 100%
rename from docs/getting-started/advanced-topics/HttpsEncryption.md
rename to docs/getting-started/advanced-topics/https-encryption.md
diff --git a/docs/getting-started/advanced-topics/Logging.md b/docs/getting-started/advanced-topics/logging.md
similarity index 100%
rename from docs/getting-started/advanced-topics/Logging.md
rename to docs/getting-started/advanced-topics/logging.md
diff --git a/docs/getting-started/development.mdx b/docs/getting-started/development.mdx
deleted file mode 100644
index cc13537..0000000
--- a/docs/getting-started/development.mdx
+++ /dev/null
@@ -1,199 +0,0 @@
----
-sidebar_position: 6
-title: "๐Ÿ› ๏ธ Development Guide"
----
-import { TopBanners } from "@site/src/components/TopBanners";
-
-<TopBanners />
-
-Welcome to the Open WebUI Development Setup Guide! ๐ŸŒŸ Whether you're a novice or a veteran in the software development world, this guide is designed to assist you in establishing a functional local development environment for both the frontend and backend components of Open WebUI. Let's get started and set up your development environment swiftly! ๐Ÿš€
-
-## System Requirements
-
-Before diving into the setup, make sure your system meets the following requirements:
-- **Operating System**: Linux (WSL) or macOS (Instructions provided here specifically cater to these operating systems)
-- **Python Version**: Python 3.11
-
-## ๐Ÿง Linux/macOS Setup Guide
-
-This section provides a step-by-step process to get your development environment ready on Linux (WSL) or macOS platforms.
-
-### ๐Ÿ“ก Cloning the Repository
-
-First, you'll need to clone the Open WebUI repository and switch to the directory:
-
-```sh
-git clone https://github.com/open-webui/open-webui.git
-cd open-webui
-```
-
-### ๐Ÿ–ฅ๏ธ Frontend Server Setup
-
-To set up the frontend server, follow these instructions:
-
-1. **Environment Configuration**:
-   Duplicate the environment configuration file:
-
-   ```sh
-   cp -RPp .env.example .env
-   ```
-
-2. **Install Dependencies**:
-   Run the following commands to install necessary dependencies:
-
-   ```sh
-   npm install
-   ```
-
-3. **Launch the Server**:
-   Start the server with:
-
-   ```sh
-   npm run dev
-   ```
-
-   ๐ŸŒ The frontend server will be available at: http://localhost:5173. Please note that for the frontend server to function correctly, the backend server should be running concurrently.
-
-### ๐Ÿ–ฅ๏ธ Backend Server Setup
-
-Setting up the backend server involves a few more steps, Python 3.11 is required for Open WebUI:
-
-1. **Change Directory**:
-   Open a new terminal window and navigate to the backend directory:
-
-   ```sh
-   cd open-webui/backend
-   ```
-
-2. **Python Environment Setup** (Using Conda Recommended):
-    - Create and activate a Conda environment with Python 3.11:
-
-      ```sh
-      conda create --name open-webui python=3.11
-      conda activate open-webui
-      ```
-
-3. **Install Backend Dependencies**:
-   Install all the required Python libraries:
-
-   ```sh
-   pip install -r requirements.txt -U
-   ```
-
-4. **Start the Backend Application**:
-   Launch the backend application with:
-
-   ```sh
-   sh dev.sh
-   ```
-
-   ๐Ÿ“„ Access the backend API documentation at: http://localhost:8080/docs. The backend supports hot reloading, making your development process smoother by automatically reflecting changes.
-
-That's it! You now have both the frontend and backend servers running. Explore the API documentation and start developing features for Open WebUI. Happy coding! ๐ŸŽ‰
-
-## ๐Ÿณ Running in a Docker Container
-
-For those who prefer using Docker, here's how you can set things up:
-
-1. **Initialize Configuration:**
-   Assuming you have already cloned the repository and created a `.env` file, create a new file named `compose-dev.yaml`. This configuration uses Docker Compose to ease the development setup.
-
-```yaml
-name: open-webui-dev
-
-services:
-  frontend:
-    build:
-      context: .
-      target: build
-    command: ["npm", "run", "dev"]
-    depends_on:
-      - backend
-    extra_hosts:
-      - host.docker.internal:host-gateway
-    ports:
-      - "3000:5173"
-    develop:
-      watch:
-        path: ./src
-        action: sync
-
-  backend:
-    build:
-      context: .
-      target: base
-    command: ["bash", "dev.sh"]
-    env_file: ".env"
-    environment:
-      - ENV=dev
-      - WEBUI_AUTH=False
-    volumes:
-      - data:/app/backend/data
-    extra_hosts:
-      - host.docker.internal:host-gateway
-    ports:
-      - "8080:8080"
-    restart: always
-    develop:
-      watch:
-        path: ./backend
-        action: sync
-
-volumes:
-  data: {}
-
-```
-
-2. **Start Development Containers:**
-
-```sh
-docker compose -f compose-dev.yaml up --watch
-```
-
-This command will start the frontend and backend servers in hot reload mode. Changes in your source files will trigger an automatic refresh. The web app will be available at http://localhost:3000 and Backend API docs at http://localhost:8080/docs.
-
-3. **Stopping the Containers:**
-
-To stop the containers, you can use:
-
-```sh
-docker compose -f compose-dev.yaml down
-```
-
-### ๐Ÿ”„ Integration with Pipelines
-
-If your development involves [Pipelines](https://docs.openwebui.com/pipelines/), you can enhance your Docker setup:
-
-```yaml
-services:
-  pipelines:
-    ports:
-      - "9099:9099"
-    volumes:
-      - ./pipelines:/app/pipelines
-    extra_hosts:
-      - host.docker.internal:host-gateway
-    restart: always
-```
-
-This setup involves mounting the `pipelines` directory to ensure any changes reflect immediately, maintaining high development agility.
-
-:::note
-This configuration uses volume bind-mounts. Learn more about how they differ from named volumes [here](https://docs.docker.com/storage/bind-mounts/).
-:::
-
-## ๐Ÿ› Troubleshooting
-
-### FATAL ERROR: Reached heap limit
-
-When you encounter a memory-related error during the Docker build processโ€”especially while executing `npm run build`โ€”it typically indicates that the JavaScript heap has exceeded its memory limit. One effective solution is to increase the memory allocated to Node.js by adjusting the `NODE_OPTIONS` environment variable. This allows you to set a higher maximum heap size, which can help prevent out-of-memory errors during the build process. If you encounter this issue, try to allocate at least 4 GB of RAM, or higher if you have enough RAM.
-
-You can increase the memory allocated to Node.js by adding the following line just before `npm run build` in the `Dockerfile`.
-
-```docker title=/Dockerfile
-ENV NODE_OPTIONS=--max-old-space-size=4096
-```
-
----
-
-Through these setup steps, both new and experienced contributors can seamlessly integrate into the development workflow of Open WebUI. Happy coding! ๐ŸŽ‰
diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md
deleted file mode 100644
index a83b1be..0000000
--- a/docs/getting-started/installation.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-sidebar_position: 1
-title: "๐Ÿ”ง Alternative Installation"
----
-
-### Installing Both Ollama and Open WebUI Using Kustomize
-
-For a CPU-only Pod:
-
-```bash
-kubectl apply -k ./kubernetes/manifest/base
-```
-
-For a GPU-enabled Pod:
-
-```bash
-kubectl apply -k ./kubernetes/manifest/gpu
-```
-
-### Installing Both Ollama and Open WebUI Using Helm
-
-:::info
-
-    The Helm installation method has been migrated to the new GitHub repository. Please refer to
-    the latest installation instructions at [https://github.com/open-webui/helm-charts](https://github.com/open-webui/helm-charts).
-
-:::
-
-Confirm that Helm has been deployed on your execution environment. 
-For installation instructions, visit [https://helm.sh/docs/intro/install/](https://helm.sh/docs/intro/install/).
-
-```bash
-helm repo add open-webui https://helm.openwebui.com/
-helm repo update
-
-kubectl create namespace open-webui
-helm upgrade --install open-webui open-webui/open-webui --namespace open-webui
-```
-
-For additional customization options, refer to the [kubernetes/helm/values.yaml](https://github.com/open-webui/helm-charts/tree/main/charts/open-webui) file.
diff --git a/docs/getting-started/logging.md b/docs/getting-started/logging.md
deleted file mode 100644
index beeb9e8..0000000
--- a/docs/getting-started/logging.md
+++ /dev/null
@@ -1,63 +0,0 @@
----
-sidebar_position: 3
-title: "๐Ÿ“œ Open WebUI Logging"
----
-
-## Browser Client Logging ##
-
-Client logging generally occurs via [JavaScript](https://developer.mozilla.org/en-US/docs/Web/API/console/log_static) `console.log()` and can be accessed using the built-in browser-specific developer tools:
-* Blink
-  * [Chrome/Chromium](https://developer.chrome.com/docs/devtools/)
-  * [Edge](https://learn.microsoft.com/en-us/microsoft-edge/devtools-guide-chromium/overview)
-* Gecko
-  * [Firefox](https://firefox-source-docs.mozilla.org/devtools-user/)
-* WebKit
-  * [Safari](https://developer.apple.com/safari/tools/)
-
-## Application Server/Backend Logging ##
-
-Logging is an ongoing work-in-progress but some level of control is available using environment variables. [Python Logging](https://docs.python.org/3/howto/logging.html) `log()` and `print()` statements send information to the console. The default level is `INFO`. Ideally, sensitive data will only be exposed with `DEBUG` level.
-
-### Logging Levels ###
-
-The following [logging levels](https://docs.python.org/3/howto/logging.html#logging-levels) values are supported:
-
-| Level      | Numeric value |
-| ---------- | ------------- |
-| `CRITICAL` | 50            |
-| `ERROR`    | 40            |
-| `WARNING`  | 30            |
-| `INFO`     | 20            |
-| `DEBUG`    | 10            |
-| `NOTSET`   | 0             |
-
-### Global ###
-
-The default global log level of `INFO` can be overridden with the `GLOBAL_LOG_LEVEL` environment variable. When set, this executes a [basicConfig](https://docs.python.org/3/library/logging.html#logging.basicConfig) statement with the `force` argument set to *True* within `config.py`. This results in reconfiguration of all attached loggers:
-> _If this keyword argument is specified as true, any existing handlers attached to the root logger are removed and closed, before carrying out the configuration as specified by the other arguments._
-
-The stream uses standard output (`sys.stdout`). In addition to all Open-WebUI `log()` statements, this also affects any imported Python modules that use the Python Logging module `basicConfig` mechanism including [urllib](https://docs.python.org/3/library/urllib.html).
-
-For example, to set `DEBUG` logging level as a Docker parameter use:
-```
---env GLOBAL_LOG_LEVEL="DEBUG"
-```
-
-### App/Backend ###
-
-Some level of granularity is possible using any of the following combination of variables. Note that `basicConfig` `force` isn't presently used so these statements may only affect Open-WebUI logging and not 3rd party modules.
-
-| Environment Variable | App/Backend                                                       |
-| -------------------- | ----------------------------------------------------------------- |
-| `AUDIO_LOG_LEVEL`    | Audio transcription using faster-whisper, TTS etc.                |
-| `COMFYUI_LOG_LEVEL`  | ComfyUI integration handling                                      |
-| `CONFIG_LOG_LEVEL`   | Configuration handling                                            |
-| `DB_LOG_LEVEL`       | Internal Peewee Database                                          |
-| `IMAGES_LOG_LEVEL`   | AUTOMATIC1111 stable diffusion image generation                   |
-| `LITELLM_LOG_LEVEL`  | LiteLLM proxy                                                     |
-| `MAIN_LOG_LEVEL`     | Main (root) execution                                             |
-| `MODELS_LOG_LEVEL`   | LLM model interaction, authentication, etc.                       |
-| `OLLAMA_LOG_LEVEL`   | Ollama backend interaction                                        |
-| `OPENAI_LOG_LEVEL`   | OpenAI interaction                                                |
-| `RAG_LOG_LEVEL`      | Retrieval-Augmented Generation using Chroma/Sentence-Transformers |
-| `WEBHOOK_LOG_LEVEL`  | Authentication webhook extended logging                           |
diff --git a/docs/getting-started/updating.md b/docs/getting-started/updating.md
deleted file mode 100644
index bf44546..0000000
--- a/docs/getting-started/updating.md
+++ /dev/null
@@ -1,123 +0,0 @@
----
-sidebar_position: 2
-title: "๐Ÿ”„ Updating Open WebUI"
----
-
-## Updating your Docker Installation
-
-Keeping your Open WebUI Docker installation up-to-date ensures you have the latest features and security updates. You can update your installation manually or use [Watchtower](https://containrrr.dev/watchtower/) for automatic updates.
-
-### Manual Update
-
-Follow these steps to manually update your Open WebUI:
-
-1. **Pull the Latest Docker Image**:
-   ```bash
-   docker pull ghcr.io/open-webui/open-webui:main
-   ```
-
-2. **Stop and Remove the Existing Container**:
-   - This step ensures that you can create a new container from the updated image.
-   ```bash
-   docker stop open-webui
-   docker rm open-webui
-   ```
-
-3. **Create a New Container with the Updated Image**:
-   - Use the same `docker run` command you used initially to create the container, ensuring all your configurations remain the same.
-   ```bash
-   docker run -d -p 3000:8080 --add-host=host.docker.internal:host-gateway -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main
-   ```
-
-This process updates your Open WebUI container to the latest version while preserving your data stored in Docker volumes.
-
-### Updating with Watchtower
-
-For those who prefer automated updates, Watchtower can monitor your Open WebUI container and automatically update it to the latest version. You have two options with Watchtower: running it once for an immediate update, or deploying it persistently to automate future updates.
-
-#### Running Watchtower Once
-
-To update your container immediately without keeping Watchtower running continuously, use the following command. Replace `open-webui` with your container name if it differs.
-
-```bash
-docker run --rm --volume /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower --run-once open-webui
-```
-
-#### Deploying Watchtower Persistently
-
-If you prefer Watchtower to continuously monitor and update your container whenever a new version is available, you can run Watchtower as a persistent service. This method ensures your Open WebUI always stays up to date without any manual intervention. Use the command below to deploy Watchtower in this manner:
-
-```bash
-docker run -d --name watchtower --volume /var/run/docker.sock:/var/run/docker.sock containrrr/watchtower open-webui
-```
-
-Remember to replace `open-webui` with the name of your container if you have named it differently. This configuration allows you to benefit from the latest improvements and security patches with minimal downtime and manual effort.
-
-### Updating Docker Compose Installation
-
-If you installed Open WebUI using Docker Compose, follow these steps to update:
-
-1. **Pull the Latest Images**:
-   - This command fetches the latest versions of the images specified in your `docker-compose.yml` files.
-   ```bash
-   docker compose pull
-   ```
-
-2. **Recreate the Containers with the Latest Images**:
-   - This command recreates the containers based on the newly pulled images, ensuring your installation is up-to-date. No build step is required for updates.
-   ```bash
-   docker compose up -d
-   ```
-
-This method ensures your Docker Compose-based installation of Open WebUI (and any associated services, like Ollama) is updated efficiently and without the need for manual container management.
-
-## Updating Your Direct Install
-
-For those who have installed Open WebUI directly without using Docker, updates are just as important to ensure access to the latest features and security patches. Remember, direct installations are not officially supported, and you might need to troubleshoot on your own. Here's how to update your installation:
-
-### Pull the Latest Changes
-
-Navigate to your Open WebUI project directory and pull the latest changes from the repository:
-
-```sh
-cd path/to/open-webui/
-git pull origin main
-```
-
-Replace `path/to/open-webui/` with the actual path to your Open WebUI installation.
-
-### Update Dependencies
-
-After pulling the latest changes, update your project dependencies. This step ensures that both frontend and backend dependencies are up to date.
-
-- **For Node.js (Frontend):**
-
-```sh
-npm install
-npm run build
-```
-
-- **For Python (Backend):**
-
-```sh
-cd backend
-pip install -r requirements.txt -U
-```
-
-### Restart the Backend Server
-
-To apply the updates, you need to restart the backend server. If you have a running instance, stop it first and then start it again using the provided script.
-
-```sh
-bash start.sh
-```
-
-This command should be run from within the `backend` directory of your Open WebUI project.
-
-:::info
-
-Direct installations require more manual effort to update compared to Docker-based installations. If you frequently need updates and want to streamline the process, consider transitioning to a Docker-based setup for easier management.
-
-:::
-
-By following these steps, you can update your direct installation of Open WebUI, ensuring you're running the latest version with all its benefits. Remember to back up any critical data or custom configurations before starting the update process to prevent any unintended loss.
\ No newline at end of file
diff --git a/docs/getting-started/using-openwebui/index.mdx b/docs/getting-started/using-openwebui/index.mdx
index 7940aec..7f061bf 100644
--- a/docs/getting-started/using-openwebui/index.mdx
+++ b/docs/getting-started/using-openwebui/index.mdx
@@ -11,13 +11,17 @@ Explore the essential concepts and features of Open WebUI, including models, kno
 
 ## ๐Ÿ“ฅ Ollama Models  
 Learn how to download, load, and use models effectively.  
-[Check out Ollama Models](./OllamaModels.mdx)
+[Check out Ollama Models](./ollama-models.mdx)
 
 ---
 
 ## ๐Ÿ“š Terminology  
 Understand key components: models, prompts, knowledge, functions, pipes, and actions.  
-[Read the Terminology Guide](./Terminology.mdx)
+[Read the Terminology Guide](./terminology.mdx)
+
+## 🌐 Additional Resources and Integrations  
+Find community tools, integrations, and official resources.  
+[Additional Resources Guide](./resources)
 
 ---
 
diff --git a/docs/getting-started/using-openwebui/OllamaModels.mdx b/docs/getting-started/using-openwebui/ollama-models.mdx
similarity index 100%
rename from docs/getting-started/using-openwebui/OllamaModels.mdx
rename to docs/getting-started/using-openwebui/ollama-models.mdx
diff --git a/docs/getting-started/using-openwebui/resources.mdx b/docs/getting-started/using-openwebui/resources.mdx
new file mode 100644
index 0000000..477d9cd
--- /dev/null
+++ b/docs/getting-started/using-openwebui/resources.mdx
@@ -0,0 +1,37 @@
+---
+sidebar_position: 400
+title: "🌐 Additional Resources and Integrations"
+---
+
+# 🌐 Additional Resources and Integrations
+
+Explore more resources, community tools, and integration options to make the most out of Open WebUI.
+
+---
+
+## 🔥 Open WebUI Website
+Visit [Open WebUI](https://openwebui.com/) for official documentation, tools, and resources:
+- **Leaderboard**: Check out the latest high-ranking models, tools, and integrations.
+- **Featured Models and Tools**: Discover models and tools created by community members.
+- **New Integrations**: Find newly released integrations, plugins, and models to expand your setup.
+
+---
+
+## 🌐 Community Platforms
+Connect with the Open WebUI community for support, tips, and discussions.
+
+- **Discord**: Join our community on Discord to chat with other users, ask questions, and stay updated.
+  [Join the Discord Server](https://discord.com/invite/5rJgQTnV4s)
+- **Reddit**: Follow the Open WebUI subreddit for announcements, discussions, and user-submitted content.
+  [Visit Reddit Community](https://www.reddit.com/r/OpenWebUI/)
+
+---
+
+## 📖 Tutorials and User Guides
+Explore community-created tutorials to enhance your Open WebUI experience:
+- [Explore Community Tutorials](/category/-tutorials)
+- Learn how to configure RAG and advanced integrations with the [RAG Configuration Guide](../../tutorials/tips/rag-tutorial.md).
+
+---
+
+Stay connected and make the most out of Open WebUI through these community resources and integrations!
diff --git a/docs/getting-started/using-openwebui/Terminology.mdx b/docs/getting-started/using-openwebui/terminology.mdx
similarity index 100%
rename from docs/getting-started/using-openwebui/Terminology.mdx
rename to docs/getting-started/using-openwebui/terminology.mdx

From ae555ad60e8c73edf7fa96e559709233c5c56026 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 22:16:16 +0000
Subject: [PATCH 33/42] Fix broken links: update references to
 env-configuration and updating

---
 docs/getting-started/advanced-topics/env-configuration.md | 2 +-
 docs/tutorials/integrations/browser-search-engine.md      | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/getting-started/advanced-topics/env-configuration.md b/docs/getting-started/advanced-topics/env-configuration.md
index a01b0e6..f3f2905 100644
--- a/docs/getting-started/advanced-topics/env-configuration.md
+++ b/docs/getting-started/advanced-topics/env-configuration.md
@@ -19,7 +19,7 @@ Last updated: v0.3.20
 The following environment variables are used by `backend/config.py` to provide Open WebUI startup 
 configuration. Please note that some variables may have different default values depending on 
 whether you're running Open WebUI directly or via Docker. For more information on logging 
-environment variables, see our [logging documentation](/getting-started/logging#appbackend).
+environment variables, see our [logging documentation](./logging#appbackend).
 
 ### General
 
diff --git a/docs/tutorials/integrations/browser-search-engine.md b/docs/tutorials/integrations/browser-search-engine.md
index 7f4aa65..9d890b7 100644
--- a/docs/tutorials/integrations/browser-search-engine.md
+++ b/docs/tutorials/integrations/browser-search-engine.md
@@ -14,7 +14,7 @@ Open WebUI allows you to integrate directly into your web browser. This tutorial
 Before you begin, ensure that:
 
 - You have Chrome or another supported browser installed.
-- The `WEBUI_URL` environment variable is set correctly, either using Docker environment variables or in the `.env` file as specified in the [Getting Started](/getting-started/env-configuration) guide.
+- The `WEBUI_URL` environment variable is set correctly, either using Docker environment variables or in the `.env` file as specified in the [Getting Started](/getting-started/advanced-topics/env-configuration) guide.
 
 ### Step 1: Set the WEBUI_URL Environment Variable
 

From 369fbb03f0834f48eead5e277d00b0bfe8a8014a Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 22:25:40 +0000
Subject: [PATCH 34/42] Update links for env-configuration and update guide
 references

---
 docs/getting-started/quick-start/tab-docker/DockerSwarm.md | 2 +-
 docs/intro.mdx                                             | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/getting-started/quick-start/tab-docker/DockerSwarm.md b/docs/getting-started/quick-start/tab-docker/DockerSwarm.md
index e0644de..e211e58 100644
--- a/docs/getting-started/quick-start/tab-docker/DockerSwarm.md
+++ b/docs/getting-started/quick-start/tab-docker/DockerSwarm.md
@@ -3,7 +3,7 @@
 This installation method requires knowledge on Docker Swarms, as it utilizes a stack file to deploy 3 seperate containers as services in a Docker Swarm.
 
 It includes isolated containers of ChromaDB, Ollama, and OpenWebUI. 
-Additionally, there are pre-filled [Environment Variables](/getting-started/env-configuration) to further illustrate the setup.
+Additionally, there are pre-filled [Environment Variables](../advanced-topics/env-configuration) to further illustrate the setup.
 
 Choose the appropriate command based on your hardware setup:
 
diff --git a/docs/intro.mdx b/docs/intro.mdx
index 2dbe85b..bfa797d 100644
--- a/docs/intro.mdx
+++ b/docs/intro.mdx
@@ -43,7 +43,7 @@ import { SponsorList } from "@site/src/components/SponsorList";
 
 #### Disabling Login for Single User
 
-If you want to disable login for a single-user setup, set [`WEBUI_AUTH`](/getting-started/env-configuration) to `False`. This will bypass the login page.
+If you want to disable login for a single-user setup, set [`WEBUI_AUTH`](./getting-started/advanced-topics/env-configuration) to `False`. This will bypass the login page.
 
 :::warning
 You cannot switch between single-user mode and multi-account mode after this change.
@@ -161,7 +161,7 @@ If you're facing various issues like "Open WebUI: Server Connection Error", see
 
 ## Updating
 
-Check out our full [updating guide](/getting-started/updating).
+Check out how to update Docker in the [Quick Start guide](./getting-started/quick-start).
 
 In case you want to update your local Docker installation to the latest version, you can do it with [Watchtower](https://containrrr.dev/watchtower/):
 

From b01ee593a344f15360e6f744c83bf109cd4fee06 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 22:33:50 +0000
Subject: [PATCH 35/42] Update sidebar positions and titles in getting-started
 guide

- Adjusted sidebar_position for Ollama Models, Additional Resources, and Terminology pages to improve navigation order.
- Simplified 'Additional Resources and Integrations' title to 'Additional Resources' for clarity and conciseness.
---
 docs/getting-started/using-openwebui/ollama-models.mdx | 1 +
 docs/getting-started/using-openwebui/resources.mdx     | 6 +++---
 docs/getting-started/using-openwebui/terminology.mdx   | 1 +
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/docs/getting-started/using-openwebui/ollama-models.mdx b/docs/getting-started/using-openwebui/ollama-models.mdx
index 34f0244..6b5606a 100644
--- a/docs/getting-started/using-openwebui/ollama-models.mdx
+++ b/docs/getting-started/using-openwebui/ollama-models.mdx
@@ -1,4 +1,5 @@
 ---
+sidebar_position: 2
 title: "🤖Ollama Models"
 ---
 
diff --git a/docs/getting-started/using-openwebui/resources.mdx b/docs/getting-started/using-openwebui/resources.mdx
index 477d9cd..2147226 100644
--- a/docs/getting-started/using-openwebui/resources.mdx
+++ b/docs/getting-started/using-openwebui/resources.mdx
@@ -1,9 +1,9 @@
 ---
-sidebar_position: 400
-title: "๐ŸŒ Additional Resources and Integrations"
+sidebar_position: 4
+title: "๐ŸŒ Additional Resources"
 ---
 
-# ๐ŸŒ Additional Resources and Integrations
+# ๐ŸŒ Additional Resources
 
 Explore more resources, community tools, and integration options to make the most out of Open WebUI.
 
diff --git a/docs/getting-started/using-openwebui/terminology.mdx b/docs/getting-started/using-openwebui/terminology.mdx
index 9ae27de..16d8a59 100644
--- a/docs/getting-started/using-openwebui/terminology.mdx
+++ b/docs/getting-started/using-openwebui/terminology.mdx
@@ -1,4 +1,5 @@
 ---
+sidebar_position: 3
 title: "📖 OpenWebUI Terminology"
 ---
 

From 24a6d230d18ecb3625f0d5bc92cc65df0dd1acd0 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 22:37:14 +0000
Subject: [PATCH 36/42] fix: ipex tutorial anchor

---
 docs/tutorials/integrations/ipex_llm.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/tutorials/integrations/ipex_llm.md b/docs/tutorials/integrations/ipex_llm.md
index 116185f..1828787 100644
--- a/docs/tutorials/integrations/ipex_llm.md
+++ b/docs/tutorials/integrations/ipex_llm.md
@@ -4,7 +4,7 @@ title: "Local LLM Setup with IPEX-LLM on Intel GPU"
 ---
 
 :::note
-This guide is verified with Open WebUI setup through [Manual Installation](/getting-started/index.md#manual-installation).
+This guide is verified with Open WebUI setup through [Manual Installation](/getting-started/index.md).
 :::
 
 # Local LLM Setup with IPEX-LLM on Intel GPU

From dc7f1fe7d4218be93542b3c5ccc9565a01737e14 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 22:40:31 +0000
Subject: [PATCH 37/42] docs: reworded ollama troubleshooting

---
 docs/getting-started/using-openwebui/index.mdx              | 6 +++---
 .../{ollama-models.mdx => troubleshooting-ollama.mdx}       | 0
 2 files changed, 3 insertions(+), 3 deletions(-)
 rename docs/getting-started/using-openwebui/{ollama-models.mdx => troubleshooting-ollama.mdx} (100%)

diff --git a/docs/getting-started/using-openwebui/index.mdx b/docs/getting-started/using-openwebui/index.mdx
index 7f061bf..28c6925 100644
--- a/docs/getting-started/using-openwebui/index.mdx
+++ b/docs/getting-started/using-openwebui/index.mdx
@@ -9,9 +9,9 @@ Explore the essential concepts and features of Open WebUI, including models, kno
 
 ---
 
-## 📥 Ollama Models  
-Learn how to download, load, and use models effectively.  
-[Check out Ollama Models](./ollama-models.mdx)
+## 📥 Troubleshooting Ollama
+Many users wish to make use of their existing Ollama instance, but encounter common issues.
+If this is you, then check out the [Troubleshooting Ollama guide](./troubleshooting-ollama.mdx).
 
 ---
 
diff --git a/docs/getting-started/using-openwebui/ollama-models.mdx b/docs/getting-started/using-openwebui/troubleshooting-ollama.mdx
similarity index 100%
rename from docs/getting-started/using-openwebui/ollama-models.mdx
rename to docs/getting-started/using-openwebui/troubleshooting-ollama.mdx

From 905b238dd04b19253dfdf07d33eaed1b974bc0c3 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 22:43:09 +0000
Subject: [PATCH 38/42] docs: updating troubleshooting ollama title

---
 .../using-openwebui/troubleshooting-ollama.mdx                | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/getting-started/using-openwebui/troubleshooting-ollama.mdx b/docs/getting-started/using-openwebui/troubleshooting-ollama.mdx
index 6b5606a..4f813a8 100644
--- a/docs/getting-started/using-openwebui/troubleshooting-ollama.mdx
+++ b/docs/getting-started/using-openwebui/troubleshooting-ollama.mdx
@@ -1,12 +1,12 @@
 ---
 sidebar_position: 2
-title: "🤖Ollama Models"
+title: "🤖Troubleshooting Ollama"
 ---
 
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
-# Ollama Models
+# Troubleshooting Ollama
 
 Explore how to download, load, and use models with Ollama, both via **Docker** and **Remote** setups.
 

From b787483aba7111be6341554f555c13ade3b11e62 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 22:54:10 +0000
Subject: [PATCH 39/42] docs: remove docusaurus change from this feature branch

---
 docusaurus.config.ts | 170 -------------------------------------------
 1 file changed, 170 deletions(-)
 delete mode 100644 docusaurus.config.ts

diff --git a/docusaurus.config.ts b/docusaurus.config.ts
deleted file mode 100644
index 5d670fb..0000000
--- a/docusaurus.config.ts
+++ /dev/null
@@ -1,170 +0,0 @@
-import { Config } from "@docusaurus/types";
-import type * as Preset from "@docusaurus/preset-classic";
-
-import { themes as prismThemes } from "prism-react-renderer";
-
-const config: Config = {
-	title: "Open WebUI",
-	tagline: "ChatGPT-Style WebUI for LLMs (Formerly Ollama WebUI)",
-	favicon: "img/favicon.png",
-
-	// Set the production url of your site here, defaulting to current value
-	url: process.env.SITE_URL || "https://openwebui.com",
-	// Set the /<baseUrl>/ pathname under which your site is served, defaulting to current value
-		// For GitHub pages deployment, it is often '/<projectName>/'
-	baseUrl: process.env.BASE_URL || "/",
-
-	// GitHub pages deployment config.
-	// If you aren't using GitHub pages, you don't need these.
-	organizationName: "open-webui", // Usually your GitHub org/user name.
-	projectName: "docs", // Usually your repo name.
-
-	onBrokenLinks: "throw",
-	onBrokenMarkdownLinks: "warn",
-
-	// Even if you don't use internationalization, you can use this field to set
-	// useful metadata like html lang. For example, if your site is Chinese, you
-	// may want to replace "en" with "zh-Hans".
-	i18n: {
-		defaultLocale: "en",
-		locales: ["en"],
-	},
-
-	// Enable Mermaid for diagrams
-	markdown: {
-		mermaid: true,
-	},
-	themes: ["@docusaurus/theme-mermaid"],
-
-	presets: [
-		[
-			"classic",
-			{
-				docs: {
-					sidebarPath: "./sidebars.ts",
-					routeBasePath: "/",
-					// Please change this to your repo.
-					// Remove this to remove the "edit this page" links.
-					editUrl: "https://github.com/open-webui/docs/blob/main",
-					exclude: ["**/tab-**/**"],
-				},
-				// blog: false,
-				blog: {
-					showReadingTime: true,
-					// Please change this to your repo.
-					// Remove this to remove the "edit this page" links.
-					// editUrl:
-					// "https://github.com/facebook/docusaurus/tree/main/packages/create-docusaurus/templates/shared/",
-				},
-				theme: {
-					customCss: "./src/css/custom.css",
-				},
-			} satisfies Preset.Options,
-		],
-	],
-
-	themeConfig: {
-		// Replace with your project's social card
-		// image: "img/docusaurus-social-card.jpg",
-		navbar: {
-			title: "Open WebUI",
-			logo: {
-				src: "img/logo.png",
-				srcDark: "img/logo-dark.png",
-			},
-			items: [
-				// {
-				// 	type: "docSidebar",
-				// 	position: "left",
-				// 	sidebarId: "pipelines",
-				// 	label: "Pipelines",
-				// },
-
-				// {
-				//   type: "docSidebar",
-				//   sidebarId: "blog",
-				//   position: "left",
-				//   label: "Blog",
-				// },
-
-				// {
-				//   href: "/blog",
-				//   label: "Blog",
-				//   position: "left",
-				// },
-				{
-					href: "https://github.com/open-webui/open-webui",
-					position: "right",
-					className: "header-github-link",
-					"aria-label": "GitHub repository",
-				},
-				{
-					href: "https://discord.com/invite/5rJgQTnV4s",
-					position: "right",
-					className: "header-discord-link",
-					"aria-label": "Discord server",
-				},
-			],
-		},
-		footer: {
-			logo: {
-				src: "img/logo-dark.png",
-				height: 100,
-			},
-			style: "light",
-			links: [
-				{
-					title: "Docs",
-					items: [
-						{
-							label: "Getting Started",
-							to: "getting-started",
-						},
-						{
-							label: "FAQ",
-							to: "faq",
-						},
-					],
-				},
-				{
-					title: "Community",
-					items: [
-						{
-							label: "GitHub",
-							href: "https://github.com/open-webui/open-webui",
-						},
-						{
-							label: "Discord",
-							href: "https://discord.gg/5rJgQTnV4s",
-						},
-						{
-							label: "๐•",
-							href: "https://x.com/OpenWebUI",
-						},
-					],
-				},
-				{
-					title: "More",
-					items: [
-						{
-							label: "Release Notes",
-							to: "https://github.com/open-webui/open-webui/blob/main/CHANGELOG.md",
-						},
-						{
-							label: "About",
-							to: "https://openwebui.com",
-						},
-					],
-				},
-			],
-			// copyright: `Copyright © ${new Date().getFullYear()} OpenWebUI`,
-		},
-		prism: {
-			theme: prismThemes.github,
-			darkTheme: prismThemes.dracula,
-		},
-	} satisfies Preset.ThemeConfig,
-	plugins: [require.resolve("docusaurus-lunr-search")],
-};
-
-export default config;

From 306d16515da42429df7820c8a1cf327056ff2513 Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 22:59:29 +0000
Subject: [PATCH 40/42] docs: revert changes to docusaurus and ghpages

---
 .github/workflows/gh-pages.yml | 111 +++++++++++----------
 docusaurus.config.ts           | 170 +++++++++++++++++++++++++++++++++
 2 files changed, 224 insertions(+), 57 deletions(-)
 create mode 100644 docusaurus.config.ts

diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml
index cee3ed7..0f35f85 100644
--- a/.github/workflows/gh-pages.yml
+++ b/.github/workflows/gh-pages.yml
@@ -1,58 +1,55 @@
 ---
-    name: Deploy site to Pages
-    
-    on:
-      # Runs on pushes targeting the default branch
-      push:
-        branches: ["main"]
-    
-      # Allows you to run this workflow manually from the Actions tab
-      workflow_dispatch:
-    
-    # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
-    permissions:
-      contents: read
-      pages: write
-      id-token: write
-    
-    # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
-    # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
-    concurrency:
-      group: "pages"
-      cancel-in-progress: false
-    
-    jobs:
-      # Build job
-      build:
-        runs-on: ubuntu-latest
-        steps:
-          - name: Checkout
-            uses: actions/checkout@v4
-          - name: Setup Node
-            uses: actions/setup-node@v4
-            with:
-              node-version-file: ".node-version"
-              cache: npm
-          - name: Install dependencies
-            run: npm ci
-          - name: Build
-            env:
-              BASE_URL: ${{ vars.BASE_URL }}
-              SITE_URL: ${{ vars.SITE_URL }}
-            run: npm run build
-          - name: Upload artifact
-            uses: actions/upload-pages-artifact@v3
-            with:
-              path: ./build
-    
-      # Deployment job
-      deploy:
-        environment:
-          name: github-pages
-          url: ${{ steps.deployment.outputs.page_url }}
-        runs-on: ubuntu-latest
-        needs: build
-        steps:
-          - name: Deploy to GitHub Pages
-            id: deployment
-            uses: actions/deploy-pages@v4
+  name: Deploy site to Pages
+  
+  on:
+    # Runs on pushes targeting the default branch
+    push:
+      branches: ["main"]
+  
+    # Allows you to run this workflow manually from the Actions tab
+    workflow_dispatch:
+  
+  # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+  permissions:
+    contents: read
+    pages: write
+    id-token: write
+  
+  # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
+  # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
+  concurrency:
+    group: "pages"
+    cancel-in-progress: false
+  
+  jobs:
+    # Build job
+    build:
+      runs-on: ubuntu-latest
+      steps:
+        - name: Checkout
+          uses: actions/checkout@v4
+        - name: Setup Node
+          uses: actions/setup-node@v4
+          with:
+            node-version-file: ".node-version"
+            cache: npm
+        - name: Install dependencies
+          run: npm ci
+        - name: Build
+          run: npm run build
+        - name: Upload artifact
+          uses: actions/upload-pages-artifact@v3
+          with:
+            path: ./build
+  
+    # Deployment job
+    deploy:
+      environment:
+        name: github-pages
+        url: ${{ steps.deployment.outputs.page_url }}
+      runs-on: ubuntu-latest
+      needs: build
+      steps:
+        - name: Deploy to GitHub Pages
+          id: deployment
+          uses: actions/deploy-pages@v4
\ No newline at end of file
diff --git a/docusaurus.config.ts b/docusaurus.config.ts
new file mode 100644
index 0000000..ad581fe
--- /dev/null
+++ b/docusaurus.config.ts
@@ -0,0 +1,170 @@
+import { Config } from "@docusaurus/types";
+import type * as Preset from "@docusaurus/preset-classic";
+
+import { themes as prismThemes } from "prism-react-renderer";
+
+const config: Config = {
+	title: "Open WebUI",
+	tagline: "ChatGPT-Style WebUI for LLMs (Formerly Ollama WebUI)",
+	favicon: "img/favicon.png",
+
+	// Set the production url of your site here
+	url: "https://openwebui.com",
+	// Set the /<baseUrl>/ pathname under which your site is served
+	// For GitHub pages deployment, it is often '/<projectName>/'
+	baseUrl: "/",
+
+	// GitHub pages deployment config.
+	// If you aren't using GitHub pages, you don't need these.
+	organizationName: "open-webui", // Usually your GitHub org/user name.
+	projectName: "docs", // Usually your repo name.
+
+	onBrokenLinks: "throw",
+	onBrokenMarkdownLinks: "warn",
+
+	// Even if you don't use internationalization, you can use this field to set
+	// useful metadata like html lang. For example, if your site is Chinese, you
+	// may want to replace "en" with "zh-Hans".
+	i18n: {
+		defaultLocale: "en",
+		locales: ["en"],
+	},
+
+	// Enable Mermaid for diagrams
+	markdown: {
+		mermaid: true,
+	},
+	themes: ["@docusaurus/theme-mermaid"],
+
+	presets: [
+		[
+			"classic",
+			{
+				docs: {
+					sidebarPath: "./sidebars.ts",
+					routeBasePath: "/",
+					// Please change this to your repo.
+					// Remove this to remove the "edit this page" links.
+					editUrl: "https://github.com/open-webui/docs/blob/main",
+					exclude: ["**/tab-**/**"],
+				},
+				// blog: false,
+				blog: {
+					showReadingTime: true,
+					// Please change this to your repo.
+					// Remove this to remove the "edit this page" links.
+					// editUrl:
+					// "https://github.com/facebook/docusaurus/tree/main/packages/create-docusaurus/templates/shared/",
+				},
+				theme: {
+					customCss: "./src/css/custom.css",
+				},
+			} satisfies Preset.Options,
+		],
+	],
+
+	themeConfig: {
+		// Replace with your project's social card
+		// image: "img/docusaurus-social-card.jpg",
+		navbar: {
+			title: "Open WebUI",
+			logo: {
+				src: "img/logo.png",
+				srcDark: "img/logo-dark.png",
+			},
+			items: [
+				// {
+				// 	type: "docSidebar",
+				// 	position: "left",
+				// 	sidebarId: "pipelines",
+				// 	label: "Pipelines",
+				// },
+
+				// {
+				//   type: "docSidebar",
+				//   sidebarId: "blog",
+				//   position: "left",
+				//   label: "Blog",
+				// },
+
+				// {
+				//   href: "/blog",
+				//   label: "Blog",
+				//   position: "left",
+				// },
+				{
+					href: "https://github.com/open-webui/open-webui",
+					position: "right",
+					className: "header-github-link",
+					"aria-label": "GitHub repository",
+				},
+				{
+					href: "https://discord.com/invite/5rJgQTnV4s",
+					position: "right",
+					className: "header-discord-link",
+					"aria-label": "Discord server",
+				},
+			],
+		},
+		footer: {
+			logo: {
+				src: "img/logo-dark.png",
+				height: 100,
+			},
+			style: "light",
+			links: [
+				{
+					title: "Docs",
+					items: [
+						{
+							label: "Getting Started",
+							to: "getting-started",
+						},
+						{
+							label: "FAQ",
+							to: "faq",
+						},
+					],
+				},
+				{
+					title: "Community",
+					items: [
+						{
+							label: "GitHub",
+							href: "https://github.com/open-webui/open-webui",
+						},
+						{
+							label: "Discord",
+							href: "https://discord.gg/5rJgQTnV4s",
+						},
+						{
+							label: "๐•",
+							href: "https://x.com/OpenWebUI",
+						},
+					],
+				},
+				{
+					title: "More",
+					items: [
+						{
+							label: "Release Notes",
+							to: "https://github.com/open-webui/open-webui/blob/main/CHANGELOG.md",
+						},
+						{
+							label: "About",
+							to: "https://openwebui.com",
+						},
+					],
+				},
+			],
+			// copyright: `Copyright © ${new Date().getFullYear()} OpenWebUI`,
+		},
+		prism: {
+			theme: prismThemes.github,
+			darkTheme: prismThemes.dracula,
+		},
+	} satisfies Preset.ThemeConfig,
+	plugins: [require.resolve("docusaurus-lunr-search")],
+};
+
+export default config;

From 13019fc57a3f1a071b53c33b544cc9ce3ff0275d Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 23:01:22 +0000
Subject: [PATCH 41/42] docs: fix whitespace

---
 .github/workflows/gh-pages.yml | 108 ++++++++++++++++-----------------
 1 file changed, 54 insertions(+), 54 deletions(-)

diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml
index 0f35f85..e7580e2 100644
--- a/.github/workflows/gh-pages.yml
+++ b/.github/workflows/gh-pages.yml
@@ -1,55 +1,55 @@
 ---
-  name: Deploy site to Pages
-  
-  on:
-    # Runs on pushes targeting the default branch
-    push:
-      branches: ["main"]
-  
-    # Allows you to run this workflow manually from the Actions tab
-    workflow_dispatch:
-  
-  # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
-  permissions:
-    contents: read
-    pages: write
-    id-token: write
-  
-  # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
-  # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
-  concurrency:
-    group: "pages"
-    cancel-in-progress: false
-  
-  jobs:
-    # Build job
-    build:
-      runs-on: ubuntu-latest
-      steps:
-        - name: Checkout
-          uses: actions/checkout@v4
-        - name: Setup Node
-          uses: actions/setup-node@v4
-          with:
-            node-version-file: ".node-version"
-            cache: npm
-        - name: Install dependencies
-          run: npm ci
-        - name: Build
-          run: npm run build
-        - name: Upload artifact
-          uses: actions/upload-pages-artifact@v3
-          with:
-            path: ./build
-  
-    # Deployment job
-    deploy:
-      environment:
-        name: github-pages
-        url: ${{ steps.deployment.outputs.page_url }}
-      runs-on: ubuntu-latest
-      needs: build
-      steps:
-        - name: Deploy to GitHub Pages
-          id: deployment
-          uses: actions/deploy-pages@v4
\ No newline at end of file
+    name: Deploy site to Pages
+    
+    on:
+      # Runs on pushes targeting the default branch
+      push:
+        branches: ["main"]
+    
+      # Allows you to run this workflow manually from the Actions tab
+      workflow_dispatch:
+    
+    # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+    permissions:
+      contents: read
+      pages: write
+      id-token: write
+    
+    # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
+    # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
+    concurrency:
+      group: "pages"
+      cancel-in-progress: false
+    
+    jobs:
+      # Build job
+      build:
+        runs-on: ubuntu-latest
+        steps:
+          - name: Checkout
+            uses: actions/checkout@v4
+          - name: Setup Node
+            uses: actions/setup-node@v4
+            with:
+              node-version-file: ".node-version"
+              cache: npm
+          - name: Install dependencies
+            run: npm ci
+          - name: Build
+            run: npm run build
+          - name: Upload artifact
+            uses: actions/upload-pages-artifact@v3
+            with:
+              path: ./build
+    
+      # Deployment job
+      deploy:
+        environment:
+          name: github-pages
+          url: ${{ steps.deployment.outputs.page_url }}
+        runs-on: ubuntu-latest
+        needs: build
+        steps:
+          - name: Deploy to GitHub Pages
+            id: deployment
+            uses: actions/deploy-pages@v4
\ No newline at end of file

From 08f589640711b0ff9f022d1fe38fcc054e83965d Mon Sep 17 00:00:00 2001
From: Matthew Hand <matthewhandau@gmail.com>
Date: Tue, 5 Nov 2024 23:17:16 +0000
Subject: [PATCH 42/42] Apply Prettier formatting to gh-pages workflow YAML
 file for CI compliance

---
 .github/workflows/gh-pages.yml | 108 ++++++++++++++++-----------------
 1 file changed, 54 insertions(+), 54 deletions(-)

diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml
index e7580e2..2a97016 100644
--- a/.github/workflows/gh-pages.yml
+++ b/.github/workflows/gh-pages.yml
@@ -1,55 +1,55 @@
 ---
-    name: Deploy site to Pages
-    
-    on:
-      # Runs on pushes targeting the default branch
-      push:
-        branches: ["main"]
-    
-      # Allows you to run this workflow manually from the Actions tab
-      workflow_dispatch:
-    
-    # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
-    permissions:
-      contents: read
-      pages: write
-      id-token: write
-    
-    # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
-    # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
-    concurrency:
-      group: "pages"
-      cancel-in-progress: false
-    
-    jobs:
-      # Build job
-      build:
-        runs-on: ubuntu-latest
-        steps:
-          - name: Checkout
-            uses: actions/checkout@v4
-          - name: Setup Node
-            uses: actions/setup-node@v4
-            with:
-              node-version-file: ".node-version"
-              cache: npm
-          - name: Install dependencies
-            run: npm ci
-          - name: Build
-            run: npm run build
-          - name: Upload artifact
-            uses: actions/upload-pages-artifact@v3
-            with:
-              path: ./build
-    
-      # Deployment job
-      deploy:
-        environment:
-          name: github-pages
-          url: ${{ steps.deployment.outputs.page_url }}
-        runs-on: ubuntu-latest
-        needs: build
-        steps:
-          - name: Deploy to GitHub Pages
-            id: deployment
-            uses: actions/deploy-pages@v4
\ No newline at end of file
+name: Deploy site to Pages
+
+on:
+  # Runs on pushes targeting the default branch
+  push:
+    branches: ["main"]
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+permissions:
+  contents: read
+  pages: write
+  id-token: write
+
+# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
+# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
+concurrency:
+  group: "pages"
+  cancel-in-progress: false
+
+jobs:
+  # Build job
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Setup Node
+        uses: actions/setup-node@v4
+        with:
+          node-version-file: ".node-version"
+          cache: npm
+      - name: Install dependencies
+        run: npm ci
+      - name: Build
+        run: npm run build
+      - name: Upload artifact
+        uses: actions/upload-pages-artifact@v3
+        with:
+          path: ./build
+
+  # Deployment job
+  deploy:
+    environment:
+      name: github-pages
+      url: ${{ steps.deployment.outputs.page_url }}
+    runs-on: ubuntu-latest
+    needs: build
+    steps:
+      - name: Deploy to GitHub Pages
+        id: deployment
+        uses: actions/deploy-pages@v4