From 7313e4246aa088b72ddb934642252dfd4f104661 Mon Sep 17 00:00:00 2001
From: "Timothy J. Baek"
Date: Sun, 31 Dec 2023 15:10:33 -0800
Subject: [PATCH] fix: readme.md formatting

---
 INSTALLATION.md         |  42 +++++++++++++
 README.md               | 133 +++++++++++++++++++++++------------------
 docker-compose.api.yaml |   1 +
 docker-compose.api.yml  |   7 ---
 docker-compose.gpu.yaml |   1 +
 docker-compose.gpu.yml  |  13 -----
 6 files changed, 114 insertions(+), 83 deletions(-)
 create mode 100644 INSTALLATION.md
 delete mode 100644 docker-compose.api.yml
 delete mode 100644 docker-compose.gpu.yml

diff --git a/INSTALLATION.md b/INSTALLATION.md
new file mode 100644
index 000000000..4b802c699
--- /dev/null
+++ b/INSTALLATION.md
@@ -0,0 +1,42 @@
+### Installing Both Ollama and Ollama Web UI Using Kustomize
+
+For a CPU-only pod:
+
+```bash
+kubectl apply -f ./kubernetes/manifest/base
+```
+
+For a GPU-enabled pod:
+
+```bash
+kubectl apply -k ./kubernetes/manifest
+```
+
+### Installing Both Ollama and Ollama Web UI Using Helm
+
+Package the Helm chart first:
+
+```bash
+helm package ./kubernetes/helm/
+```
+
+For a CPU-only pod:
+
+```bash
+helm install ollama-webui ./ollama-webui-*.tgz
+```
+
+For a GPU-enabled pod:
+
+```bash
+helm install ollama-webui ./ollama-webui-*.tgz --set ollama.resources.limits.nvidia.com/gpu="1"
+```
+
+Check the `kubernetes/helm/values.yaml` file to see which parameters are available for customization.
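+
+For example, you could supply your own overrides from a separate values file (a minimal sketch; the `my-values.yaml` name is only an illustration):
+
+```bash
+# my-values.yaml is a hypothetical overrides file; see kubernetes/helm/values.yaml for the available keys
+helm install ollama-webui ./ollama-webui-*.tgz -f my-values.yaml
+```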
diff --git a/README.md b/README.md
index ea41517b8..cd6558385 100644
--- a/README.md
+++ b/README.md
@@ -79,69 +79,6 @@ Don't forget to explore our sibling project, [OllamaHub](https://ollamahub.com/)
 
 - **Privacy and Data Security:** We prioritize your privacy and data security above all. Please be reassured that all data entered into the Ollama Web UI is stored locally on your device. Our system is designed to be privacy-first, ensuring that no external requests are made, and your data does not leave your local environment. We are committed to maintaining the highest standards of data privacy and security, ensuring that your information remains confidential and under your control.
 
-### Installing Both Ollama and Ollama Web UI Using Provided run-compose.sh bash script
-Also available on Windows under any docker-enabled WSL2 linux distro (you have to enable it from Docker Desktop)
-
-Simply run the following command:
-Grant execute permission to script
-```bash
-chmod +x run-compose.sh
-```
-
-For CPU only container
-```bash
-./run-compose.sh
-```
-
-For GPU enabled container (to enable this you must have your gpu driver for docker, it mostly works with nvidia so this is the official install guide: [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html))
-Warning! A GPU-enabled installation has only been tested using linux and nvidia GPU, full functionalities are not guaranteed under Windows or Macos or using a different GPU
-```bash
-./run-compose.sh --enable-gpu
-```
-
-Note that both the above commands will use the latest production docker image in repository, to be able to build the latest local version you'll need to append the `--build` parameter, for example:
-```bash
-./run-compose.sh --enable-gpu --build
-```
-
-### Installing Both Ollama and Ollama Web UI Using Docker Compose
-To install using docker compose script as CPU-only installation simply run this command
-```bash
-docker compose up -d
-```
-
-for a GPU-enabled installation (provided you installed the necessary gpu drivers and you are using nvidia)
-```bash
-docker compose -f docker-compose.yaml -f docker-compose.gpu.yaml up -d
-```
-
-### Installing Both Ollama and Ollama Web UI Using Kustomize
-For cpu-only pod
-```bash
-kubectl apply -f ./kubernetes/manifest/base
-```
-For gpu-enabled pod
-```bash
-kubectl apply -k ./kubernetes/manifest
-```
-
-### Installing Both Ollama and Ollama Web UI Using Helm
-Package Helm file first
-```bash
-helm package ./kubernetes/helm/
-```
-
-For cpu-only pod
-```bash
-helm install ollama-webui ./ollama-webui-*.tgz
-```
-For gpu-enabled pod
-```bash
-helm install ollama-webui ./ollama-webui-*.tgz --set ollama.resources.limits.nvidia.com/gpu="1"
-```
-
-Check the `kubernetes/helm/values.yaml` file to know which parameters are available for customization
-
 ### Installing Ollama Web UI Only
 
 #### Prerequisites
@@ -186,6 +123,76 @@ docker build -t ollama-webui .
 docker run -d -p 3000:8080 -e OLLAMA_API_BASE_URL=https://example.com/api -v ollama-webui:/app/backend/data --name ollama-webui --restart always ollama-webui
 ```
 
+### Installing Both Ollama and Ollama Web UI
+
+#### Using Docker Compose
+
+If you don't have Ollama installed yet, you can use the provided Docker Compose file for a hassle-free installation. Simply run the following command:
+
+```bash
+docker compose up -d --build
+```
+
+This command will install both Ollama and Ollama Web UI on your system.
+
+##### Enable GPU
+
+Use the additional Docker Compose file designed to enable GPU support by running the following command:
+
+```bash
+docker compose -f docker-compose.yaml -f docker-compose.gpu.yaml up -d --build
+```
+
+##### Expose Ollama API outside the container stack
+
+Deploy the service with an additional Docker Compose file designed for API exposure:
+
+```bash
+docker compose -f docker-compose.yaml -f docker-compose.api.yaml up -d --build
+```
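+
+Since `docker-compose.api.yaml` reads the published host port from the `OLLAMA_WEBAPI_PORT` variable (falling back to 11434), you can remap it at launch; for example:
+
+```bash
+# publish the Ollama API on host port 11435 instead of the default 11434
+OLLAMA_WEBAPI_PORT=11435 docker compose -f docker-compose.yaml -f docker-compose.api.yaml up -d --build
+```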
+
+#### Using Provided `run-compose.sh` Script (Linux)
+
+The script is also available on Windows under any Docker-enabled WSL2 Linux distro (you have to enable WSL integration from Docker Desktop).
+
+Simply run the following command to grant the script execute permission:
+
+```bash
+chmod +x run-compose.sh
+```
+
+##### For CPU-only container
+
+```bash
+./run-compose.sh
+```
+
+##### Enable GPU
+
+For a GPU-enabled container, Docker needs access to your GPU driver; this mostly works with NVIDIA GPUs, so see the official install guide: [nvidia-container-toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
+Warning! A GPU-enabled installation has only been tested on Linux with an NVIDIA GPU; full functionality is not guaranteed on Windows, macOS, or with a different GPU.
+
+```bash
+./run-compose.sh --enable-gpu
+```
+
+Note that both of the above commands use the latest production Docker image from the repository; to build the latest local version instead, append the `--build` flag, for example:
+
+```bash
+./run-compose.sh --enable-gpu --build
+```
+
+#### Using Alternative Methods (Kustomize or Helm)
+
+See [INSTALLATION.md](/INSTALLATION.md) for installation instructions, or join our [Ollama Web UI Discord community](https://discord.gg/5rJgQTnV4s) for help.
+
 ## How to Install Without Docker
 
 While we strongly recommend using our convenient Docker container installation for optimal support, we understand that some situations may require a non-Docker setup, especially for development purposes. Please note that non-Docker installations are not officially supported, and you might need to troubleshoot on your own.
diff --git a/docker-compose.api.yaml b/docker-compose.api.yaml
index 5e4b22274..f19974e7d 100644
--- a/docker-compose.api.yaml
+++ b/docker-compose.api.yaml
@@ -2,5 +2,6 @@ version: '3.8'
 
 services:
   ollama:
+    # Expose Ollama API outside the container stack
     ports:
       - ${OLLAMA_WEBAPI_PORT-11434}:11434
diff --git a/docker-compose.api.yml b/docker-compose.api.yml
deleted file mode 100644
index c36cf11e0..000000000
--- a/docker-compose.api.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-version: '3.6'
-
-services:
-  ollama:
-    # Expose Ollama API outside the container stack
-    ports:
-      - 11434:11434
\ No newline at end of file
diff --git a/docker-compose.gpu.yaml b/docker-compose.gpu.yaml
index 7df6b91a7..424f485a1 100644
--- a/docker-compose.gpu.yaml
+++ b/docker-compose.gpu.yaml
@@ -2,6 +2,7 @@ version: '3.8'
 
 services:
   ollama:
+    # GPU support
     deploy:
       resources:
         reservations:
diff --git a/docker-compose.gpu.yml b/docker-compose.gpu.yml
deleted file mode 100644
index db47ae136..000000000
--- a/docker-compose.gpu.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-version: '3.6'
-
-services:
-  ollama:
-    # GPU support
-    deploy:
-      resources:
-        reservations:
-          devices:
-            - driver: nvidia
-              count: 1
-              capabilities:
-                - gpu
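
After applying the patch and bringing the stack up with the GPU override, a quick sanity check (a sketch; it assumes the `ollama` service name from the compose files above and a working NVIDIA Container Toolkit on the host):

```bash
# run nvidia-smi inside the ollama service container to confirm the GPU is visible
docker compose -f docker-compose.yaml -f docker-compose.gpu.yaml exec ollama nvidia-smi
```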