version: '3.6'

services:
  ollama:
    # Uncomment below for GPU support
    # deploy:
    #   resources:
    #     reservations:
    #       devices:
    #         - driver: nvidia
    #           count: 1
    #           capabilities:
    #             - gpu
    volumes:
      - ollama:/root/.ollama
    # Uncomment below to expose Ollama API outside the container stack
    # ports:
    #   - 11434:11434
    container_name: ollama
    pull_policy: always
    tty: true
    restart: unless-stopped
    image: ollama/ollama:latest

  ollama-webui:
    build:
      context: .
      args:
        OLLAMA_API_BASE_URL: '/ollama/api'
      dockerfile: Dockerfile
    image: ollama-webui:latest
    container_name: ollama-webui
    depends_on:
      - ollama
    ports:
      - 3000:8080
    environment:
      - "OLLAMA_API_BASE_URL=http://ollama:11434/api"
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped

volumes:
  ollama: {}
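
# A minimal usage sketch, assuming Docker Compose v2 is installed and this file
# sits next to the ollama-webui Dockerfile referenced by the build context above
# (the commands are illustrative, not mandated by this file):
#
#   docker compose up -d --build
#
# The web UI is then reachable at http://localhost:3000 (host port 3000 mapped
# to container port 8080). The Ollama API stays internal to the stack unless the
# commented-out "ports" mapping on the ollama service is uncommented, in which
# case it would also be reachable at http://localhost:11434.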