diff --git a/kubernetes/helm/templates/ollama-service.yaml b/kubernetes/helm/templates/ollama-service.yaml
index afa25e38b..54558473a 100644
--- a/kubernetes/helm/templates/ollama-service.yaml
+++ b/kubernetes/helm/templates/ollama-service.yaml
@@ -4,6 +4,7 @@ metadata:
   name: ollama-service
   namespace: {{ .Values.namespace }}
 spec:
+  type: {{ .Values.ollama.service.type }}
   selector:
     app: ollama
   ports:
diff --git a/kubernetes/helm/templates/ollama-statefulset.yaml b/kubernetes/helm/templates/ollama-statefulset.yaml
index 755ed008a..83cb6883f 100644
--- a/kubernetes/helm/templates/ollama-statefulset.yaml
+++ b/kubernetes/helm/templates/ollama-statefulset.yaml
@@ -19,15 +19,32 @@ spec:
         image: {{ .Values.ollama.image }}
         ports:
         - containerPort: {{ .Values.ollama.servicePort }}
-        resources:
-          limits:
-            cpu: {{ .Values.ollama.resources.limits.cpu }}
-            memory: {{ .Values.ollama.resources.limits.memory }}
-            nvidia.com/gpu: {{ .Values.ollama.resources.limits.gpu }}
+        env:
+        {{- if .Values.ollama.gpu.enabled }}
+        - name: PATH
+          value: /usr/local/nvidia/bin:/usr/local/cuda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+        - name: LD_LIBRARY_PATH
+          value: /usr/local/nvidia/lib:/usr/local/nvidia/lib64
+        - name: NVIDIA_DRIVER_CAPABILITIES
+          value: compute,utility
+        {{- end }}
+        {{- if .Values.ollama.resources }}
+        resources: {{- toYaml .Values.ollama.resources | nindent 10 }}
+        {{- end }}
         volumeMounts:
         - name: ollama-volume
           mountPath: /root/.ollama
         tty: true
+      {{- with .Values.ollama.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      tolerations:
+      {{- if .Values.ollama.gpu.enabled }}
+      - key: nvidia.com/gpu
+        operator: Exists
+        effect: NoSchedule
+      {{- end }}
   volumeClaimTemplates:
   - metadata:
       name: ollama-volume
@@ -35,4 +52,4 @@ spec:
       accessModes: [ "ReadWriteOnce" ]
       resources:
         requests:
-          storage: 1Gi
\ No newline at end of file
+          storage: {{ .Values.ollama.volumeSize }}
\ No newline at end of file
diff --git a/kubernetes/helm/templates/webui-deployment.yaml b/kubernetes/helm/templates/webui-deployment.yaml
index ec4fc79f4..d9721ee05 100644
--- a/kubernetes/helm/templates/webui-deployment.yaml
+++ b/kubernetes/helm/templates/webui-deployment.yaml
@@ -15,14 +15,24 @@ spec:
     spec:
       containers:
       - name: ollama-webui
-        image: ghcr.io/ollama-webui/ollama-webui:main
+        image: {{ .Values.webui.image }}
         ports:
         - containerPort: 8080
-        resources:
-          limits:
-            cpu: "500m"
-            memory: "500Mi"
+        {{- if .Values.webui.resources }}
+        resources: {{- toYaml .Values.webui.resources | nindent 10 }}
+        {{- end }}
+        volumeMounts:
+        - name: webui-volume
+          mountPath: /app/backend/data
         env:
         - name: OLLAMA_API_BASE_URL
           value: "http://ollama-service.{{ .Values.namespace }}.svc.cluster.local:{{ .Values.ollama.servicePort }}/api"
-        tty: true
\ No newline at end of file
+        tty: true
+      {{- with .Values.webui.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      volumes:
+      - name: webui-volume
+        persistentVolumeClaim:
+          claimName: ollama-webui-pvc
\ No newline at end of file
diff --git a/kubernetes/helm/templates/webui-pvc.yaml b/kubernetes/helm/templates/webui-pvc.yaml
new file mode 100644
index 000000000..e9961aa8d
--- /dev/null
+++ b/kubernetes/helm/templates/webui-pvc.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  labels:
+    app: ollama-webui
+  name: ollama-webui-pvc
+  namespace: {{ .Values.namespace }}
+spec:
+  accessModes: [ "ReadWriteOnce" ]
+  resources:
+    requests:
+      storage: {{ .Values.webui.volumeSize }}
\ No newline at end of file
diff --git a/kubernetes/helm/templates/webui-service.yaml b/kubernetes/helm/templates/webui-service.yaml
index dd6058558..7fefa4fd4 100644
--- a/kubernetes/helm/templates/webui-service.yaml
+++ b/kubernetes/helm/templates/webui-service.yaml
@@ -4,7 +4,7 @@ metadata:
   name: ollama-webui-service
   namespace: {{ .Values.namespace }}
 spec:
-  type: NodePort # Use LoadBalancer if you're on a cloud that supports it
+  type: {{ .Values.webui.service.type }} # Default: NodePort; use LoadBalancer if you're on a cloud that supports it
   selector:
     app: ollama-webui
   ports:
diff --git a/kubernetes/helm/values.yaml b/kubernetes/helm/values.yaml
index f115f82fe..648b40509 100644
--- a/kubernetes/helm/values.yaml
+++ b/kubernetes/helm/values.yaml
@@ -10,6 +10,12 @@ ollama:
       memory: "2Gi"
       nvidia.com/gpu: "0"
   volumeSize: 1Gi
+  nodeSelector: {}
+  tolerations: []
+  service:
+    type: ClusterIP
+  gpu:
+    enabled: false
 
 webui:
   replicaCount: 1
@@ -25,3 +31,8 @@ webui:
     # Use appropriate annotations for your Ingress controller, e.g., for NGINX:
     # nginx.ingress.kubernetes.io/rewrite-target: /
     host: ollama.minikube.local
+  volumeSize: 1Gi
+  nodeSelector: {}
+  tolerations: []
+  service:
+    type: NodePort
\ No newline at end of file
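
A minimal usage sketch for the values this diff introduces, assuming the chart is installed straight from kubernetes/helm and that GPU nodes carry the standard nvidia.com/gpu taint; the override file name, release name, and node label below are hypothetical, not part of the chart:

# values-gpu.yaml -- example override exercising the new GPU path (hypothetical file)
ollama:
  gpu:
    enabled: true                      # turns on the NVIDIA env vars and GPU toleration added above
  resources:
    limits:
      nvidia.com/gpu: "1"              # rendered verbatim by the new toYaml-based resources block
  nodeSelector:
    nvidia.com/gpu.present: "true"     # example label; match whatever your GPU nodes actually carry
webui:
  volumeSize: 2Gi                      # backs the new ollama-webui-pvc claim
  service:
    type: LoadBalancer                 # overrides the NodePort default from values.yaml

# Applied with, e.g.:
#   helm upgrade --install ollama-webui ./kubernetes/helm -f values-gpu.yaml

With ollama.gpu.enabled left at its false default, the StatefulSet renders no NVIDIA env vars and an empty tolerations: key (which Kubernetes treats as no tolerations), so the chart stays schedulable on CPU-only clusters.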