# clearml-helm-charts/charts/clearml/values.yaml
clearml:
  defaultCompany: "d1bd92a3b039400cbafc60a7a5b1e52b"
ingress:
  enabled: false
  name: clearml-server-ingress
  annotations: {}
  host: ""
  tls:
    secretName: ""
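  # A minimal sketch of an enabled ingress. The hostname, annotation, and secret
  # name below are placeholders, not chart defaults; the annotation assumes an
  # nginx ingress controller is installed in the cluster:
  # enabled: true
  # host: "clearml.example.com"
  # annotations:
  #   kubernetes.io/ingress.class: nginx
  # tls:
  #   secretName: "clearml-tls"  # an existing TLS secret in the release namespace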
apiserver:
  prepopulateEnabled: "true"
  prepopulateZipFiles: "/opt/clearml/db-pre-populate"
  prepopulateArtifactsPath: "/mnt/fileserver"
  configDir: /opt/clearml/config
  service:
    type: NodePort
    port: 8008
  livenessDelay: 60
  readinessDelay: 60
  replicaCount: 1
  image:
    repository: "allegroai/clearml"
    pullPolicy: IfNotPresent
    tag: "1.1.1"
  extraEnvs: []
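  # extraEnvs takes standard Kubernetes EnvVar entries. A sketch of the shape
  # (the variable name below is purely illustrative, not a documented setting):
  # extraEnvs:
  #   - name: CLEARML_EXAMPLE_SETTING  # hypothetical name
  #     value: "example"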
  podAnnotations: {}
  resources: {}
  # We usually recommend not to specify default resources and to leave this as a
  # conscious choice for the user. This also increases the chances the chart runs
  # on environments with limited resources, such as Minikube. If you do want to
  # specify resources, uncomment the following lines, adjust them as necessary,
  # and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
  # Optional: configures pvc-apiserver, which holds optional server configuration files
  storage:
    enableConfigVolume: false
    config:
      class: "standard"
      size: 1Gi
fileserver:
  service:
    type: NodePort
    port: 8081
  replicaCount: 1
  image:
    repository: "allegroai/clearml"
    pullPolicy: IfNotPresent
    tag: "1.1.1"
  extraEnvs: []
  podAnnotations: {}
  resources: {}
  # We usually recommend not to specify default resources and to leave this as a
  # conscious choice for the user. This also increases the chances the chart runs
  # on environments with limited resources, such as Minikube. If you do want to
  # specify resources, uncomment the following lines, adjust them as necessary,
  # and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
  storage:
    data:
      class: "standard"
      size: 50Gi
webserver:
  extraEnvs: []
  service:
    type: NodePort
    port: 80
  replicaCount: 1
  image:
    repository: "allegroai/clearml"
    pullPolicy: IfNotPresent
    tag: "1.1.1"
  podAnnotations: {}
  resources: {}
  # We usually recommend not to specify default resources and to leave this as a
  # conscious choice for the user. This also increases the chances the chart runs
  # on environments with limited resources, such as Minikube. If you do want to
  # specify resources, uncomment the following lines, adjust them as necessary,
  # and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
agentservices:
  clearmlHostIp: null
  agentVersion: ""
  clearmlWebHost: null
  clearmlFilesHost: null
  clearmlGitUser: null
  clearmlGitPassword: null
  awsAccessKeyId: null
  awsSecretAccessKey: null
  awsDefaultRegion: null
  azureStorageAccount: null
  azureStorageKey: null
  googleCredentials: null
  clearmlWorkerId: "clearml-services"
  replicaCount: 1
  image:
    repository: "allegroai/clearml-agent-services"
    pullPolicy: IfNotPresent
    tag: "latest"
  extraEnvs: []
  podAnnotations: {}
  resources: {}
  # We usually recommend not to specify default resources and to leave this as a
  # conscious choice for the user. This also increases the chances the chart runs
  # on environments with limited resources, such as Minikube. If you do want to
  # specify resources, uncomment the following lines, adjust them as necessary,
  # and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  nodeSelector: {}
  tolerations: []
  affinity: {}
  storage:
    data:
      class: "standard"
      size: 50Gi
agentGroups:
  agent-group-cpu:
    name: agent-group-cpu
    replicaCount: 1
    nvidiaGpusPerAgent: 0
    agentVersion: ""  # if set, it *MUST* include a comparison operator (e.g. ">=0.16.1")
    queues: "default"  # multiple queues can be specified, separated by a space (e.g. "important_jobs default")
    clearmlGitUser: null
    clearmlGitPassword: null
    clearmlAccessKey: null
    clearmlSecretKey: null
    awsAccessKeyId: null
    awsSecretAccessKey: null
    awsDefaultRegion: null
    azureStorageAccount: null
    azureStorageKey: null
    clearmlConfig: |-
      sdk {
      }
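    # clearmlConfig is rendered into the agent's clearml.conf (HOCON syntax). A
    # sketch of an sdk section carrying S3 credentials; the empty values are
    # placeholders to be filled in, not working defaults:
    # clearmlConfig: |-
    #   sdk {
    #     aws {
    #       s3 {
    #         key: ""
    #         secret: ""
    #         region: ""
    #       }
    #     }
    #   }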
    image:
      repository: "ubuntu"
      pullPolicy: IfNotPresent
      tag: "18.04"
    podAnnotations: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
  agent-group-gpu:
    name: agent-group-gpu
    replicaCount: 0
    nvidiaGpusPerAgent: 1
    agentVersion: ""  # if set, it *MUST* include a comparison operator (e.g. ">=0.16.1")
    queues: "default"  # multiple queues can be specified, separated by a space (e.g. "important_jobs default")
    clearmlGitUser: null
    clearmlGitPassword: null
    clearmlAccessKey: null
    clearmlSecretKey: null
    awsAccessKeyId: null
    awsSecretAccessKey: null
    awsDefaultRegion: null
    azureStorageAccount: null
    azureStorageKey: null
    clearmlConfig: |-
      sdk {
      }
    image:
      repository: "nvidia/cuda"
      pullPolicy: IfNotPresent
      tag: "11.0-base-ubuntu18.04"
    podAnnotations: {}
    nodeSelector: {}
    tolerations: []
    affinity: {}
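    # To schedule GPU agents onto GPU nodes, nodeSelector and tolerations can be
    # set per group. A sketch assuming a node label and taint as shown (neither
    # is created by this chart; adjust to the cluster's own labels and taints):
    # nodeSelector:
    #   accelerator: nvidia
    # tolerations:
    #   - key: nvidia.com/gpu
    #     operator: Exists
    #     effect: NoSchedule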
redis:  # configuration from https://github.com/bitnami/charts/blob/master/bitnami/redis/values.yaml
  enabled: true
  usePassword: false
  databaseNumber: 0
  master:
    name: "{{ .Release.Name }}-redis-master"
    port: 6379
    persistence:
      enabled: true
      accessModes:
        - ReadWriteOnce
      size: 5Gi
  cluster:
    enabled: false
mongodb:  # configuration from https://github.com/bitnami/charts/blob/master/bitnami/mongodb/values.yaml
  enabled: true
  architecture: standalone
  auth:
    enabled: false
  replicaCount: 1
  persistence:
    enabled: true
    accessModes:
      - ReadWriteOnce
    size: 50Gi
  service:
    name: "{{ .Release.Name }}-mongodb"
    type: ClusterIP
    port: 27017
    portName: mongo-service
elasticsearch:  # configuration from https://github.com/elastic/helm-charts/blob/7.10/elasticsearch/values.yaml
  enabled: true
  httpPort: 9200
  roles:
    master: "true"
    ingest: "true"
    data: "true"
    remote_cluster_client: "true"
  replicas: 1
  # Readiness probe hack for a single-node cluster (where status will never be
  # green). Should be removed if using replicas > 1.
  clusterHealthCheckParams: "wait_for_status=yellow&timeout=1s"
  minimumMasterNodes: 1
  clusterName: clearml-elastic
  esJavaOpts: "-Xmx2g -Xms2g"
  extraEnvs:
    - name: bootstrap.memory_lock
      value: "false"
    - name: cluster.routing.allocation.node_initial_primaries_recoveries
      value: "500"
    - name: cluster.routing.allocation.disk.watermark.low
      value: 500mb
    - name: cluster.routing.allocation.disk.watermark.high
      value: 500mb
    - name: cluster.routing.allocation.disk.watermark.flood_stage
      value: 500mb
    - name: http.compression_level
      value: "7"
    - name: reindex.remote.whitelist
      value: '*.*'
    - name: xpack.monitoring.enabled
      value: "false"
    - name: xpack.security.enabled
      value: "false"
  resources:
    requests:
      memory: "4Gi"
    limits:
      memory: "4Gi"
  persistence:
    enabled: true
  volumeClaimTemplate:
    accessModes: ["ReadWriteOnce"]
    resources:
      requests:
        storage: 50Gi
  esConfig:
    elasticsearch.yml: |
      xpack.security.enabled: false