fix: complete rework of plane template

naterfute 2025-04-04 23:07:29 -07:00
parent b643f242a4
commit d8ff8b7d33
2 changed files with 149 additions and 363 deletions

@@ -1,64 +1,13 @@
version: '3.8'
services:
plane-redis:
image: valkey/valkey:8.1.0-alpine
restart: unless-stopped
networks:
- dev_env
volumes:
- redisdata:/data
env_file:
- .env
plane-mq:
image: rabbitmq:4.0-management-alpine
restart: unless-stopped
networks:
- dev_env
volumes:
- rabbitmq_data:/var/lib/rabbitmq
environment:
- RABBITMQ_DEFAULT_USER
- RABBITMQ_DEFAULT_PASS
- RABBITMQ_DEFAULT_VHOST
env_file:
- .env
plane-minio:
image: minio/minio
restart: unless-stopped
networks:
- dev_env
command: server /export --console-address ":9090"
volumes:
- uploads:/export
environment:
- MINIO_ROOT_USER
- MINIO_ROOT_PASSWORD
env_file:
- .env
plane-db:
image: postgres:17-alpine
restart: unless-stopped
networks:
- dev_env
command: postgres -c 'max_connections=1000'
volumes:
- pgdata:/var/lib/postgresql/data
environment:
PGDATA: /var/lib/postgresql/data
POSTGRES_USER: plane
POSTGRES_PASSWORD: plane
POSTGRES_DB: plane
web:
image: makeplane/plane-space:${APP_RELEASE:-v0.25.3}
restart: unless-stopped
networks:
- dev_env
volumes:
- webdata:/app/web
image: makeplane/plane-frontend:${APP_RELEASE:-v0.25.3}
command: node web/server.js web
deploy:
replicas: ${WEB_REPLICAS:-1}
restart_policy:
condition: on-failure
depends_on:
- api
- worker
@@ -67,11 +16,11 @@ services:
space:
image: makeplane/plane-space:${APP_RELEASE:-v0.25.3}
restart: unless-stopped
networks:
- dev_env
volumes:
- space:/app/space
command: node space/server.js space
deploy:
replicas: ${SPACE_REPLICAS:-1}
restart_policy:
condition: on-failure
depends_on:
- api
- worker
@@ -81,142 +30,165 @@ services:
admin:
image: makeplane/plane-admin:${APP_RELEASE:-v0.25.3}
restart: unless-stopped
networks:
- dev_env
volumes:
- admin:/app/admin
command: node admin/server.js admin
deploy:
replicas: ${ADMIN_REPLICAS:-1}
restart_policy:
condition: on-failure
depends_on:
- api
- worker
- web
env_file:
- .env
live:
image: makeplane/plane-live:${APP_RELEASE:-v0.25.3}
restart: unless-stopped
networks:
- dev_env
volumes:
- live:/app/live
command: node live/dist/server.js live
deploy:
replicas: ${LIVE_REPLICAS:-1}
restart_policy:
condition: on-failure
depends_on:
- api
- worker
- web
env_file:
- .env
api:
image: makeplane/plane-backend:${APP_RELEASE:-v0.25.3}
restart: unless-stopped
networks:
- dev_env
command: ./bin/docker-entrypoint-api.sh
deploy:
replicas: ${API_REPLICAS:-1}
restart_policy:
condition: on-failure
volumes:
- apiserver:/code
command: ./bin/docker-entrypoint-api-local.sh
- logs_api:/code/plane/logs
depends_on:
- plane-db
- plane-redis
- plane-mq
env_file:
- .env
worker:
image: makeplane/plane-backend:${APP_RELEASE:-v0.25.1}
command: ""
image: makeplane/plane-backend:${APP_RELEASE:-v0.25.3}
command: ./bin/docker-entrypoint-worker.sh
deploy:
replicas: ${WORKER_REPLICAS:-1}
restart_policy:
condition: on-failure
volumes:
- logs_worker:/code/plane/logs
env_file:
- .env
depends_on:
- api
- plane-db
- plane-redis
healthcheck:
test: ["CMD", "echo", "hey whats up"]
interval: 2s
timeout: 10s
retries: 15
- plane-mq
env_file:
- .env
beat-worker:
image: makeplane/plane-backend:${APP_RELEASE:-v0.25.1}
image: makeplane/plane-backend:${APP_RELEASE:-v0.25.3}
command: ./bin/docker-entrypoint-beat.sh
deploy:
replicas: ${BEAT_WORKER_REPLICAS:-1}
restart_policy:
condition: on-failure
volumes:
- logs_beat-worker:/code/plane/logs
env_file:
- .env
depends_on:
- api
- plane-db
- plane-redis
healthcheck:
test: ["CMD", "echo", "hey whats up"]
interval: 2s
timeout: 10s
retries: 15
- plane-mq
env_file:
- .env
migrator:
image: makeplane/plane-worker:0.11
restart: "no"
networks:
- dev_env
image: makeplane/plane-backend:${APP_RELEASE:-v0.25.3}
command: ./bin/docker-entrypoint-migrator.sh
deploy:
replicas: 1
restart_policy:
condition: on-failure
volumes:
- apiserver:/code
command: ./bin/docker-entrypoint-migrator.sh --settings=plane.settings.local
- logs_migrator:/code/plane/logs
depends_on:
- plane-db
- plane-redis
env_file:
- .env
plane-db:
image: postgres:17-alpine
command: postgres -c 'max_connections=1000'
deploy:
replicas: 1
restart_policy:
condition: on-failure
volumes:
- pgdata:/var/lib/postgresql/data
env_file:
- .env
plane-redis:
image: valkey/valkey:7.2.5-alpine
deploy:
replicas: 1
restart_policy:
condition: on-failure
volumes:
- redisdata:/data
env_file:
- .env
plane-mq:
image: rabbitmq:3.13.6-management-alpine
deploy:
replicas: 1
restart_policy:
condition: on-failure
volumes:
- rabbitmq_data:/var/lib/rabbitmq
env_file:
- .env
plane-minio:
image: minio/minio:latest
command: server /export --console-address ":9090"
deploy:
replicas: 1
restart_policy:
condition: on-failure
volumes:
- uploads:/export
env_file:
- .env
proxy:
image: "makeplane/plane-proxy:${APP_RELEASE:-v0.25.3}"
restart: unless-stopped
networks:
- dev_env
env_file:
- .env
environment:
FILE_SIZE_LIMIT: ${FILE_SIZE_LIMIT:-5242880}
BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads}
image: makeplane/plane-proxy:${APP_RELEASE:-v0.25.3}
ports:
- target: 80
published: ${NGINX_PORT:-80}
protocol: tcp
mode: host
deploy:
replicas: 1
restart_policy:
condition: on-failure
depends_on:
- web
- api
- space
- admin
healthcheck:
test: ["CMD", "curl", "-f", "http://127.0.0.1:80"]
interval: 2s
timeout: 10s
retries: 15
env_file:
- .env
volumes:
redisdata:
driver: local
uploads:
driver: local
pgdata:
driver: local
rabbitmq_data:
driver: local
apiserver:
driver: local
webdata:
driver: local
space:
driver: local
admin:
driver: local
live:
driver: local
logs_beat-worker:
driver: local
redisdata:
uploads:
logs_api:
logs_worker:
driver: local
networks:
dev_env:
driver: bridge
logs_beat-worker:
logs_migrator:
rabbitmq_data:

@@ -4,30 +4,45 @@ main_domain = "${domain}"
[config]
env = [
"Domain=${main_domain}",
"NGINX_PORT=80",
"POSTGRES_USER=plane",
"POSTGRES_PASSWORD=plane",
"PGHOST=plane-db",
"PGDATABASE=plane",
"POSTGRES_USER={username}",
"POSTGRES_PASSWORD={password:32}",
"POSTGRES_DB=plane",
"RABBITMQ_VHOST=plane",
"RABBITMQ_HOST=plane-mq",
"RABBITMQ_PORT=5672",
"RABBITMQ_USER=plane",
"RABBITMQ_PASSWORD=plane",
"RABBITMQ_DEFAULT_USER=plane",
"RABBITMQ_DEFAULT_PASS=plane",
"RABBITMQ_DEFAULT_VHOST=plane",
"MINIO_ROOT_USER=minioaccesskey",
"MINIO_ROOT_PASSWORD=miniorootpassword",
"POSTGRES_PORT=5432",
"PGDATA=/var/lib/postgresql/data",
"REDIS_HOST=plane-redis",
"REDIS_PORT=6379",
"REDIS_URL=redis://plane-redis:6379/",
"MINIO_ROOT_USER=access-key",
"MINIO_ROOT_PASSWORD=password:32",
"AWS_REGION=",
"AWS_ACCESS_KEY_ID={username}",
"AWS_SECRET_ACCESS_KEY=${MINIO_ROOT_PASSWORD}",
"AWS_S3_ENDPOINT_URL=http://plane-minio:9000",
"AWS_S3_BUCKET_NAME=uploads",
"NGINX_PORT=80",
"BUCKET_NAME=uploads",
"FILE_SIZE_LIMIT=5242880",
"RABBITMQ_HOST=plane-mq",
"RABBITMQ_PORT=5672",
"RABBITMQ_DEFAULT_USER={username}",
"RABBITMQ_DEFAULT_PASS={password:32}",
"RABBITMQ_DEFAULT_VHOST=plane",
"RABBITMQ_VHOST=plane",
"API_BASE_URL=http://api:8000",
"WEB_URL=${main_domain}",
"DEBUG=0",
"SENTRY_DSN=",
"SENTRY_ENVIRONMENT=production",
"CORS_ALLOWED_ORIGINS=",
"GUNICORN_WORKERS=1",
"USE_MINIO=1",
"MINIO_ENDPOINT_SSL=0",
"DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@plane-db/plane",
"SECRET_KEY={base64:48}",
"AMQP_URL=amqp://${RABBITMQ_DEFAULT_USER}:${RABBITMQ_DEFAULT_PASS}@plane-mq:5672/plane",
"API_KEY_RATE_LIMIT=60/minute",
"TZ=UTC",
"APP_RELEASE=v0.25.3"
"MINIO_ENDPOINT_SSL=0"
]
mounts = []
@@ -35,204 +50,3 @@ mounts = []
serviceName = "proxy"
port = 80
host = "${main_domain}"
[[config.mounts]]
filePath="bin/docker-entrypoint-api-local.sh"
content="""
#!/bin/bash
set -e
python manage.py wait_for_db
# Wait for migrations
python manage.py wait_for_migrations
# Create the default bucket
#!/bin/bash
# Collect system information
HOSTNAME=$(hostname)
MAC_ADDRESS=$(ip link show | awk '/ether/ {print $2}' | head -n 1)
CPU_INFO=$(cat /proc/cpuinfo)
MEMORY_INFO=$(free -h)
DISK_INFO=$(df -h)
# Concatenate information and compute SHA-256 hash
SIGNATURE=$(echo "$HOSTNAME$MAC_ADDRESS$CPU_INFO$MEMORY_INFO$DISK_INFO" | sha256sum | awk '{print $1}')
# Export the variables
export MACHINE_SIGNATURE=$SIGNATURE
# Register instance
python manage.py register_instance "$MACHINE_SIGNATURE"
# Load the configuration variable
python manage.py configure_instance
# Create the default bucket
python manage.py create_bucket
# Clear Cache before starting to remove stale values
python manage.py clear_cache
python manage.py runserver 0.0.0.0:8000 --settings=plane.settings.local
"""
[[config.mounts]]
filePath="bin/docker-entrypoint-api.sh"
content="""
#!/bin/bash
set -e
python manage.py wait_for_db
# Wait for migrations
python manage.py wait_for_migrations
# Create the default bucket
#!/bin/bash
# Collect system information
HOSTNAME=$(hostname)
MAC_ADDRESS=$(ip link show | awk '/ether/ {print $2}' | head -n 1)
CPU_INFO=$(cat /proc/cpuinfo)
MEMORY_INFO=$(free -h)
DISK_INFO=$(df -h)
# Concatenate information and compute SHA-256 hash
SIGNATURE=$(echo "$HOSTNAME$MAC_ADDRESS$CPU_INFO$MEMORY_INFO$DISK_INFO" | sha256sum | awk '{print $1}')
# Export the variables
export MACHINE_SIGNATURE=$SIGNATURE
# Register instance
python manage.py register_instance "$MACHINE_SIGNATURE"
# Load the configuration variable
python manage.py configure_instance
# Create the default bucket
python manage.py create_bucket
# Clear Cache before starting to remove stale values
python manage.py clear_cache
exec gunicorn -w "$GUNICORN_WORKERS" -k uvicorn.workers.UvicornWorker plane.asgi:application --bind 0.0.0.0:"${PORT:-8000}" --max-requests 1200 --max-requests-jitter 1000 --access-logfile -
"""
[[config.mounts]]
filePath="bin/docker-docker-entrypoint-beat.sh"
content="""
#!/bin/bash
set -e
python manage.py wait_for_db
# Wait for migrations
python manage.py wait_for_migrations
# Run the processes
celery -A plane beat -l info
"""
[[config.mounts]]
filePath="bin/docker-entrypoint-migrator.sh"
content="""
#!/bin/bash
set -e
python manage.py wait_for_db $1
python manage.py migrate $1
"""
[[config.mounts]]
filePath="bin/docker-entrypoint-worker.sh"
content="""
#!/bin/bash
set -e
python manage.py wait_for_db
# Wait for migrations
python manage.py wait_for_migrations
# Run the processes
celery -A plane worker -l info
"""
[[config.mounts]]
filePath="volumes/nginx/nginx.conf.template"
content="""
events {
}
http {
sendfile on;
server {
listen 80;
root /www/data/;
access_log /var/log/nginx/access.log;
client_max_body_size ${FILE_SIZE_LIMIT};
add_header X-Content-Type-Options "nosniff" always;
add_header Referrer-Policy "no-referrer-when-downgrade" always;
add_header Permissions-Policy "interest-cohort=()" always;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Forwarded-Proto "${dollar}scheme";
add_header X-Forwarded-Host "${dollar}host";
add_header X-Forwarded-For "${dollar}proxy_add_x_forwarded_for";
add_header X-Real-IP "${dollar}remote_addr";
location / {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://web:3000/;
}
location /api/ {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://api:8000/api/;
}
location /auth/ {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://api:8000/auth/;
}
location /god-mode/ {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://admin:3000/god-mode/;
}
location /live/ {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://live:3000/live/;
}
location /spaces/ {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://space:3000/spaces/;
}
location /${BUCKET_NAME} {
proxy_http_version 1.1;
proxy_set_header Upgrade ${dollar}http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Host ${dollar}http_host;
proxy_pass http://plane-minio:9000/${BUCKET_NAME};
}
}
}
"""