chore: add yaml and toml dependencies, and create script for converting YAML to TOML

- Added the 'yaml' and '@iarna/toml' dependencies to package.json.
- Created a new app/script.js that walks the blueprints directory and converts each template.yml to TOML (a conversion sketch follows the commit metadata below).
- Added the resulting template.toml files for the various blueprints in the blueprints directory.
Mauricio Siu 2025-03-30 00:51:49 -06:00
parent 7e1d9df7e1
commit 0f16376f98
115 changed files with 3329 additions and 11 deletions
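
To make the conversion concrete, here is a minimal sketch of the per-file transformation the new script performs, using the two new dependencies. The blueprint YAML below is a hypothetical example in the same shape as the real templates, not content taken from this commit:

import yaml from "yaml";
import toml from "@iarna/toml";

// Hypothetical minimal blueprint in the YAML shape used by the templates.
const yamlSource = `
variables:
  main_domain: "\${domain}"
config:
  env: []
  mounts: []
  domains:
    - serviceName: example
      port: 80
      host: "\${main_domain}"
`;

// The same two steps app/script.js performs: parse the YAML, re-serialize as TOML.
const tomlOutput = toml.stringify(yaml.parse(yamlSource));
console.log(tomlOutput);
// Prints roughly the following (exact spacing and integer formatting are whatever
// @iarna/toml emits):
//   [variables]
//   main_domain = "${domain}"
//   [config]
//   env = [ ]
//   mounts = [ ]
//   [[config.domains]]
//   serviceName = "example"
//   port = 80
//   host = "${main_domain}"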

View File

@ -10,6 +10,8 @@
"preview": "vite preview"
},
"dependencies": {
"yaml":"2.7.1",
"@iarna/toml": "^2.2.5",
"@codemirror/autocomplete": "^6.18.6",
"@codemirror/lang-json": "^6.0.1",
"@codemirror/lang-yaml": "^6.1.1",

View File

@ -26,6 +26,9 @@ importers:
'@codemirror/view':
specifier: 6.29.0
version: 6.29.0
'@iarna/toml':
specifier: ^2.2.5
version: 2.2.5
'@radix-ui/react-dialog':
specifier: ^1.1.6
version: 1.1.6(@types/react-dom@19.0.4(@types/react@19.0.10))(@types/react@19.0.10)(react-dom@19.0.0(react@19.0.0))(react@19.0.0)
@ -46,7 +49,7 @@ importers:
version: 1.1.3(@types/react-dom@19.0.4(@types/react@19.0.10))(@types/react@19.0.10)(react-dom@19.0.0(react@19.0.0))(react@19.0.0)
'@tailwindcss/vite':
specifier: ^4.0.12
version: 4.0.12(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2))
version: 4.0.12(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)(yaml@2.7.1))
'@uiw/codemirror-theme-github':
specifier: ^4.22.1
version: 4.23.10(@codemirror/language@6.10.8)(@codemirror/state@6.5.2)(@codemirror/view@6.29.0)
@ -91,7 +94,10 @@ importers:
version: 1.0.7(tailwindcss@4.0.12)
vite-plugin-static-copy:
specifier: 2.3.0
version: 2.3.0(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2))
version: 2.3.0(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)(yaml@2.7.1))
yaml:
specifier: 2.7.1
version: 2.7.1
zustand:
specifier: ^5.0.3
version: 5.0.3(@types/react@19.0.10)(react@19.0.0)
@ -107,7 +113,7 @@ importers:
version: 19.0.4(@types/react@19.0.10)
'@vitejs/plugin-react':
specifier: ^4.3.4
version: 4.3.4(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2))
version: 4.3.4(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)(yaml@2.7.1))
globals:
specifier: ^15.15.0
version: 15.15.0
@ -116,7 +122,7 @@ importers:
version: 5.7.3
vite:
specifier: ^6.2.0
version: 6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)
version: 6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)(yaml@2.7.1)
packages:
@ -408,6 +414,9 @@ packages:
'@floating-ui/utils@0.2.9':
resolution: {integrity: sha512-MDWhGtE+eHw5JW7lq4qhc5yRLS11ERl1c7Z6Xd0a58DozHES6EnNNwUWbMiG4J9Cgj053Bhk8zvlhFYKVhULwg==}
'@iarna/toml@2.2.5':
resolution: {integrity: sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==}
'@jridgewell/gen-mapping@0.3.8':
resolution: {integrity: sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==}
engines: {node: '>=6.0.0'}
@ -1657,6 +1666,11 @@ packages:
yallist@3.1.1:
resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==}
yaml@2.7.1:
resolution: {integrity: sha512-10ULxpnOCQXxJvBgxsn9ptjq6uviG/htZKk9veJGhlqn3w/DxQ631zFF+nlQXLwmImeS5amR2dl2U8sg6U9jsQ==}
engines: {node: '>= 14'}
hasBin: true
zustand@5.0.3:
resolution: {integrity: sha512-14fwWQtU3pH4dE0dOpdMiWjddcH+QzKIgk1cl8epwSE7yag43k/AD/m4L6+K7DytAOr9gGBe3/EXj9g7cdostg==}
engines: {node: '>=12.20.0'}
@ -1965,6 +1979,8 @@ snapshots:
'@floating-ui/utils@0.2.9': {}
'@iarna/toml@2.2.5': {}
'@jridgewell/gen-mapping@0.3.8':
dependencies:
'@jridgewell/set-array': 1.2.1
@ -2581,13 +2597,13 @@ snapshots:
'@tailwindcss/oxide-win32-arm64-msvc': 4.0.12
'@tailwindcss/oxide-win32-x64-msvc': 4.0.12
'@tailwindcss/vite@4.0.12(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2))':
'@tailwindcss/vite@4.0.12(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)(yaml@2.7.1))':
dependencies:
'@tailwindcss/node': 4.0.12
'@tailwindcss/oxide': 4.0.12
lightningcss: 1.29.2
tailwindcss: 4.0.12
vite: 6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)
vite: 6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)(yaml@2.7.1)
'@types/babel__core@7.20.5':
dependencies:
@ -2665,14 +2681,14 @@ snapshots:
- '@codemirror/lint'
- '@codemirror/search'
'@vitejs/plugin-react@4.3.4(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2))':
'@vitejs/plugin-react@4.3.4(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)(yaml@2.7.1))':
dependencies:
'@babel/core': 7.26.9
'@babel/plugin-transform-react-jsx-self': 7.25.9(@babel/core@7.26.9)
'@babel/plugin-transform-react-jsx-source': 7.25.9(@babel/core@7.26.9)
'@types/babel__core': 7.20.5
react-refresh: 0.14.2
vite: 6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)
vite: 6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)(yaml@2.7.1)
transitivePeerDependencies:
- supports-color
@ -3087,16 +3103,16 @@ snapshots:
optionalDependencies:
'@types/react': 19.0.10
vite-plugin-static-copy@2.3.0(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)):
vite-plugin-static-copy@2.3.0(vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)(yaml@2.7.1)):
dependencies:
chokidar: 3.6.0
fast-glob: 3.3.3
fs-extra: 11.3.0
p-map: 7.0.3
picocolors: 1.1.1
vite: 6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)
vite: 6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)(yaml@2.7.1)
vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2):
vite@6.2.1(@types/node@20.17.24)(jiti@2.4.2)(lightningcss@1.29.2)(yaml@2.7.1):
dependencies:
esbuild: 0.25.1
postcss: 8.5.3
@ -3106,11 +3122,14 @@ snapshots:
fsevents: 2.3.3
jiti: 2.4.2
lightningcss: 1.29.2
yaml: 2.7.1
w3c-keyname@2.2.8: {}
yallist@3.1.1: {}
yaml@2.7.1: {}
zustand@5.0.3(@types/react@19.0.10)(react@19.0.0):
optionalDependencies:
'@types/react': 19.0.10

app/script.js (new file, 36 lines)
View File

@ -0,0 +1,36 @@
import yaml from "yaml";
import toml from "@iarna/toml";
import fs from "fs";
import path from "path";
import { fileURLToPath } from "url";

// Recreate __filename/__dirname, which are not available in ES modules.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

// Parse a YAML document and re-serialize it as TOML.
function convertYamlToToml(yamlContent) {
  const parsedYaml = yaml.parse(yamlContent);
  return toml.stringify(parsedYaml);
}

// Recursively walk a directory and convert every template.yml into a
// template.toml written alongside it.
function processDirectory(dirPath) {
  const files = fs.readdirSync(dirPath);
  files.forEach((file) => {
    const filePath = path.join(dirPath, file);
    const stat = fs.statSync(filePath);
    if (stat.isDirectory()) {
      processDirectory(filePath);
    } else if (file === "template.yml") {
      console.log(`Converting ${filePath}`);
      const yamlContent = fs.readFileSync(filePath, "utf8");
      const tomlContent = convertYamlToToml(yamlContent);
      const tomlPath = path.join(dirPath, "template.toml");
      fs.writeFileSync(tomlPath, tomlContent);
    }
  });
}

// Path to the blueprints directory, relative to this script.
const blueprintsPath = path.join(__dirname, "..", "blueprints");
processDirectory(blueprintsPath);
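
A note on running the script (illustrative, not part of the commit): the import syntax above requires an ES-module context, so either app/package.json declares "type": "module" (not visible in this diff) or the file would need an .mjs extension. Run with a recent Node as node script.js from the app directory; it walks ../blueprints recursively, logs every template.yml it finds, and writes a template.toml next to it, overwriting any existing one. The template.toml files that follow appear to be the output of that run.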

View File

@ -0,0 +1,21 @@
[variables]
main_domain = "${domain}"
api_key = "${password:32}"
encryption_key = "${password:32}"
jwt_secret = "${password:32}"
postgres_password = "${password:32}"
[config]
env = [
"AP_HOST=${main_domain}",
"AP_API_KEY=${api_key}",
"AP_ENCRYPTION_KEY=${encryption_key}",
"AP_JWT_SECRET=${jwt_secret}",
"AP_POSTGRES_PASSWORD=${postgres_password}"
]
mounts = [ ]
[[config.domains]]
serviceName = "activepieces"
port = 80
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ ]
mounts = [ ]
[[config.domains]]
serviceName = "actualbudget"
port = 5_006
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ ]
mounts = [ ]
[[config.domains]]
serviceName = "alist"
port = 5_244
host = "${main_domain}"

View File

@ -0,0 +1,15 @@
[variables]
main_domain = "${domain}"
service_hash = "${hash:32}"
[config]
env = [
"ANSWER_HOST=http://${main_domain}",
"SERVICE_HASH=${service_hash}"
]
mounts = [ ]
[[config.domains]]
serviceName = "answer"
port = 9_080
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ ]
mounts = [ ]
[[config.domains]]
serviceName = "appsmith"
port = 80
host = "${main_domain}"

View File

@ -0,0 +1,143 @@
[variables]
main_domain = "${domain}"
[config]
env = [
"_APP_ENV=production",
"_APP_LOCALE=en",
"_APP_OPTIONS_ABUSE=enabled",
"_APP_OPTIONS_FORCE_HTTPS=disabled",
"_APP_OPTIONS_FUNCTIONS_FORCE_HTTPS=disabled",
"_APP_OPTIONS_ROUTER_PROTECTION=disabled",
"_APP_OPENSSL_KEY_V1=your-secret-key",
"_APP_DOMAIN=${main_domain}",
"_APP_DOMAIN_FUNCTIONS=${main_domain}",
"_APP_DOMAIN_TARGET=${main_domain}",
"_APP_CONSOLE_WHITELIST_ROOT=enabled",
"_APP_CONSOLE_WHITELIST_EMAILS=",
"_APP_CONSOLE_WHITELIST_IPS=",
"_APP_CONSOLE_HOSTNAMES=",
"_APP_SYSTEM_EMAIL_NAME=Appwrite",
"_APP_SYSTEM_EMAIL_ADDRESS=noreply@appwrite.io",
"_APP_SYSTEM_TEAM_EMAIL=team@appwrite.io",
"_APP_SYSTEM_RESPONSE_FORMAT=",
"_APP_SYSTEM_SECURITY_EMAIL_ADDRESS=certs@appwrite.io",
"_APP_EMAIL_SECURITY=",
"_APP_EMAIL_CERTIFICATES=",
"_APP_USAGE_STATS=enabled",
"_APP_LOGGING_PROVIDER=",
"_APP_LOGGING_CONFIG=",
"_APP_USAGE_AGGREGATION_INTERVAL=30",
"_APP_USAGE_TIMESERIES_INTERVAL=30",
"_APP_USAGE_DATABASE_INTERVAL=900",
"_APP_WORKER_PER_CORE=6",
"_APP_CONSOLE_SESSION_ALERTS=disabled",
"_APP_REDIS_HOST=redis",
"_APP_REDIS_PORT=6379",
"_APP_REDIS_USER=",
"_APP_REDIS_PASS=",
"_APP_DB_HOST=mariadb",
"_APP_DB_PORT=3306",
"_APP_DB_SCHEMA=appwrite",
"_APP_DB_USER=user",
"_APP_DB_PASS=password",
"_APP_DB_ROOT_PASS=rootsecretpassword",
"_APP_INFLUXDB_HOST=influxdb",
"_APP_INFLUXDB_PORT=8086",
"_APP_STATSD_HOST=telegraf",
"_APP_STATSD_PORT=8125",
"_APP_SMTP_HOST=",
"_APP_SMTP_PORT=",
"_APP_SMTP_SECURE=",
"_APP_SMTP_USERNAME=",
"_APP_SMTP_PASSWORD=",
"_APP_SMS_PROVIDER=",
"_APP_SMS_FROM=",
"_APP_STORAGE_LIMIT=30000000",
"_APP_STORAGE_PREVIEW_LIMIT=20000000",
"_APP_STORAGE_ANTIVIRUS=disabled",
"_APP_STORAGE_ANTIVIRUS_HOST=clamav",
"_APP_STORAGE_ANTIVIRUS_PORT=3310",
"_APP_STORAGE_DEVICE=local",
"_APP_STORAGE_S3_ACCESS_KEY=",
"_APP_STORAGE_S3_SECRET=",
"_APP_STORAGE_S3_REGION=us-east-1",
"_APP_STORAGE_S3_BUCKET=",
"_APP_STORAGE_DO_SPACES_ACCESS_KEY=",
"_APP_STORAGE_DO_SPACES_SECRET=",
"_APP_STORAGE_DO_SPACES_REGION=us-east-1",
"_APP_STORAGE_DO_SPACES_BUCKET=",
"_APP_STORAGE_BACKBLAZE_ACCESS_KEY=",
"_APP_STORAGE_BACKBLAZE_SECRET=",
"_APP_STORAGE_BACKBLAZE_REGION=us-west-004",
"_APP_STORAGE_BACKBLAZE_BUCKET=",
"_APP_STORAGE_LINODE_ACCESS_KEY=",
"_APP_STORAGE_LINODE_SECRET=",
"_APP_STORAGE_LINODE_REGION=eu-central-1",
"_APP_STORAGE_LINODE_BUCKET=",
"_APP_STORAGE_WASABI_ACCESS_KEY=",
"_APP_STORAGE_WASABI_SECRET=",
"_APP_STORAGE_WASABI_REGION=eu-central-1",
"_APP_STORAGE_WASABI_BUCKET=",
"_APP_FUNCTIONS_SIZE_LIMIT=30000000",
"_APP_FUNCTIONS_BUILD_SIZE_LIMIT=2000000000",
"_APP_FUNCTIONS_TIMEOUT=900",
"_APP_FUNCTIONS_BUILD_TIMEOUT=900",
"_APP_FUNCTIONS_CONTAINERS=10",
"_APP_FUNCTIONS_CPUS=0",
"_APP_FUNCTIONS_MEMORY=0",
"_APP_FUNCTIONS_MEMORY_SWAP=0",
"_APP_FUNCTIONS_RUNTIMES=node-16.0,php-8.0,python-3.9,ruby-3.0",
"_APP_EXECUTOR_SECRET=your-secret-key",
"_APP_EXECUTOR_HOST=http://exc1/v1",
"_APP_EXECUTOR_RUNTIME_NETWORK=appwrite_runtimes",
"_APP_FUNCTIONS_ENVS=node-16.0,php-7.4,python-3.9,ruby-3.0",
"_APP_FUNCTIONS_INACTIVE_THRESHOLD=60",
"DOCKERHUB_PULL_USERNAME=",
"DOCKERHUB_PULL_PASSWORD=",
"DOCKERHUB_PULL_EMAIL=",
"OPEN_RUNTIMES_NETWORK=appwrite_runtimes",
"_APP_FUNCTIONS_RUNTIMES_NETWORK=runtimes",
"_APP_DOCKER_HUB_USERNAME=",
"_APP_DOCKER_HUB_PASSWORD=",
"_APP_FUNCTIONS_MAINTENANCE_INTERVAL=3600",
"_APP_VCS_GITHUB_APP_NAME=",
"_APP_VCS_GITHUB_PRIVATE_KEY=",
"_APP_VCS_GITHUB_APP_ID=",
"_APP_VCS_GITHUB_CLIENT_ID=",
"_APP_VCS_GITHUB_CLIENT_SECRET=",
"_APP_VCS_GITHUB_WEBHOOK_SECRET=",
"_APP_MAINTENANCE_INTERVAL=86400",
"_APP_MAINTENANCE_DELAY=0",
"_APP_MAINTENANCE_RETENTION_CACHE=2592000",
"_APP_MAINTENANCE_RETENTION_EXECUTION=1209600",
"_APP_MAINTENANCE_RETENTION_AUDIT=1209600",
"_APP_MAINTENANCE_RETENTION_ABUSE=86400",
"_APP_MAINTENANCE_RETENTION_USAGE_HOURLY=8640000",
"_APP_MAINTENANCE_RETENTION_SCHEDULES=86400",
"_APP_GRAPHQL_MAX_BATCH_SIZE=10",
"_APP_GRAPHQL_MAX_COMPLEXITY=250",
"_APP_GRAPHQL_MAX_DEPTH=3",
"_APP_MIGRATIONS_FIREBASE_CLIENT_ID=",
"_APP_MIGRATIONS_FIREBASE_CLIENT_SECRET=",
"_APP_ASSISTANT_OPENAI_API_KEY="
]
mounts = [ ]
[[config.domains]]
serviceName = "appwrite"
port = 80
host = "${main_domain}"
path = "/"
[[config.domains]]
serviceName = "appwrite-console"
port = 80
host = "${main_domain}"
path = "/console"
[[config.domains]]
serviceName = "appwrite-realtime"
port = 80
host = "${main_domain}"
path = "/v1/realtime"

View File

@ -0,0 +1,12 @@
[variables]
main_domain = "${domain}"
auth_secret = "${base64:32}"
[config]
env = [ "APTABASE_HOST=${main_domain}", "AUTH_SECRET=${auth_secret}" ]
mounts = [ ]
[[config.domains]]
serviceName = "aptabase"
port = 8_080
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ "BASEROW_HOST=${main_domain}" ]
mounts = [ ]
[[config.domains]]
serviceName = "baserow"
port = 80
host = "${main_domain}"

View File

@ -0,0 +1,18 @@
[variables]
main_domain = "${domain}"
[config]
env = [
"PUID=1000",
"PGID=1000",
"TZ=Etc/UTC",
"SUBFOLDER=/",
"NVIDIA_VISIBLE_DEVICES=all",
"NVIDIA_DRIVER_CAPABILITIES=all"
]
mounts = [ ]
[[config.domains]]
serviceName = "blender"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,15 @@
[variables]
main_domain = "${domain}"
browserless_token = "${password:16}"
[config]
env = [
"BROWERLESS_HOST=${main_domain}",
"BROWSERLESS_TOKEN=${browserless_token}"
]
mounts = [ ]
[[config.domains]]
serviceName = "browserless"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,29 @@
[variables]
main_domain = "${domain}"
api_key = "${password:32}"
encryption_key = "${password:32}"
jwt_secret = "${password:32}"
couchdb_password = "${password:32}"
redis_password = "${password:32}"
minio_access_key = "${password:32}"
minio_secret_key = "${password:32}"
watchtower_password = "${password:32}"
[config]
env = [
"BB_HOST=${main_domain}",
"BB_INTERNAL_API_KEY=${api_key}",
"BB_API_ENCRYPTION_KEY=${encryption_key}",
"BB_JWT_SECRET=${jwt_secret}",
"BB_COUCHDB_PASSWORD=${couchdb_password}",
"BB_REDIS_PASSWORD=${redis_password}",
"BB_WATCHTOWER_PASSWORD=${watchtower_password}",
"BB_MINIO_ACCESS_KEY=${minio_access_key}",
"BB_MINIO_SECRET_KEY=${minio_secret_key}"
]
mounts = [ ]
[[config.domains]]
serviceName = "proxy"
port = 10_000
host = "${main_domain}"

View File

@ -0,0 +1,17 @@
[variables]
main_domain = "${domain}"
calcom_encryption_key = "${base64:32}"
nextauth_secret = "${base64:32}"
[config]
env = [
"CALCOM_HOST=${main_domain}",
"NEXTAUTH_SECRET=${nextauth_secret}",
"CALENDSO_ENCRYPTION_KEY=${calcom_encryption_key}"
]
mounts = [ ]
[[config.domains]]
serviceName = "calcom"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,30 @@
[variables]
main_domain = "${domain}"
secret_key_base = "${base64:64}"
postgres_password = "${password}"
[config]
env = [
"FRONTEND_URL=http://${main_domain}",
"SECRET_KEY_BASE=${secret_key_base}",
"RAILS_ENV=production",
"NODE_ENV=production",
"INSTALLATION_ENV=docker",
"RAILS_LOG_TO_STDOUT=true",
"LOG_LEVEL=info",
"DEFAULT_LOCALE=en",
"POSTGRES_HOST=chatwoot-postgres",
"POSTGRES_PORT=5432",
"POSTGRES_DATABASE=chatwoot",
"POSTGRES_USERNAME=postgres",
"POSTGRES_PASSWORD=${postgres_password}",
"REDIS_URL=redis://chatwoot-redis:6379",
"ENABLE_ACCOUNT_SIGNUP=false",
"ACTIVE_STORAGE_SERVICE=local"
]
mounts = [ ]
[[config.domains]]
serviceName = "chatwoot-rails"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ "DOMAIN=${main_domain}" ]
mounts = [ ]
[[config.domains]]
serviceName = "client"
port = 80
host = "${main_domain}"

View File

@ -0,0 +1,6 @@
variables = { }
[config]
domains = [ ]
env = [ "CLOUDFLARE_TUNNEL_TOKEN=\"<INSERT TOKEN>\"" ]
mounts = [ ]

View File

@ -0,0 +1,18 @@
[variables]
main_domain = "${domain}"
postgres_password = "${password}"
[config]
env = [
"CODER_ACCESS_URL=",
"CODER_HTTP_ADDRESS=0.0.0.0:7080",
"POSTGRES_DB=coder",
"POSTGRES_USER=coder",
"POSTGRES_PASSWORD=${postgres_password}"
]
mounts = [ ]
[[config.domains]]
serviceName = "coder"
port = 7_080
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ "MATRIX_SUBDOMAIN=${main_domain}" ]
mounts = [ ]
[[config.domains]]
serviceName = "homeserver"
port = 6_167
host = "${main_domain}"

View File

@ -0,0 +1,15 @@
[variables]
main_domain = "${domain}"
registration_token = "${password:20}"
[config]
env = [
"CONDUWUIT_SERVER_NAME=${main_domain}",
"CONDUWUIT_REGISTRATION_TOKEN=${registration_token}"
]
mounts = [ ]
[[config.domains]]
serviceName = "homeserver"
port = 6_167
host = "${main_domain}"

View File

@ -0,0 +1,27 @@
[variables]
dashboard_domain = "${domain}"
backend_domain = "${domain}"
actions_domain = "${domain}"
[config]
env = [
"NEXT_PUBLIC_DEPLOYMENT_URL=http://${backend_domain}",
"CONVEX_CLOUD_ORIGIN=http://${backend_domain}",
"CONVEX_SITE_ORIGIN=http://${actions_domain}"
]
mounts = [ ]
[[config.domains]]
serviceName = "dashboard"
port = 6_791
host = "${dashboard_domain}"
[[config.domains]]
serviceName = "backend"
port = 3_210
host = "${backend_domain}"
[[config.domains]]
serviceName = "backend"
port = 3_211
host = "${actions_domain}"

View File

@ -0,0 +1,13 @@
[variables]
main_domain = "${domain}"
username = "${password:16}"
password = "${password:32}"
[config]
env = [ "COUCHDB_USER=${username}", "COUCHDB_PASSWORD=${password}" ]
mounts = [ ]
[[config.domains]]
serviceName = "couchdb"
port = 5_984
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ "HC=1" ]
mounts = [ ]
[[config.domains]]
serviceName = "datalens"
port = 8_080
host = "${main_domain}"

View File

@ -0,0 +1,16 @@
[variables]
main_domain = "${domain}"
directus_secret = "${base64:64}"
database_password = "${password}"
[config]
env = [
"DATABASE_PASSWORD=${database_password}",
"DIRECTUS_SECRET=${directus_secret}"
]
mounts = [ ]
[[config.domains]]
serviceName = "directus"
port = 8_055
host = "${main_domain}"

View File

@ -0,0 +1,27 @@
[variables]
main_domain = "${domain}"
mysql_password = "${password}"
mysql_root_password = "${password}"
mysql_user = "tickets"
mysql_database = "tickets"
encryption_key = "${password:48}"
[config]
env = [
"TICKETS_HOST=${main_domain}",
"MYSQL_DATABASE=${mysql_database}",
"MYSQL_PASSWORD=${mysql_password}",
"MYSQL_ROOT_PASSWORD=${mysql_root_password}",
"MYSQL_USER=${mysql_user}",
"ENCRYPTION_KEY=${encryption_key}",
"# Follow the guide at: https://discordtickets.app/self-hosting/installation/docker/#creating-the-discord-application",
"DISCORD_SECRET=",
"DISCORD_TOKEN=",
"SUPER_USERS=YOUR_DISCORD_USER_ID"
]
mounts = [ ]
[[config.domains]]
serviceName = "tickets-app"
port = 8_169
host = "${main_domain}"

View File

@ -0,0 +1,17 @@
[variables]
main_domain = "${domain}"
postgres_password = "${password}"
redis_password = "${password}"
[config]
env = [
"DISCOURSE_HOST=${main_domain}",
"POSTGRES_PASSWORD=${postgres_password}",
"REDIS_PASSWORD=${redis_password}"
]
mounts = [ ]
[[config.domains]]
serviceName = "discourse-app"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,19 @@
[variables]
main_domain = "${domain}"
postgres_password = "${password}"
app_secret = "${password}"
[config]
env = [
"POSTGRES_DB=docmost",
"POSTGRES_USER=docmost",
"POSTGRES_PASSWORD=${postgres_password}",
"APP_URL=http://${main_domain}:3000",
"APP_SECRET=${app_secret}"
]
mounts = [ ]
[[config.domains]]
serviceName = "docmost"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,20 @@
[variables]
main_domain = "${domain}"
nextauth_secret = "${base64:32}"
encryption_key = "${password:32}"
secondary_encryption_key = "${password:64}"
[config]
env = [
"DOCUMENSO_HOST=${main_domain}",
"DOCUMENSO_PORT=3000",
"NEXTAUTH_SECRET=${nextauth_secret}",
"NEXT_PRIVATE_ENCRYPTION_KEY=${encryption_key}",
"NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY=${secondary_encryption_key}"
]
mounts = [ ]
[[config.domains]]
serviceName = "documenso"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,21 @@
[variables]
main_domain = "${domain}"
secret_key_base = "${base64:64}"
[config]
env = [
"DOUBLEZERO_HOST=${main_domain}",
"DOUBLEZERO_PORT=4000",
"SECRET_KEY_BASE=${secret_key_base}",
"AWS_ACCESS_KEY_ID=your-aws-access-key",
"AWS_SECRET_ACCESS_KEY=your-aws-secret-key",
"AWS_REGION=your-aws-region",
"SQS_URL=your-aws-sqs-url",
"SYSTEM_EMAIL="
]
mounts = [ ]
[[config.domains]]
serviceName = "doublezero"
port = 4_000
host = "${main_domain}"

View File

@ -0,0 +1,15 @@
[variables]
main_domain = "${domain}"
[config]
env = [
"DRAWIO_HOST=${main_domain}",
"DRAWIO_BASE_URL=https://${main_domain}",
"DRAWIO_SERVER_URL=https://${main_domain}/"
]
mounts = [ ]
[[config.domains]]
serviceName = "drawio"
port = 8_080
host = "${main_domain}"

View File

@ -0,0 +1,17 @@
[variables]
main_domain = "${domain}"
api_domain = "${domain}"
[config]
env = [ ]
mounts = [ ]
[[config.domains]]
serviceName = "kibana"
port = 5_601
host = "${main_domain}"
[[config.domains]]
serviceName = "elasticsearch"
port = 9_200
host = "${api_domain}"

View File

@ -0,0 +1,27 @@
[variables]
main_domain = "${domain}"
db_root_password = "${password:32}"
admin_password = "${password:32}"
[config]
env = [
"SITE_NAME=${main_domain}",
"ADMIN_PASSWORD=${admin_password}",
"DB_ROOT_PASSWORD=${db_root_password}",
"MIGRATE=1",
"ENABLE_DB=1",
"DB_HOST=db",
"CREATE_SITE=1",
"CONFIGURE=1",
"REGENERATE_APPS_TXT=1",
"INSTALL_APP_ARGS=--install-app erpnext",
"IMAGE_NAME=docker.io/frappe/erpnext",
"VERSION=version-15",
"FRAPPE_SITE_NAME_HEADER="
]
mounts = [ ]
[[config.domains]]
serviceName = "frontend"
port = 8_080
host = "${main_domain}"

View File

@ -0,0 +1,40 @@
[variables]
main_domain = "${domain}"
api_key = "${base64:64}"
postgres_password = "${password}"
[config]
env = [
"SERVER_URL=https://${main_domain}",
"AUTHENTICATION_TYPE=apikey",
"AUTHENTICATION_API_KEY=${api_key}",
"AUTHENTICATION_EXPOSE_IN_FETCH_INSTANCES=true",
"LANGUAGE=en",
"CONFIG_SESSION_PHONE_CLIENT=Evolution API",
"CONFIG_SESSION_PHONE_NAME=Chrome",
"TELEMETRY=false",
"TELEMETRY_URL=",
"POSTGRES_DATABASE=evolution",
"POSTGRES_USERNAME=postgresql",
"POSTGRES_PASSWORD=${postgres_password}",
"DATABASE_ENABLED=true",
"DATABASE_PROVIDER=postgresql",
"DATABASE_CONNECTION_URI=postgres://postgresql:${postgres_password}@evolution-postgres:5432/evolution",
"DATABASE_SAVE_DATA_INSTANCE=true",
"DATABASE_SAVE_DATA_NEW_MESSAGE=true",
"DATABASE_SAVE_MESSAGE_UPDATE=true",
"DATABASE_SAVE_DATA_CONTACTS=true",
"DATABASE_SAVE_DATA_CHATS=true",
"DATABASE_SAVE_DATA_LABELS=true",
"DATABASE_SAVE_DATA_HISTORIC=true",
"CACHE_REDIS_ENABLED=true",
"CACHE_REDIS_URI=redis://evolution-redis:6379",
"CACHE_REDIS_PREFIX_KEY=evolution",
"CACHE_REDIS_SAVE_INSTANCES=true"
]
mounts = [ ]
[[config.domains]]
serviceName = "evolution-api"
port = 8_080
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ ]
mounts = [ ]
[[config.domains]]
serviceName = "excalidraw"
port = 80
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ "FB_BASEURL=/filebrowser" ]
mounts = [ ]
[[config.domains]]
serviceName = "filebrowser"
port = 8_080
host = "${main_domain}"

View File

@ -0,0 +1,20 @@
[variables]
main_domain = "${domain}"
secret_base = "${base64:64}"
encryption_key = "${base64:48}"
cron_secret = "${base64:32}"
[config]
env = [
"WEBAPP_URL=http://${main_domain}",
"NEXTAUTH_URL=http://${main_domain}",
"NEXTAUTH_SECRET=${secret_base}",
"ENCRYPTION_KEY=${encryption_key}",
"CRON_SECRET=${cron_secret}"
]
mounts = [ ]
[[config.domains]]
serviceName = "formbricks"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,27 @@
[variables]
main_domain = "${domain}"
db_root_password = "${password:32}"
admin_password = "${password:32}"
[config]
env = [
"SITE_NAME=${main_domain}",
"ADMIN_PASSWORD=${admin_password}",
"DB_ROOT_PASSWORD=${db_root_password}",
"MIGRATE=1",
"ENABLE_DB=1",
"DB_HOST=db",
"CREATE_SITE=1",
"CONFIGURE=1",
"REGENERATE_APPS_TXT=1",
"INSTALL_APP_ARGS=--install-app hrms",
"IMAGE_NAME=ghcr.io/frappe/hrms",
"VERSION=version-15",
"FRAPPE_SITE_NAME_HEADER="
]
mounts = [ ]
[[config.domains]]
serviceName = "frontend"
port = 8_080
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ "GHOST_HOST=${main_domain}" ]
mounts = [ ]
[[config.domains]]
serviceName = "ghost"
port = 2_368
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ "USER_UID=1000", "USER_GID=1000" ]
mounts = [ ]
[[config.domains]]
serviceName = "gitea"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,94 @@
[variables]
main_domain = "${domain}"
[config]
env = [ ]
[[config.domains]]
serviceName = "glance"
port = 8_080
host = "${main_domain}"
[[config.mounts]]
filePath = "/app/config/glance.yml"
content = """
branding:
  hide-footer: true
  logo-text: P

pages:
  - name: Home
    columns:
      - size: small
        widgets:
          - type: calendar
          - type: releases
            show-source-icon: true
            repositories:
              - Dokploy/dokploy
              - n8n-io/n8n
              - Budibase/budibase
              - home-assistant/core
              - tidbyt/pixlet
          - type: twitch-channels
            channels:
              - nmplol
              - extraemily
              - qtcinderella
              - ludwig
              - timthetatman
              - mizkif
      - size: full
        widgets:
          - type: hacker-news
          - type: videos
            style: grid-cards
            channels:
              - UC3GzdWYwUYI1ACxuP9Nm-eg
              - UCGbg3DjQdcqWwqOLHpYHXIg
              - UC24RSoLcjiNZbQcT54j5l7Q
            limit: 3
          - type: rss
            limit: 10
            collapse-after: 3
            cache: 3h
            feeds:
              - url: https://daringfireball.net/feeds/main
                title: Daring Fireball
      - size: small
        widgets:
          - type: weather
            location: Gansevoort, New York, United States
            show-area-name: false
            units: imperial
            hour-format: 12h
          - type: markets
            markets:
              - symbol: SPY
                name: S&P 500
              - symbol: VOO
                name: Vanguard
              - symbol: BTC-USD
                name: Bitcoin
              - symbol: ETH-USD
                name: Etherium
              - symbol: NVDA
                name: NVIDIA
              - symbol: AAPL
                name: Apple
              - symbol: MSFT
                name: Microsoft
              - symbol: GOOGL
                name: Google
              - symbol: AMD
                name: AMD
              - symbol: TSLA
                name: Tesla
"""

View File

@ -0,0 +1,16 @@
[variables]
main_domain = "${domain}"
secret_key = "${base64:32}"
[config]
env = [
"GLITCHTIP_HOST=${main_domain}",
"GLITCHTIP_PORT=8000",
"SECRET_KEY=${secret_key}"
]
mounts = [ ]
[[config.domains]]
serviceName = "web"
port = 8_000
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ ]
mounts = [ ]
[[config.domains]]
serviceName = "glpi-web"
port = 80
host = "${main_domain}"

View File

@ -0,0 +1,16 @@
[variables]
main_domain = "${domain}"
username = "gotenberg"
password = "changethis"
[config]
env = [
"GOTENBERG_API_BASIC_AUTH_USERNAME=${username}",
"GOTENBERG_API_BASIC_AUTH_PASSWORD=${password}"
]
mounts = [ ]
[[config.domains]]
serviceName = "gotenberg"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ ]
mounts = [ ]
[[config.domains]]
serviceName = "grafana"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,17 @@
[variables]
main_domain = "${domain}"
session_key = "${base64:64}"
form_encryption_key = "${base64:64}"
[config]
env = [
"APP_HOMEPAGE_URL=http://${main_domain}",
"SESSION_KEY=${session_key}",
"FORM_ENCRYPTION_KEY=${form_encryption_key}"
]
mounts = [ ]
[[config.domains]]
serviceName = "heyform"
port = 8_000
host = "${main_domain}"

View File

@ -0,0 +1,27 @@
[variables]
main_domain = "${domain}"
postgres_password = "${password}"
jwt_secret = "${password}"
app_key = "${password}"
[config]
env = [
"DOMAIN=${main_domain}",
"POSTGRES_DB=hievents",
"POSTGRES_USER=hievents",
"POSTGRES_PASSWORD=${postgres_password}",
"VITE_STRIPE_PUBLISHABLE_KEY=",
"APP_KEY=${app_key}",
"JWT_SECRET=${jwt_secret}",
"MAIL_MAILER=",
"MAIL_HOST=",
"MAIL_PORT=",
"MAIL_FROM_ADDRESS=",
"MAIL_FROM_NAME="
]
mounts = [ ]
[[config.domains]]
serviceName = "all-in-one"
port = 80
host = "${main_domain}"

View File

@ -0,0 +1,18 @@
[variables]
main_domain = "${domain}"
postgres_password = "${password}"
next_secret = "${base64:32}"
meili_master_key = "${base64:32}"
[config]
env = [
"NEXTAUTH_SECRET=${next_secret}",
"MEILI_MASTER_KEY=${meili_master_key}",
"NEXTAUTH_URL=http://${main_domain}"
]
mounts = [ ]
[[config.domains]]
serviceName = "web"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,12 @@
[variables]
main_domain = "${domain}"
secret_key = "${password:64}"
[config]
env = [ "SECRET_ENCRYPTION_KEY=${secret_key}" ]
mounts = [ ]
[[config.domains]]
serviceName = "homarr"
port = 7_575
host = "${main_domain}"

View File

@ -0,0 +1,106 @@
[variables]
main_domain = "${domain}"
huly_secret = "${base64:64}"
[config]
env = [
"HULY_VERSION=v0.6.468",
"DOCKER_NAME=huly",
"HOST_ADDRESS=${main_domain}",
"SECURE=",
"HTTP_PORT=80",
"HTTP_BIND=",
"TITLE=Huly",
"DEFAULT_LANGUAGE=en",
"LAST_NAME_FIRST=true",
"SECRET=${huly_secret}"
]
[[config.domains]]
serviceName = "nginx"
port = 80
host = "${main_domain}"
[[config.mounts]]
filePath = "/volumes/nginx/.huly.nginx"
content = """
server {
    listen 80;
    server_name _;

    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_pass http://front:8080;
    }

    location /_accounts {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        rewrite ^/_accounts(/.*)$ $1 break;
        proxy_pass http://account:3000/;
    }

    location /_collaborator {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        rewrite ^/_collaborator(/.*)$ $1 break;
        proxy_pass http://collaborator:3078/;
    }

    location /_transactor {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        rewrite ^/_transactor(/.*)$ $1 break;
        proxy_pass http://transactor:3333/;
    }

    location ~ ^/eyJ {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_pass http://transactor:3333;
    }

    location /_rekoni {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        rewrite ^/_rekoni(/.*)$ $1 break;
        proxy_pass http://rekoni:4004/;
    }

    location /_stats {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        rewrite ^/_stats(/.*)$ $1 break;
        proxy_pass http://stats:4900/;
    }
}
"""

View File

@ -0,0 +1,26 @@
[variables]
main_domain = "${domain}"
db_password = "${password}"
db_user = "immich"
[config]
env = [
"IMMICH_HOST=${main_domain}",
"SERVER_URL=https://${main_domain}",
"FRONT_BASE_URL=https://${main_domain}",
"DB_HOSTNAME=immich-database",
"DB_PORT=5432",
"DB_USERNAME=${db_user}",
"DB_PASSWORD=${db_password}",
"DB_DATABASE_NAME=immich",
"REDIS_HOSTNAME=immich-redis",
"REDIS_PORT=6379",
"REDIS_DBINDEX=0",
"TZ=UTC"
]
mounts = [ ]
[[config.domains]]
serviceName = "immich-server"
port = 2_283
host = "${main_domain}"

View File

@ -0,0 +1,58 @@
[variables]
main_domain = "${domain}"
postgres_password = "${password}"
postgres_user = "infisical"
postgres_db = "infisical"
[config]
env = [
"ENCRYPTION_KEY=6c1fe4e407b8911c104518103505b218",
"AUTH_SECRET=5lrMXKKWCVocS/uerPsl7V+TX/aaUaI7iDkgl3tSmLE=",
"POSTGRES_PASSWORD=${postgres_password}",
"POSTGRES_USER=${postgres_user}",
"POSTGRES_DB=${postgres_db}",
"SITE_URL=http://${main_domain}:8080",
"SMTP_HOST=",
"SMTP_PORT=",
"SMTP_NAME=",
"SMTP_USERNAME=",
"SMTP_PASSWORD=",
"CLIENT_ID_HEROKU=",
"CLIENT_ID_VERCEL=",
"CLIENT_ID_NETLIFY=",
"CLIENT_ID_GITHUB=",
"CLIENT_ID_GITHUB_APP=",
"CLIENT_SLUG_GITHUB_APP=",
"CLIENT_ID_GITLAB=",
"CLIENT_ID_BITBUCKET=",
"CLIENT_SECRET_HEROKU=",
"CLIENT_SECRET_VERCEL=",
"CLIENT_SECRET_NETLIFY=",
"CLIENT_SECRET_GITHUB=",
"CLIENT_SECRET_GITHUB_APP=",
"CLIENT_SECRET_GITLAB=",
"CLIENT_SECRET_BITBUCKET=",
"CLIENT_SLUG_VERCEL=",
"CLIENT_PRIVATE_KEY_GITHUB_APP=",
"CLIENT_APP_ID_GITHUB_APP=",
"SENTRY_DSN=",
"POSTHOG_HOST=",
"POSTHOG_PROJECT_API_KEY=",
"CLIENT_ID_GOOGLE_LOGIN=",
"CLIENT_SECRET_GOOGLE_LOGIN=",
"CLIENT_ID_GITHUB_LOGIN=",
"CLIENT_SECRET_GITHUB_LOGIN=",
"CLIENT_ID_GITLAB_LOGIN=",
"CLIENT_SECRET_GITLAB_LOGIN=",
"CAPTCHA_SECRET=",
"NEXT_PUBLIC_CAPTCHA_SITE_KEY=",
"PLAIN_API_KEY=",
"PLAIN_WISH_LABEL_IDS=",
"SSL_CLIENT_CERTIFICATE_HEADER_KEY="
]
mounts = [ ]
[[config.domains]]
serviceName = "backend"
port = 8_080
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = { }
mounts = [ ]
[[config.domains]]
serviceName = "influxdb"
port = 8_086
host = "${main_domain}"

View File

@ -0,0 +1,19 @@
[variables]
main_domain = "${domain}"
db_password = "${password}"
db_username = "invoiceshelf"
db_database = "invoiceshelf"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "invoiceshelf-app"
port = 80
host = "${main_domain}"
[config.env]
INVOICESHELF_HOST = "${main_domain}"
DB_PASSWORD = "${db_password}"
DB_USERNAME = "${db_username}"
DB_DATABASE = "${db_database}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = { }
mounts = [ ]
[[config.domains]]
serviceName = "it-tools"
port = 80
host = "${main_domain}"

View File

@ -0,0 +1,13 @@
[variables]
main_domain = "${domain}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "jellyfin"
port = 8_096
host = "${main_domain}"
[config.env]
JELLYFIN_HOST = "${main_domain}"

View File

@ -0,0 +1,22 @@
[variables]
main_domain = "${domain}"
admin_password = "${password:32}"
mysql_password = "${password:32}"
mysql_root_password = "${password:32}"
app_secret = "${password:32}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "app"
port = 8_001
host = "${main_domain}"
[config.env]
KI_HOST = "${main_domain}"
KI_ADMINMAIL = "admin@kimai.local"
KI_ADMINPASS = "${admin_password}"
KI_MYSQL_ROOT_PASSWORD = "${mysql_root_password}"
KI_MYSQL_PASSWORD = "${mysql_password}"
KI_APP_SECRET = "${app_secret}"

View File

@ -0,0 +1,16 @@
[variables]
main_domain = "${domain}"
db_password = "${password}"
db_username = "langflow"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "langflow"
port = 7_860
host = "${main_domain}"
[config.env]
DB_PASSWORD = "${db_password}"
DB_USERNAME = "${db_username}"

View File

@ -0,0 +1,17 @@
[variables]
main_domain = "${domain}"
postgres_password = "${password}"
next_secret = "${base64:32}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "linkwarden"
port = 3_000
host = "${main_domain}"
[config.env]
POSTGRES_PASSWORD = "${postgres_password}"
NEXTAUTH_SECRET = "${next_secret}"
NEXTAUTH_URL = "http://${main_domain}/api/v1/auth"

View File

@ -0,0 +1,29 @@
[variables]
main_domain = "${domain}"
[config]
[[config.domains]]
serviceName = "app"
port = 9_000
host = "${main_domain}"
[[config.mounts]]
filePath = "config.toml"
content = """
[app]
address = "0.0.0.0:9000"
[db]
host = "db"
port = 5432
user = "listmonk"
password = "listmonk"
database = "listmonk"
ssl_mode = "disable"
max_open = 25
max_idle = 25
max_lifetime = "300s"
params = ""
"""

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = { }
mounts = [ ]
[[config.domains]]
serviceName = "lobe-chat"
port = 3_210
host = "${main_domain}"

View File

@ -0,0 +1,22 @@
[variables]
main_domain = "${domain}"
admin_domain = "${domain}"
postgres_password = "${password}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "app"
port = 3_001
host = "${main_domain}"
[[config.domains]]
serviceName = "app"
port = 3_002
host = "${admin_domain}"
[config.env]
LOGTO_ENDPOINT = "http://${admin_domain}"
LOGTO_ADMIN_ENDPOINT = "http://${admin_domain}"
LOGTO_POSTGRES_PASSWORD = "${postgres_password}"

View File

@ -0,0 +1,16 @@
[variables]
main_domain = "${domain}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "macos"
port = 8_006
host = "${main_domain}"
[config.env]
VERSION = "15"
DISK_SIZE = "64G"
RAM_SIZE = "4G"
CPU_CORES = "2"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
default_password = "${password}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "mailpit"
port = 8_025
host = "${main_domain}"

View File

@ -0,0 +1,21 @@
[variables]
main_domain = "${domain}"
secret_key_base = "${base64:64}"
synth_api_key = "${base64:32}"
[[config.domains]]
serviceName = "app"
port = 3_000
host = "${main_domain}"
[config.env]
SECRET_KEY_BASE = "${secret_key_base}"
SELF_HOSTED = "true"
SYNTH_API_KEY = "${synth_api_key}"
RAILS_FORCE_SSL = "false"
RAILS_ASSUME_SSL = "false"
GOOD_JOB_EXECUTION_MODE = "async"
[[config.mounts]]
filePath = "./uploads"
content = "This is where user uploads will be stored"

View File

@ -0,0 +1,15 @@
[variables]
main_domain = "${domain}"
master_key = "${base64:32}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "meilisearch"
port = 7_700
host = "${main_domain}"
[config.env]
MEILI_ENV = "development"
MEILI_MASTER_KEY = "${master_key}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = { }
mounts = [ ]
[[config.domains]]
serviceName = "metabase"
port = 3_000
host = "${main_domain}"

View File

@ -0,0 +1,17 @@
[variables]
main_domain = "${domain}"
api_domain = "${domain}"
[config]
env = { }
mounts = [ ]
[[config.domains]]
serviceName = "minio"
port = 9_001
host = "${main_domain}"
[[config.domains]]
serviceName = "minio"
port = 9_000
host = "${api_domain}"

View File

@ -0,0 +1,15 @@
[variables]
main_domain = "${domain}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "n8n"
port = 5_678
host = "${main_domain}"
[config.env]
N8N_HOST = "${main_domain}"
N8N_PORT = "5678"
GENERIC_TIMEZONE = "Europe/Berlin"

View File

@ -0,0 +1,17 @@
[variables]
main_domain = "${domain}"
db_password = "${password}"
db_root_password = "${password}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "nextcloud"
port = 80
host = "${main_domain}"
[config.env]
NEXTCLOUD_DOMAIN = "${main_domain}"
MYSQL_SECRET_PASSWORD = "${db_password}"
MYSQL_SECRET_PASSWORD_ROOT = "${db_root_password}"

View File

@ -0,0 +1,15 @@
[variables]
main_domain = "${domain}"
jwt_secret = "${base64:64}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "nocodb"
port = 8_000
host = "${main_domain}"
[config.env]
NOCODB_PORT = "8000"
NC_AUTH_JWT_SECRET = "${jwt_secret}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = { }
mounts = [ ]
[[config.domains]]
serviceName = "web"
port = 8_069
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = { }
mounts = [ ]
[[config.domains]]
serviceName = "onedev"
port = 6_610
host = "${main_domain}"

View File

@ -0,0 +1,13 @@
[variables]
main_domain = "${domain}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "ontime"
port = 4_001
host = "${main_domain}"
[config.env]
TZ = "UTC"

View File

@ -0,0 +1,14 @@
[variables]
main_domain = "${domain}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "open-webui"
port = 8_080
host = "${main_domain}"
[config.env]
OLLAMA_DOCKER_TAG = "0.1.47"
WEBUI_DOCKER_TAG = "0.3.7"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = [ ]
mounts = [ ]
[[config.domains]]
serviceName = "otterwiki"
port = 80
host = "${main_domain}"

View File

@ -0,0 +1,66 @@
[variables]
main_domain = "${domain}"
dex_domain = "${domain}"
secret_key = "${base64:32}"
utils_secret = "${base64:32}"
client_secret = "${base64:32}"
postgres_password = "${password}"
[[config.domains]]
serviceName = "outline"
port = 3_000
host = "${main_domain}"
[[config.domains]]
serviceName = "dex"
port = 5_556
host = "${dex_domain}"
[config.env]
URL = "http://${main_domain}"
DEX_URL = "http://${dex_domain}"
DOMAIN_NAME = "${main_domain}"
POSTGRES_PASSWORD = "${postgres_password}"
SECRET_KEY = "${secret_key}"
UTILS_SECRET = "${utils_secret}"
CLIENT_SECRET = "${client_secret}"
[[config.mounts]]
filePath = "/etc/dex/config.yaml"
content = """
issuer: http://${dex_domain}
web:
  http: 0.0.0.0:5556
storage:
  type: memory
enablePasswordDB: true
frontend:
  issuer: Outline
logger:
  level: debug
staticPasswords:
  - email: "admin@example.com"
    # bcrypt hash of the string "password": $(echo password | htpasswd -BinC 10 admin | cut -d: -f2)
    hash: "$2y$10$jsRWHw54uxTUIfhjgUrB9u8HSzPk7TUuQri9sXZrKzRXcScvwYor."
    username: "admin"
    userID: "1"
oauth2:
  skipApprovalScreen: true
  alwaysShowLoginScreen: false
  passwordConnector: local
staticClients:
  - id: "outline"
    redirectURIs:
      - http://${main_domain}/auth/oidc.callback
    name: "Outline"
    secret: "${client_secret}"
"""

View File

@ -0,0 +1,13 @@
[variables]
main_domain = "${domain}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "penpot-frontend"
port = 80
host = "${main_domain}"
[config.env]
DOMAIN_NAME = "${main_domain}"

View File

@ -0,0 +1,24 @@
[variables]
main_domain = "${domain}"
api_domain = "${domain}"
postgres_password = "${password}"
secret = "${base64:32}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "peppermint-app"
port = 3_000
host = "${main_domain}"
[[config.domains]]
serviceName = "peppermint-app"
port = 5_003
host = "${api_domain}"
[config.env]
MAIN_DOMAIN = "${main_domain}"
API_DOMAIN = "${api_domain}"
POSTGRES_PASSWORD = "${postgres_password}"
SECRET = "${secret}"

View File

@ -0,0 +1,15 @@
[variables]
main_domain = "${domain}"
admin_password = "${password}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "photoprism"
port = 2_342
host = "${main_domain}"
[config.env]
BASE_URL = "http://${main_domain}"
ADMIN_PASSWORD = "${admin_password}"

View File

@ -0,0 +1,18 @@
[variables]
main_domain = "${domain}"
root_password = "${password:32}"
user_password = "${password:32}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "phpmyadmin"
port = 80
host = "${main_domain}"
[config.env]
MYSQL_ROOT_PASSWORD = "${root_password}"
MYSQL_DATABASE = "mysql"
MYSQL_USER = "phpmyadmin"
MYSQL_PASSWORD = "${user_password}"

View File

@ -0,0 +1,48 @@
[variables]
main_domain = "${domain}"
secret_base = "${base64:64}"
totp_key = "${base64:32}"
[[config.domains]]
serviceName = "plausible"
port = 8_000
host = "${main_domain}"
[config.env]
BASE_URL = "http://${main_domain}"
SECRET_KEY_BASE = "${secret_base}"
TOTP_VAULT_KEY = "${totp_key}"
[[config.mounts]]
filePath = "/clickhouse/clickhouse-config.xml"
content = """
<clickhouse>
<logger>
<level>warning</level>
<console>true</console>
</logger>
<!-- Stop all the unnecessary logging -->
<query_thread_log remove="remove"/>
<query_log remove="remove"/>
<text_log remove="remove"/>
<trace_log remove="remove"/>
<metric_log remove="remove"/>
<asynchronous_metric_log remove="remove"/>
<session_log remove="remove"/>
<part_log remove="remove"/>
</clickhouse>
"""
[[config.mounts]]
filePath = "/clickhouse/clickhouse-user-config.xml"
content = """
<clickhouse>
<profiles>
<default>
<log_queries>0</log_queries>
<log_query_threads>0</log_query_threads>
</default>
</profiles>
</clickhouse>
"""

View File

@ -0,0 +1,15 @@
[variables]
main_domain = "${domain}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "pocket-id"
port = 80
host = "${main_domain}"
[config.env]
PUBLIC_UI_CONFIG_DISABLED = "false"
PUBLIC_APP_URL = "http://${main_domain}"
TRUST_PROXY = "true"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = { }
mounts = [ ]
[[config.domains]]
serviceName = "pocketbase"
port = 80
host = "${main_domain}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = { }
mounts = [ ]
[[config.domains]]
serviceName = "portainer"
port = 9_000
host = "${main_domain}"

View File

@ -0,0 +1,21 @@
[variables]
main_domain = "${domain}"
db_password = "${password}"
db_user = "postiz"
db_name = "postiz"
jwt_secret = "${base64:32}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "postiz-app"
port = 5_000
host = "${main_domain}"
[config.env]
POSTIZ_HOST = "${main_domain}"
DB_PASSWORD = "${db_password}"
DB_USER = "${db_user}"
DB_NAME = "${db_name}"
JWT_SECRET = "${jwt_secret}"

View File

@ -0,0 +1,18 @@
[variables]
main_domain = "${domain}"
registry_http_secret = "${password:30}"
[[config.domains]]
serviceName = "registry"
port = 5_000
host = "${main_domain}"
[config.env]
REGISTRY_HTTP_SECRET = "${registry_http_secret}"
[[config.mounts]]
filePath = "/auth/registry.password"
content = """
# from: docker run --rm --entrypoint htpasswd httpd:2 -Bbn docker password
docker:$2y$10$qWZoWev/u5PV7WneFoRAMuoGpRcAQOgUuIIdLnU7pJXogrBSY23/2
"""

View File

@ -0,0 +1,14 @@
[variables]
main_domain = "${domain}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "rocketchat"
port = 3_000
host = "${main_domain}"
[config.env]
ROCKETCHAT_HOST = "${main_domain}"
ROCKETCHAT_PORT = "3000"

View File

@ -0,0 +1,14 @@
[variables]
main_domain = "${domain}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "roundcubemail"
port = 80
host = "${main_domain}"
[config.env]
DEFAULT_HOST = "tls://mail.example.com"
SMTP_SERVER = "tls://mail.example.com"

View File

@ -0,0 +1,16 @@
[variables]
main_domain = "${domain}"
postgres_password = "${password}"
admin_access_token = "${base64:32}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "ryot-app"
port = 8_000
host = "${main_domain}"
[config.env]
POSTGRES_PASSWORD = "${postgres_password}"
ADMIN_ACCESS_TOKEN = "${admin_access_token}"

View File

@ -0,0 +1,20 @@
[variables]
main_domain = "${domain}"
initial_api_key = "${password:30}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "shlink-web"
port = 8_080
host = "web-${main_domain}"
[[config.domains]]
serviceName = "shlink"
port = 8_080
host = "${main_domain}"
[config.env]
INITIAL_API_KEY = "${initial_api_key}"
DEFAULT_DOMAIN = "${main_domain}"

View File

@ -0,0 +1,18 @@
[variables]
main_domain = "${domain}"
db_password = "${password}"
db_user = "slash"
db_name = "slash"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "slash-app"
port = 5_231
host = "${main_domain}"
[config.env]
DB_USER = "${db_user}"
DB_PASSWORD = "${db_password}"
DB_NAME = "${db_name}"

View File

@ -0,0 +1,17 @@
[variables]
main_domain = "${domain}"
metrics_domain = "${domain}"
[config]
env = { }
mounts = [ ]
[[config.domains]]
serviceName = "soketi"
port = 6_001
host = "${main_domain}"
[[config.domains]]
serviceName = "soketi"
port = 9_601
host = "${metrics_domain}"

View File

@ -0,0 +1,15 @@
[variables]
main_domain = "${domain}"
secret_key = "${password}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "server"
port = 8_080
host = "${main_domain}"
[config.env]
SD_USERNAME = "admin"
SD_PASSWORD = "${secret_key}"

View File

@ -0,0 +1,11 @@
[variables]
main_domain = "${domain}"
[config]
env = { }
mounts = [ ]
[[config.domains]]
serviceName = "stirling-pdf"
port = 8_080
host = "${main_domain}"

View File

@ -0,0 +1,921 @@
[variables]
main_domain = "${domain}"
postgres_password = "${password:32}"
jwt_secret = "${base64:32}"
dashboard_password = "${password:32}"
logflare_api_key = "${password:32}"
anon_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogImFub24iLAogICJpc3MiOiAic3VwYWJhc2UiLAogICJpYXQiOiAxNzQxNTAwMDAwLAogICJleHAiOiAxODk5MjY2NDAwCn0.muKe0Nrvkf5bMyLoFqAuFypRu3jHAcTYU08SYKrgRQo"
service_role_key = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.ewogICJyb2xlIjogInNlcnZpY2Vfcm9sZSIsCiAgImlzcyI6ICJzdXBhYmFzZSIsCiAgImlhdCI6IDE3NDE1MDAwMDAsCiAgImV4cCI6IDE4OTkyNjY0MDAKfQ.1KoSiJVueKJNkF59uc84BLqk7h8VdAoVp6Gozqr_vGc"
[[config.domains]]
serviceName = "kong"
port = 8_000
host = "${main_domain}"
[config.env]
SUPABASE_HOST = "${main_domain}"
POSTGRES_PASSWORD = "${postgres_password}"
JWT_SECRET = "${jwt_secret}"
ANON_KEY = "${anon_key}"
SERVICE_ROLE_KEY = "${service_role_key}"
DASHBOARD_USERNAME = "supabase"
DASHBOARD_PASSWORD = "${dashboard_password}"
POSTGRES_HOSTNAME = "db"
POSTGRES_DB = "postgres"
POSTGRES_PORT = "5432"
KONG_HTTP_PORT = "8000"
KONG_HTTPS_PORT = "8443"
PGRST_DB_SCHEMAS = "public,storage,graphql_public"
ADDITIONAL_REDIRECT_URLS = ""
JWT_EXPIRY = "3600"
DISABLE_SIGNUP = "false"
MAILER_URLPATHS_CONFIRMATION = '"/auth/v1/verify"'
MAILER_URLPATHS_INVITE = '"/auth/v1/verify"'
MAILER_URLPATHS_RECOVERY = '"/auth/v1/verify"'
MAILER_URLPATHS_EMAIL_CHANGE = '"/auth/v1/verify"'
ENABLE_EMAIL_SIGNUP = "true"
ENABLE_EMAIL_AUTOCONFIRM = "false"
SMTP_ADMIN_EMAIL = "admin@example.com"
SMTP_HOSTNAME = "supabase-mail"
SMTP_PORT = "2500"
SMTP_USER = "fake_mail_user"
SMTP_PASS = "fake_mail_password"
SMTP_SENDER_NAME = "fake_sender"
ENABLE_ANONYMOUS_USERS = "false"
ENABLE_PHONE_SIGNUP = "true"
ENABLE_PHONE_AUTOCONFIRM = "true"
STUDIO_DEFAULT_ORGANIZATION = "Default Organization"
STUDIO_DEFAULT_PROJECT = "Default Project"
STUDIO_PORT = "3000"
IMGPROXY_ENABLE_WEBP_DETECTION = "true"
FUNCTIONS_VERIFY_JWT = "false"
LOGFLARE_LOGGER_BACKEND_API_KEY = "${logflare_api_key}"
LOGFLARE_API_KEY = "${logflare_api_key}"
DOCKER_SOCKET_LOCATION = "/var/run/docker.sock"
GOOGLE_PROJECT_ID = "GOOGLE_PROJECT_ID"
GOOGLE_PROJECT_NUMBER = "GOOGLE_PROJECT_NUMBER"
[[config.mounts]]
filePath = "/volumes/api/kong.yml"
content = """
_format_version: '2.1'
_transform: true

###
### Consumers / Users
###
consumers:
  - username: DASHBOARD
  - username: anon
    keyauth_credentials:
      - key: $SUPABASE_ANON_KEY
  - username: service_role
    keyauth_credentials:
      - key: $SUPABASE_SERVICE_KEY

###
### Access Control List
###
acls:
  - consumer: anon
    group: anon
  - consumer: service_role
    group: admin

###
### Dashboard credentials
###
basicauth_credentials:
  - consumer: DASHBOARD
    username: $DASHBOARD_USERNAME
    password: $DASHBOARD_PASSWORD

###
### API Routes
###
services:
  ## Open Auth routes
  - name: auth-v1-open
    url: http://auth:9999/verify
    routes:
      - name: auth-v1-open
        strip_path: true
        paths:
          - /auth/v1/verify
    plugins:
      - name: cors
  - name: auth-v1-open-callback
    url: http://auth:9999/callback
    routes:
      - name: auth-v1-open-callback
        strip_path: true
        paths:
          - /auth/v1/callback
    plugins:
      - name: cors
  - name: auth-v1-open-authorize
    url: http://auth:9999/authorize
    routes:
      - name: auth-v1-open-authorize
        strip_path: true
        paths:
          - /auth/v1/authorize
    plugins:
      - name: cors
  ## Secure Auth routes
  - name: auth-v1
    _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*'
    url: http://auth:9999/
    routes:
      - name: auth-v1-all
        strip_path: true
        paths:
          - /auth/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  ## Secure REST routes
  - name: rest-v1
    _comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*'
    url: http://rest:3000/
    routes:
      - name: rest-v1-all
        strip_path: true
        paths:
          - /rest/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: true
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  ## Secure GraphQL routes
  - name: graphql-v1
    _comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql'
    url: http://rest:3000/rpc/graphql
    routes:
      - name: graphql-v1-all
        strip_path: true
        paths:
          - /graphql/v1
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: true
      - name: request-transformer
        config:
          add:
            headers:
              - Content-Profile:graphql_public
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  ## Secure Realtime routes
  - name: realtime-v1-ws
    _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
    url: http://realtime-dev.supabase-realtime:4000/socket
    protocol: ws
    routes:
      - name: realtime-v1-ws
        strip_path: true
        paths:
          - /realtime/v1/
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  - name: realtime-v1-rest
    _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
    url: http://realtime-dev.supabase-realtime:4000/api
    protocol: http
    routes:
      - name: realtime-v1-rest
        strip_path: true
        paths:
          - /realtime/v1/api
    plugins:
      - name: cors
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
            - anon
  ## Storage routes: the storage server manages its own auth
  - name: storage-v1
    _comment: 'Storage: /storage/v1/* -> http://storage:5000/*'
    url: http://storage:5000/
    routes:
      - name: storage-v1-all
        strip_path: true
        paths:
          - /storage/v1/
    plugins:
      - name: cors
  ## Edge Functions routes
  - name: functions-v1
    _comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*'
    url: http://functions:9000/
    routes:
      - name: functions-v1-all
        strip_path: true
        paths:
          - /functions/v1/
    plugins:
      - name: cors
  ## Analytics routes
  - name: analytics-v1
    _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
    url: http://analytics:4000/
    routes:
      - name: analytics-v1-all
        strip_path: true
        paths:
          - /analytics/v1/
  ## Secure Database routes
  - name: meta
    _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*'
    url: http://meta:8080/
    routes:
      - name: meta-all
        strip_path: true
        paths:
          - /pg/
    plugins:
      - name: key-auth
        config:
          hide_credentials: false
      - name: acl
        config:
          hide_groups_header: true
          allow:
            - admin
  ## Protected Dashboard - catch all remaining routes
  - name: dashboard
    _comment: 'Studio: /* -> http://studio:3000/*'
    url: http://studio:3000/
    routes:
      - name: dashboard-all
        strip_path: true
        paths:
          - /
    plugins:
      - name: cors
      - name: basic-auth
        config:
          hide_credentials: true
"""
[[config.mounts]]
filePath = "/volumes/db/init/data.sql"
content = ""
[[config.mounts]]
filePath = "/volumes/db/jwt.sql"
content = """
\\set jwt_secret `echo "$JWT_SECRET"`
\\set jwt_exp `echo "$JWT_EXP"`
ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';
"""
[[config.mounts]]
filePath = "/volumes/db/logs.sql"
content = """
\\set pguser `echo "$POSTGRES_USER"`
create schema if not exists _analytics;
alter schema _analytics owner to :pguser;
"""
[[config.mounts]]
filePath = "/volumes/db/realtime.sql"
content = """
\\set pguser `echo "$POSTGRES_USER"`
create schema if not exists _realtime;
alter schema _realtime owner to :pguser;
"""
[[config.mounts]]
filePath = "/volumes/db/roles.sql"
content = """
-- NOTE: change to your own passwords for production environments
\\set pgpass `echo "$POSTGRES_PASSWORD"`
ALTER USER authenticator WITH PASSWORD :'pgpass';
ALTER USER pgbouncer WITH PASSWORD :'pgpass';
ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';
"""
[[config.mounts]]
filePath = "/volumes/db/webhooks.sql"
content = """
BEGIN;
-- Create pg_net extension
CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
-- Create supabase_functions schema
CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
-- supabase_functions.migrations definition
CREATE TABLE supabase_functions.migrations (
version text PRIMARY KEY,
inserted_at timestamptz NOT NULL DEFAULT NOW()
);
-- Initial supabase_functions migration
INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
-- supabase_functions.hooks definition
CREATE TABLE supabase_functions.hooks (
id bigserial PRIMARY KEY,
hook_table_id integer NOT NULL,
hook_name text NOT NULL,
created_at timestamptz NOT NULL DEFAULT NOW(),
request_id bigint
);
CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
CREATE FUNCTION supabase_functions.http_request()
RETURNS trigger
LANGUAGE plpgsql
AS $function$
DECLARE
request_id bigint;
payload jsonb;
url text := TG_ARGV[0]::text;
method text := TG_ARGV[1]::text;
headers jsonb DEFAULT '{}'::jsonb;
params jsonb DEFAULT '{}'::jsonb;
timeout_ms integer DEFAULT 1000;
BEGIN
IF url IS NULL OR url = 'null' THEN
RAISE EXCEPTION 'url argument is missing';
END IF;
IF method IS NULL OR method = 'null' THEN
RAISE EXCEPTION 'method argument is missing';
END IF;
IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
headers = '{"Content-Type": "application/json"}'::jsonb;
ELSE
headers = TG_ARGV[2]::jsonb;
END IF;
IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
params = '{}'::jsonb;
ELSE
params = TG_ARGV[3]::jsonb;
END IF;
IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
timeout_ms = 1000;
ELSE
timeout_ms = TG_ARGV[4]::integer;
END IF;
CASE
WHEN method = 'GET' THEN
SELECT http_get INTO request_id FROM net.http_get(
url,
params,
headers,
timeout_ms
);
WHEN method = 'POST' THEN
payload = jsonb_build_object(
'old_record', OLD,
'record', NEW,
'type', TG_OP,
'table', TG_TABLE_NAME,
'schema', TG_TABLE_SCHEMA
);
SELECT http_post INTO request_id FROM net.http_post(
url,
payload,
params,
headers,
timeout_ms
);
ELSE
RAISE EXCEPTION 'method argument % is invalid', method;
END CASE;
INSERT INTO supabase_functions.hooks
(hook_table_id, hook_name, request_id)
VALUES
(TG_RELID, TG_NAME, request_id);
RETURN NEW;
END
$function$;
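-- Example usage (hypothetical table and endpoint; arguments are url, method, headers, params, timeout_ms):
-- CREATE TRIGGER my_webhook AFTER INSERT ON public.my_table
--   FOR EACH ROW EXECUTE FUNCTION supabase_functions.http_request(
--     'http://host.docker.internal:9000/hook', 'POST', '{"Content-Type":"application/json"}', '{}', '1000'
--   );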
-- Supabase super admin
DO
$$
BEGIN
IF NOT EXISTS (
SELECT 1
FROM pg_roles
WHERE rolname = 'supabase_functions_admin'
)
THEN
CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
END IF;
END
$$;
GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
GRANT supabase_functions_admin TO postgres;
-- Remove unused supabase_pg_net_admin role
DO
$$
BEGIN
IF EXISTS (
SELECT 1
FROM pg_roles
WHERE rolname = 'supabase_pg_net_admin'
)
THEN
REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
DROP OWNED BY supabase_pg_net_admin;
DROP ROLE supabase_pg_net_admin;
END IF;
END
$$;
-- pg_net grants when extension is already enabled
DO
$$
BEGIN
IF EXISTS (
SELECT 1
FROM pg_extension
WHERE extname = 'pg_net'
)
THEN
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
END IF;
END
$$;
-- Event trigger for pg_net
CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
RETURNS event_trigger
LANGUAGE plpgsql
AS $$
BEGIN
IF EXISTS (
SELECT 1
FROM pg_event_trigger_ddl_commands() AS ev
JOIN pg_extension AS ext
ON ev.objid = ext.oid
WHERE ext.extname = 'pg_net'
)
THEN
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
END IF;
END;
$$;
COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
DO
$$
BEGIN
IF NOT EXISTS (
SELECT 1
FROM pg_event_trigger
WHERE evtname = 'issue_pg_net_access'
) THEN
CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
EXECUTE PROCEDURE extensions.grant_pg_net_access();
END IF;
END
$$;
INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
ALTER function supabase_functions.http_request() SECURITY DEFINER;
ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
COMMIT;
"""
[[config.mounts]]
filePath = "/volumes/functions/hello/index.ts"
content = """
// Follow this setup guide to integrate the Deno language server with your editor:
// https://deno.land/manual/getting_started/setup_your_environment
// This enables autocomplete, go to definition, etc.
import { serve } from "https://deno.land/std@0.177.1/http/server.ts"
serve(async () => {
return new Response(
`"Hello from Edge Functions!"`,
{ headers: { "Content-Type": "application/json" } }
)
})
// To invoke:
// curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/hello' \\
// --header 'Authorization: Bearer <anon/service_role API key>'
"""
[[config.mounts]]
filePath = "/volumes/functions/main/index.ts"
content = """
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'
console.log('main function started')
const JWT_SECRET = Deno.env.get('JWT_SECRET')
const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'
function getAuthToken(req: Request) {
const authHeader = req.headers.get('authorization')
if (!authHeader) {
throw new Error('Missing authorization header')
}
const [bearer, token] = authHeader.split(' ')
if (bearer !== 'Bearer') {
throw new Error(`Auth header is not 'Bearer {token}'`)
}
return token
}
async function verifyJWT(jwt: string): Promise<boolean> {
const encoder = new TextEncoder()
const secretKey = encoder.encode(JWT_SECRET)
try {
await jose.jwtVerify(jwt, secretKey)
} catch (err) {
console.error(err)
return false
}
return true
}
serve(async (req: Request) => {
if (req.method !== 'OPTIONS' && VERIFY_JWT) {
try {
const token = getAuthToken(req)
const isValidJWT = await verifyJWT(token)
if (!isValidJWT) {
return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
status: 401,
headers: { 'Content-Type': 'application/json' },
})
}
} catch (e) {
console.error(e)
return new Response(JSON.stringify({ msg: e.toString() }), {
status: 401,
headers: { 'Content-Type': 'application/json' },
})
}
}
const url = new URL(req.url)
const { pathname } = url
const path_parts = pathname.split('/')
const service_name = path_parts[1]
if (!service_name || service_name === '') {
const error = { msg: 'missing function name in request' }
return new Response(JSON.stringify(error), {
status: 400,
headers: { 'Content-Type': 'application/json' },
})
}
const servicePath = `/home/deno/functions/${service_name}`
console.error(`serving the request with ${servicePath}`)
const memoryLimitMb = 150
const workerTimeoutMs = 1 * 60 * 1000
const noModuleCache = false
const importMapPath = null
const envVarsObj = Deno.env.toObject()
const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])
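// Create an isolated edge-runtime user worker for the requested function directory
// and proxy the incoming request to it.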
try {
const worker = await EdgeRuntime.userWorkers.create({
servicePath,
memoryLimitMb,
workerTimeoutMs,
noModuleCache,
importMapPath,
envVars,
})
return await worker.fetch(req)
} catch (e) {
const error = { msg: e.toString() }
return new Response(JSON.stringify(error), {
status: 500,
headers: { 'Content-Type': 'application/json' },
})
}
})
"""
[[config.mounts]]
filePath = "/volumes/logs/vector.yml"
content = """
api:
enabled: true
address: 0.0.0.0:9001
sources:
docker_host:
type: docker_logs
exclude_containers:
- supabase-vector
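# The docker_logs source tails every container on the host; the Vector container is
# excluded, presumably so Vector does not ingest its own log output.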
transforms:
project_logs:
type: remap
inputs:
- docker_host
source: |-
.project = "default"
.event_message = del(.message)
.appname = del(.container_name)
del(.container_created_at)
del(.container_id)
del(.source_type)
del(.stream)
del(.label)
del(.image)
del(.host)
del(.stream)
router:
type: route
inputs:
- project_logs
route:
kong: '.appname == "supabase-kong"'
auth: '.appname == "supabase-auth"'
rest: '.appname == "supabase-rest"'
realtime: '.appname == "supabase-realtime"'
storage: '.appname == "supabase-storage"'
functions: '.appname == "supabase-functions"'
db: '.appname == "supabase-db"'
# Ignores non-nginx errors since they are related to Kong booting up
kong_logs:
type: remap
inputs:
- router.kong
source: |-
req, err = parse_nginx_log(.event_message, "combined")
if err == null {
.timestamp = req.timestamp
.metadata.request.headers.referer = req.referer
.metadata.request.headers.user_agent = req.agent
.metadata.request.headers.cf_connecting_ip = req.client
.metadata.request.method = req.method
.metadata.request.path = req.path
.metadata.request.protocol = req.protocol
.metadata.response.status_code = req.status
}
if err != null {
abort
}
# Ignores non-nginx errors since they are related to Kong booting up
kong_err:
type: remap
inputs:
- router.kong
source: |-
.metadata.request.method = "GET"
.metadata.response.status_code = 200
parsed, err = parse_nginx_log(.event_message, "error")
if err == null {
.timestamp = parsed.timestamp
.severity = parsed.severity
.metadata.request.host = parsed.host
.metadata.request.headers.cf_connecting_ip = parsed.client
url, err = split(parsed.request, " ")
if err == null {
.metadata.request.method = url[0]
.metadata.request.path = url[1]
.metadata.request.protocol = url[2]
}
}
if err != null {
abort
}
# GoTrue logs are structured JSON strings which the frontend parses directly, but we keep metadata for consistency.
auth_logs:
type: remap
inputs:
- router.auth
source: |-
parsed, err = parse_json(.event_message)
if err == null {
.metadata.timestamp = parsed.time
.metadata = merge!(.metadata, parsed)
}
# PostgREST logs are structured, so we separate the timestamp from the message using a regex
rest_logs:
type: remap
inputs:
- router.rest
source: |-
parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
if err == null {
.event_message = parsed.msg
.timestamp = to_timestamp!(parsed.time)
.metadata.host = .project
}
# Realtime logs are structured, so we parse the severity level using a regex (the time is ignored because it has no date)
realtime_logs:
type: remap
inputs:
- router.realtime
source: |-
.metadata.project = del(.project)
.metadata.external_id = .metadata.project
parsed, err = parse_regex(.event_message, r'^(?P<time>\\d+:\\d+:\\d+\\.\\d+) \\[(?P<level>\\w+)\\] (?P<msg>.*)$')
if err == null {
.event_message = parsed.msg
.metadata.level = parsed.level
}
# Storage logs may contain JSON objects, so we parse them for completeness
storage_logs:
type: remap
inputs:
- router.storage
source: |-
.metadata.project = del(.project)
.metadata.tenantId = .metadata.project
parsed, err = parse_json(.event_message)
if err == null {
.event_message = parsed.msg
.metadata.level = parsed.level
.metadata.timestamp = parsed.time
.metadata.context[0].host = parsed.hostname
.metadata.context[0].pid = parsed.pid
}
# Postgres logs some messages to stderr, which we map to the warning severity level
db_logs:
type: remap
inputs:
- router.db
source: |-
.metadata.host = "db-default"
.metadata.parsed.timestamp = .timestamp
parsed, err = parse_regex(.event_message, r'.*(?P<level>INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC?):.*', numeric_groups: true)
if err != null || parsed == null {
.metadata.parsed.error_severity = "info"
}
if parsed != null {
.metadata.parsed.error_severity = parsed.level
}
if .metadata.parsed.error_severity == "info" {
.metadata.parsed.error_severity = "log"
}
.metadata.parsed.error_severity = upcase!(.metadata.parsed.error_severity)
sinks:
logflare_auth:
type: 'http'
inputs:
- auth_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_realtime:
type: 'http'
inputs:
- realtime_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_rest:
type: 'http'
inputs:
- rest_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_db:
type: 'http'
inputs:
- db_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
# We must route the sink through kong because ingesting logs before logflare is fully initialised will
# lead to broken queries from studio. This relies on the assumption that containers are started in the
# following order: vector > db > logflare > kong
uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_functions:
type: 'http'
inputs:
- router.functions
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_storage:
type: 'http'
inputs:
- storage_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_kong:
type: 'http'
inputs:
- kong_logs
- kong_err
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
"""

View File

@ -0,0 +1,54 @@
[variables]
main_domain = "${domain}"
secret_key = "${password:30}"
postgres_password = "${password:30}"
redis_password = "${password:30}"
mapbox_api_key = ""
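# mapbox_api_key is optional; Superset only needs it for Mapbox-based map charts.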
[[config.domains]]
serviceName = "superset"
port = 8_088
host = "${main_domain}"
[config.env]
SECRET_KEY = "${secret_key}"
MAPBOX_API_KEY = "${mapbox_api_key}"
POSTGRES_DB = "superset"
POSTGRES_USER = "superset"
POSTGRES_PASSWORD = "${postgres_password}"
REDIS_PASSWORD = "${redis_password}"
[[config.mounts]]
filePath = "./superset/superset_config.py"
content = """
\"""
For more configuration options, see:
- https://superset.apache.org/docs/configuration/configuring-superset
\"""
import os
SECRET_KEY = os.getenv("SECRET_KEY")
MAPBOX_API_KEY = os.getenv("MAPBOX_API_KEY", "")
CACHE_CONFIG = {
"CACHE_TYPE": "RedisCache",
"CACHE_DEFAULT_TIMEOUT": 300,
"CACHE_KEY_PREFIX": "superset_",
"CACHE_REDIS_HOST": "redis",
"CACHE_REDIS_PORT": 6379,
"CACHE_REDIS_DB": 1,
"CACHE_REDIS_URL": f"redis://:{os.getenv('REDIS_PASSWORD')}@{os.getenv('REDIS_HOST')}:6379/1",
}
FILTER_STATE_CACHE_CONFIG = {**CACHE_CONFIG, "CACHE_KEY_PREFIX": "superset_filter_"}
EXPLORE_FORM_DATA_CACHE_CONFIG = {**CACHE_CONFIG, "CACHE_KEY_PREFIX": "superset_explore_form_"}
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_DATABASE_URI = f"postgresql+psycopg2://{os.getenv('POSTGRES_USER')}:{os.getenv('POSTGRES_PASSWORD')}@{os.getenv('POSTGRES_HOST')}:5432/{os.getenv('POSTGRES_DB')}"
# Uncomment to load example data (via "superset load_examples") into the same
# PostgreSQL instance as the metadata database. Otherwise the default SQLite database
# is used, which does not persist in a volume when Superset restarts.
#SQLALCHEMY_EXAMPLES_URI = SQLALCHEMY_DATABASE_URI
"""

View File

@ -0,0 +1,25 @@
[variables]
main_domain = "${domain}"
db_password = "${password}"
public_db_port = "${randomPort}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "teable"
port = 3_000
host = "${main_domain}"
[config.env]
TEABLE_HOST = "${main_domain}"
TEABLE_DB_PORT = "${public_db_port}"
TIMEZONE = "UTC"
POSTGRES_HOST = "teable-db"
POSTGRES_PORT = "5432"
POSTGRES_DB = "teable"
POSTGRES_USER = "teable"
POSTGRES_PASSWORD = "${db_password}"
PUBLIC_ORIGIN = "https://${main_domain}"
PRISMA_DATABASE_URL = "postgresql://teable:${db_password}@teable-db:5432/teable"
PUBLIC_DATABASE_PROXY = "${TEABLE_HOST}:${TEABLE_DB_PORT}"

View File

@ -0,0 +1,26 @@
[variables]
main_domain = "${domain}"
jwt_secret = "${base64:32}"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "app"
port = 8_080
host = "${main_domain}"
[config.env]
TOLGEE_HOST = "${main_domain}"
TOLGEE_AUTHENTICATION_ENABLED = "true"
TOLGEE_AUTHENTICATION_INITIAL_PASSWORD = "admin"
TOLGEE_AUTHENTICATION_INITIAL_USERNAME = "admin"
TOLGEE_AUTHENTICATION_JWT_SECRET = "${jwt_secret}"
TOLGEE_MACHINE_TRANSLATION_GOOGLE_API_KEY = "my_google_api_key"
TOLGEE_SMTP_AUTH = "true"
TOLGEE_SMTP_FROM = "Tolgee <no-reply@mydomain.com>"
TOLGEE_SMTP_HOST = "email-smtp.regional-region.amazonaws.com"
TOLGEE_SMTP_PASSWORD = "omg/my/password"
TOLGEE_SMTP_PORT = "465"
TOLGEE_SMTP_SSL_ENABLED = "true"
TOLGEE_SMTP_USERNAME = "user@company.com"
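# The SMTP and Google machine-translation values above are placeholders; replace them
# with real credentials before deploying.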

View File

@ -0,0 +1,45 @@
[variables]
main_domain = "${domain}"
magic_link_secret = "${base64:16}"
session_secret = "${base64:16}"
encryption_key = "${base64:32}"
provider_secret = "${base64:32}"
coordinator_secret = "${base64:32}"
db_password = "${base64:24}"
db_user = "triggeruser"
db_name = "triggerdb"
[config]
mounts = [ ]
[[config.domains]]
serviceName = "webapp"
port = 3_000
host = "${main_domain}"
[config.env]
NODE_ENV = "production"
RUNTIME_PLATFORM = "docker-compose"
V3_ENABLED = "true"
TRIGGER_DOMAIN = "${main_domain}"
TRIGGER_PROTOCOL = "http"
POSTGRES_USER = "${db_user}"
POSTGRES_PASSWORD = "${db_password}"
POSTGRES_DB = "${db_name}"
DATABASE_URL = "postgresql://${db_user}:${db_password}@postgres:5432/${db_name}"
MAGIC_LINK_SECRET = "${magic_link_secret}"
SESSION_SECRET = "${session_secret}"
ENCRYPTION_KEY = "${encryption_key}"
PROVIDER_SECRET = "${provider_secret}"
COORDINATOR_SECRET = "${coordinator_secret}"
INTERNAL_OTEL_TRACE_DISABLED = "1"
INTERNAL_OTEL_TRACE_LOGGING_ENABLED = "0"
DEFAULT_ORG_EXECUTION_CONCURRENCY_LIMIT = "300"
DEFAULT_ENV_EXECUTION_CONCURRENCY_LIMIT = "100"
DIRECT_URL = "${DATABASE_URL}"
REDIS_HOST = "redis"
REDIS_PORT = "6379"
REDIS_TLS_DISABLED = "true"
HTTP_SERVER_PORT = "9020"
COORDINATOR_HOST = "127.0.0.1"
COORDINATOR_PORT = "${HTTP_SERVER_PORT}"

Some files were not shown because too many files have changed in this diff.