refactor: remove unused files

Mauricio Siu
2024-10-07 00:49:54 -06:00
parent c4cf545d85
commit 9b6ea99eea
74 changed files with 8 additions and 3928 deletions

View File

@@ -5,7 +5,8 @@
   "scripts": {
     "dev": "PORT=4000 tsx watch src/index.ts",
     "build": "tsc --project tsconfig.json",
-    "start": "node --experimental-specifier-resolution=node dist/index.js"
+    "start": "node --experimental-specifier-resolution=node dist/index.js",
+    "typecheck": "tsc --noEmit"
   },
   "dependencies": {
     "pino": "9.4.0",

View File

@@ -1,6 +1,6 @@
 import fs from "node:fs/promises";
 import path from "node:path";
-import { paths } from "@/server/constants";
+import { paths } from "@dokploy/server";
 const { APPLICATIONS_PATH } = paths();
 import type { ApplicationNested } from "@dokploy/server";
 import { unzipDrop } from "@dokploy/server";

View File

@@ -1,7 +1,7 @@
 import { randomBytes } from "node:crypto";
 import { readFile } from "node:fs/promises";
 import { join } from "node:path";
-import type { Domain } from "@dokploy/builders";
+import type { Domain } from "@dokploy/server";
 // import { IS_CLOUD } from "@/server/constants";
 import { TRPCError } from "@trpc/server";
 import { templates } from "../templates";

View File

@@ -4,7 +4,8 @@
   "scripts": {
     "dev": "PORT=4001 tsx watch src/index.ts",
     "build": "tsc --project tsconfig.json",
-    "start": "node --experimental-specifier-resolution=node dist/index.js"
+    "start": "node --experimental-specifier-resolution=node dist/index.js",
+    "typecheck": "tsc --noEmit"
   },
   "dependencies": {
     "drizzle-orm": "^0.30.8",

View File

@@ -8,7 +8,8 @@
     "build": "rm -rf ./dist && tsc --project tsconfig.server.json && tsc-alias -p tsconfig.server.json",
     "build:types": "tsc --emitDeclarationOnly --experimental-dts",
     "dev": "rm -rf ./dist && pnpm esbuild && tsc --emitDeclarationOnly --outDir dist -p tsconfig.server.json",
-    "esbuild": "tsx ./esbuild.config.ts"
+    "esbuild": "tsx ./esbuild.config.ts",
+    "typecheck": "tsc --noEmit"
   },
   "dependencies": {
     "rotating-file-stream": "3.2.3",

View File

@@ -1,6 +0,0 @@
version: "3.8"
services:
appsmith:
image: index.docker.io/appsmith/appsmith-ee:v1.29
volumes:
- ../files/stacks:/appsmith-stacks

View File

@@ -1,23 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateHash,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainServiceHash = generateHash(schema.projectName);

  const domains: DomainSchema[] = [
    {
      host: generateRandomDomain(schema),
      port: 80,
      serviceName: "appsmith",
    },
  ];

  return {
    domains,
  };
}
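Every removed template pairs a docker-compose file with a generate() module like the one above. Below is a minimal, self-contained sketch of the assumed shapes; the real definitions live in ../utils and are not part of this diff, and randomDomain is a hypothetical stand-in for generateRandomDomain:

type DomainSchema = { host: string; port: number; serviceName: string };
type Template = {
  envs?: string[];
  mounts?: { filePath: string; content: string }[];
  domains: DomainSchema[];
};

// Hypothetical stand-in: a random subdomain per project.
const randomDomain = (projectName: string): string =>
  `${projectName}-${Math.random().toString(36).slice(2, 8)}.example.com`;

const template: Template = {
  domains: [{ host: randomDomain("appsmith"), port: 80, serviceName: "appsmith" }],
};
console.log(template.domains[0].host); // e.g. appsmith-k3x9q1.example.com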

View File

@@ -1,51 +0,0 @@
services:
  aptabase_db:
    image: postgres:15-alpine
    restart: always
    volumes:
      - db-data:/var/lib/postgresql/data
    environment:
      POSTGRES_USER: aptabase
      POSTGRES_PASSWORD: sTr0NGp4ssw0rd
    networks:
      - dokploy-network
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U aptabase"]
      interval: 10s
      timeout: 5s
      retries: 5

  aptabase_events_db:
    image: clickhouse/clickhouse-server:23.8.16.16-alpine
    restart: always
    volumes:
      - events-db-data:/var/lib/clickhouse
    environment:
      CLICKHOUSE_USER: aptabase
      CLICKHOUSE_PASSWORD: sTr0NGp4ssw0rd
    ulimits:
      nofile:
        soft: 262144
        hard: 262144
    networks:
      - dokploy-network
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:8123 || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 5

  aptabase:
    image: ghcr.io/aptabase/aptabase:main
    restart: always
    environment:
      BASE_URL: http://${APTABASE_HOST}
      AUTH_SECRET: ${AUTH_SECRET}
      DATABASE_URL: Server=aptabase_db;Port=5432;User Id=aptabase;Password=sTr0NGp4ssw0rd;Database=aptabase
      CLICKHOUSE_URL: Host=aptabase_events_db;Port=8123;Username=aptabase;Password=sTr0NGp4ssw0rd

volumes:
  db-data:
    driver: local
  events-db-data:
    driver: local

View File

@@ -1,27 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateBase64,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);
  const authSecret = generateBase64(32);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 8080,
      serviceName: "aptabase",
    },
  ];

  const envs = [`APTABASE_HOST=${mainDomain}`, `AUTH_SECRET=${authSecret}`];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,10 +0,0 @@
version: "3.8"
services:
baserow:
image: baserow/baserow:1.25.2
environment:
BASEROW_PUBLIC_URL: "http://${BASEROW_HOST}"
volumes:
- baserow_data:/baserow/data
volumes:
baserow_data:

View File

@@ -1,24 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainHost = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: mainHost,
      port: 80,
      serviceName: "baserow",
    },
  ];

  const envs = [`BASEROW_HOST=${mainHost}`];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,26 +0,0 @@
services:
  postgres:
    image: postgres:16-alpine
    networks:
      - dokploy-network
    volumes:
      - calcom-data:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=password
      - POSTGRES_DB=db
      - DATABASE_URL=postgres://postgres:password@postgres:5432/db

  calcom:
    image: calcom/cal.com:v2.7.6
    depends_on:
      - postgres
    environment:
      - NEXTAUTH_SECRET=${NEXTAUTH_SECRET}
      - CALENDSO_ENCRYPTION_KEY=${CALENDSO_ENCRYPTION_KEY}
      - DATABASE_URL=postgres://postgres:password@postgres:5432/db
      - NEXT_PUBLIC_WEBAPP_URL=http://${CALCOM_HOST}
      - NEXTAUTH_URL=http://${CALCOM_HOST}/api/auth

volumes:
  calcom-data:

View File

@@ -1,32 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateBase64,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);
  const calcomEncryptionKey = generateBase64(32);
  const nextAuthSecret = generateBase64(32);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 3000,
      serviceName: "calcom",
    },
  ];

  const envs = [
    `CALCOM_HOST=${mainDomain}`,
    `NEXTAUTH_SECRET=${nextAuthSecret}`,
    `CALENDSO_ENCRYPTION_KEY=${calcomEncryptionKey}`,
  ];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,47 +0,0 @@
version: "3.8"
services:
database:
image: postgis/postgis:13-master
volumes:
- directus:/var/lib/postgresql/data
networks:
- dokploy-network
environment:
POSTGRES_USER: "directus"
POSTGRES_PASSWORD: "directus"
POSTGRES_DB: "directus"
cache:
image: redis:6
networks:
- dokploy-network
directus:
image: directus/directus:10.12.1
ports:
- 8055
volumes:
- ../files/uploads:/directus/uploads
- ../files/extensions:/directus/extensions
depends_on:
- cache
- database
environment:
SECRET: "replace-with-secure-random-value"
DB_CLIENT: "pg"
DB_HOST: "database"
DB_PORT: "5432"
DB_DATABASE: "directus"
DB_USER: "directus"
DB_PASSWORD: "directus"
CACHE_ENABLED: "true"
CACHE_AUTO_PURGE: "true"
CACHE_STORE: "redis"
REDIS: "redis://cache:6379"
ADMIN_EMAIL: "admin@example.com"
ADMIN_PASSWORD: "d1r3ctu5"
volumes:
directus:

View File

@@ -1,20 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const domains: DomainSchema[] = [
    {
      host: generateRandomDomain(schema),
      port: 8055,
      serviceName: "directus",
    },
  ];

  return {
    domains,
  };
}

View File

@@ -1,43 +0,0 @@
version: "3.8"
services:
postgres:
image: postgres:16
networks:
- dokploy-network
volumes:
- documenso-data:/var/lib/postgresql/data
environment:
- POSTGRES_USER=documenso
- POSTGRES_PASSWORD=password
- POSTGRES_DB=documenso
healthcheck:
test: ["CMD-SHELL", "pg_isready -U documenso"]
interval: 10s
timeout: 5s
retries: 5
start_period: 10s
documenso:
image: documenso/documenso:v1.5.6-rc.2
depends_on:
postgres:
condition: service_healthy
environment:
- PORT=${DOCUMENSO_PORT}
- NEXTAUTH_URL=http://${DOCUMENSO_HOST}
- NEXTAUTH_SECRET=${NEXTAUTH_SECRET}
- NEXT_PRIVATE_ENCRYPTION_KEY=${NEXT_PRIVATE_ENCRYPTION_KEY}
- NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY=${NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY}
- NEXT_PUBLIC_WEBAPP_URL=http://${DOCUMENSO_HOST}
- NEXT_PRIVATE_DATABASE_URL=postgres://documenso:password@postgres:5432/documenso
- NEXT_PRIVATE_DIRECT_DATABASE_URL=postgres://documenso:password@postgres:5432/documenso
- NEXT_PUBLIC_UPLOAD_TRANSPORT=database
- NEXT_PRIVATE_SMTP_TRANSPORT=smtp-auth
- NEXT_PRIVATE_SIGNING_LOCAL_FILE_PATH=/opt/documenso/cert.p12
ports:
- ${DOCUMENSO_PORT}
volumes:
- /opt/documenso/cert.p12:/opt/documenso/cert.p12
volumes:
documenso-data:

View File

@@ -1,36 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateBase64,
  generatePassword,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);
  const nextAuthSecret = generateBase64(32);
  const documensoEncryptionKey = generatePassword(32);
  const documensoSecondaryEncryptionKey = generatePassword(64);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 3000,
      serviceName: "documenso",
    },
  ];

  const envs = [
    `DOCUMENSO_HOST=${mainDomain}`,
    "DOCUMENSO_PORT=3000",
    `NEXTAUTH_SECRET=${nextAuthSecret}`,
    `NEXT_PRIVATE_ENCRYPTION_KEY=${documensoEncryptionKey}`,
    `NEXT_PRIVATE_ENCRYPTION_SECONDARY_KEY=${documensoSecondaryEncryptionKey}`,
  ];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,19 +0,0 @@
services:
  doublezero:
    restart: always
    image: liltechnomancer/double-zero:0.2.1
    volumes:
      - db-data:/var/lib/doublezero/data
    environment:
      AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
      AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
      AWS_REGION: ${AWS_REGION}
      SQS_URL: ${SQS_URL}
      SYSTEM_EMAIL: ${SYSTEM_EMAIL}
      SECRET_KEY_BASE: ${SECRET_KEY_BASE}
      PHX_HOST: ${DOUBLEZERO_HOST}
      DATABASE_PATH: ./00.db

volumes:
  db-data:
    driver: local

View File

@@ -1,36 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateBase64,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);
  const secretKeyBase = generateBase64(64);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 4000,
      serviceName: "doublezero",
    },
  ];

  const envs = [
    `DOUBLEZERO_HOST=${mainDomain}`,
    "DOUBLEZERO_PORT=4000",
    `SECRET_KEY_BASE=${secretKeyBase}`,
    "AWS_ACCESS_KEY_ID=your-aws-access-key",
    "AWS_SECRET_ACCESS_KEY=your-aws-secret-key",
    "AWS_REGION=your-aws-region",
    "SQS_URL=your-aws-sqs-url",
    "SYSTEM_EMAIL=",
  ];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,7 +0,0 @@
version: "3.8"
services:
excalidraw:
networks:
- dokploy-network
image: excalidraw/excalidraw:latest

View File

@@ -1,23 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateHash,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 80,
      serviceName: "excalidraw",
    },
  ];

  return {
    domains,
  };
}

View File

@@ -1,29 +0,0 @@
version: "3.8"
services:
ghost:
image: ghost:5-alpine
restart: always
environment:
database__client: mysql
database__connection__host: db
database__connection__user: root
database__connection__password: example
database__connection__database: ghost
url: http://${GHOST_HOST}
volumes:
- ghost:/var/lib/ghost/content
db:
image: mysql:8.0
restart: always
networks:
- dokploy-network
environment:
MYSQL_ROOT_PASSWORD: example
volumes:
- db:/var/lib/mysql
volumes:
ghost:
db:

View File

@@ -1,25 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateHash,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 2368,
      serviceName: "ghost",
    },
  ];

  const envs = [`GHOST_HOST=${mainDomain}`];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,37 +0,0 @@
version: "3.8"
services:
gitea:
image: gitea/gitea:1.22.2
environment:
- USER_UID=${USER_UID}
- USER_GID=${USER_GID}
- GITEA__database__DB_TYPE=postgres
- GITEA__database__HOST=db:5432
- GITEA__database__NAME=gitea
- GITEA__database__USER=gitea
- GITEA__database__PASSWD=gitea
restart: always
networks:
- dokploy-network
volumes:
- gitea_server:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
depends_on:
- db
db:
image: postgres:16
restart: always
environment:
- POSTGRES_USER=gitea
- POSTGRES_PASSWORD=gitea
- POSTGRES_DB=gitea
networks:
- dokploy-network
volumes:
- gitea_db:/var/lib/postgresql/data
volumes:
gitea_db:
gitea_server:

View File

@@ -1,24 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 3000,
      serviceName: "gitea",
    },
  ];

  const envs = ["USER_UID=1000", "USER_GID=1000"];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,59 +0,0 @@
x-environment: &default-environment
DATABASE_URL: postgres://postgres:postgres@postgres:5432/postgres
SECRET_KEY: ${SECRET_KEY}
PORT: ${GLITCHTIP_PORT}
EMAIL_URL: consolemail://
GLITCHTIP_DOMAIN: http://${GLITCHTIP_HOST}
DEFAULT_FROM_EMAIL: email@glitchtip.com
CELERY_WORKER_AUTOSCALE: "1,3"
CELERY_WORKER_MAX_TASKS_PER_CHILD: "10000"
x-depends_on: &default-depends_on
- postgres
- redis
services:
postgres:
image: postgres:16
environment:
POSTGRES_HOST_AUTH_METHOD: "trust"
restart: unless-stopped
volumes:
- pg-data:/var/lib/postgresql/data
networks:
- dokploy-network
redis:
image: redis
restart: unless-stopped
networks:
- dokploy-network
web:
image: glitchtip/glitchtip:v4.0
depends_on: *default-depends_on
ports:
- ${GLITCHTIP_PORT}
environment: *default-environment
restart: unless-stopped
volumes:
- uploads:/code/uploads
worker:
image: glitchtip/glitchtip:v4.0
command: ./bin/run-celery-with-beat.sh
depends_on: *default-depends_on
environment: *default-environment
restart: unless-stopped
volumes:
- uploads:/code/uploads
networks:
- dokploy-network
migrate:
image: glitchtip/glitchtip:v4.0
depends_on: *default-depends_on
command: "./manage.py migrate"
environment: *default-environment
networks:
- dokploy-network
volumes:
pg-data:
uploads:

View File

@@ -1,30 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateBase64,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);
  const secretKey = generateBase64(32);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 8000,
      serviceName: "web",
    },
  ];

  const envs = [
    `GLITCHTIP_HOST=${mainDomain}`,
    "GLITCHTIP_PORT=8000",
    `SECRET_KEY=${secretKey}`,
  ];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,9 +0,0 @@
version: "3.8"
services:
grafana:
image: grafana/grafana-enterprise:9.5.20
restart: unless-stopped
volumes:
- grafana-storage:/var/lib/grafana
volumes:
grafana-storage: {}

View File

@@ -1,19 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const domains: DomainSchema[] = [
    {
      host: generateRandomDomain(schema),
      port: 3000,
      serviceName: "grafana",
    },
  ];

  return {
    domains,
  };
}

View File

@@ -1,19 +0,0 @@
version: "3.8"
services:
jellyfin:
image: jellyfin/jellyfin:10
volumes:
- config:/config
- cache:/cache
- media:/media
restart: "unless-stopped"
# Optional - alternative address used for autodiscovery
environment:
- JELLYFIN_PublishedServerUrl=http://${JELLYFIN_HOST}
# Optional - may be necessary for docker healthcheck to pass if running in host network mode
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
config:
cache:
media:

View File

@@ -1,25 +0,0 @@
// EXAMPLE
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const domain = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: domain,
      port: 8096,
      serviceName: "jellyfin",
    },
  ];

  const envs = [`JELLYFIN_HOST=${domain}`];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,49 +0,0 @@
services:
  db:
    image: postgres:13
    ports:
      - 5432
    networks:
      - dokploy-network
    environment:
      - POSTGRES_PASSWORD=listmonk
      - POSTGRES_USER=listmonk
      - POSTGRES_DB=listmonk
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U listmonk"]
      interval: 10s
      timeout: 5s
      retries: 6
    volumes:
      - listmonk-data:/var/lib/postgresql/data

  setup:
    image: listmonk/listmonk:v3.0.0
    networks:
      - dokploy-network
    volumes:
      - ../files/config.toml:/listmonk/config.toml
    depends_on:
      - db
    command:
      [
        sh,
        -c,
        "sleep 3 && ./listmonk --install --idempotent --yes --config config.toml",
      ]

  app:
    restart: unless-stopped
    image: listmonk/listmonk:v3.0.0
    environment:
      - TZ=Etc/UTC
    depends_on:
      - db
      - setup
    volumes:
      - ../files/config.toml:/listmonk/config.toml

volumes:
  listmonk-data:
    driver: local

View File

@@ -1,57 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generatePassword,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const randomDomain = generateRandomDomain(schema);
  const adminPassword = generatePassword(32);

  const domains: DomainSchema[] = [
    {
      host: randomDomain,
      port: 9000,
      serviceName: "app",
    },
  ];

  const envs = [
    `# login with admin:${adminPassword}`,
    "# check config.toml in Advanced / Volumes for more options",
  ];

  const mounts: Template["mounts"] = [
    {
      filePath: "config.toml",
      content: `[app]
address = "0.0.0.0:9000"
admin_username = "admin"
admin_password = "${adminPassword}"

[db]
host = "db"
port = 5432
user = "listmonk"
password = "listmonk"
database = "listmonk"
ssl_mode = "disable"
max_open = 25
max_idle = 25
max_lifetime = "300s"
params = ""
`,
    },
  ];

  return {
    envs,
    mounts,
    domains,
  };
}
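The mounts returned here explain the ../files/config.toml bind mount in the compose file above: each mount's filePath is materialized on disk relative to the template's files directory before the stack starts. A hedged sketch of that step (writeMounts is illustrative, not part of this commit):

import { mkdir, writeFile } from "node:fs/promises";
import { dirname, join } from "node:path";

type Mount = { filePath: string; content: string };

async function writeMounts(baseDir: string, mounts: Mount[]): Promise<void> {
  for (const mount of mounts) {
    const target = join(baseDir, "files", mount.filePath);
    await mkdir(dirname(target), { recursive: true }); // ensure parent dirs exist
    await writeFile(target, mount.content, "utf8");
  }
}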

View File

@@ -1,14 +0,0 @@
version: "3.8"
services:
meilisearch:
image: getmeili/meilisearch:v1.8.3
volumes:
- meili_data:/meili_data
environment:
MEILI_MASTER_KEY: ${MEILI_MASTER_KEY}
MEILI_ENV: ${MEILI_ENV}
volumes:
meili_data:
driver: local

View File

@@ -1,26 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateBase64,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);
  const masterKey = generateBase64(32);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 7700,
      serviceName: "meilisearch",
    },
  ];

  const envs = ["MEILI_ENV=development", `MEILI_MASTER_KEY=${masterKey}`];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,26 +0,0 @@
version: "3.8"
services:
metabase:
image: metabase/metabase:v0.50.8
volumes:
- /dev/urandom:/dev/random:ro
environment:
MB_DB_TYPE: postgres
MB_DB_DBNAME: metabaseappdb
MB_DB_PORT: 5432
MB_DB_USER: metabase
MB_DB_PASS: mysecretpassword
MB_DB_HOST: postgres
healthcheck:
test: curl --fail -I http://localhost:3000/api/health || exit 1
interval: 15s
timeout: 5s
retries: 5
postgres:
image: postgres:14
environment:
POSTGRES_USER: metabase
POSTGRES_DB: metabaseappdb
POSTGRES_PASSWORD: mysecretpassword
networks:
- dokploy-network

View File

@@ -1,22 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const randomDomain = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: randomDomain,
      port: 3000,
      serviceName: "metabase",
    },
  ];

  return {
    domains,
  };
}

View File

@@ -1,13 +0,0 @@
version: "3.8"
services:
minio:
image: minio/minio
volumes:
- minio-data:/data
environment:
- MINIO_ROOT_USER=minioadmin
- MINIO_ROOT_PASSWORD=minioadmin123
command: server /data --console-address ":9001"
volumes:
minio-data:

View File

@@ -1,28 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);
  const apiDomain = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 9001,
      serviceName: "minio",
    },
    {
      host: apiDomain,
      port: 9000,
      serviceName: "minio",
    },
  ];

  return {
    domains,
  };
}

View File

@@ -1,18 +0,0 @@
version: "3.8"
services:
n8n:
image: docker.n8n.io/n8nio/n8n:1.48.1
restart: always
environment:
- N8N_HOST=${N8N_HOST}
- N8N_PORT=${N8N_PORT}
- N8N_PROTOCOL=http
- NODE_ENV=production
- WEBHOOK_URL=https://${N8N_HOST}/
- GENERIC_TIMEZONE=${GENERIC_TIMEZONE}
- N8N_SECURE_COOKIE=false
volumes:
- n8n_data:/home/node/.n8n
volumes:
n8n_data:

View File

@@ -1,28 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 5678,
      serviceName: "n8n",
    },
  ];

  const envs = [
    `N8N_HOST=${mainDomain}`,
    "N8N_PORT=5678",
    "GENERIC_TIMEZONE=Europe/Berlin",
  ];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,32 +0,0 @@
version: "3.8"
services:
nocodb:
image: nocodb/nocodb:0.251.1
restart: always
environment:
NC_DB: "pg://root_db?u=postgres&p=password&d=root_db"
PORT: ${NOCODB_PORT}
NC_REDIS_URL: ${NC_REDIS_URL}
volumes:
- nc_data:/usr/app/data
root_db:
image: postgres:14.7
restart: always
networks:
- dokploy-network
environment:
POSTGRES_DB: root_db
POSTGRES_PASSWORD: password
POSTGRES_USER: postgres
healthcheck:
interval: 10s
retries: 10
test: 'pg_isready -U "$$POSTGRES_USER" -d "$$POSTGRES_DB"'
timeout: 2s
volumes:
- "db_data:/var/lib/postgresql/data"
volumes:
db_data: {}
nc_data: {}

View File

@@ -1,28 +0,0 @@
// EXAMPLE
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateBase64,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const randomDomain = generateRandomDomain(schema);
  const secretBase = generateBase64(64);

  const domains: DomainSchema[] = [
    {
      host: randomDomain,
      port: 8000,
      serviceName: "nocodb",
    },
  ];

  const envs = ["NOCODB_PORT=8000", `NC_AUTH_JWT_SECRET=${secretBase}`];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,29 +0,0 @@
version: "3.8"
services:
web:
image: odoo:16.0
depends_on:
- db
environment:
- HOST=db
- USER=odoo
- PASSWORD=odoo
volumes:
- odoo-web-data:/var/lib/odoo
- ../files/config:/etc/odoo
- ../files/addons:/mnt/extra-addons
db:
image: postgres:13
networks:
- dokploy-network
environment:
- POSTGRES_DB=postgres
- POSTGRES_USER=odoo
- POSTGRES_PASSWORD=odoo
volumes:
- odoo-db-data:/var/lib/postgresql/data
volumes:
odoo-web-data:
odoo-db-data:

View File

@@ -1,22 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const randomDomain = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: randomDomain,
      port: 8069,
      serviceName: "web",
    },
  ];

  return {
    domains,
  };
}

View File

@@ -1,26 +0,0 @@
version: "3.8"
services:
ollama:
volumes:
- ollama:/root/.ollama
networks:
- dokploy-network
pull_policy: always
tty: true
restart: unless-stopped
image: ollama/ollama:${OLLAMA_DOCKER_TAG-latest}
open-webui:
image: ghcr.io/open-webui/open-webui:${WEBUI_DOCKER_TAG-main}
volumes:
- open-webui:/app/backend/data
depends_on:
- ollama
environment:
- "OLLAMA_BASE_URL=http://ollama:11434"
- "WEBUI_SECRET_KEY="
restart: unless-stopped
volumes:
ollama: {}
open-webui: {}

View File

@@ -1,24 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const randomDomain = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: randomDomain,
      port: 8080,
      serviceName: "open-webui",
    },
  ];

  const envs = ["OLLAMA_DOCKER_TAG=0.1.47", "WEBUI_DOCKER_TAG=0.3.7"];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,28 +0,0 @@
version: "3.8"
services:
db:
image: mysql:5.7
environment:
MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
MYSQL_DATABASE: tu_base_de_datos
MYSQL_USER: ${MYSQL_USER}
MYSQL_PASSWORD: ${MYSQL_PASSWORD}
volumes:
- db_data:/var/lib/mysql
networks:
- dokploy-network
phpmyadmin:
image: phpmyadmin/phpmyadmin:5.2.1
environment:
PMA_HOST: db
PMA_USER: ${MYSQL_USER}
PMA_PASSWORD: ${MYSQL_PASSWORD}
PMA_ARBITRARY: 1
depends_on:
- db
volumes:
db_data:
driver: local

View File

@@ -1,32 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generatePassword,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);
  const rootPassword = generatePassword(32);
  const password = generatePassword(32);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 80,
      serviceName: "phpmyadmin",
    },
  ];

  const envs = [
    `MYSQL_ROOT_PASSWORD=${rootPassword}`,
    "MYSQL_DATABASE=mysql",
    "MYSQL_USER=phpmyadmin",
    `MYSQL_PASSWORD=${password}`,
  ];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,44 +0,0 @@
version: "3.8"
services:
plausible_db:
image: postgres:16-alpine
restart: always
networks:
- dokploy-network
volumes:
- db-data:/var/lib/postgresql/data
environment:
- POSTGRES_PASSWORD=postgres
plausible_events_db:
image: clickhouse/clickhouse-server:24.3.3.102-alpine
restart: always
networks:
- dokploy-network
volumes:
- event-data:/var/lib/clickhouse
- event-logs:/var/log/clickhouse-server
- ../files/clickhouse/clickhouse-config.xml:/etc/clickhouse-server/config.d/logging.xml:ro
- ../files/clickhouse/clickhouse-user-config.xml:/etc/clickhouse-server/users.d/logging.xml:ro
ulimits:
nofile:
soft: 262144
hard: 262144
plausible:
image: ghcr.io/plausible/community-edition:v2.1.0
restart: always
command: sh -c "sleep 10 && /entrypoint.sh db createdb && /entrypoint.sh db migrate && /entrypoint.sh run"
depends_on:
- plausible_db
- plausible_events_db
env_file:
- .env
volumes:
db-data:
driver: local
event-data:
driver: local
event-logs:
driver: local

View File

@@ -1,71 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateBase64,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);
  const secretBase = generateBase64(64);
  const toptKeyBase = generateBase64(32);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 8000,
      serviceName: "plausible",
    },
  ];

  const envs = [
    `BASE_URL=http://${mainDomain}`,
    `SECRET_KEY_BASE=${secretBase}`,
    `TOTP_VAULT_KEY=${toptKeyBase}`,
  ];

  const mounts: Template["mounts"] = [
    {
      filePath: "/clickhouse/clickhouse-config.xml",
      content: `
<clickhouse>
  <logger>
    <level>warning</level>
    <console>true</console>
  </logger>

  <!-- Stop all the unnecessary logging -->
  <query_thread_log remove="remove"/>
  <query_log remove="remove"/>
  <text_log remove="remove"/>
  <trace_log remove="remove"/>
  <metric_log remove="remove"/>
  <asynchronous_metric_log remove="remove"/>
  <session_log remove="remove"/>
  <part_log remove="remove"/>
</clickhouse>
`,
    },
    {
      filePath: "/clickhouse/clickhouse-user-config.xml",
      content: `
<clickhouse>
  <profiles>
    <default>
      <log_queries>0</log_queries>
      <log_query_threads>0</log_query_threads>
    </default>
  </profiles>
</clickhouse>
`,
    },
  ];

  return {
    envs,
    mounts,
    domains,
  };
}

View File

@@ -1,9 +0,0 @@
version: "3.8"
services:
pocketbase:
image: spectado/pocketbase:0.22.12
restart: unless-stopped
volumes:
- /etc/dokploy/templates/${HASH}/data:/pb_data
- /etc/dokploy/templates/${HASH}/public:/pb_public
- /etc/dokploy/templates/${HASH}/migrations:/pb_migrations

View File

@@ -1,22 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 80,
      serviceName: "pocketbase",
    },
  ];

  return {
    domains,
  };
}

View File

@@ -1,35 +0,0 @@
version: "3.8"
services:
rocketchat:
image: registry.rocket.chat/rocketchat/rocket.chat:6.9.2
restart: always
environment:
MONGO_URL: "mongodb://mongodb:27017/rocketchat?replicaSet=rs0"
MONGO_OPLOG_URL: "mongodb://mongodb:27017/local?replicaSet=rs0"
ROOT_URL: ${ROOT_URL:-http://${ROCKETCHAT_HOST}:${ROCKETCHAT_PORT}}
PORT: ${ROCKETCHAT_PORT}
DEPLOY_METHOD: docker
DEPLOY_PLATFORM:
REG_TOKEN:
depends_on:
- mongodb
mongodb:
image: docker.io/bitnami/mongodb:5.0
restart: always
volumes:
- mongodb_data:/bitnami/mongodb
environment:
MONGODB_REPLICA_SET_MODE: primary
MONGODB_REPLICA_SET_NAME: rs0
MONGODB_PORT_NUMBER: 27017
MONGODB_INITIAL_PRIMARY_HOST: mongodb
MONGODB_INITIAL_PRIMARY_PORT_NUMBER: 27017
MONGODB_ADVERTISED_HOSTNAME: mongodb
MONGODB_ENABLE_JOURNAL: true
ALLOW_EMPTY_PASSWORD: yes
networks:
- dokploy-network
volumes:
mongodb_data: { driver: local }

View File

@@ -1,25 +0,0 @@
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateRandomDomain,
} from "../utils";

export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);

  const domains: DomainSchema[] = [
    {
      host: mainDomain,
      port: 3000,
      serviceName: "rocketchat",
    },
  ];

  const envs = [`ROCKETCHAT_HOST=${mainDomain}`, "ROCKETCHAT_PORT=3000"];

  return {
    envs,
    domains,
  };
}

View File

@@ -1,12 +0,0 @@
version: "3"
services:
soketi:
image: quay.io/soketi/soketi:1.4-16-debian
container_name: soketi
environment:
SOKETI_DEBUG: "1"
SOKETI_HOST: "0.0.0.0"
SOKETI_PORT: "6001"
SOKETI_METRICS_SERVER_PORT: "9601"
restart: unless-stopped

View File

@@ -1,28 +0,0 @@
import {
type DomainSchema,
type Schema,
type Template,
generateRandomDomain,
} from "../utils";
export function generate(schema: Schema): Template {
const mainDomain = generateRandomDomain(schema);
const metricsDomain = generateRandomDomain(schema);
const domains: DomainSchema[] = [
{
host: mainDomain,
port: 6001,
serviceName: "soketi",
},
{
host: metricsDomain,
port: 9601,
serviceName: "soketi",
},
];
return {
domains,
};
}

View File

@@ -1,460 +0,0 @@
# Usage
# Start: docker compose up
# With helpers: docker compose -f docker-compose.yml -f ../files/dev/docker-compose.dev.yml up
# Stop: docker compose down
# Destroy: docker compose -f docker-compose.yml -f ../files/dev/docker-compose.dev.yml down -v --remove-orphans
name: supabase
version: "3.8"
services:
  studio:
    container_name: supabase-studio
    image: supabase/studio:20240729-ce42139
    networks:
      - dokploy-network
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      STUDIO_PG_META_URL: http://meta:8080
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
      DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
      SUPABASE_URL: http://kong:8000
      SUPABASE_PUBLIC_URL: http://${SUPABASE_HOST}
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      AUTH_JWT_SECRET: ${JWT_SECRET}
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_URL: http://analytics:4000
      NEXT_PUBLIC_ENABLE_LOGS: true
      # Comment to use Big Query backend for analytics
      NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
      # Uncomment to use Big Query backend for analytics
      # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery

  kong:
    container_name: supabase-kong
    image: kong:2.8.1
    restart: unless-stopped
    networks:
      - dokploy-network
    # https://unix.stackexchange.com/a/294837
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
    #ports:
    #  - ${KONG_HTTP_PORT}:8000/tcp
    #  - ${KONG_HTTPS_PORT}:8443/tcp
    expose:
      - 8000
      - 8443
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      KONG_DATABASE: "off"
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
      # https://github.com/supabase/cli/issues/14
      KONG_DNS_ORDER: LAST,A,CNAME
      KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
      KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
      KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
    volumes:
      # https://github.com/supabase/supabase/issues/12661
      - ../files/volumes/api/kong.yml:/home/kong/temp.yml:ro

  auth:
    container_name: supabase-auth
    image: supabase/gotrue:v2.158.1
    networks:
      - dokploy-network
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:9999/health",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      GOTRUE_API_HOST: 0.0.0.0
      GOTRUE_API_PORT: 9999
      API_EXTERNAL_URL: http://${SUPABASE_HOST}
      GOTRUE_DB_DRIVER: postgres
      GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB}
      GOTRUE_SITE_URL: http://${SUPABASE_HOST}
      GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
      GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
      GOTRUE_JWT_ADMIN_ROLES: service_role
      GOTRUE_JWT_AUD: authenticated
      GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
      GOTRUE_JWT_EXP: ${JWT_EXPIRY}
      GOTRUE_JWT_SECRET: ${JWT_SECRET}
      GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
      GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
      GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
      # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
      # GOTRUE_SMTP_MAX_FREQUENCY: 1s
      GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
      GOTRUE_SMTP_HOST: ${SMTP_HOSTNAME}
      GOTRUE_SMTP_PORT: ${SMTP_PORT}
      GOTRUE_SMTP_USER: ${SMTP_USER}
      GOTRUE_SMTP_PASS: ${SMTP_PASS}
      GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
      GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
      GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
      GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
      GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
      GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
      GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
      # Uncomment to enable custom access token hook. You'll need to create a public.custom_access_token_hook function and grant necessary permissions.
      # See: https://supabase.com/docs/guides/auth/auth-hooks#hook-custom-access-token for details
      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED="true"
      # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI="pg-functions://postgres/public/custom_access_token_hook"
      # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED="true"
      # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI="pg-functions://postgres/public/mfa_verification_attempt"
      # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED="true"
      # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI="pg-functions://postgres/public/password_verification_attempt"

  rest:
    container_name: supabase-rest
    image: postgrest/postgrest:v12.2.0
    networks:
      - dokploy-network
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    restart: unless-stopped
    environment:
      PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB}
      PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
      PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
      PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
    command: "postgrest"

  realtime:
    # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
    container_name: realtime-dev.supabase-realtime
    image: supabase/realtime:v2.30.23
    networks:
      - dokploy-network
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "curl",
          "-sSfL",
          "--head",
          "-o",
          "/dev/null",
          "-H",
          "Authorization: Bearer ${ANON_KEY}",
          "http://localhost:4000/api/tenants/realtime-dev/health",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      PORT: 4000
      DB_HOST: ${POSTGRES_HOSTNAME}
      DB_PORT: ${POSTGRES_PORT}
      DB_USER: supabase_admin
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_NAME: ${POSTGRES_DB}
      DB_AFTER_CONNECT_QUERY: "SET search_path TO _realtime"
      DB_ENC_KEY: supabaserealtime
      API_JWT_SECRET: ${JWT_SECRET}
      SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
      ERL_AFLAGS: -proto_dist inet_tcp
      DNS_NODES: "''"
      RLIMIT_NOFILE: "10000"
      APP_NAME: realtime
      SEED_SELF_HOST: true

  # To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
  storage:
    container_name: supabase-storage
    image: supabase/storage-api:v1.0.6
    networks:
      - dokploy-network
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:5000/status",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      PGRST_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB}
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: file
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      # TODO: https://github.com/supabase/storage-api/issues/55
      REGION: stub
      GLOBAL_S3_BUCKET: stub
      ENABLE_IMAGE_TRANSFORMATION: "true"
      IMGPROXY_URL: http://imgproxy:5001
    volumes:
      - ../files/volumes/storage:/var/lib/storage:z

  imgproxy:
    container_name: supabase-imgproxy
    image: darthsim/imgproxy:v3.8.0
    networks:
      - dokploy-network
    healthcheck:
      test: ["CMD", "imgproxy", "health"]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      IMGPROXY_BIND: ":5001"
      IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
      IMGPROXY_USE_ETAG: "true"
      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
    volumes:
      - ../files/volumes/storage:/var/lib/storage:z

  meta:
    container_name: supabase-meta
    image: supabase/postgres-meta:v0.83.2
    networks:
      - dokploy-network
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    restart: unless-stopped
    environment:
      PG_META_PORT: 8080
      PG_META_DB_HOST: ${POSTGRES_HOSTNAME}
      PG_META_DB_PORT: ${POSTGRES_PORT}
      PG_META_DB_NAME: ${POSTGRES_DB}
      PG_META_DB_USER: supabase_admin
      PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}

  functions:
    container_name: supabase-edge-functions
    image: supabase/edge-runtime:v1.56.0
    restart: unless-stopped
    networks:
      - dokploy-network
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      JWT_SECRET: ${JWT_SECRET}
      SUPABASE_URL: http://kong:8000
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
      SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB}
      # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
      VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
    volumes:
      - ../files/volumes/functions:/home/deno/functions:Z
    command:
      - start
      - --main-service
      - /home/deno/functions/main

  analytics:
    container_name: supabase-analytics
    image: supabase/logflare:1.4.0
    networks:
      - dokploy-network
    healthcheck:
      test: ["CMD", "curl", "http://localhost:4000/health"]
      timeout: 5s
      interval: 5s
      retries: 10
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
    # Uncomment to use Big Query backend for analytics
    # volumes:
    #   - type: bind
    #     source: ${PWD}/gcloud.json
    #     target: /opt/app/rel/logflare/bin/gcloud.json
    #     read_only: true
    environment:
      LOGFLARE_NODE_HOST: 127.0.0.1
      DB_USERNAME: supabase_admin
      DB_DATABASE: ${POSTGRES_DB}
      DB_HOSTNAME: ${POSTGRES_HOSTNAME}
      DB_PORT: ${POSTGRES_PORT}
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_SCHEMA: _analytics
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_SINGLE_TENANT: true
      LOGFLARE_SUPABASE_MODE: true
      LOGFLARE_MIN_CLUSTER_SIZE: 1
      # Comment variables to use Big Query backend for analytics
      POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB}
      POSTGRES_BACKEND_SCHEMA: _analytics
      LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
      # Uncomment to use Big Query backend for analytics
      # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
      # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
    #ports:
    #  - 4000:4000
    expose:
      - 4000

  # Comment out everything below this point if you are using an external Postgres database
  db:
    container_name: supabase-db
    image: supabase/postgres:15.1.1.78
    networks:
      - dokploy-network
    healthcheck:
      test: pg_isready -U postgres -h localhost
      interval: 5s
      timeout: 5s
      retries: 10
    depends_on:
      vector:
        condition: service_healthy
    command:
      - postgres
      - -c
      - config_file=/etc/postgresql/postgresql.conf
      - -c
      - log_min_messages=fatal # prevents Realtime polling queries from appearing in logs
    restart: unless-stopped
    #ports:
    #  # Pass down internal port because it's set dynamically by other services
    #  - ${POSTGRES_PORT}:${POSTGRES_PORT}
    expose:
      - ${POSTGRES_PORT}
    environment:
      POSTGRES_HOST: /var/run/postgresql
      PGPORT: ${POSTGRES_PORT}
      POSTGRES_PORT: ${POSTGRES_PORT}
      PGPASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      PGDATABASE: ${POSTGRES_DB}
      POSTGRES_DB: ${POSTGRES_DB}
      JWT_SECRET: ${JWT_SECRET}
      JWT_EXP: ${JWT_EXPIRY}
    volumes:
      - ../files/volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
      # Must be superuser to create event trigger
      - ../files/volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
      # Must be superuser to alter reserved role
      - ../files/volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
      # Initialize the database settings with JWT_SECRET and JWT_EXP
      - ../files/volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
      # PGDATA directory is persisted between restarts
      - ../files/volumes/db/data:/var/lib/postgresql/data:Z
      # Changes required for Analytics support
      - ../files/volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
      # Use named volume to persist pgsodium decryption key between restarts
      - db-config:/etc/postgresql-custom

  vector:
    container_name: supabase-vector
    image: timberio/vector:0.28.1-alpine
    networks:
      - dokploy-network
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://vector:9001/health",
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    volumes:
      - ../files/volumes/logs/vector.yml:/etc/vector/vector.yml:ro
      - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
    environment:
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
    command: ["--config", "etc/vector/vector.yml"]

volumes:
  db-config:

networks:
  dokploy-network:
    external: true
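The ${...} references throughout this compose file are satisfied by the envs array returned from the template's index.ts (next file). A hedged sketch of the assumed handoff, parsing those KEY=value strings into the map docker compose would read from a generated .env file (parseEnvLines is illustrative; the actual wiring is not part of this diff):

function parseEnvLines(envs: string[]): Record<string, string> {
  const result: Record<string, string> = {};
  for (const line of envs) {
    if (line.startsWith("#") || !line.includes("=")) continue; // some templates emit comment lines
    const index = line.indexOf("=");
    result[line.slice(0, index)] = line.slice(index + 1);
  }
  return result;
}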

View File

@@ -1,995 +0,0 @@
import { createHmac } from "node:crypto";
import {
  type DomainSchema,
  type Schema,
  type Template,
  generateBase64,
  generatePassword,
  generateRandomDomain,
} from "../utils";

interface JWTPayload {
  role: "anon" | "service_role";
  iss: string;
  iat: number;
  exp: number;
}

function base64UrlEncode(str: string): string {
  return Buffer.from(str)
    .toString("base64")
    .replace(/\+/g, "-")
    .replace(/\//g, "_")
    .replace(/=/g, "");
}

function generateJWT(payload: JWTPayload, secret: string): string {
  const header = { alg: "HS256", typ: "JWT" };
  const encodedHeader = base64UrlEncode(JSON.stringify(header));
  const encodedPayload = base64UrlEncode(JSON.stringify(payload));
  const signature = createHmac("sha256", secret)
    .update(`${encodedHeader}.${encodedPayload}`)
    .digest("base64url");
  return `${encodedHeader}.${encodedPayload}.${signature}`;
}

export function generateSupabaseAnonJWT(secret: string): string {
  const now = Math.floor(Date.now() / 1000);
  const payload: JWTPayload = {
    role: "anon",
    iss: "supabase",
    iat: now,
    exp: now + 100 * 365 * 24 * 60 * 60, // 100 years
  };
  return generateJWT(payload, secret);
}

export function generateSupabaseServiceJWT(secret: string): string {
  const now = Math.floor(Date.now() / 1000);
  const payload: JWTPayload = {
    role: "service_role",
    iss: "supabase",
    iat: now,
    exp: now + 100 * 365 * 24 * 60 * 60, // 100 years
  };
  return generateJWT(payload, secret);
}
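// A verification sketch for the HS256 tokens produced by generateJWT above,
// added for illustration; it was not part of the removed file. node:crypto only.
import { timingSafeEqual } from "node:crypto";

export function verifySupabaseJWT(token: string, secret: string): boolean {
  const [header, payload, signature] = token.split(".");
  if (!header || !payload || !signature) return false;
  const expected = createHmac("sha256", secret)
    .update(`${header}.${payload}`)
    .digest("base64url");
  const given = Buffer.from(signature);
  const wanted = Buffer.from(expected);
  // constant-time comparison avoids leaking signature bytes via timing
  return given.length === wanted.length && timingSafeEqual(given, wanted);
}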
export function generate(schema: Schema): Template {
  const mainDomain = generateRandomDomain(schema);
  const postgresPassword = generatePassword(32);
  const jwtSecret = generateBase64(32);
  const dashboardPassword = generatePassword(32);
  const logflareApiKey = generatePassword(32);
  const annonKey = generateSupabaseAnonJWT(jwtSecret);
  const serviceRoleKey = generateSupabaseServiceJWT(jwtSecret);

  const domains: DomainSchema[] = [
    {
      serviceName: "kong",
      host: mainDomain,
      port: 8000,
    },
  ];

  const envs = [
    `SUPABASE_HOST=${mainDomain}`,
    `POSTGRES_PASSWORD=${postgresPassword}`,
    `JWT_SECRET=${jwtSecret}`,
    `ANON_KEY=${annonKey}`,
    `SERVICE_ROLE_KEY=${serviceRoleKey}`,
    "DASHBOARD_USERNAME=supabase",
    `DASHBOARD_PASSWORD=${dashboardPassword}`,
    "POSTGRES_HOSTNAME=db",
    "POSTGRES_DB=postgres",
    "POSTGRES_PORT=5432",
    "KONG_HTTP_PORT=8000",
    "KONG_HTTPS_PORT=8443",
    "PGRST_DB_SCHEMAS=public,storage,graphql_public",
    "ADDITIONAL_REDIRECT_URLS=",
    "JWT_EXPIRY=3600",
    "DISABLE_SIGNUP=false",
    `MAILER_URLPATHS_CONFIRMATION=\"/auth/v1/verify\"`,
    `MAILER_URLPATHS_INVITE=\"/auth/v1/verify\"`,
    `MAILER_URLPATHS_RECOVERY=\"/auth/v1/verify\"`,
    `MAILER_URLPATHS_EMAIL_CHANGE=\"/auth/v1/verify\"`,
    "ENABLE_EMAIL_SIGNUP=true",
    "ENABLE_EMAIL_AUTOCONFIRM=false",
    "SMTP_ADMIN_EMAIL=admin@example.com",
    "SMTP_HOSTNAME=supabase-mail",
    "SMTP_PORT=2500",
    "SMTP_USER=fake_mail_user",
    "SMTP_PASS=fake_mail_password",
    "SMTP_SENDER_NAME=fake_sender",
    "ENABLE_ANONYMOUS_USERS=false",
    "ENABLE_PHONE_SIGNUP=true",
    "ENABLE_PHONE_AUTOCONFIRM=true",
    "STUDIO_DEFAULT_ORGANIZATION=Default Organization",
    "STUDIO_DEFAULT_PROJECT=Default Project",
    "STUDIO_PORT=3000",
    "IMGPROXY_ENABLE_WEBP_DETECTION=true",
    "FUNCTIONS_VERIFY_JWT=false",
    `LOGFLARE_LOGGER_BACKEND_API_KEY=${logflareApiKey}`,
    `LOGFLARE_API_KEY=${logflareApiKey}`,
    "DOCKER_SOCKET_LOCATION=/var/run/docker.sock",
    "GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID",
    "GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER",
  ];

  const mounts: Template["mounts"] = [
    {
      filePath: "/volumes/api/kong.yml",
      content: `
_format_version: '2.1'
_transform: true
###
### Consumers / Users
###
consumers:
- username: DASHBOARD
- username: anon
keyauth_credentials:
- key: $SUPABASE_ANON_KEY
- username: service_role
keyauth_credentials:
- key: $SUPABASE_SERVICE_KEY
###
### Access Control List
###
acls:
- consumer: anon
group: anon
- consumer: service_role
group: admin
###
### Dashboard credentials
###
basicauth_credentials:
- consumer: DASHBOARD
username: $DASHBOARD_USERNAME
password: $DASHBOARD_PASSWORD
###
### API Routes
###
services:
## Open Auth routes
- name: auth-v1-open
url: http://auth:9999/verify
routes:
- name: auth-v1-open
strip_path: true
paths:
- /auth/v1/verify
plugins:
- name: cors
- name: auth-v1-open-callback
url: http://auth:9999/callback
routes:
- name: auth-v1-open-callback
strip_path: true
paths:
- /auth/v1/callback
plugins:
- name: cors
- name: auth-v1-open-authorize
url: http://auth:9999/authorize
routes:
- name: auth-v1-open-authorize
strip_path: true
paths:
- /auth/v1/authorize
plugins:
- name: cors
## Secure Auth routes
- name: auth-v1
_comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*'
url: http://auth:9999/
routes:
- name: auth-v1-all
strip_path: true
paths:
- /auth/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
## Secure REST routes
- name: rest-v1
_comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*'
url: http://rest:3000/
routes:
- name: rest-v1-all
strip_path: true
paths:
- /rest/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: true
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
## Secure GraphQL routes
- name: graphql-v1
_comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql'
url: http://rest:3000/rpc/graphql
routes:
- name: graphql-v1-all
strip_path: true
paths:
- /graphql/v1
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: true
- name: request-transformer
config:
add:
headers:
- Content-Profile:graphql_public
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
## Secure Realtime routes
- name: realtime-v1-ws
_comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
url: http://realtime-dev.supabase-realtime:4000/socket
protocol: ws
routes:
- name: realtime-v1-ws
strip_path: true
paths:
- /realtime/v1/
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
- name: realtime-v1-rest
_comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
url: http://realtime-dev.supabase-realtime:4000/api
protocol: http
routes:
- name: realtime-v1-rest
strip_path: true
paths:
- /realtime/v1/api
plugins:
- name: cors
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
- anon
## Storage routes: the storage server manages its own auth
- name: storage-v1
_comment: 'Storage: /storage/v1/* -> http://storage:5000/*'
url: http://storage:5000/
routes:
- name: storage-v1-all
strip_path: true
paths:
- /storage/v1/
plugins:
- name: cors
## Edge Functions routes
- name: functions-v1
_comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*'
url: http://functions:9000/
routes:
- name: functions-v1-all
strip_path: true
paths:
- /functions/v1/
plugins:
- name: cors
## Analytics routes
- name: analytics-v1
_comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
url: http://analytics:4000/
routes:
- name: analytics-v1-all
strip_path: true
paths:
- /analytics/v1/
## Secure Database routes
- name: meta
_comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*'
url: http://meta:8080/
routes:
- name: meta-all
strip_path: true
paths:
- /pg/
plugins:
- name: key-auth
config:
hide_credentials: false
- name: acl
config:
hide_groups_header: true
allow:
- admin
## Protected Dashboard - catch all remaining routes
- name: dashboard
_comment: 'Studio: /* -> http://studio:3000/*'
url: http://studio:3000/
routes:
- name: dashboard-all
strip_path: true
paths:
- /
plugins:
- name: cors
- name: basic-auth
config:
hide_credentials: true
`,
    },
    {
      filePath: "/volumes/db/init/data.sql",
      content: `
`,
    },
    {
      filePath: "/volumes/db/jwt.sql",
      content: `
\\set jwt_secret \`echo "$JWT_SECRET"\`
\\set jwt_exp \`echo "$JWT_EXP"\`
ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';
`,
    },
    {
      filePath: "/volumes/db/logs.sql",
      content: `
\\set pguser \`echo "$POSTGRES_USER"\`
create schema if not exists _analytics;
alter schema _analytics owner to :pguser;
`,
    },
    {
      filePath: "/volumes/db/realtime.sql",
      content: `
\\set pguser \`echo "$POSTGRES_USER"\`
create schema if not exists _realtime;
alter schema _realtime owner to :pguser;
`,
    },
    {
      filePath: "/volumes/db/roles.sql",
      content: `
-- NOTE: change to your own passwords for production environments
\\set pgpass \`echo "$POSTGRES_PASSWORD"\`
ALTER USER authenticator WITH PASSWORD :'pgpass';
ALTER USER pgbouncer WITH PASSWORD :'pgpass';
ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';
`,
    },
    {
      filePath: "/volumes/db/webhooks.sql",
      content: `
BEGIN;
-- Create pg_net extension
CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
-- Create supabase_functions schema
CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
-- supabase_functions.migrations definition
CREATE TABLE supabase_functions.migrations (
version text PRIMARY KEY,
inserted_at timestamptz NOT NULL DEFAULT NOW()
);
-- Initial supabase_functions migration
INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
-- supabase_functions.hooks definition
CREATE TABLE supabase_functions.hooks (
id bigserial PRIMARY KEY,
hook_table_id integer NOT NULL,
hook_name text NOT NULL,
created_at timestamptz NOT NULL DEFAULT NOW(),
request_id bigint
);
CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
CREATE FUNCTION supabase_functions.http_request()
RETURNS trigger
LANGUAGE plpgsql
AS $function$
DECLARE
request_id bigint;
payload jsonb;
url text := TG_ARGV[0]::text;
method text := TG_ARGV[1]::text;
headers jsonb DEFAULT '{}'::jsonb;
params jsonb DEFAULT '{}'::jsonb;
timeout_ms integer DEFAULT 1000;
BEGIN
IF url IS NULL OR url = 'null' THEN
RAISE EXCEPTION 'url argument is missing';
END IF;
IF method IS NULL OR method = 'null' THEN
RAISE EXCEPTION 'method argument is missing';
END IF;
IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
headers = '{"Content-Type": "application/json"}'::jsonb;
ELSE
headers = TG_ARGV[2]::jsonb;
END IF;
IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
params = '{}'::jsonb;
ELSE
params = TG_ARGV[3]::jsonb;
END IF;
IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
timeout_ms = 1000;
ELSE
timeout_ms = TG_ARGV[4]::integer;
END IF;
CASE
WHEN method = 'GET' THEN
SELECT http_get INTO request_id FROM net.http_get(
url,
params,
headers,
timeout_ms
);
WHEN method = 'POST' THEN
payload = jsonb_build_object(
'old_record', OLD,
'record', NEW,
'type', TG_OP,
'table', TG_TABLE_NAME,
'schema', TG_TABLE_SCHEMA
);
SELECT http_post INTO request_id FROM net.http_post(
url,
payload,
params,
headers,
timeout_ms
);
ELSE
RAISE EXCEPTION 'method argument % is invalid', method;
END CASE;
INSERT INTO supabase_functions.hooks
(hook_table_id, hook_name, request_id)
VALUES
(TG_RELID, TG_NAME, request_id);
RETURN NEW;
END
$function$;
-- Supabase super admin
DO
$$
BEGIN
IF NOT EXISTS (
SELECT 1
FROM pg_roles
WHERE rolname = 'supabase_functions_admin'
)
THEN
CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
END IF;
END
$$;
GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
GRANT supabase_functions_admin TO postgres;
-- Remove unused supabase_pg_net_admin role
DO
$$
BEGIN
IF EXISTS (
SELECT 1
FROM pg_roles
WHERE rolname = 'supabase_pg_net_admin'
)
THEN
REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
DROP OWNED BY supabase_pg_net_admin;
DROP ROLE supabase_pg_net_admin;
END IF;
END
$$;
-- pg_net grants when extension is already enabled
DO
$$
BEGIN
IF EXISTS (
SELECT 1
FROM pg_extension
WHERE extname = 'pg_net'
)
THEN
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
END IF;
END
$$;
-- Event trigger for pg_net
CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
RETURNS event_trigger
LANGUAGE plpgsql
AS $$
BEGIN
IF EXISTS (
SELECT 1
FROM pg_event_trigger_ddl_commands() AS ev
JOIN pg_extension AS ext
ON ev.objid = ext.oid
WHERE ext.extname = 'pg_net'
)
THEN
GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
END IF;
END;
$$;
COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
DO
$$
BEGIN
IF NOT EXISTS (
SELECT 1
FROM pg_event_trigger
WHERE evtname = 'issue_pg_net_access'
) THEN
CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
EXECUTE PROCEDURE extensions.grant_pg_net_access();
END IF;
END
$$;
INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
ALTER function supabase_functions.http_request() SECURITY DEFINER;
ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
COMMIT;
`,
},
{
filePath: "/volumes/functions/hello/index.ts",
content: `
// Follow this setup guide to integrate the Deno language server with your editor:
// https://deno.land/manual/getting_started/setup_your_environment
// This enables autocomplete, go to definition, etc.
import { serve } from "https://deno.land/std@0.177.1/http/server.ts"
serve(async () => {
return new Response(
\`"Hello from Edge Functions!"\`,
{ headers: { "Content-Type": "application/json" } },
)
})
// To invoke:
// curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/hello' \\
// --header 'Authorization: Bearer <anon/service_role API key>'
`,
},
{
filePath: "/volumes/functions/main/index.ts",
content: `
import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'
console.log('main function started')
const JWT_SECRET = Deno.env.get('JWT_SECRET')
const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'
function getAuthToken(req: Request) {
const authHeader = req.headers.get('authorization')
if (!authHeader) {
throw new Error('Missing authorization header')
}
const [bearer, token] = authHeader.split(' ')
if (bearer !== 'Bearer') {
throw new Error(\`Auth header is not 'Bearer {token}'\`)
}
return token
}
async function verifyJWT(jwt: string): Promise<boolean> {
const encoder = new TextEncoder()
const secretKey = encoder.encode(JWT_SECRET)
try {
await jose.jwtVerify(jwt, secretKey)
} catch (err) {
console.error(err)
return false
}
return true
}
serve(async (req: Request) => {
if (req.method !== 'OPTIONS' && VERIFY_JWT) {
try {
const token = getAuthToken(req)
const isValidJWT = await verifyJWT(token)
if (!isValidJWT) {
return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
status: 401,
headers: { 'Content-Type': 'application/json' },
})
}
} catch (e) {
console.error(e)
return new Response(JSON.stringify({ msg: e.toString() }), {
status: 401,
headers: { 'Content-Type': 'application/json' },
})
}
}
const url = new URL(req.url)
const { pathname } = url
const path_parts = pathname.split('/')
const service_name = path_parts[1]
if (!service_name || service_name === '') {
const error = { msg: 'missing function name in request' }
return new Response(JSON.stringify(error), {
status: 400,
headers: { 'Content-Type': 'application/json' },
})
}
const servicePath = \`/home/deno/functions/\${service_name}\`
console.error(\`serving the request with \${servicePath}\`)
const memoryLimitMb = 150
const workerTimeoutMs = 1 * 60 * 1000
const noModuleCache = false
const importMapPath = null
const envVarsObj = Deno.env.toObject()
const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])
try {
const worker = await EdgeRuntime.userWorkers.create({
servicePath,
memoryLimitMb,
workerTimeoutMs,
noModuleCache,
importMapPath,
envVars,
})
return await worker.fetch(req)
} catch (e) {
const error = { msg: e.toString() }
return new Response(JSON.stringify(error), {
status: 500,
headers: { 'Content-Type': 'application/json' },
})
}
})
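// A hypothetical invocation through the Kong gateway, mirroring the hello
// example above (the port and key placeholders are not filled in here):
// curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/<service_name>' \\
//   --header 'Authorization: Bearer <anon/service_role API key>'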
`,
},
{
filePath: "/volumes/logs/vector.yml",
content: `
api:
enabled: true
address: 0.0.0.0:9001
sources:
docker_host:
type: docker_logs
exclude_containers:
- supabase-vector
transforms:
project_logs:
type: remap
inputs:
- docker_host
source: |-
.project = "default"
.event_message = del(.message)
.appname = del(.container_name)
del(.container_created_at)
del(.container_id)
del(.source_type)
del(.stream)
del(.label)
del(.image)
del(.host)
router:
type: route
inputs:
- project_logs
route:
kong: '.appname == "supabase-kong"'
auth: '.appname == "supabase-auth"'
rest: '.appname == "supabase-rest"'
realtime: '.appname == "supabase-realtime"'
storage: '.appname == "supabase-storage"'
functions: '.appname == "supabase-functions"'
db: '.appname == "supabase-db"'
# Ignore non-nginx errors since they are related to Kong booting up
kong_logs:
type: remap
inputs:
- router.kong
source: |-
req, err = parse_nginx_log(.event_message, "combined")
if err == null {
.timestamp = req.timestamp
.metadata.request.headers.referer = req.referer
.metadata.request.headers.user_agent = req.agent
.metadata.request.headers.cf_connecting_ip = req.client
.metadata.request.method = req.method
.metadata.request.path = req.path
.metadata.request.protocol = req.protocol
.metadata.response.status_code = req.status
}
if err != null {
abort
}
# Ignore non-nginx errors since they are related to Kong booting up
kong_err:
type: remap
inputs:
- router.kong
source: |-
.metadata.request.method = "GET"
.metadata.response.status_code = 200
parsed, err = parse_nginx_log(.event_message, "error")
if err == null {
.timestamp = parsed.timestamp
.severity = parsed.severity
.metadata.request.host = parsed.host
.metadata.request.headers.cf_connecting_ip = parsed.client
url, err = split(parsed.request, " ")
if err == null {
.metadata.request.method = url[0]
.metadata.request.path = url[1]
.metadata.request.protocol = url[2]
}
}
if err != null {
abort
}
# GoTrue logs are structured JSON strings which the frontend parses directly, but we keep metadata for consistency.
auth_logs:
type: remap
inputs:
- router.auth
source: |-
parsed, err = parse_json(.event_message)
if err == null {
.metadata.timestamp = parsed.time
.metadata = merge!(.metadata, parsed)
}
# PostgREST logs are structured so we separate timestamp from message using regex
rest_logs:
type: remap
inputs:
- router.rest
source: |-
parsed, err = parse_regex(.event_message, r'^(?P<time>.*): (?P<msg>.*)$')
if err == null {
.event_message = parsed.msg
.timestamp = to_timestamp!(parsed.time)
.metadata.host = .project
}
# Realtime logs are structured so we parse the severity level using regex (ignore time because it has no date)
realtime_logs:
type: remap
inputs:
- router.realtime
source: |-
.metadata.project = del(.project)
.metadata.external_id = .metadata.project
parsed, err = parse_regex(.event_message, r'^(?P<time>\\d+:\\d+:\\d+\\.\\d+) \\[(?P<level>\\w+)\\] (?P<msg>.*)$')
if err == null {
.event_message = parsed.msg
.metadata.level = parsed.level
}
# Storage logs may contain json objects so we parse them for completeness
storage_logs:
type: remap
inputs:
- router.storage
source: |-
.metadata.project = del(.project)
.metadata.tenantId = .metadata.project
parsed, err = parse_json(.event_message)
if err == null {
.event_message = parsed.msg
.metadata.level = parsed.level
.metadata.timestamp = parsed.time
.metadata.context[0].host = parsed.hostname
.metadata.context[0].pid = parsed.pid
}
# Postgres logs some messages to stderr which we map to warning severity level
db_logs:
type: remap
inputs:
- router.db
source: |-
.metadata.host = "db-default"
.metadata.parsed.timestamp = .timestamp
parsed, err = parse_regex(.event_message, r'.*(?P<level>INFO|NOTICE|WARNING|ERROR|LOG|FATAL|PANIC):.*', numeric_groups: true)
if err != null || parsed == null {
.metadata.parsed.error_severity = "info"
}
if parsed != null {
.metadata.parsed.error_severity = parsed.level
}
if .metadata.parsed.error_severity == "info" {
.metadata.parsed.error_severity = "log"
}
.metadata.parsed.error_severity = upcase!(.metadata.parsed.error_severity)
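# Note: the \${VAR?message} references in the sink URIs below use Vector's
# bash-style environment variable interpolation; Vector fails to start with
# the given message when the variable is unset.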
sinks:
logflare_auth:
type: 'http'
inputs:
- auth_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=gotrue.logs.prod&api_key=\${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_realtime:
type: 'http'
inputs:
- realtime_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=realtime.logs.prod&api_key=\${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_rest:
type: 'http'
inputs:
- rest_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=postgREST.logs.prod&api_key=\${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_db:
type: 'http'
inputs:
- db_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
# We must route the sink through kong because ingesting logs before logflare is fully initialised will
# lead to broken queries from studio. This works on the assumption that containers are started in the
# following order: vector > db > logflare > kong
uri: 'http://kong:8000/analytics/v1/api/logs?source_name=postgres.logs&api_key=\${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_functions:
type: 'http'
inputs:
- router.functions
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=deno-relay-logs&api_key=\${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_storage:
type: 'http'
inputs:
- storage_logs
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=storage.logs.prod.2&api_key=\${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
logflare_kong:
type: 'http'
inputs:
- kong_logs
- kong_err
encoding:
codec: 'json'
method: 'post'
request:
retry_max_duration_secs: 10
uri: 'http://analytics:4000/api/logs?source_name=cloudflare.logs.prod&api_key=\${LOGFLARE_API_KEY?LOGFLARE_API_KEY is required}'
`,
},
];
return {
domains,
envs,
mounts,
};
}


@@ -1,69 +0,0 @@
version: "3.9"
services:
teable:
image: ghcr.io/teableio/teable:1.3.1-alpha-build.460
restart: always
volumes:
- teable-data:/app/.assets
# you may use a bind-mounted host directory instead,
# so that it is harder to accidentally remove the volume and lose all your data!
# - ./docker/teable/data:/app/.assets:rw
environment:
- TZ=${TIMEZONE}
- NEXT_ENV_IMAGES_ALL_REMOTE=true
- PUBLIC_ORIGIN=${PUBLIC_ORIGIN}
- PRISMA_DATABASE_URL=${PRISMA_DATABASE_URL}
- PUBLIC_DATABASE_PROXY=${PUBLIC_DATABASE_PROXY}
- BACKEND_MAIL_HOST=${BACKEND_MAIL_HOST}
- BACKEND_MAIL_PORT=${BACKEND_MAIL_PORT}
- BACKEND_MAIL_SECURE=${BACKEND_MAIL_SECURE}
- BACKEND_MAIL_SENDER=${BACKEND_MAIL_SENDER}
- BACKEND_MAIL_SENDER_NAME=${BACKEND_MAIL_SENDER_NAME}
- BACKEND_MAIL_AUTH_USER=${BACKEND_MAIL_AUTH_USER}
- BACKEND_MAIL_AUTH_PASS=${BACKEND_MAIL_AUTH_PASS}
depends_on:
teable-db-migrate:
condition: service_completed_successfully
teable-db:
image: postgres:15.4
restart: always
ports:
- "${TEABLE_DB_PORT}:${POSTGRES_PORT}"
volumes:
- teable-db:/var/lib/postgresql/data
# you may use a bind-mounted host directory instead,
# so that it is harder to accidentally remove the volume and lose all your data!
# - ./docker/db/data:/var/lib/postgresql/data:rw
environment:
- TZ=${TIMEZONE}
- POSTGRES_DB=${POSTGRES_DB}
- POSTGRES_USER=${POSTGRES_USER}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
networks:
- dokploy-network
healthcheck:
test:
[
"CMD-SHELL",
"sh -c 'pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}'",
]
interval: 10s
timeout: 3s
retries: 3
teable-db-migrate:
image: ghcr.io/teableio/teable-db-migrate:latest
environment:
- TZ=${TIMEZONE}
- PRISMA_DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
networks:
- dokploy-network
depends_on:
teable-db:
condition: service_healthy
volumes:
teable-data: {}
teable-db: {}


@@ -1,54 +0,0 @@
import {
type DomainSchema,
type Schema,
type Template,
generatePassword,
generateRandomDomain,
} from "../utils";
export function generate(schema: Schema): Template {
const password = generatePassword();
const mainDomain = generateRandomDomain(schema);
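// Pick a random port in the upper ephemeral range so the publicly exposed
// Postgres port is unlikely to collide with other services on the host.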
const publicDbPort = ((min: number, max: number) => {
return Math.round(Math.random() * (max - min) + min);
})(32769, 65534);
const domains: DomainSchema[] = [
{
host: mainDomain,
port: 3000,
serviceName: "teable",
},
];
const envs = [
`TEABLE_HOST=${mainDomain}`,
`TEABLE_DB_PORT=${publicDbPort}`,
"TIMEZONE=UTC",
"# Postgres",
"POSTGRES_HOST=teable-db",
"POSTGRES_PORT=5432",
"POSTGRES_DB=teable",
"POSTGRES_USER=teable",
`POSTGRES_PASSWORD=${password}`,
"# App",
"PUBLIC_ORIGIN=https://${TEABLE_HOST}",
"PRISMA_DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}",
"PUBLIC_DATABASE_PROXY=${TEABLE_HOST}:${TEABLE_DB_PORT}",
"# Need to support sending emails to enable the following configurations",
"# You need to modify the configuration according to the actual situation, otherwise it will not be able to send emails correctly.",
"#BACKEND_MAIL_HOST=smtp.teable.io",
"#BACKEND_MAIL_PORT=465",
"#BACKEND_MAIL_SECURE=true",
"#BACKEND_MAIL_SENDER=noreply.teable.io",
"#BACKEND_MAIL_SENDER_NAME=Teable",
"#BACKEND_MAIL_AUTH_USER=username",
"#BACKEND_MAIL_AUTH_PASS=password",
];
return {
envs,
domains,
};
}


@@ -1,500 +0,0 @@
import type { TemplateData } from "./types/templates-data.type";
export const templates: TemplateData[] = [
{
id: "supabase",
name: "SupaBase",
version: "1.24.07",
description:
"The open source Firebase alternative. Supabase gives you a dedicated Postgres database to build your web, mobile, and AI applications. ",
links: {
github: "https://github.com/supabase/supabase",
website: "https://supabase.com/",
docs: "https://supabase.com/docs/guides/self-hosting",
},
logo: "supabase.svg",
load: () => import("./supabase/index").then((m) => m.generate),
tags: ["database", "firebase", "postgres"],
},
{
id: "pocketbase",
name: "Pocketbase",
version: "v0.22.12",
description:
"Pocketbase is a self-hosted alternative to Firebase that allows you to build and host your own backend services.",
links: {
github: "https://github.com/pocketbase/pocketbase",
website: "https://pocketbase.io/",
docs: "https://pocketbase.io/docs/",
},
logo: "pocketbase.svg",
load: () => import("./pocketbase/index").then((m) => m.generate),
tags: ["database", "cms", "headless"],
},
{
id: "plausible",
name: "Plausible",
version: "v2.1.0",
description:
"Plausible is a open source, self-hosted web analytics platform that lets you track website traffic and user behavior.",
logo: "plausible.svg",
links: {
github: "https://github.com/plausible/plausible",
website: "https://plausible.io/",
docs: "https://plausible.io/docs",
},
tags: ["analytics"],
load: () => import("./plausible/index").then((m) => m.generate),
},
{
id: "calcom",
name: "Calcom",
version: "v2.7.6",
description:
"Calcom is a open source alternative to Calendly that allows to create scheduling and booking services.",
links: {
github: "https://github.com/calcom/cal.com",
website: "https://cal.com/",
docs: "https://cal.com/docs",
},
logo: "calcom.jpg",
tags: ["scheduling", "booking"],
load: () => import("./calcom/index").then((m) => m.generate),
},
{
id: "grafana",
name: "Grafana",
version: "9.5.20",
description:
"Grafana is an open source platform for data visualization and monitoring.",
logo: "grafana.svg",
links: {
github: "https://github.com/grafana/grafana",
website: "https://grafana.com/",
docs: "https://grafana.com/docs/",
},
tags: ["monitoring"],
load: () => import("./grafana/index").then((m) => m.generate),
},
{
id: "directus",
name: "Directus",
version: "10.12.1",
description:
"Directus is an open source headless CMS that provides an API-first solution for building custom backends.",
logo: "directus.jpg",
links: {
github: "https://github.com/directus/directus",
website: "https://directus.io/",
docs: "https://docs.directus.io/",
},
tags: ["cms"],
load: () => import("./directus/index").then((m) => m.generate),
},
{
id: "baserow",
name: "Baserow",
version: "1.25.2",
description:
"Baserow is an open source database management tool that allows you to create and manage databases.",
logo: "baserow.webp",
links: {
github: "https://github.com/Baserow/baserow",
website: "https://baserow.io/",
docs: "https://baserow.io/docs/index",
},
tags: ["database"],
load: () => import("./baserow/index").then((m) => m.generate),
},
{
id: "ghost",
name: "Ghost",
version: "5.0.0",
description:
"Ghost is a free and open source, professional publishing platform built on a modern Node.js technology stack.",
logo: "ghost.jpeg",
links: {
github: "https://github.com/TryGhost/Ghost",
website: "https://ghost.org/",
docs: "https://ghost.org/docs/",
},
tags: ["cms"],
load: () => import("./ghost/index").then((m) => m.generate),
},
{
id: "uptime-kuma",
name: "Uptime Kuma",
version: "1.21.4",
description:
"Uptime Kuma is a free and open source monitoring tool that allows you to monitor your websites and applications.",
logo: "uptime-kuma.png",
links: {
github: "https://github.com/louislam/uptime-kuma",
website: "https://uptime.kuma.pet/",
docs: "https://github.com/louislam/uptime-kuma/wiki",
},
tags: ["monitoring"],
load: () => import("./uptime-kuma/index").then((m) => m.generate),
},
{
id: "n8n",
name: "n8n",
version: "1.48.1",
description:
"n8n is an open source low-code platform for automating workflows and integrations.",
logo: "n8n.png",
links: {
github: "https://github.com/n8n-io/n8n",
website: "https://n8n.io/",
docs: "https://docs.n8n.io/",
},
tags: ["automation"],
load: () => import("./n8n/index").then((m) => m.generate),
},
{
id: "wordpress",
name: "Wordpress",
version: "5.8.3",
description:
"Wordpress is a free and open source content management system (CMS) for publishing and managing websites.",
logo: "wordpress.png",
links: {
github: "https://github.com/WordPress/WordPress",
website: "https://wordpress.org/",
docs: "https://wordpress.org/documentation/",
},
tags: ["cms"],
load: () => import("./wordpress/index").then((m) => m.generate),
},
{
id: "odoo",
name: "Odoo",
version: "16.0",
description:
"Odoo is a free and open source business management software that helps you manage your company's operations.",
logo: "odoo.png",
links: {
github: "https://github.com/odoo/odoo",
website: "https://odoo.com/",
docs: "https://www.odoo.com/documentation/",
},
tags: ["cms"],
load: () => import("./odoo/index").then((m) => m.generate),
},
{
id: "appsmith",
name: "Appsmith",
version: "v1.29",
description:
"Appsmith is a free and open source platform for building internal tools and applications.",
logo: "appsmith.png",
links: {
github: "https://github.com/appsmithorg/appsmith",
website: "https://appsmith.com/",
docs: "https://docs.appsmith.com/",
},
tags: ["cms"],
load: () => import("./appsmith/index").then((m) => m.generate),
},
{
id: "excalidraw",
name: "Excalidraw",
version: "latest",
description:
"Excalidraw is a free and open source online diagramming tool that lets you easily create and share beautiful diagrams.",
logo: "excalidraw.jpg",
links: {
github: "https://github.com/excalidraw/excalidraw",
website: "https://excalidraw.com/",
docs: "https://docs.excalidraw.com/",
},
tags: ["drawing"],
load: () => import("./excalidraw/index").then((m) => m.generate),
},
{
id: "documenso",
name: "Documenso",
version: "v1.5.6",
description:
"Documenso is the open source alternative to DocuSign for signing documents digitally",
links: {
github: "https://github.com/documenso/documenso",
website: "https://documenso.com/",
docs: "https://documenso.com/docs",
},
logo: "documenso.png",
tags: ["document-signing"],
load: () => import("./documenso/index").then((m) => m.generate),
},
{
id: "nocodb",
name: "NocoDB",
version: "0.251.1",
description:
"NocoDB is an opensource Airtable alternative that turns any MySQL, PostgreSQL, SQL Server, SQLite & MariaDB into a smart spreadsheet.",
links: {
github: "https://github.com/nocodb/nocodb",
website: "https://nocodb.com/",
docs: "https://docs.nocodb.com/",
},
logo: "nocodb.png",
tags: ["database", "spreadsheet", "low-code", "nocode"],
load: () => import("./nocodb/index").then((m) => m.generate),
},
{
id: "meilisearch",
name: "Meilisearch",
version: "v1.8.3",
description:
"Meilisearch is a free and open-source search engine that allows you to easily add search functionality to your web applications.",
logo: "meilisearch.png",
links: {
github: "https://github.com/meilisearch/meilisearch",
website: "https://www.meilisearch.com/",
docs: "https://docs.meilisearch.com/",
},
tags: ["search"],
load: () => import("./meilisearch/index").then((m) => m.generate),
},
{
id: "phpmyadmin",
name: "Phpmyadmin",
version: "5.2.1",
description:
"Phpmyadmin is a free and open-source web interface for MySQL and MariaDB that allows you to manage your databases.",
logo: "phpmyadmin.png",
links: {
github: "https://github.com/phpmyadmin/phpmyadmin",
website: "https://www.phpmyadmin.net/",
docs: "https://www.phpmyadmin.net/docs/",
},
tags: ["database"],
load: () => import("./phpmyadmin/index").then((m) => m.generate),
},
{
id: "rocketchat",
name: "Rocketchat",
version: "6.9.2",
description:
"Rocket.Chat is a free and open-source web chat platform that allows you to build and manage your own chat applications.",
logo: "rocketchat.png",
links: {
github: "https://github.com/RocketChat/Rocket.Chat",
website: "https://rocket.chat/",
docs: "https://rocket.chat/docs/",
},
tags: ["chat"],
load: () => import("./rocketchat/index").then((m) => m.generate),
},
{
id: "minio",
name: "Minio",
description:
"Minio is an open source object storage server compatible with Amazon S3 cloud storage service.",
logo: "minio.png",
version: "latest",
links: {
github: "https://github.com/minio/minio",
website: "https://minio.io/",
docs: "https://docs.minio.io/",
},
tags: ["storage"],
load: () => import("./minio/index").then((m) => m.generate),
},
{
id: "metabase",
name: "Metabase",
version: "v0.50.8",
description:
"Metabase is an open source business intelligence tool that allows you to ask questions and visualize data.",
logo: "metabase.png",
links: {
github: "https://github.com/metabase/metabase",
website: "https://www.metabase.com/",
docs: "https://www.metabase.com/docs/",
},
tags: ["database", "dashboard"],
load: () => import("./metabase/index").then((m) => m.generate),
},
{
id: "glitchtip",
name: "Glitchtip",
version: "v4.0",
description: "Glitchtip is simple, open source error tracking",
logo: "glitchtip.png",
links: {
github: "https://gitlab.com/glitchtip/",
website: "https://glitchtip.com/",
docs: "https://glitchtip.com/documentation",
},
tags: ["hosting"],
load: () => import("./glitchtip/index").then((m) => m.generate),
},
{
id: "open-webui",
name: "Open WebUI",
version: "v0.3.7",
description:
"Open WebUI is a free and open source chatgpt alternative. Open WebUI is an extensible, feature-rich, and user-friendly self-hosted WebUI designed to operate entirely offline. It supports various LLM runners, including Ollama and OpenAI-compatible APIs. The template include ollama and webui services.",
logo: "open-webui.png",
links: {
github: "https://github.com/open-webui/open-webui",
website: "https://openwebui.com/",
docs: "https://docs.openwebui.com/",
},
tags: ["chat"],
load: () => import("./open-webui/index").then((m) => m.generate),
},
{
id: "listmonk",
name: "Listmonk",
version: "v3.0.0",
description:
"High performance, self-hosted, newsletter and mailing list manager with a modern dashboard.",
logo: "listmonk.png",
links: {
github: "https://github.com/knadh/listmonk",
website: "https://listmonk.app/",
docs: "https://listmonk.app/docs/",
},
tags: ["email", "newsletter", "mailing-list"],
load: () => import("./listmonk/index").then((m) => m.generate),
},
{
id: "doublezero",
name: "Double Zero",
version: "v0.2.1",
description:
"00 is a self hostable SES dashboard for sending and monitoring emails with AWS",
logo: "doublezero.svg",
links: {
github: "https://github.com/technomancy-dev/00",
website: "https://www.double-zero.cloud/",
docs: "https://github.com/technomancy-dev/00",
},
tags: ["email"],
load: () => import("./doublezero/index").then((m) => m.generate),
},
{
id: "umami",
name: "Umami",
version: "v2.12.1",
description:
"Umami is a simple, fast, privacy-focused alternative to Google Analytics.",
logo: "umami.png",
links: {
github: "https://github.com/umami-software/umami",
website: "https://umami.is",
docs: "https://umami.is/docs",
},
tags: ["analytics"],
load: () => import("./umami/index").then((m) => m.generate),
},
{
id: "jellyfin",
name: "jellyfin",
version: "v10.9.7",
description:
"Jellyfin is a Free Software Media System that puts you in control of managing and streaming your media. ",
logo: "jellyfin.svg",
links: {
github: "https://github.com/jellyfin/jellyfin",
website: "https://jellyfin.org/",
docs: "https://jellyfin.org/docs/",
},
tags: ["media system"],
load: () => import("./jellyfin/index").then((m) => m.generate),
},
{
id: "teable",
name: "teable",
version: "v1.3.1-alpha-build.460",
description:
"Teable is a Super fast, Real-time, Professional, Developer friendly, No-code database built on Postgres. It uses a simple, spreadsheet-like interface to create complex enterprise-level database applications. Unlock efficient app development with no-code, free from the hurdles of data security and scalability.",
logo: "teable.png",
links: {
github: "https://github.com/teableio/teable",
website: "https://teable.io/",
docs: "https://help.teable.io/",
},
tags: ["database", "spreadsheet", "low-code", "nocode"],
load: () => import("./teable/index").then((m) => m.generate),
},
{
id: "zipline",
name: "Zipline",
version: "v3.7.9",
description:
"A ShareX/file upload server that is easy to use, packed with features, and with an easy setup!",
logo: "zipline.png",
links: {
github: "https://github.com/diced/zipline",
website: "https://zipline.diced.sh/",
docs: "https://zipline.diced.sh/docs/",
},
tags: ["media system", "storage"],
load: () => import("./zipline/index").then((m) => m.generate),
},
{
id: "soketi",
name: "Soketi",
version: "v1.4-16",
description:
"Soketi is your simple, fast, and resilient open-source WebSockets server.",
logo: "soketi.png",
links: {
github: "https://github.com/soketi/soketi",
website: "https://soketi.app/",
docs: "https://docs.soketi.app/",
},
tags: ["chat"],
load: () => import("./soketi/index").then((m) => m.generate),
},
{
id: "aptabase",
name: "Aptabase",
version: "v1.0.0",
description:
"Aptabase is a self-hosted web analytics platform that lets you track website traffic and user behavior.",
logo: "aptabase.svg",
links: {
github: "https://github.com/aptabase/aptabase",
website: "https://aptabase.com/",
docs: "https://github.com/aptabase/aptabase/blob/main/README.md",
},
tags: ["analytics", "self-hosted"],
load: () => import("./aptabase/index").then((m) => m.generate),
},
{
id: "typebot",
name: "Typebot",
version: "2.27.0",
description: "Typebot is an open-source chatbot builder platform.",
logo: "typebot.svg",
links: {
github: "https://github.com/baptisteArno/typebot.io",
website: "https://typebot.io/",
docs: "https://docs.typebot.io/get-started/introduction",
},
tags: ["chatbot", "builder", "open-source"],
load: () => import("./typebot/index").then((m) => m.generate),
},
{
id: "gitea",
name: "Gitea",
version: "1.22.2",
description:
"Git with a cup of tea! Painless self-hosted all-in-one software development service, including Git hosting, code review, team collaboration, package registry and CI/CD.",
logo: "gitea.png",
links: {
github: "https://github.com/go-gitea/gitea.git",
website: "https://gitea.com/",
docs: "https://docs.gitea.com/installation/install-with-docker",
},
tags: ["self-hosted", "storage"],
load: () => import("./gitea/index").then((m) => m.generate),
},
];


@@ -1,49 +0,0 @@
version: '3.3'
volumes:
db-data:
services:
typebot-db:
image: postgres:14-alpine
restart: always
volumes:
- db-data:/var/lib/postgresql/data
environment:
POSTGRES_USER: typebot
POSTGRES_DB: typebot
POSTGRES_PASSWORD: typebot
networks:
- dokploy-network
typebot-builder:
image: baptistearno/typebot-builder:2.27.0
restart: always
depends_on:
- typebot-db
environment:
ENCRYPTION_SECRET: '${ENCRYPTION_SECRET}'
DATABASE_URL: 'postgresql://typebot:typebot@typebot-db:5432/typebot'
NEXTAUTH_URL: '${NEXTAUTH_URL}'
NEXT_PUBLIC_VIEWER_URL: '${NEXT_PUBLIC_VIEWER_URL}'
ADMIN_EMAIL: '${ADMIN_EMAIL}'
SMTP_HOST: '${SMTP_HOST}'
NEXT_PUBLIC_SMTP_FROM: '${NEXT_PUBLIC_SMTP_FROM}'
SMTP_USERNAME: '${SMTP_USERNAME}'
SMTP_PASSWORD: '${SMTP_PASSWORD}'
DEFAULT_WORKSPACE_PLAN: '${DEFAULT_WORKSPACE_PLAN}'
typebot-viewer:
image: baptistearno/typebot-viewer:2.27.0
restart: always
environment:
ENCRYPTION_SECRET: '${ENCRYPTION_SECRET}'
DATABASE_URL: postgresql://typebot:typebot@typebot-db:5432/typebot
NEXTAUTH_URL: '${NEXTAUTH_URL}'
NEXT_PUBLIC_VIEWER_URL: '${NEXT_PUBLIC_VIEWER_URL}'
ADMIN_EMAIL: '${ADMIN_EMAIL}'
SMTP_HOST: '${SMTP_HOST}'
NEXT_PUBLIC_SMTP_FROM: '${NEXT_PUBLIC_SMTP_FROM}'
SMTP_USERNAME: '${SMTP_USERNAME}'
SMTP_PASSWORD: '${SMTP_PASSWORD}'
DEFAULT_WORKSPACE_PLAN: '${DEFAULT_WORKSPACE_PLAN}'


@@ -1,44 +0,0 @@
import {
type DomainSchema,
type Schema,
type Template,
generateBase64,
generateRandomDomain,
} from "../utils";
export function generate(schema: Schema): Template {
const builderDomain = generateRandomDomain(schema);
const viewerDomain = generateRandomDomain(schema);
const encryptionSecret = generateBase64(24);
const domains: DomainSchema[] = [
{
host: builderDomain,
port: 3000,
serviceName: "typebot-builder",
},
{
host: viewerDomain,
port: 3000,
serviceName: "typebot-viewer",
},
];
const envs = [
`ENCRYPTION_SECRET=${encryptionSecret}`,
`NEXTAUTH_URL=http://${builderDomain}`,
`NEXT_PUBLIC_VIEWER_URL=http://${viewerDomain}`,
"ADMIN_EMAIL=typebot@example.com",
"SMTP_HOST='Fill'",
"SMTP_PORT=25",
"SMTP_USERNAME='Fill'",
"SMTP_PASSWORD='Fill'",
"NEXT_PUBLIC_SMTP_FROM=typebot@example.com",
"DEFAULT_WORKSPACE_PLAN=UNLIMITED",
];
return {
envs,
domains,
};
}


@@ -1,67 +0,0 @@
import type { templates } from "../templates";
import type { Schema, Template } from "../utils";
/**
* Type representing the keys of the templates.
*/
export type TemplatesKeys = (typeof templates)[number]["id"];
/**
* Interface representing the data structure for a template.
*/
export type TemplateData = {
/**
* Unique identifier for the template.
*/
id: string;
/**
* Name of the template.
*/
name: string;
/**
* Description of the template Max(150 Characters).
*/
description: string;
/**
* Links related to the template.
*/
links: {
/**
* GitHub repository link for the template.
*/
github: string;
/**
* Optional documentation link for the template.
*/
docs?: string;
/**
* Optional website link for the template.
*/
website?: string;
};
/**
* Version of the template.
*/
version: string;
/**
* Tags associated with the template.
*/
tags: string[];
/**
* Name of the logo file with extension (e.g. pocketbase.png).
*/
logo: string;
/**
* Function to load the template, returning a promise that resolves with a function
* taking a schema and returning a template.
*/
load: () => Promise<(schema: Schema) => Template>;
};
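// A hypothetical entry illustrating the shape described above (the id, logo,
// URLs, and import path are examples, not a real template):
// const example: TemplateData = {
//   id: "my-app",
//   name: "My App",
//   version: "1.0.0",
//   description: "Example self-hosted application template.",
//   links: {
//     github: "https://github.com/example/my-app",
//     website: "https://example.com/",
//   },
//   logo: "my-app.png",
//   tags: ["example"],
//   load: () => import("./my-app/index").then((m) => m.generate),
// };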


@@ -1,35 +0,0 @@
services:
umami:
image: ghcr.io/umami-software/umami:postgresql-v2.13.2
restart: always
healthcheck:
test: ["CMD-SHELL", "curl http://localhost:3000/api/heartbeat"]
interval: 5s
timeout: 5s
retries: 5
depends_on:
db:
condition: service_healthy
environment:
DATABASE_URL: postgresql://umami:umami@db:5432/umami
DATABASE_TYPE: postgresql
APP_SECRET: ${APP_SECRET}
db:
image: postgres:15-alpine
restart: always
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}"]
interval: 5s
timeout: 5s
retries: 5
networks:
- dokploy-network
volumes:
- db-data:/var/lib/postgresql/data
environment:
POSTGRES_DB: umami
POSTGRES_USER: umami
POSTGRES_PASSWORD: umami
volumes:
db-data:


@@ -1,27 +0,0 @@
import {
type DomainSchema,
type Schema,
type Template,
generateBase64,
generateRandomDomain,
} from "../utils";
export function generate(schema: Schema): Template {
const randomDomain = generateRandomDomain(schema);
const randomSecret = generateBase64();
const domains: DomainSchema[] = [
{
host: randomDomain,
port: 3000,
serviceName: "umami",
},
];
const envs = [`APP_SECRET=${randomSecret}`];
return {
envs,
domains,
};
}


@@ -1,10 +0,0 @@
version: "3.8"
services:
uptime-kuma:
image: louislam/uptime-kuma:1
restart: always
volumes:
- uptime-kuma-data:/app/data
volumes:
uptime-kuma-data:


@@ -1,22 +0,0 @@
import {
type DomainSchema,
type Schema,
type Template,
generateRandomDomain,
} from "../utils";
export function generate(schema: Schema): Template {
const randomDomain = generateRandomDomain(schema);
const domains: DomainSchema[] = [
{
host: randomDomain,
port: 3001,
serviceName: "uptime-kuma",
},
];
return {
domains,
};
}


@@ -2,10 +2,6 @@ import { randomBytes } from "node:crypto";
import { readFile } from "node:fs/promises";
import { join } from "node:path";
import type { Domain } from "@/server/services/domain";
// import { IS_CLOUD } from "@/server/constants";
import { TRPCError } from "@trpc/server";
import { templates } from "../templates";
import type { TemplatesKeys } from "../types/templates-data.type";
export interface Schema {
serverIp: string;
@@ -48,21 +44,6 @@ export const generateBase64 = (bytes = 32): string => {
return randomBytes(bytes).toString("base64");
};
export const loadTemplateModule = async (
id: TemplatesKeys,
): Promise<(schema: Schema) => Template> => {
const templateLoader = templates.find((t) => t.id === id);
if (!templateLoader) {
throw new TRPCError({
code: "BAD_REQUEST",
message: `Template ${id} not found or not implemented yet`,
});
}
const generate = await templateLoader.load();
return generate;
};
export const readTemplateComposeFile = async (id: string) => {
const cwd = process.cwd();
const composeFile = await readFile(


@@ -1,27 +0,0 @@
version: "3.8"
services:
wordpress:
image: wordpress:5.8.3
environment:
WORDPRESS_DB_HOST: db
WORDPRESS_DB_USER: exampleuser
WORDPRESS_DB_PASSWORD: examplepass
WORDPRESS_DB_NAME: exampledb
volumes:
- wordpress_data:/var/www/html
db:
image: mysql:5.7.34
networks:
- dokploy-network
environment:
MYSQL_DATABASE: exampledb
MYSQL_USER: exampleuser
MYSQL_PASSWORD: examplepass
MYSQL_ROOT_PASSWORD: rootpass
volumes:
- db_data:/var/lib/mysql
volumes:
wordpress_data:
db_data:


@@ -1,22 +0,0 @@
import {
type DomainSchema,
type Schema,
type Template,
generateRandomDomain,
} from "../utils";
export function generate(schema: Schema): Template {
const randomDomain = generateRandomDomain(schema);
const domains: DomainSchema[] = [
{
host: randomDomain,
port: 80,
serviceName: "wordpress",
},
];
return {
domains,
};
}


@@ -1,37 +0,0 @@
version: "3"
services:
postgres:
image: postgres:15
networks:
- dokploy-network
restart: unless-stopped
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DATABASE=postgres
volumes:
- pg_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s
timeout: 5s
retries: 5
zipline:
image: ghcr.io/diced/zipline:3.7.9
restart: unless-stopped
environment:
- CORE_RETURN_HTTPS=${ZIPLINE_RETURN_HTTPS}
- CORE_SECRET=${ZIPLINE_SECRET}
- CORE_HOST=0.0.0.0
- CORE_PORT=${ZIPLINE_PORT}
- CORE_DATABASE_URL=postgres://postgres:postgres@postgres/postgres
- CORE_LOGGER=${ZIPLINE_LOGGER}
volumes:
- "../files/uploads:/zipline/uploads"
- "../files/public:/zipline/public"
depends_on:
- "postgres"
volumes:
pg_data:


@@ -1,32 +0,0 @@
import {
type DomainSchema,
type Schema,
type Template,
generateBase64,
generateRandomDomain,
} from "@/server/templates/utils";
export function generate(schema: Schema): Template {
const randomDomain = generateRandomDomain(schema);
const secretBase = generateBase64(64);
const domains: DomainSchema[] = [
{
host: randomDomain,
port: 3000,
serviceName: "zipline",
},
];
const envs = [
"ZIPLINE_PORT=3000",
`ZIPLINE_SECRET=${secretBase}`,
"ZIPLINE_RETURN_HTTPS=false",
"ZIPLINE_LOGGER=true",
];
return {
envs,
domains,
};
}