diff --git a/apps/dokploy/public/templates/supabase.svg b/apps/dokploy/public/templates/supabase.svg
new file mode 100644
index 00000000..2b69d42e
--- /dev/null
+++ b/apps/dokploy/public/templates/supabase.svg
@@ -0,0 +1,15 @@
+
diff --git a/apps/dokploy/templates/supabase/docker-compose.yml b/apps/dokploy/templates/supabase/docker-compose.yml
new file mode 100644
index 00000000..5be8158c
--- /dev/null
+++ b/apps/dokploy/templates/supabase/docker-compose.yml
@@ -0,0 +1,437 @@
+# Usage
+# Start: docker compose up
+# Stop: docker compose down
+# Destroy (also removes volumes): docker compose down -v --remove-orphans
+# NOTE: upstream Supabase's dev override file (docker-compose.dev.yml) is not shipped with this template.
+
+name: supabase
+version: "3.8"
+
+services:
+ studio:
+ container_name: supabase-studio
+ image: supabase/studio:20240729-ce42139
+ networks:
+ - dokploy-network
+ restart: unless-stopped
+ healthcheck:
+ test: [ "CMD", "node", "-e", "require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})" ]
+ timeout: 5s
+ interval: 5s
+ retries: 3
+ depends_on:
+ analytics:
+ condition: service_healthy
+ environment:
+ STUDIO_PG_META_URL: http://meta:8080
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+
+ DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
+ DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
+
+ SUPABASE_URL: http://kong:8000
+ SUPABASE_PUBLIC_URL: http://${SUPABASE_HOST}
+ SUPABASE_ANON_KEY: ${ANON_KEY}
+ SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
+ AUTH_JWT_SECRET: ${JWT_SECRET}
+
+ LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
+ LOGFLARE_URL: http://analytics:4000
+      NEXT_PUBLIC_ENABLE_LOGS: "true"
+ # Comment to use Big Query backend for analytics
+ NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
+ # Uncomment to use Big Query backend for analytics
+ # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery
+
+ kong:
+ container_name: supabase-kong
+ image: kong:2.8.1
+ restart: unless-stopped
+ networks:
+ - dokploy-network
+ # https://unix.stackexchange.com/a/294837
+ entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
+ #ports:
+ # - ${KONG_HTTP_PORT}:8000/tcp
+ # - ${KONG_HTTPS_PORT}:8443/tcp
+ expose:
+ - 8000
+ - 8443
+ labels:
+ - traefik.enable=true
+ - traefik.http.routers.frontend-app.rule=Host(`${SUPABASE_HOST}`)
+ - traefik.http.routers.frontend-app.entrypoints=web
+ - traefik.http.services.frontend-app.loadbalancer.server.port=${KONG_HTTP_PORT}
+ depends_on:
+ analytics:
+ condition: service_healthy
+ environment:
+ KONG_DATABASE: "off"
+ KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
+ # https://github.com/supabase/cli/issues/14
+ KONG_DNS_ORDER: LAST,A,CNAME
+ KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
+ KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
+ KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
+ SUPABASE_ANON_KEY: ${ANON_KEY}
+ SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
+ DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
+ DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
+ volumes:
+ # https://github.com/supabase/supabase/issues/12661
+ - ../files/volumes/api/kong.yml:/home/kong/temp.yml:ro
+
+ auth:
+ container_name: supabase-auth
+ image: supabase/gotrue:v2.158.1
+ networks:
+ - dokploy-network
+ depends_on:
+ db:
+ # Disable this if you are using an external Postgres database
+ condition: service_healthy
+ analytics:
+ condition: service_healthy
+ healthcheck:
+ test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9999/health" ]
+ timeout: 5s
+ interval: 5s
+ retries: 3
+ restart: unless-stopped
+ environment:
+ GOTRUE_API_HOST: 0.0.0.0
+ GOTRUE_API_PORT: 9999
+ API_EXTERNAL_URL: http://${SUPABASE_HOST}
+
+ GOTRUE_DB_DRIVER: postgres
+ GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB}
+
+ GOTRUE_SITE_URL: http://${SUPABASE_HOST}
+ GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
+ GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
+
+ GOTRUE_JWT_ADMIN_ROLES: service_role
+ GOTRUE_JWT_AUD: authenticated
+ GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
+ GOTRUE_JWT_EXP: ${JWT_EXPIRY}
+ GOTRUE_JWT_SECRET: ${JWT_SECRET}
+
+ GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
+ GOTRUE_EXTERNAL_ANONYMOUS_USERS_ENABLED: ${ENABLE_ANONYMOUS_USERS}
+ GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
+ # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
+ # GOTRUE_SMTP_MAX_FREQUENCY: 1s
+ GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
+ GOTRUE_SMTP_HOST: ${SMTP_HOSTNAME}
+ GOTRUE_SMTP_PORT: ${SMTP_PORT}
+ GOTRUE_SMTP_USER: ${SMTP_USER}
+ GOTRUE_SMTP_PASS: ${SMTP_PASS}
+ GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
+ GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
+ GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
+ GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
+ GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
+
+ GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
+ GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}
+ # Uncomment to enable custom access token hook. You'll need to create a public.custom_access_token_hook function and grant necessary permissions.
+ # See: https://supabase.com/docs/guides/auth/auth-hooks#hook-custom-access-token for details
+ # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_ENABLED="true"
+ # GOTRUE_HOOK_CUSTOM_ACCESS_TOKEN_URI="pg-functions://postgres/public/custom_access_token_hook"
+
+ # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_ENABLED="true"
+ # GOTRUE_HOOK_MFA_VERIFICATION_ATTEMPT_URI="pg-functions://postgres/public/mfa_verification_attempt"
+
+ # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_ENABLED="true"
+ # GOTRUE_HOOK_PASSWORD_VERIFICATION_ATTEMPT_URI="pg-functions://postgres/public/password_verification_attempt"
+
+
+
+
+ rest:
+ container_name: supabase-rest
+ image: postgrest/postgrest:v12.2.0
+ networks:
+ - dokploy-network
+ depends_on:
+ db:
+ # Disable this if you are using an external Postgres database
+ condition: service_healthy
+ analytics:
+ condition: service_healthy
+ restart: unless-stopped
+ environment:
+ PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB}
+ PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
+ PGRST_DB_ANON_ROLE: anon
+ PGRST_JWT_SECRET: ${JWT_SECRET}
+ PGRST_DB_USE_LEGACY_GUCS: "false"
+ PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
+ PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
+ command: "postgrest"
+
+ realtime:
+ # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
+ container_name: realtime-dev.supabase-realtime
+ image: supabase/realtime:v2.30.23
+ networks:
+ - dokploy-network
+ depends_on:
+ db:
+ # Disable this if you are using an external Postgres database
+ condition: service_healthy
+ analytics:
+ condition: service_healthy
+ healthcheck:
+ test: [ "CMD", "curl", "-sSfL", "--head", "-o", "/dev/null", "-H", "Authorization: Bearer ${ANON_KEY}", "http://localhost:4000/api/tenants/realtime-dev/health" ]
+ timeout: 5s
+ interval: 5s
+ retries: 3
+ restart: unless-stopped
+ environment:
+ PORT: 4000
+ DB_HOST: ${POSTGRES_HOSTNAME}
+ DB_PORT: ${POSTGRES_PORT}
+ DB_USER: supabase_admin
+ DB_PASSWORD: ${POSTGRES_PASSWORD}
+ DB_NAME: ${POSTGRES_DB}
+ DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
+ DB_ENC_KEY: supabaserealtime
+ API_JWT_SECRET: ${JWT_SECRET}
+ SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
+ ERL_AFLAGS: -proto_dist inet_tcp
+ DNS_NODES: "''"
+ RLIMIT_NOFILE: "10000"
+ APP_NAME: realtime
+      SEED_SELF_HOST: "true"
+
+ # To use S3 backed storage: docker compose -f docker-compose.yml -f docker-compose.s3.yml up
+ storage:
+ container_name: supabase-storage
+ image: supabase/storage-api:v1.0.6
+ networks:
+ - dokploy-network
+ depends_on:
+ db:
+ # Disable this if you are using an external Postgres database
+ condition: service_healthy
+ rest:
+ condition: service_started
+ imgproxy:
+ condition: service_started
+ healthcheck:
+ test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:5000/status" ]
+ timeout: 5s
+ interval: 5s
+ retries: 3
+ restart: unless-stopped
+ environment:
+ ANON_KEY: ${ANON_KEY}
+ SERVICE_KEY: ${SERVICE_ROLE_KEY}
+ POSTGREST_URL: http://rest:3000
+ PGRST_JWT_SECRET: ${JWT_SECRET}
+ DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB}
+ FILE_SIZE_LIMIT: 52428800
+ STORAGE_BACKEND: file
+ FILE_STORAGE_BACKEND_PATH: /var/lib/storage
+ TENANT_ID: stub
+ # TODO: https://github.com/supabase/storage-api/issues/55
+ REGION: stub
+ GLOBAL_S3_BUCKET: stub
+ ENABLE_IMAGE_TRANSFORMATION: "true"
+ IMGPROXY_URL: http://imgproxy:5001
+ volumes:
+ - ../files/volumes/storage:/var/lib/storage:z
+
+ imgproxy:
+ container_name: supabase-imgproxy
+ image: darthsim/imgproxy:v3.8.0
+ networks:
+ - dokploy-network
+ healthcheck:
+ test: [ "CMD", "imgproxy", "health" ]
+ timeout: 5s
+ interval: 5s
+ retries: 3
+ environment:
+ IMGPROXY_BIND: ":5001"
+ IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
+ IMGPROXY_USE_ETAG: "true"
+ IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
+ volumes:
+ - ../files/volumes/storage:/var/lib/storage:z
+
+ meta:
+ container_name: supabase-meta
+ image: supabase/postgres-meta:v0.83.2
+ networks:
+ - dokploy-network
+ depends_on:
+ db:
+ # Disable this if you are using an external Postgres database
+ condition: service_healthy
+ analytics:
+ condition: service_healthy
+ restart: unless-stopped
+ environment:
+ PG_META_PORT: 8080
+ PG_META_DB_HOST: ${POSTGRES_HOSTNAME}
+ PG_META_DB_PORT: ${POSTGRES_PORT}
+ PG_META_DB_NAME: ${POSTGRES_DB}
+ PG_META_DB_USER: supabase_admin
+ PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}
+
+ functions:
+ container_name: supabase-edge-functions
+ image: supabase/edge-runtime:v1.56.0
+ restart: unless-stopped
+ networks:
+ - dokploy-network
+ depends_on:
+ analytics:
+ condition: service_healthy
+ environment:
+ JWT_SECRET: ${JWT_SECRET}
+ SUPABASE_URL: http://kong:8000
+ SUPABASE_ANON_KEY: ${ANON_KEY}
+ SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
+ SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB}
+ # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
+ VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
+ volumes:
+ - ../files/volumes/functions:/home/deno/functions:Z
+ command:
+ - start
+ - --main-service
+ - /home/deno/functions/main
+
+ analytics:
+ container_name: supabase-analytics
+ image: supabase/logflare:1.4.0
+ networks:
+ - dokploy-network
+ healthcheck:
+ test: [ "CMD", "curl", "http://localhost:4000/health" ]
+ timeout: 5s
+ interval: 5s
+ retries: 10
+ restart: unless-stopped
+ depends_on:
+ db:
+ # Disable this if you are using an external Postgres database
+ condition: service_healthy
+ # Uncomment to use Big Query backend for analytics
+ # volumes:
+ # - type: bind
+ # source: ${PWD}/gcloud.json
+ # target: /opt/app/rel/logflare/bin/gcloud.json
+ # read_only: true
+ environment:
+ LOGFLARE_NODE_HOST: 127.0.0.1
+ DB_USERNAME: supabase_admin
+ DB_DATABASE: ${POSTGRES_DB}
+ DB_HOSTNAME: ${POSTGRES_HOSTNAME}
+ DB_PORT: ${POSTGRES_PORT}
+ DB_PASSWORD: ${POSTGRES_PASSWORD}
+ DB_SCHEMA: _analytics
+ LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
+      LOGFLARE_SINGLE_TENANT: "true"
+      LOGFLARE_SUPABASE_MODE: "true"
+ LOGFLARE_MIN_CLUSTER_SIZE: 1
+
+ # Comment variables to use Big Query backend for analytics
+ POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOSTNAME}:${POSTGRES_PORT}/${POSTGRES_DB}
+ POSTGRES_BACKEND_SCHEMA: _analytics
+ LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
+ # Uncomment to use Big Query backend for analytics
+ # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
+ # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
+ #ports:
+ # - 4000:4000
+ expose:
+ - 4000
+
+ # Comment out everything below this point if you are using an external Postgres database
+ db:
+ container_name: supabase-db
+ image: supabase/postgres:15.1.1.78
+ networks:
+ - dokploy-network
+ healthcheck:
+ test: pg_isready -U postgres -h localhost
+ interval: 5s
+ timeout: 5s
+ retries: 10
+ depends_on:
+ vector:
+ condition: service_healthy
+ command:
+ - postgres
+ - -c
+ - config_file=/etc/postgresql/postgresql.conf
+ - -c
+ - log_min_messages=fatal # prevents Realtime polling queries from appearing in logs
+ restart: unless-stopped
+ #ports:
+ # # Pass down internal port because it's set dynamically by other services
+ # - ${POSTGRES_PORT}:${POSTGRES_PORT}
+ expose:
+ - ${POSTGRES_PORT}
+ environment:
+ POSTGRES_HOST: /var/run/postgresql
+ PGPORT: ${POSTGRES_PORT}
+ POSTGRES_PORT: ${POSTGRES_PORT}
+ PGPASSWORD: ${POSTGRES_PASSWORD}
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
+ PGDATABASE: ${POSTGRES_DB}
+ POSTGRES_DB: ${POSTGRES_DB}
+ JWT_SECRET: ${JWT_SECRET}
+ JWT_EXP: ${JWT_EXPIRY}
+ volumes:
+ - ../files/volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
+ # Must be superuser to create event trigger
+ - ../files/volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
+ # Must be superuser to alter reserved role
+ - ../files/volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
+ # Initialize the database settings with JWT_SECRET and JWT_EXP
+ - ../files/volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
+ # PGDATA directory is persisted between restarts
+ - ../files/volumes/db/data:/var/lib/postgresql/data:Z
+ # Changes required for Analytics support
+ - ../files/volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
+ # Use named volume to persist pgsodium decryption key between restarts
+ - db-config:/etc/postgresql-custom
+
+ vector:
+ container_name: supabase-vector
+ image: timberio/vector:0.28.1-alpine
+ networks:
+ - dokploy-network
+ healthcheck:
+      test:
+        # wget probe against Vector's API health endpoint
+        [
+          "CMD",
+          "wget",
+          "--no-verbose",
+          "--tries=1",
+          "--spider",
+          "http://vector:9001/health"
+        ]
+ timeout: 5s
+ interval: 5s
+ retries: 3
+ volumes:
+ - ../files/volumes/logs/vector.yml:/etc/vector/vector.yml:ro
+ - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
+ environment:
+ LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
+ command: [ "--config", "etc/vector/vector.yml" ]
+
+volumes:
+ db-config:
+
+
+networks:
+ dokploy-network:
+ external: true
diff --git a/apps/dokploy/templates/supabase/index.ts b/apps/dokploy/templates/supabase/index.ts
new file mode 100644
index 00000000..2456f015
--- /dev/null
+++ b/apps/dokploy/templates/supabase/index.ts
@@ -0,0 +1,989 @@
+import { createHmac, randomBytes } from "node:crypto";
+import {
+ type Schema,
+ type Template,
+ generateBase64,
+ generateHash,
+ generatePassword,
+ generateRandomDomain,
+} from "../utils";
+
+interface JWTPayload {
+ role: "anon" | "service_role";
+ iss: string;
+ iat: number;
+ exp: number;
+}
+
+function base64UrlEncode(str: string): string {
+ return Buffer.from(str)
+ .toString("base64")
+ .replace(/\+/g, "-")
+ .replace(/\//g, "_")
+ .replace(/=/g, "");
+}
+
+function generateJWT(payload: JWTPayload, secret: string): string {
+ const header = { alg: "HS256", typ: "JWT" };
+
+ const encodedHeader = base64UrlEncode(JSON.stringify(header));
+ const encodedPayload = base64UrlEncode(JSON.stringify(payload));
+
+ const signature = createHmac("sha256", secret)
+ .update(`${encodedHeader}.${encodedPayload}`)
+ .digest("base64url");
+
+ return `${encodedHeader}.${encodedPayload}.${signature}`;
+}
+
+export function generateSupabaseAnonJWT(secret: string): string {
+ const now = Math.floor(Date.now() / 1000);
+ const payload: JWTPayload = {
+ role: "anon",
+ iss: "supabase",
+ iat: now,
+ exp: now + 100 * 365 * 24 * 60 * 60, // 100 years
+ };
+
+ return generateJWT(payload, secret);
+}
+
+export function generateSupabaseServiceJWT(secret: string): string {
+ const now = Math.floor(Date.now() / 1000);
+ const payload: JWTPayload = {
+ role: "service_role",
+ iss: "supabase",
+ iat: now,
+ exp: now + 100 * 365 * 24 * 60 * 60, // 100 years
+ };
+
+ return generateJWT(payload, secret);
+}
+
+export function generate(schema: Schema): Template {
+ const mainServiceHash = generateHash(schema.projectName);
+ const randomDomain = generateRandomDomain(schema);
+
+ const postgresPassword = generatePassword(32);
+ const jwtSecret = generateBase64(32);
+ const dashboardPassword = generatePassword(32);
+ const logflareApiKey = generatePassword(32);
+
+	const anonKey = generateSupabaseAnonJWT(jwtSecret);
+	const serviceRoleKey = generateSupabaseServiceJWT(jwtSecret);
+
+	const envs = [
+		`SUPABASE_HOST=${randomDomain}`,
+		`POSTGRES_PASSWORD=${postgresPassword}`,
+		`JWT_SECRET=${jwtSecret}`,
+		`ANON_KEY=${anonKey}`,
+ `SERVICE_ROLE_KEY=${serviceRoleKey}`,
+ "DASHBOARD_USERNAME=supabase",
+ `DASHBOARD_PASSWORD=${dashboardPassword}`,
+ "POSTGRES_HOSTNAME=db",
+ "POSTGRES_DB=postgres",
+ "POSTGRES_PORT=5432",
+ "KONG_HTTP_PORT=8000",
+ "KONG_HTTPS_PORT=8443",
+ "PGRST_DB_SCHEMAS=public,storage,graphql_public",
+ "ADDITIONAL_REDIRECT_URLS=",
+ "JWT_EXPIRY=3600",
+ "DISABLE_SIGNUP=false",
+		`MAILER_URLPATHS_CONFIRMATION="/auth/v1/verify"`,
+		`MAILER_URLPATHS_INVITE="/auth/v1/verify"`,
+		`MAILER_URLPATHS_RECOVERY="/auth/v1/verify"`,
+		`MAILER_URLPATHS_EMAIL_CHANGE="/auth/v1/verify"`,
+ "ENABLE_EMAIL_SIGNUP=true",
+ "ENABLE_EMAIL_AUTOCONFIRM=false",
+ "SMTP_ADMIN_EMAIL=admin@example.com",
+ "SMTP_HOSTNAME=supabase-mail",
+ "SMTP_PORT=2500",
+ "SMTP_USER=fake_mail_user",
+ "SMTP_PASS=fake_mail_password",
+ "SMTP_SENDER_NAME=fake_sender",
+ "ENABLE_ANONYMOUS_USERS=false",
+ "ENABLE_PHONE_SIGNUP=true",
+ "ENABLE_PHONE_AUTOCONFIRM=true",
+ "STUDIO_DEFAULT_ORGANIZATION=Default Organization",
+ "STUDIO_DEFAULT_PROJECT=Default Project",
+ "STUDIO_PORT=3000",
+ "IMGPROXY_ENABLE_WEBP_DETECTION=true",
+ "FUNCTIONS_VERIFY_JWT=false",
+ `LOGFLARE_LOGGER_BACKEND_API_KEY=${logflareApiKey}`,
+ `LOGFLARE_API_KEY=${logflareApiKey}`,
+ "DOCKER_SOCKET_LOCATION=/var/run/docker.sock",
+ "GOOGLE_PROJECT_ID=GOOGLE_PROJECT_ID",
+ "GOOGLE_PROJECT_NUMBER=GOOGLE_PROJECT_NUMBER",
+ `HASH=${mainServiceHash}`,
+ ];
+
+ const mounts: Template["mounts"] = [
+ {
+ filePath: "/volumes/api/kong.yml",
+ content: `
+_format_version: '2.1'
+_transform: true
+
+###
+### Consumers / Users
+###
+consumers:
+ - username: DASHBOARD
+ - username: anon
+ keyauth_credentials:
+ - key: $SUPABASE_ANON_KEY
+ - username: service_role
+ keyauth_credentials:
+ - key: $SUPABASE_SERVICE_KEY
+
+###
+### Access Control List
+###
+acls:
+ - consumer: anon
+ group: anon
+ - consumer: service_role
+ group: admin
+
+###
+### Dashboard credentials
+###
+basicauth_credentials:
+ - consumer: DASHBOARD
+ username: $DASHBOARD_USERNAME
+ password: $DASHBOARD_PASSWORD
+
+###
+### API Routes
+###
+services:
+ ## Open Auth routes
+ - name: auth-v1-open
+ url: http://auth:9999/verify
+ routes:
+ - name: auth-v1-open
+ strip_path: true
+ paths:
+ - /auth/v1/verify
+ plugins:
+ - name: cors
+ - name: auth-v1-open-callback
+ url: http://auth:9999/callback
+ routes:
+ - name: auth-v1-open-callback
+ strip_path: true
+ paths:
+ - /auth/v1/callback
+ plugins:
+ - name: cors
+ - name: auth-v1-open-authorize
+ url: http://auth:9999/authorize
+ routes:
+ - name: auth-v1-open-authorize
+ strip_path: true
+ paths:
+ - /auth/v1/authorize
+ plugins:
+ - name: cors
+
+ ## Secure Auth routes
+ - name: auth-v1
+ _comment: 'GoTrue: /auth/v1/* -> http://auth:9999/*'
+ url: http://auth:9999/
+ routes:
+ - name: auth-v1-all
+ strip_path: true
+ paths:
+ - /auth/v1/
+ plugins:
+ - name: cors
+ - name: key-auth
+ config:
+ hide_credentials: false
+ - name: acl
+ config:
+ hide_groups_header: true
+ allow:
+ - admin
+ - anon
+
+ ## Secure REST routes
+ - name: rest-v1
+ _comment: 'PostgREST: /rest/v1/* -> http://rest:3000/*'
+ url: http://rest:3000/
+ routes:
+ - name: rest-v1-all
+ strip_path: true
+ paths:
+ - /rest/v1/
+ plugins:
+ - name: cors
+ - name: key-auth
+ config:
+ hide_credentials: true
+ - name: acl
+ config:
+ hide_groups_header: true
+ allow:
+ - admin
+ - anon
+
+ ## Secure GraphQL routes
+ - name: graphql-v1
+ _comment: 'PostgREST: /graphql/v1/* -> http://rest:3000/rpc/graphql'
+ url: http://rest:3000/rpc/graphql
+ routes:
+ - name: graphql-v1-all
+ strip_path: true
+ paths:
+ - /graphql/v1
+ plugins:
+ - name: cors
+ - name: key-auth
+ config:
+ hide_credentials: true
+ - name: request-transformer
+ config:
+ add:
+ headers:
+ - Content-Profile:graphql_public
+ - name: acl
+ config:
+ hide_groups_header: true
+ allow:
+ - admin
+ - anon
+
+ ## Secure Realtime routes
+ - name: realtime-v1-ws
+ _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
+ url: http://realtime-dev.supabase-realtime:4000/socket
+ protocol: ws
+ routes:
+ - name: realtime-v1-ws
+ strip_path: true
+ paths:
+ - /realtime/v1/
+ plugins:
+ - name: cors
+ - name: key-auth
+ config:
+ hide_credentials: false
+ - name: acl
+ config:
+ hide_groups_header: true
+ allow:
+ - admin
+ - anon
+ - name: realtime-v1-rest
+ _comment: 'Realtime: /realtime/v1/* -> ws://realtime:4000/socket/*'
+ url: http://realtime-dev.supabase-realtime:4000/api
+ protocol: http
+ routes:
+ - name: realtime-v1-rest
+ strip_path: true
+ paths:
+ - /realtime/v1/api
+ plugins:
+ - name: cors
+ - name: key-auth
+ config:
+ hide_credentials: false
+ - name: acl
+ config:
+ hide_groups_header: true
+ allow:
+ - admin
+ - anon
+ ## Storage routes: the storage server manages its own auth
+ - name: storage-v1
+ _comment: 'Storage: /storage/v1/* -> http://storage:5000/*'
+ url: http://storage:5000/
+ routes:
+ - name: storage-v1-all
+ strip_path: true
+ paths:
+ - /storage/v1/
+ plugins:
+ - name: cors
+
+ ## Edge Functions routes
+ - name: functions-v1
+ _comment: 'Edge Functions: /functions/v1/* -> http://functions:9000/*'
+ url: http://functions:9000/
+ routes:
+ - name: functions-v1-all
+ strip_path: true
+ paths:
+ - /functions/v1/
+ plugins:
+ - name: cors
+
+ ## Analytics routes
+ - name: analytics-v1
+ _comment: 'Analytics: /analytics/v1/* -> http://logflare:4000/*'
+ url: http://analytics:4000/
+ routes:
+ - name: analytics-v1-all
+ strip_path: true
+ paths:
+ - /analytics/v1/
+
+ ## Secure Database routes
+ - name: meta
+ _comment: 'pg-meta: /pg/* -> http://pg-meta:8080/*'
+ url: http://meta:8080/
+ routes:
+ - name: meta-all
+ strip_path: true
+ paths:
+ - /pg/
+ plugins:
+ - name: key-auth
+ config:
+ hide_credentials: false
+ - name: acl
+ config:
+ hide_groups_header: true
+ allow:
+ - admin
+
+ ## Protected Dashboard - catch all remaining routes
+ - name: dashboard
+ _comment: 'Studio: /* -> http://studio:3000/*'
+ url: http://studio:3000/
+ routes:
+ - name: dashboard-all
+ strip_path: true
+ paths:
+ - /
+ plugins:
+ - name: cors
+ - name: basic-auth
+ config:
+ hide_credentials: true
+ `,
+ },
+ {
+ filePath: "/volumes/db/init/data.sql",
+ content: `
+ `,
+ },
+ {
+ filePath: "/volumes/db/jwt.sql",
+ content: `
+\\set jwt_secret \`echo "$JWT_SECRET"\`
+\\set jwt_exp \`echo "$JWT_EXP"\`
+
+ALTER DATABASE postgres SET "app.settings.jwt_secret" TO :'jwt_secret';
+ALTER DATABASE postgres SET "app.settings.jwt_exp" TO :'jwt_exp';
+ `,
+ },
+ {
+ filePath: "/volumes/db/logs.sql",
+ content: `
+\\set pguser \`echo "$POSTGRES_USER"\`
+
+create schema if not exists _analytics;
+alter schema _analytics owner to :pguser;
+ `,
+ },
+ {
+ filePath: "/volumes/db/realtime.sql",
+ content: `
+\\set pguser \`echo "$POSTGRES_USER"\`
+
+create schema if not exists _realtime;
+alter schema _realtime owner to :pguser;
+ `,
+ },
+ {
+ filePath: "/volumes/db/roles.sql",
+ content: `
+-- NOTE: change to your own passwords for production environments
+\\set pgpass \`echo "$POSTGRES_PASSWORD"\`
+
+ALTER USER authenticator WITH PASSWORD :'pgpass';
+ALTER USER pgbouncer WITH PASSWORD :'pgpass';
+ALTER USER supabase_auth_admin WITH PASSWORD :'pgpass';
+ALTER USER supabase_functions_admin WITH PASSWORD :'pgpass';
+ALTER USER supabase_storage_admin WITH PASSWORD :'pgpass';
+ `,
+ },
+ {
+ filePath: "/volumes/db/webhooks.sql",
+ content: `
+BEGIN;
+ -- Create pg_net extension
+ CREATE EXTENSION IF NOT EXISTS pg_net SCHEMA extensions;
+ -- Create supabase_functions schema
+ CREATE SCHEMA supabase_functions AUTHORIZATION supabase_admin;
+ GRANT USAGE ON SCHEMA supabase_functions TO postgres, anon, authenticated, service_role;
+ ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON TABLES TO postgres, anon, authenticated, service_role;
+ ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON FUNCTIONS TO postgres, anon, authenticated, service_role;
+ ALTER DEFAULT PRIVILEGES IN SCHEMA supabase_functions GRANT ALL ON SEQUENCES TO postgres, anon, authenticated, service_role;
+ -- supabase_functions.migrations definition
+ CREATE TABLE supabase_functions.migrations (
+ version text PRIMARY KEY,
+ inserted_at timestamptz NOT NULL DEFAULT NOW()
+ );
+ -- Initial supabase_functions migration
+ INSERT INTO supabase_functions.migrations (version) VALUES ('initial');
+ -- supabase_functions.hooks definition
+ CREATE TABLE supabase_functions.hooks (
+ id bigserial PRIMARY KEY,
+ hook_table_id integer NOT NULL,
+ hook_name text NOT NULL,
+ created_at timestamptz NOT NULL DEFAULT NOW(),
+ request_id bigint
+ );
+ CREATE INDEX supabase_functions_hooks_request_id_idx ON supabase_functions.hooks USING btree (request_id);
+ CREATE INDEX supabase_functions_hooks_h_table_id_h_name_idx ON supabase_functions.hooks USING btree (hook_table_id, hook_name);
+ COMMENT ON TABLE supabase_functions.hooks IS 'Supabase Functions Hooks: Audit trail for triggered hooks.';
+ CREATE FUNCTION supabase_functions.http_request()
+ RETURNS trigger
+ LANGUAGE plpgsql
+ AS $function$
+ DECLARE
+ request_id bigint;
+ payload jsonb;
+ url text := TG_ARGV[0]::text;
+ method text := TG_ARGV[1]::text;
+ headers jsonb DEFAULT '{}'::jsonb;
+ params jsonb DEFAULT '{}'::jsonb;
+ timeout_ms integer DEFAULT 1000;
+ BEGIN
+ IF url IS NULL OR url = 'null' THEN
+ RAISE EXCEPTION 'url argument is missing';
+ END IF;
+
+ IF method IS NULL OR method = 'null' THEN
+ RAISE EXCEPTION 'method argument is missing';
+ END IF;
+
+ IF TG_ARGV[2] IS NULL OR TG_ARGV[2] = 'null' THEN
+ headers = '{"Content-Type": "application/json"}'::jsonb;
+ ELSE
+ headers = TG_ARGV[2]::jsonb;
+ END IF;
+
+ IF TG_ARGV[3] IS NULL OR TG_ARGV[3] = 'null' THEN
+ params = '{}'::jsonb;
+ ELSE
+ params = TG_ARGV[3]::jsonb;
+ END IF;
+
+ IF TG_ARGV[4] IS NULL OR TG_ARGV[4] = 'null' THEN
+ timeout_ms = 1000;
+ ELSE
+ timeout_ms = TG_ARGV[4]::integer;
+ END IF;
+
+ CASE
+ WHEN method = 'GET' THEN
+ SELECT http_get INTO request_id FROM net.http_get(
+ url,
+ params,
+ headers,
+ timeout_ms
+ );
+ WHEN method = 'POST' THEN
+ payload = jsonb_build_object(
+ 'old_record', OLD,
+ 'record', NEW,
+ 'type', TG_OP,
+ 'table', TG_TABLE_NAME,
+ 'schema', TG_TABLE_SCHEMA
+ );
+
+ SELECT http_post INTO request_id FROM net.http_post(
+ url,
+ payload,
+ params,
+ headers,
+ timeout_ms
+ );
+ ELSE
+ RAISE EXCEPTION 'method argument % is invalid', method;
+ END CASE;
+
+ INSERT INTO supabase_functions.hooks
+ (hook_table_id, hook_name, request_id)
+ VALUES
+ (TG_RELID, TG_NAME, request_id);
+
+ RETURN NEW;
+ END
+ $function$;
+ -- Supabase super admin
+ DO
+ $$
+ BEGIN
+ IF NOT EXISTS (
+ SELECT 1
+ FROM pg_roles
+ WHERE rolname = 'supabase_functions_admin'
+ )
+ THEN
+ CREATE USER supabase_functions_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION;
+ END IF;
+ END
+ $$;
+ GRANT ALL PRIVILEGES ON SCHEMA supabase_functions TO supabase_functions_admin;
+ GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA supabase_functions TO supabase_functions_admin;
+ GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA supabase_functions TO supabase_functions_admin;
+ ALTER USER supabase_functions_admin SET search_path = "supabase_functions";
+ ALTER table "supabase_functions".migrations OWNER TO supabase_functions_admin;
+ ALTER table "supabase_functions".hooks OWNER TO supabase_functions_admin;
+ ALTER function "supabase_functions".http_request() OWNER TO supabase_functions_admin;
+ GRANT supabase_functions_admin TO postgres;
+ -- Remove unused supabase_pg_net_admin role
+ DO
+ $$
+ BEGIN
+ IF EXISTS (
+ SELECT 1
+ FROM pg_roles
+ WHERE rolname = 'supabase_pg_net_admin'
+ )
+ THEN
+ REASSIGN OWNED BY supabase_pg_net_admin TO supabase_admin;
+ DROP OWNED BY supabase_pg_net_admin;
+ DROP ROLE supabase_pg_net_admin;
+ END IF;
+ END
+ $$;
+ -- pg_net grants when extension is already enabled
+ DO
+ $$
+ BEGIN
+ IF EXISTS (
+ SELECT 1
+ FROM pg_extension
+ WHERE extname = 'pg_net'
+ )
+ THEN
+ GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+ ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+ ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+ ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+ ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+ REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+ REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+ GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+ GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+ END IF;
+ END
+ $$;
+ -- Event trigger for pg_net
+ CREATE OR REPLACE FUNCTION extensions.grant_pg_net_access()
+ RETURNS event_trigger
+ LANGUAGE plpgsql
+ AS $$
+ BEGIN
+ IF EXISTS (
+ SELECT 1
+ FROM pg_event_trigger_ddl_commands() AS ev
+ JOIN pg_extension AS ext
+ ON ev.objid = ext.oid
+ WHERE ext.extname = 'pg_net'
+ )
+ THEN
+ GRANT USAGE ON SCHEMA net TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+ ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+ ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SECURITY DEFINER;
+ ALTER function net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+ ALTER function net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) SET search_path = net;
+ REVOKE ALL ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+ REVOKE ALL ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) FROM PUBLIC;
+ GRANT EXECUTE ON FUNCTION net.http_get(url text, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+ GRANT EXECUTE ON FUNCTION net.http_post(url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer) TO supabase_functions_admin, postgres, anon, authenticated, service_role;
+ END IF;
+ END;
+ $$;
+ COMMENT ON FUNCTION extensions.grant_pg_net_access IS 'Grants access to pg_net';
+ DO
+ $$
+ BEGIN
+ IF NOT EXISTS (
+ SELECT 1
+ FROM pg_event_trigger
+ WHERE evtname = 'issue_pg_net_access'
+ ) THEN
+ CREATE EVENT TRIGGER issue_pg_net_access ON ddl_command_end WHEN TAG IN ('CREATE EXTENSION')
+ EXECUTE PROCEDURE extensions.grant_pg_net_access();
+ END IF;
+ END
+ $$;
+ INSERT INTO supabase_functions.migrations (version) VALUES ('20210809183423_update_grants');
+ ALTER function supabase_functions.http_request() SECURITY DEFINER;
+ ALTER function supabase_functions.http_request() SET search_path = supabase_functions;
+ REVOKE ALL ON FUNCTION supabase_functions.http_request() FROM PUBLIC;
+ GRANT EXECUTE ON FUNCTION supabase_functions.http_request() TO postgres, anon, authenticated, service_role;
+COMMIT;
+ `,
+ },
+ {
+ filePath: "/volumes/functions/hello/index.ts",
+ content: `
+// Follow this setup guide to integrate the Deno language server with your editor:
+// https://deno.land/manual/getting_started/setup_your_environment
+// This enables autocomplete, go to definition, etc.
+
+import { serve } from "https://deno.land/std@0.177.1/http/server.ts"
+
+serve(async () => {
+  return new Response(
+    \`"Hello from Edge Functions!"\`,
+    { headers: { "Content-Type": "application/json" } },
+  )
+})
+
+// To invoke:
+// curl 'http://localhost:<KONG_HTTP_PORT>/functions/v1/hello' \\
+//   --header 'Authorization: Bearer <ANON_KEY>'
+ `,
+ },
+ {
+ filePath: "/volumes/functions/main/index.ts",
+ content: `
+import { serve } from 'https://deno.land/std@0.131.0/http/server.ts'
+import * as jose from 'https://deno.land/x/jose@v4.14.4/index.ts'
+
+console.log('main function started')
+
+const JWT_SECRET = Deno.env.get('JWT_SECRET')
+const VERIFY_JWT = Deno.env.get('VERIFY_JWT') === 'true'
+
+function getAuthToken(req: Request) {
+  const authHeader = req.headers.get('authorization')
+  if (!authHeader) {
+    throw new Error('Missing authorization header')
+  }
+  const [bearer, token] = authHeader.split(' ')
+  if (bearer !== 'Bearer') {
+    throw new Error(\`Auth header is not 'Bearer {token}'\`)
+  }
+  return token
+}
+
+async function verifyJWT(jwt: string): Promise<boolean> {
+  const encoder = new TextEncoder()
+  const secretKey = encoder.encode(JWT_SECRET)
+  try {
+    await jose.jwtVerify(jwt, secretKey)
+  } catch (err) {
+    console.error(err)
+    return false
+  }
+  return true
+}
+
+serve(async (req: Request) => {
+  if (req.method !== 'OPTIONS' && VERIFY_JWT) {
+    try {
+      const token = getAuthToken(req)
+      const isValidJWT = await verifyJWT(token)
+
+      if (!isValidJWT) {
+        return new Response(JSON.stringify({ msg: 'Invalid JWT' }), {
+          status: 401,
+          headers: { 'Content-Type': 'application/json' },
+        })
+      }
+    } catch (e) {
+      console.error(e)
+      return new Response(JSON.stringify({ msg: e.toString() }), {
+        status: 401,
+        headers: { 'Content-Type': 'application/json' },
+      })
+    }
+  }
+
+  const url = new URL(req.url)
+  const { pathname } = url
+  const path_parts = pathname.split('/')
+  const service_name = path_parts[1]
+
+  if (!service_name || service_name === '') {
+    const error = { msg: 'missing function name in request' }
+    return new Response(JSON.stringify(error), {
+      status: 400,
+      headers: { 'Content-Type': 'application/json' },
+    })
+  }
+
+  const servicePath = \`/home/deno/functions/\${service_name}\`
+  console.error(\`serving the request with \${servicePath}\`)
+
+  const memoryLimitMb = 150
+  const workerTimeoutMs = 1 * 60 * 1000
+  const noModuleCache = false
+  const importMapPath = null
+  const envVarsObj = Deno.env.toObject()
+  const envVars = Object.keys(envVarsObj).map((k) => [k, envVarsObj[k]])
+
+  try {
+    const worker = await EdgeRuntime.userWorkers.create({
+      servicePath,
+      memoryLimitMb,
+      workerTimeoutMs,
+      noModuleCache,
+      importMapPath,
+      envVars,
+    })
+    return await worker.fetch(req)
+  } catch (e) {
+    const error = { msg: e.toString() }
+    return new Response(JSON.stringify(error), {
+      status: 500,
+      headers: { 'Content-Type': 'application/json' },
+    })
+  }
+})
+ `,
+ },
+ {
+ filePath: "/volumes/logs/vector.yml",
+ content: `
+api:
+ enabled: true
+ address: 0.0.0.0:9001
+
+sources:
+ docker_host:
+ type: docker_logs
+ exclude_containers:
+ - supabase-vector
+
+transforms:
+ project_logs:
+ type: remap
+ inputs:
+ - docker_host
+ source: |-
+ .project = "default"
+ .event_message = del(.message)
+ .appname = del(.container_name)
+ del(.container_created_at)
+ del(.container_id)
+ del(.source_type)
+ del(.stream)
+ del(.label)
+ del(.image)
+ del(.host)
+ del(.stream)
+ router:
+ type: route
+ inputs:
+ - project_logs
+ route:
+ kong: '.appname == "supabase-kong"'
+ auth: '.appname == "supabase-auth"'
+ rest: '.appname == "supabase-rest"'
+ realtime: '.appname == "supabase-realtime"'
+ storage: '.appname == "supabase-storage"'
+ functions: '.appname == "supabase-functions"'
+ db: '.appname == "supabase-db"'
+ # Ignores non nginx errors since they are related with kong booting up
+ kong_logs:
+ type: remap
+ inputs:
+ - router.kong
+ source: |-
+ req, err = parse_nginx_log(.event_message, "combined")
+ if err == null {
+ .timestamp = req.timestamp
+ .metadata.request.headers.referer = req.referer
+ .metadata.request.headers.user_agent = req.agent
+ .metadata.request.headers.cf_connecting_ip = req.client
+ .metadata.request.method = req.method
+ .metadata.request.path = req.path
+ .metadata.request.protocol = req.protocol
+ .metadata.response.status_code = req.status
+ }
+ if err != null {
+ abort
+ }
+ # Ignores non nginx errors since they are related with kong booting up
+ kong_err:
+ type: remap
+ inputs:
+ - router.kong
+ source: |-
+ .metadata.request.method = "GET"
+ .metadata.response.status_code = 200
+ parsed, err = parse_nginx_log(.event_message, "error")
+ if err == null {
+ .timestamp = parsed.timestamp
+ .severity = parsed.severity
+ .metadata.request.host = parsed.host
+ .metadata.request.headers.cf_connecting_ip = parsed.client
+ url, err = split(parsed.request, " ")
+ if err == null {
+ .metadata.request.method = url[0]
+ .metadata.request.path = url[1]
+ .metadata.request.protocol = url[2]
+ }
+ }
+ if err != null {
+ abort
+ }
+ # Gotrue logs are structured json strings which frontend parses directly. But we keep metadata for consistency.
+ auth_logs:
+ type: remap
+ inputs:
+ - router.auth
+ source: |-
+ parsed, err = parse_json(.event_message)
+ if err == null {
+ .metadata.timestamp = parsed.time
+ .metadata = merge!(.metadata, parsed)
+ }
+ # PostgREST logs are structured so we separate timestamp from message using regex
+ rest_logs:
+ type: remap
+ inputs:
+ - router.rest
+ source: |-
+ parsed, err = parse_regex(.event_message, r'^(?P