Mirror of https://github.com/Dokploy/dokploy, synced 2025-06-26 18:27:59 +00:00
fix: resolved merge conflicts with fork/canary
@@ -1,121 +1,77 @@
import { IS_CLOUD, paths } from "@dokploy/server/constants";
import { type RotatingFileStream, createStream } from "rotating-file-stream";
import { paths } from "@dokploy/server/constants";
import { execAsync } from "../process/execAsync";
import { findAdmin } from "@dokploy/server/services/admin";
import { updateUser } from "@dokploy/server/services/user";
import { scheduleJob, scheduledJobs } from "node-schedule";

class LogRotationManager {
private static instance: LogRotationManager;
private stream: RotatingFileStream | null = null;
const LOG_CLEANUP_JOB_NAME = "access-log-cleanup";

private constructor() {
if (IS_CLOUD) {
return;
}
this.initialize().catch(console.error);
}

public static getInstance(): LogRotationManager {
if (!LogRotationManager.instance) {
LogRotationManager.instance = new LogRotationManager();
}
return LogRotationManager.instance;
}

private async initialize(): Promise<void> {
const isActive = await this.getStateFromDB();
if (isActive) {
await this.activateStream();
}
}

private async getStateFromDB(): Promise<boolean> {
const admin = await findAdmin();
return admin?.user.enableLogRotation ?? false;
}

private async setStateInDB(active: boolean): Promise<void> {
const admin = await findAdmin();
if (!admin) {
return;
}
await updateUser(admin.user.id, {
enableLogRotation: active,
});
}

private async activateStream(): Promise<void> {
export const startLogCleanup = async (
cronExpression = "0 0 * * *",
): Promise<boolean> => {
try {
const { DYNAMIC_TRAEFIK_PATH } = paths();
if (this.stream) {
await this.deactivateStream();

const existingJob = scheduledJobs[LOG_CLEANUP_JOB_NAME];
if (existingJob) {
existingJob.cancel();
}

this.stream = createStream("access.log", {
size: "100M",
interval: "1d",
path: DYNAMIC_TRAEFIK_PATH,
rotate: 6,
compress: "gzip",
});
scheduleJob(LOG_CLEANUP_JOB_NAME, cronExpression, async () => {
try {
await execAsync(
`tail -n 1000 ${DYNAMIC_TRAEFIK_PATH}/access.log > ${DYNAMIC_TRAEFIK_PATH}/access.log.tmp && mv ${DYNAMIC_TRAEFIK_PATH}/access.log.tmp ${DYNAMIC_TRAEFIK_PATH}/access.log`,
);

this.stream.on("rotation", this.handleRotation.bind(this));
}

private async deactivateStream(): Promise<void> {
return new Promise<void>((resolve) => {
if (this.stream) {
this.stream.end(() => {
this.stream = null;
resolve();
});
} else {
resolve();
await execAsync("docker exec dokploy-traefik kill -USR1 1");
} catch (error) {
console.error("Error during log cleanup:", error);
}
});
}

public async activate(): Promise<boolean> {
const currentState = await this.getStateFromDB();
if (currentState) {
return true;
const admin = await findAdmin();
if (admin) {
await updateUser(admin.user.id, {
logCleanupCron: cronExpression,
});
}

await this.setStateInDB(true);
await this.activateStream();
return true;
} catch (_) {
return false;
}
};

public async deactivate(): Promise<boolean> {
console.log("Deactivating log rotation...");
const currentState = await this.getStateFromDB();
if (!currentState) {
console.log("Log rotation is already inactive in DB");
return true;
export const stopLogCleanup = async (): Promise<boolean> => {
try {
const existingJob = scheduledJobs[LOG_CLEANUP_JOB_NAME];
if (existingJob) {
existingJob.cancel();
}

// Update database
const admin = await findAdmin();
if (admin) {
await updateUser(admin.user.id, {
logCleanupCron: null,
});
}

await this.setStateInDB(false);
await this.deactivateStream();
console.log("Log rotation deactivated successfully");
return true;
} catch (error) {
console.error("Error stopping log cleanup:", error);
return false;
}
};

private async handleRotation() {
try {
const status = await this.getStatus();
if (!status) {
await this.deactivateStream();
}
await execAsync(
"docker kill -s USR1 $(docker ps -q --filter name=dokploy-traefik)",
);
console.log("USR1 signal sent to Traefik");
} catch (error) {
console.error("Error sending USR1 signal to Traefik:", error);
}
}
public async getStatus(): Promise<boolean> {
const dbState = await this.getStateFromDB();
return dbState;
}
}
export const logRotationManager = LogRotationManager.getInstance();
export const getLogCleanupStatus = async (): Promise<{
enabled: boolean;
cronExpression: string | null;
}> => {
const admin = await findAdmin();
const cronExpression = admin?.user.logCleanupCron ?? null;
return {
enabled: cronExpression !== null,
cronExpression,
};
};
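Taken together, this hunk swaps the stream-based LogRotationManager singleton for three standalone functions. A minimal usage sketch of the new API, assuming the module lives where the initCronJobs hunk below imports it from ("../access-log/handler"):

```ts
import {
  getLogCleanupStatus,
  startLogCleanup,
  stopLogCleanup,
} from "../access-log/handler"; // import path taken from the initCronJobs hunk below

(async () => {
  // Schedule the nightly truncation job ("0 0 * * *" is the default cron).
  const scheduled = await startLogCleanup("0 0 * * *");
  console.log("cleanup scheduled:", scheduled);

  // Status is derived from the cron expression persisted on the admin user.
  console.log(await getLogCleanupStatus()); // { enabled: true, cronExpression: "0 0 * * *" }

  // Cancels the node-schedule job and clears logCleanupCron in the DB.
  await stopLogCleanup();
})();
```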
@@ -6,14 +6,21 @@ interface HourlyData {
count: number;
}

export function processLogs(logString: string): HourlyData[] {
export function processLogs(
logString: string,
dateRange?: { start?: string; end?: string },
): HourlyData[] {
if (_.isEmpty(logString)) {
return [];
}

const hourlyData = _(logString)
.split("\n")
.compact()
.filter((line) => {
const trimmed = line.trim();
// Check if the line starts with { and ends with } to ensure it's a potential JSON object
return trimmed !== "" && trimmed.startsWith("{") && trimmed.endsWith("}");
})
.map((entry) => {
try {
const log: LogEntry = JSON.parse(entry);
@@ -21,6 +28,20 @@ export function processLogs(logString: string): HourlyData[] {
return null;
}
const date = new Date(log.StartUTC);

if (dateRange?.start || dateRange?.end) {
const logDate = date.getTime();
const start = dateRange?.start
? new Date(dateRange.start).getTime()
: 0;
const end = dateRange?.end
? new Date(dateRange.end).getTime()
: Number.POSITIVE_INFINITY;
if (logDate < start || logDate > end) {
return null;
}
}

return `${date.toISOString().slice(0, 13)}:00:00Z`;
} catch (error) {
console.error("Error parsing log entry:", error);
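The added guard reduces each entry's StartUTC to a millisecond timestamp and drops it when it falls outside the optional window. The same bounds logic as a standalone sketch (an absent start defaults to 0, an absent end to +Infinity, so either side of the range may be open):

```ts
const inRange = (
  startUTC: string,
  dateRange?: { start?: string; end?: string },
): boolean => {
  const t = new Date(startUTC).getTime();
  // Open-ended bounds, exactly as in the hunk above.
  const start = dateRange?.start ? new Date(dateRange.start).getTime() : 0;
  const end = dateRange?.end
    ? new Date(dateRange.end).getTime()
    : Number.POSITIVE_INFINITY;
  return t >= start && t <= end;
};

console.log(inRange("2025-06-01T12:00:00Z", { start: "2025-06-01" })); // true
console.log(inRange("2025-05-31T23:59:59Z", { start: "2025-06-01" })); // false
```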
@@ -51,21 +72,46 @@ export function parseRawConfig(
sort?: SortInfo,
search?: string,
status?: string[],
dateRange?: { start?: string; end?: string },
): { data: LogEntry[]; totalCount: number } {
try {
if (_.isEmpty(rawConfig)) {
return { data: [], totalCount: 0 };
}

// Split logs into chunks to avoid memory issues
let parsedLogs = _(rawConfig)
.split("\n")
.filter((line) => {
const trimmed = line.trim();
return (
trimmed !== "" && trimmed.startsWith("{") && trimmed.endsWith("}")
);
})
.map((line) => {
try {
return JSON.parse(line) as LogEntry;
} catch (error) {
console.error("Error parsing log line:", error);
return null;
}
})
.compact()
.map((line) => JSON.parse(line) as LogEntry)
.value();

parsedLogs = parsedLogs.filter(
(log) => log.ServiceName !== "dokploy-service-app@file",
);
// Apply date range filter if provided
if (dateRange?.start || dateRange?.end) {
parsedLogs = parsedLogs.filter((log) => {
const logDate = new Date(log.StartUTC).getTime();
const start = dateRange?.start
? new Date(dateRange.start).getTime()
: 0;
const end = dateRange?.end
? new Date(dateRange.end).getTime()
: Number.POSITIVE_INFINITY;
return logDate >= start && logDate <= end;
});
}

if (search) {
parsedLogs = parsedLogs.filter((log) =>
@@ -78,6 +124,7 @@ export function parseRawConfig(
status.some((range) => isStatusInRange(log.DownstreamStatus, range)),
);
}

const totalCount = parsedLogs.length;

if (sort) {
@@ -101,6 +148,7 @@ export function parseRawConfig(
throw new Error("Failed to parse rawConfig");
}
}

const isStatusInRange = (status: number, range: string) => {
switch (range) {
case "info":
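parseRawConfig uses the same tolerant newline-delimited-JSON pattern as processLogs: pre-filter lines for a {...} shape, parse each one, and drop failures. The pattern in isolation, as a self-contained sketch:

```ts
import _ from "lodash";

// Parse newline-delimited JSON, skipping lines that do not look like a
// complete object or that fail to parse.
const parseNdjson = <T>(raw: string): T[] =>
  _(raw)
    .split("\n")
    .filter((line) => {
      const trimmed = line.trim();
      return trimmed.startsWith("{") && trimmed.endsWith("}");
    })
    .map((line) => {
      try {
        return JSON.parse(line) as T;
      } catch {
        return null;
      }
    })
    .compact()
    .value();

console.log(parseNdjson<{ a: number }>('{"a":1}\nnot json\n{"a":2}'));
// [ { a: 1 }, { a: 2 } ]
```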
@@ -1,3 +1,4 @@
import path from "node:path";
import { getAllServers } from "@dokploy/server/services/server";
import { scheduleJob } from "node-schedule";
import { db } from "../../db/index";
@@ -12,6 +13,11 @@ import { runMongoBackup } from "./mongo";
import { runMySqlBackup } from "./mysql";
import { runPostgresBackup } from "./postgres";
import { findAdmin } from "../../services/admin";
import { getS3Credentials } from "./utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";

import type { BackupSchedule } from "@dokploy/server/services/backup";
import { startLogCleanup } from "../access-log/handler";

export const initCronJobs = async () => {
console.log("Setting up cron jobs....");
@@ -168,4 +174,43 @@ export const initCronJobs = async () => {
}
}
}

if (admin?.user.logCleanupCron) {
await startLogCleanup(admin.user.logCleanupCron);
}
};

export const keepLatestNBackups = async (
backup: BackupSchedule,
serverId?: string | null,
) => {
// A keepLatestCount of 0 also returns immediately, which is correct: the
// empty "keep latest" field in the UI is saved as 0 in the database
if (!backup.keepLatestCount) return;

try {
const rcloneFlags = getS3Credentials(backup.destination);
const backupFilesPath = path.join(
`:s3:${backup.destination.bucket}`,
backup.prefix,
);

// --include "*.sql.gz" ensures rclone touches nothing but the db backup files
const rcloneList = `rclone lsf ${rcloneFlags.join(" ")} --include "*.sql.gz" ${backupFilesPath}`;
// piping the listing through this command yields only the files we want to delete
const sortAndPickUnwantedBackups = `sort -r | tail -n +$((${backup.keepLatestCount}+1)) | xargs -I{}`;
// this command deletes the files
// to test the deletion before actually deleting we can add --dry-run before ${backupFilesPath}/{}
const rcloneDelete = `rclone delete ${rcloneFlags.join(" ")} ${backupFilesPath}/{}`;

const rcloneCommand = `${rcloneList} | ${sortAndPickUnwantedBackups} ${rcloneDelete}`;

if (serverId) {
await execAsyncRemote(serverId, rcloneCommand);
} else {
await execAsync(rcloneCommand);
}
} catch (error) {
console.error(error);
}
};
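How the three fragments compose: assuming the backup filenames sort lexicographically by creation time, `sort -r` lists newest first and `tail -n +$((N+1))` selects everything past the N files to keep. A sketch with a hypothetical bucket/prefix and the S3 credential flags elided:

```ts
// Illustration only: the composed retention command for keepLatestCount = 3.
const backupFilesPath = ":s3:my-bucket/db-backups"; // hypothetical bucket/prefix
const keepLatestCount = 3;

const rcloneList = `rclone lsf --include "*.sql.gz" ${backupFilesPath}`;
const sortAndPickUnwantedBackups = `sort -r | tail -n +$((${keepLatestCount}+1)) | xargs -I{}`;
const rcloneDelete = `rclone delete ${backupFilesPath}/{}`;

console.log(`${rcloneList} | ${sortAndPickUnwantedBackups} ${rcloneDelete}`);
// rclone lsf --include "*.sql.gz" :s3:my-bucket/db-backups | sort -r |
//   tail -n +$((3+1)) | xargs -I{} rclone delete :s3:my-bucket/db-backups/{}
```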
@@ -5,6 +5,7 @@ import { runMariadbBackup } from "./mariadb";
import { runMongoBackup } from "./mongo";
import { runMySqlBackup } from "./mysql";
import { runPostgresBackup } from "./postgres";
import { keepLatestNBackups } from ".";

export const scheduleBackup = (backup: BackupSchedule) => {
const { schedule, backupId, databaseType, postgres, mysql, mongo, mariadb } =
@@ -12,12 +13,16 @@ export const scheduleBackup = (backup: BackupSchedule) => {
scheduleJob(backupId, schedule, async () => {
if (databaseType === "postgres" && postgres) {
await runPostgresBackup(postgres, backup);
await keepLatestNBackups(backup, postgres.serverId);
} else if (databaseType === "mysql" && mysql) {
await runMySqlBackup(mysql, backup);
await keepLatestNBackups(backup, mysql.serverId);
} else if (databaseType === "mongo" && mongo) {
await runMongoBackup(mongo, backup);
await keepLatestNBackups(backup, mongo.serverId);
} else if (databaseType === "mariadb" && mariadb) {
await runMariadbBackup(mariadb, backup);
await keepLatestNBackups(backup, mariadb.serverId);
}
});
};
@@ -17,32 +17,68 @@ export const buildRailpack = async (
);

try {
// Ensure buildkit container is running, create if it doesn't exist
await execAsync(
"docker container inspect buildkit >/dev/null 2>&1 || docker run --rm --privileged -d --name buildkit moby/buildkit",
"docker buildx create --use --name builder-containerd --driver docker-container || true",
);

// Build the application using railpack
const args = ["build", buildAppDirectory, "--name", appName];
await execAsync("docker buildx use builder-containerd");

// Add environment variables
// First prepare the build plan and info
const prepareArgs = [
"prepare",
buildAppDirectory,
"--plan-out",
`${buildAppDirectory}/railpack-plan.json`,
"--info-out",
`${buildAppDirectory}/railpack-info.json`,
];

// Add environment variables to prepare command
for (const env of envVariables) {
args.push("--env", env);
prepareArgs.push("--env", env);
}

// Run prepare command
await spawnAsync("railpack", prepareArgs, (data) => {
if (writeStream.writable) {
writeStream.write(data);
}
});

// Build with BuildKit using the Railpack frontend
const buildArgs = [
"buildx",
"build",
"--build-arg",
"BUILDKIT_SYNTAX=ghcr.io/railwayapp/railpack-frontend:v0.0.55",
"-f",
`${buildAppDirectory}/railpack-plan.json`,
"--output",
`type=docker,name=${appName}`,
];

// Add secrets properly formatted
const env: { [key: string]: string } = {};
for (const envVar of envVariables) {
const [key, value] = envVar.split("=");
if (key && value) {
buildArgs.push("--secret", `id=${key},env=${key}`);
env[key] = value;
}
}

buildArgs.push(buildAppDirectory);

await spawnAsync(
"railpack",
args,
"docker",
buildArgs,
(data) => {
if (writeStream.writable) {
writeStream.write(data);
}
},
{
env: {
...process.env,
BUILDKIT_HOST: "docker-container://buildkit",
},
env: { ...process.env, ...env },
},
);
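The build now runs in two steps: `railpack prepare` emits a BuildKit-compatible plan, and `docker buildx build` consumes it via the Railpack frontend, with each env value exposed as a BuildKit secret through the child process environment rather than argv. A sketch of the assembled invocation for one hypothetical variable (all values made up):

```ts
const appName = "my-app";
const buildAppDirectory = "/etc/dokploy/applications/my-app/code";
const envVariables = ["DATABASE_URL=postgres://app:secret@db:5432/app"];

const buildArgs = [
  "buildx",
  "build",
  "--build-arg",
  "BUILDKIT_SYNTAX=ghcr.io/railwayapp/railpack-frontend:v0.0.55",
  "-f",
  `${buildAppDirectory}/railpack-plan.json`,
  "--output",
  `type=docker,name=${appName}`,
];

const env: Record<string, string> = {};
for (const envVar of envVariables) {
  const [key, value] = envVar.split("=");
  if (key && value) {
    // The secret id points at an env var; the value never appears in argv.
    buildArgs.push("--secret", `id=${key},env=${key}`);
    env[key] = value;
  }
}
buildArgs.push(buildAppDirectory);

// { ...process.env, ...env } is what spawnAsync would receive as its env option.
console.log(["docker", ...buildArgs].join(" "));
```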
@@ -63,25 +99,65 @@ export const getRailpackCommand = (
application.project.env,
);

// Build the application using railpack
const args = ["build", buildAppDirectory, "--name", appName];
// Prepare command
const prepareArgs = [
"prepare",
buildAppDirectory,
"--plan-out",
`${buildAppDirectory}/railpack-plan.json`,
"--info-out",
`${buildAppDirectory}/railpack-info.json`,
];

// Add environment variables
for (const env of envVariables) {
args.push("--env", env);
prepareArgs.push("--env", env);
}

const command = `railpack ${args.join(" ")}`;
// Build command
const buildArgs = [
"buildx",
"build",
"--build-arg",
"BUILDKIT_SYNTAX=ghcr.io/railwayapp/railpack-frontend:v0.0.55",
"-f",
`${buildAppDirectory}/railpack-plan.json`,
"--output",
`type=docker,name=${appName}`,
];

// Add secrets properly formatted
const exportEnvs = [];
for (const envVar of envVariables) {
const [key, value] = envVar.split("=");
if (key && value) {
buildArgs.push("--secret", `id=${key},env=${key}`);
exportEnvs.push(`export ${key}=${value}`);
}
}

buildArgs.push(buildAppDirectory);

const bashCommand = `
echo "Building with Railpack..." >> "${logPath}";
docker container inspect buildkit >/dev/null 2>&1 || docker run --rm --privileged -d --name buildkit moby/buildkit;
export BUILDKIT_HOST=docker-container://buildkit;
${command} >> ${logPath} 2>> ${logPath} || {
echo "❌ Railpack build failed" >> ${logPath};
exit 1;
}
echo "✅ Railpack build completed." >> ${logPath};
`;
# Ensure we have a builder with containerd
docker buildx create --use --name builder-containerd --driver docker-container || true
docker buildx use builder-containerd

echo "Preparing Railpack build plan..." >> "${logPath}";
railpack ${prepareArgs.join(" ")} >> ${logPath} 2>> ${logPath} || {
echo "❌ Railpack prepare failed" >> ${logPath};
exit 1;
}
echo "✅ Railpack prepare completed." >> ${logPath};

echo "Building with Railpack frontend..." >> "${logPath}";
# Export environment variables for secrets
${exportEnvs.join("\n")}
docker ${buildArgs.join(" ")} >> ${logPath} 2>> ${logPath} || {
echo "❌ Railpack build failed" >> ${logPath};
exit 1;
}
echo "✅ Railpack build completed." >> ${logPath};
`;

return bashCommand;
};
@@ -31,7 +31,7 @@ export const buildMariadb = async (mariadb: MariadbNested) => {
mounts,
} = mariadb;

const defaultMariadbEnv = `MARIADB_DATABASE=${databaseName}\nMARIADB_USER=${databaseUser}\nMARIADB_PASSWORD=${databasePassword}\nMARIADB_ROOT_PASSWORD=${databaseRootPassword}${
const defaultMariadbEnv = `MARIADB_DATABASE="${databaseName}"\nMARIADB_USER="${databaseUser}"\nMARIADB_PASSWORD="${databasePassword}"\nMARIADB_ROOT_PASSWORD="${databaseRootPassword}"${
env ? `\n${env}` : ""
}`;
const resources = calculateResources({

@@ -77,7 +77,7 @@ fi

${command ?? "wait $MONGOD_PID"}`;

const defaultMongoEnv = `MONGO_INITDB_ROOT_USERNAME=${databaseUser}\nMONGO_INITDB_ROOT_PASSWORD=${databasePassword}${replicaSets ? "\nMONGO_INITDB_DATABASE=admin" : ""}${
const defaultMongoEnv = `MONGO_INITDB_ROOT_USERNAME="${databaseUser}"\nMONGO_INITDB_ROOT_PASSWORD="${databasePassword}"${replicaSets ? "\nMONGO_INITDB_DATABASE=admin" : ""}${
env ? `\n${env}` : ""
}`;

@@ -34,10 +34,10 @@ export const buildMysql = async (mysql: MysqlNested) => {

const defaultMysqlEnv =
databaseUser !== "root"
? `MYSQL_USER=${databaseUser}\nMYSQL_DATABASE=${databaseName}\nMYSQL_PASSWORD=${databasePassword}\nMYSQL_ROOT_PASSWORD=${databaseRootPassword}${
? `MYSQL_USER="${databaseUser}"\nMYSQL_DATABASE="${databaseName}"\nMYSQL_PASSWORD="${databasePassword}"\nMYSQL_ROOT_PASSWORD="${databaseRootPassword}"${
env ? `\n${env}` : ""
}`
: `MYSQL_DATABASE=${databaseName}\nMYSQL_ROOT_PASSWORD=${databaseRootPassword}${
: `MYSQL_DATABASE="${databaseName}"\nMYSQL_ROOT_PASSWORD="${databaseRootPassword}"${
env ? `\n${env}` : ""
}`;
const resources = calculateResources({

@@ -30,7 +30,7 @@ export const buildPostgres = async (postgres: PostgresNested) => {
mounts,
} = postgres;

const defaultPostgresEnv = `POSTGRES_DB=${databaseName}\nPOSTGRES_USER=${databaseUser}\nPOSTGRES_PASSWORD=${databasePassword}${
const defaultPostgresEnv = `POSTGRES_DB="${databaseName}"\nPOSTGRES_USER="${databaseUser}"\nPOSTGRES_PASSWORD="${databasePassword}"${
env ? `\n${env}` : ""
}`;
const resources = calculateResources({
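These hunks (and the matching Redis one further down) wrap every generated credential in double quotes. A hedged illustration of why, assuming the env block is later parsed dotenv/shell-style, where an unquoted value can be truncated at whitespace or a `#`:

```ts
// A generated password can legitimately contain spaces and "#".
const databasePassword = "p@ss word#1";

const unquoted = `POSTGRES_PASSWORD=${databasePassword}`;
const quoted = `POSTGRES_PASSWORD="${databasePassword}"`;

console.log(unquoted); // POSTGRES_PASSWORD=p@ss word#1   (parser may stop at the space or "#")
console.log(quoted); // POSTGRES_PASSWORD="p@ss word#1"  (value survives intact)
```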
packages/server/src/utils/databases/rebuild.ts (new file, 99 lines)
@@ -0,0 +1,99 @@
import { deployPostgres } from "@dokploy/server/services/postgres";
import { execAsyncRemote } from "../process/execAsync";
import { execAsync } from "../process/execAsync";
import { deployMySql } from "@dokploy/server/services/mysql";
import { deployMariadb } from "@dokploy/server/services/mariadb";
import { deployMongo } from "@dokploy/server/services/mongo";
import { deployRedis } from "@dokploy/server/services/redis";
import { removeService } from "../docker/utils";
import { db } from "@dokploy/server/db";
import {
  postgres,
  mysql,
  mariadb,
  mongo,
  redis,
} from "@dokploy/server/db/schema";
import { eq } from "drizzle-orm";

type DatabaseType = "postgres" | "mysql" | "mariadb" | "mongo" | "redis";

export const rebuildDatabase = async (
  databaseId: string,
  type: DatabaseType,
) => {
  const database = await findDatabaseById(databaseId, type);

  if (!database) {
    throw new Error("Database not found");
  }

  await removeService(database.appName, database.serverId);
  await new Promise((resolve) => setTimeout(resolve, 6000));

  for (const mount of database.mounts) {
    if (mount.type === "volume") {
      const command = `docker volume rm ${mount?.volumeName} --force`;
      if (database.serverId) {
        await execAsyncRemote(database.serverId, command);
      } else {
        await execAsync(command);
      }
    }
  }

  if (type === "postgres") {
    await deployPostgres(databaseId);
  } else if (type === "mysql") {
    await deployMySql(databaseId);
  } else if (type === "mariadb") {
    await deployMariadb(databaseId);
  } else if (type === "mongo") {
    await deployMongo(databaseId);
  } else if (type === "redis") {
    await deployRedis(databaseId);
  }
};

const findDatabaseById = async (databaseId: string, type: DatabaseType) => {
  if (type === "postgres") {
    return await db.query.postgres.findFirst({
      where: eq(postgres.postgresId, databaseId),
      with: {
        mounts: true,
      },
    });
  }
  if (type === "mysql") {
    return await db.query.mysql.findFirst({
      where: eq(mysql.mysqlId, databaseId),
      with: {
        mounts: true,
      },
    });
  }
  if (type === "mariadb") {
    return await db.query.mariadb.findFirst({
      where: eq(mariadb.mariadbId, databaseId),
      with: {
        mounts: true,
      },
    });
  }
  if (type === "mongo") {
    return await db.query.mongo.findFirst({
      where: eq(mongo.mongoId, databaseId),
      with: {
        mounts: true,
      },
    });
  }
  if (type === "redis") {
    return await db.query.redis.findFirst({
      where: eq(redis.redisId, databaseId),
      with: {
        mounts: true,
      },
    });
  }
};
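A hypothetical call site for the new helper (the ID is made up; rebuildDatabase and its DatabaseType union come from the file above):

```ts
import { rebuildDatabase } from "./rebuild"; // relative path assumed

(async () => {
  // Removes the service, force-deletes its named volumes after a 6s grace
  // period, then redeploys. This wipes data volumes, so callers should
  // confirm with the user first.
  await rebuildDatabase("pg_1a2b3c4d", "postgres");
})();
```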
@@ -28,7 +28,7 @@ export const buildRedis = async (redis: RedisNested) => {
mounts,
} = redis;

const defaultRedisEnv = `REDIS_PASSWORD=${databasePassword}${
const defaultRedisEnv = `REDIS_PASSWORD="${databasePassword}"${
env ? `\n${env}` : ""
}`;
const resources = calculateResources({
@@ -219,13 +219,9 @@ export const addDomainToCompose = async (
throw new Error(`The service ${serviceName} not found in the compose`);
}

const httpLabels = await createDomainLabels(appName, domain, "web");
const httpLabels = createDomainLabels(appName, domain, "web");
if (https) {
const httpsLabels = await createDomainLabels(
appName,
domain,
"websecure",
);
const httpsLabels = createDomainLabels(appName, domain, "websecure");
httpLabels.push(...httpsLabels);
}

@@ -250,9 +246,9 @@ export const addDomainToCompose = async (

if (Array.isArray(labels)) {
if (!labels.includes("traefik.enable=true")) {
labels.push("traefik.enable=true");
labels.unshift("traefik.enable=true");
}
labels.push(...httpLabels);
labels.unshift(...httpLabels);
}

if (!compose.isolatedDeployment) {
@@ -287,12 +283,20 @@ export const writeComposeFile = async (
}
};

export const createDomainLabels = async (
export const createDomainLabels = (
appName: string,
domain: Domain,
entrypoint: "web" | "websecure",
) => {
const { host, port, https, uniqueConfigKey, certificateType, path } = domain;
const {
host,
port,
https,
uniqueConfigKey,
certificateType,
path,
customCertResolver,
} = domain;
const routerName = `${appName}-${uniqueConfigKey}-${entrypoint}`;
const labels = [
`traefik.http.routers.${routerName}.rule=Host(\`${host}\`)${path && path !== "/" ? ` && PathPrefix(\`${path}\`)` : ""}`,
@@ -312,6 +316,10 @@ export const createDomainLabels = async (
labels.push(
`traefik.http.routers.${routerName}.tls.certresolver=letsencrypt`,
);
} else if (certificateType === "custom" && customCertResolver) {
labels.push(
`traefik.http.routers.${routerName}.tls.certresolver=${customCertResolver}`,
);
}
}
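For a concrete domain the now-synchronous helper produces labels along these lines (all values hypothetical; the custom branch only fires when both conditions in the hunk hold):

```ts
// Router name format from the hunk: `${appName}-${uniqueConfigKey}-${entrypoint}`.
const routerName = "my-app-7-websecure";
const customCertResolver = "my-step-ca"; // hypothetical resolver name

console.log(
  `traefik.http.routers.${routerName}.tls.certresolver=${customCertResolver}`,
);
// traefik.http.routers.my-app-7-websecure.tls.certresolver=my-step-ca
```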
@@ -137,12 +137,44 @@ export const readRemoteConfig = async (serverId: string, appName: string) => {
}
};

export const readMonitoringConfig = () => {
export const readMonitoringConfig = (readAll = false) => {
const { DYNAMIC_TRAEFIK_PATH } = paths();
const configPath = path.join(DYNAMIC_TRAEFIK_PATH, "access.log");
if (fs.existsSync(configPath)) {
const yamlStr = fs.readFileSync(configPath, "utf8");
return yamlStr;
if (!readAll) {
// Read only the first 500 valid log entries
let content = "";
let chunk = "";
let validCount = 0;

for (const char of fs.readFileSync(configPath, "utf8")) {
chunk += char;
if (char === "\n") {
try {
const trimmed = chunk.trim();
if (
trimmed !== "" &&
trimmed.startsWith("{") &&
trimmed.endsWith("}")
) {
const log = JSON.parse(trimmed);
if (log.ServiceName !== "dokploy-service-app@file") {
content += chunk;
validCount++;
if (validCount >= 500) {
break;
}
}
}
} catch {
// Ignore invalid JSON
}
chunk = "";
}
}
return content;
}
return fs.readFileSync(configPath, "utf8");
}
return null;
};
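Callers that need the whole file now opt in explicitly; the default keeps the dashboard payload small. A usage sketch (the import path is hypothetical):

```ts
import { readMonitoringConfig } from "./traefik"; // hypothetical import path

// Default: at most the first 500 valid JSON entries, with Dokploy's own
// "dokploy-service-app@file" traffic filtered out.
const preview = readMonitoringConfig();

// Full access log, e.g. for a complete export.
const fullLog = readMonitoringConfig(true);
```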
@@ -148,6 +148,8 @@ export const createRouterConfig = async (
if (entryPoint === "websecure") {
if (certificateType === "letsencrypt") {
routerConfig.tls = { certResolver: "letsencrypt" };
} else if (certificateType === "custom" && domain.customCertResolver) {
routerConfig.tls = { certResolver: domain.customCertResolver };
} else if (certificateType === "none") {
routerConfig.tls = undefined;
}
packages/server/src/utils/watch-paths/should-deploy.ts (new file, 9 lines)
@@ -0,0 +1,9 @@
import micromatch from "micromatch";

export const shouldDeploy = (
  watchPaths: string[] | null,
  modifiedFiles: string[],
): boolean => {
  if (!watchPaths || watchPaths?.length === 0) return true;
  return micromatch.some(modifiedFiles, watchPaths);
};
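micromatch.some returns true as soon as any modified file matches any configured glob, so an empty or missing watch-path list deploys unconditionally. A quick behavioural check:

```ts
import { shouldDeploy } from "./should-deploy"; // path assumed

// No watch paths configured: always deploy.
console.log(shouldDeploy(null, ["src/index.ts"])); // true

// Deploy only when a modified file matches one of the globs.
console.log(shouldDeploy(["apps/web/**"], ["apps/web/page.tsx"])); // true
console.log(shouldDeploy(["apps/web/**"], ["apps/api/server.ts"])); // false
```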