Enhance backup command generation and logging for database backups

- Refactored backup utilities for PostgreSQL, MySQL, MariaDB, and MongoDB to streamline command generation and improve logging.
- Introduced a unified `getBackupCommand` function to encapsulate backup command logic, enhancing maintainability (see the sketch below).
- Improved error handling and logging during backup processes, providing clearer feedback on command execution and success/failure states.
- Updated container retrieval methods to return null instead of throwing errors when no containers are found, improving robustness.
Mauricio Siu 2025-05-04 00:15:09 -06:00
parent 557c89ac6d
commit 66dd890448
10 changed files with 219 additions and 290 deletions
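
For context, every backup runner touched here (Postgres, MySQL, MariaDB, MongoDB, and compose) now reduces to the same shape. The snippet below is a condensed sketch of that shared pattern, not a verbatim excerpt from any one file; `serverId`, `destination`, `bucketDestination`, `backup`, and `deployment` stand in for values each runner already has in scope:

import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getBackupCommand, getS3Credentials } from "./utils";

// Condensed sketch of the runner pattern this commit converges on.
// getBackupCommand returns a self-contained shell script that locates the
// container, runs the dump, and pipes it into rclone, logging along the way.
const rcloneFlags = getS3Credentials(destination);
const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;

const backupCommand = getBackupCommand(backup, rcloneCommand, deployment.logPath);

if (serverId) {
  // Remote host: ship the whole script over SSH.
  await execAsyncRemote(serverId, backupCommand);
} else {
  // Local Docker host: run it directly.
  await execAsync(backupCommand);
}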

View File

@@ -80,6 +80,7 @@ import {
Loader2,
PlusIcon,
Search,
ServerIcon,
Trash2,
X,
} from "lucide-react";
@@ -968,6 +969,11 @@ const Project = (
}}
className="flex flex-col group relative cursor-pointer bg-transparent transition-colors hover:bg-border"
>
{service.serverId && (
<div className="absolute -left-1 -top-2">
<ServerIcon className="size-4 text-muted-foreground" />
</div>
)}
<div className="absolute -right-1 -top-2">
<StatusTooltip status={service.status} />
</div>

View File

@@ -316,7 +316,7 @@ export const createDeploymentBackup = async (
const command = `
mkdir -p ${LOGS_PATH}/${backup.appName};
echo "Initializing backup" >> ${logFilePath};
echo "Initializing backup\n" >> ${logFilePath};
`;
await execAsyncRemote(server.serverId, command);
@@ -324,7 +324,7 @@ echo "Initializing backup" >> ${logFilePath};
await fsPromises.mkdir(path.join(LOGS_PATH, backup.appName), {
recursive: true,
});
await fsPromises.writeFile(logFilePath, "Initializing backup");
await fsPromises.writeFile(logFilePath, "Initializing backup\n");
}
const deploymentCreate = await db

View File

@@ -2,21 +2,12 @@ import type { BackupSchedule } from "@dokploy/server/services/backup";
import type { Compose } from "@dokploy/server/services/compose";
import { findProjectById } from "@dokploy/server/services/project";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsyncRemote, execAsyncStream } from "../process/execAsync";
import {
getMariadbBackupCommand,
getMysqlBackupCommand,
getMongoBackupCommand,
getPostgresBackupCommand,
getS3Credentials,
normalizeS3Path,
} from "./utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getS3Credentials, normalizeS3Path, getBackupCommand } from "./utils";
import {
createDeploymentBackup,
updateDeploymentStatus,
} from "@dokploy/server/services/deployment";
import { createWriteStream } from "node:fs";
import { getComposeContainer } from "../docker/utils";
export const runComposeBackup = async (
compose: Compose,
@@ -24,7 +15,7 @@ (
) => {
const { projectId, name } = compose;
const project = await findProjectById(projectId);
const { prefix, database } = backup;
const { prefix } = backup;
const destination = backup.destination;
const backupFileName = `${new Date().toISOString()}.dump.gz`;
const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
@@ -37,74 +28,17 @@ export const runComposeBackup = async (
try {
const rcloneFlags = getS3Credentials(destination);
const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
const { Id: containerId } = await getComposeContainer(
compose,
backup.serviceName || "",
);
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
let backupCommand = "";
if (backup.databaseType === "postgres") {
backupCommand = getPostgresBackupCommand(
containerId,
database,
backup.metadata?.postgres?.databaseUser || "",
const backupCommand = getBackupCommand(
backup,
rcloneCommand,
deployment.logPath,
);
} else if (backup.databaseType === "mariadb") {
backupCommand = getMariadbBackupCommand(
containerId,
database,
backup.metadata?.mariadb?.databaseUser || "",
backup.metadata?.mariadb?.databasePassword || "",
);
} else if (backup.databaseType === "mysql") {
backupCommand = getMysqlBackupCommand(
containerId,
database,
backup.metadata?.mysql?.databaseRootPassword || "",
);
} else if (backup.databaseType === "mongo") {
backupCommand = getMongoBackupCommand(
containerId,
database,
backup.metadata?.mongo?.databaseUser || "",
backup.metadata?.mongo?.databasePassword || "",
);
}
if (compose.serverId) {
await execAsyncRemote(
compose.serverId,
`
set -e;
echo "Running command." >> ${deployment.logPath};
export RCLONE_LOG_LEVEL=DEBUG;
${backupCommand} | ${rcloneCommand} >> ${deployment.logPath} 2>> ${deployment.logPath} || {
echo "❌ Command failed" >> ${deployment.logPath};
exit 1;
}
echo "✅ Command executed successfully" >> ${deployment.logPath};
`,
);
await execAsyncRemote(compose.serverId, backupCommand);
} else {
const writeStream = createWriteStream(deployment.logPath, { flags: "a" });
await execAsyncStream(
`${backupCommand} | ${rcloneCommand}`,
(data) => {
if (writeStream.writable) {
writeStream.write(data);
}
},
{
env: {
...process.env,
RCLONE_LOG_LEVEL: "DEBUG",
},
},
);
writeStream.write("Backup done✅");
writeStream.end();
await execAsync(backupCommand);
}
await sendDatabaseBackupNotifications({

View File

@@ -1,27 +1,21 @@
import type { BackupSchedule } from "@dokploy/server/services/backup";
import type { Mariadb } from "@dokploy/server/services/mariadb";
import { findProjectById } from "@dokploy/server/services/project";
import { getServiceContainer } from "../docker/utils";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsyncRemote, execAsyncStream } from "../process/execAsync";
import {
getMariadbBackupCommand,
getS3Credentials,
normalizeS3Path,
} from "./utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils";
import {
createDeploymentBackup,
updateDeploymentStatus,
} from "@dokploy/server/services/deployment";
import { createWriteStream } from "node:fs";
export const runMariadbBackup = async (
mariadb: Mariadb,
backup: BackupSchedule,
) => {
const { appName, databasePassword, databaseUser, projectId, name } = mariadb;
const { projectId, name } = mariadb;
const project = await findProjectById(projectId);
const { prefix, database } = backup;
const { prefix } = backup;
const destination = backup.destination;
const backupFileName = `${new Date().toISOString()}.sql.gz`;
const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
@@ -33,50 +27,17 @@ export const runMariadbBackup = async (
try {
const rcloneFlags = getS3Credentials(destination);
const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
const { Id: containerId } = await getServiceContainer(
appName,
mariadb.serverId,
);
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
const command = getMariadbBackupCommand(
containerId,
database,
databaseUser,
databasePassword || "",
const backupCommand = getBackupCommand(
backup,
rcloneCommand,
deployment.logPath,
);
if (mariadb.serverId) {
await execAsyncRemote(
mariadb.serverId,
`
set -e;
echo "Running command." >> ${deployment.logPath};
export RCLONE_LOG_LEVEL=DEBUG;
${command} | ${rcloneCommand} >> ${deployment.logPath} 2>> ${deployment.logPath} || {
echo "❌ Command failed" >> ${deployment.logPath};
exit 1;
}
echo "✅ Command executed successfully" >> ${deployment.logPath};
`,
);
await execAsyncRemote(mariadb.serverId, backupCommand);
} else {
const writeStream = createWriteStream(deployment.logPath, { flags: "a" });
await execAsyncStream(
`${command} | ${rcloneCommand}`,
(data) => {
if (writeStream.writable) {
writeStream.write(data);
}
},
{
env: {
...process.env,
RCLONE_LOG_LEVEL: "DEBUG",
},
},
);
await execAsync(backupCommand);
}
await sendDatabaseBackupNotifications({

View File

@@ -1,24 +1,18 @@
import type { BackupSchedule } from "@dokploy/server/services/backup";
import type { Mongo } from "@dokploy/server/services/mongo";
import { findProjectById } from "@dokploy/server/services/project";
import { getServiceContainer } from "../docker/utils";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsyncRemote, execAsyncStream } from "../process/execAsync";
import {
getMongoBackupCommand,
getS3Credentials,
normalizeS3Path,
} from "./utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils";
import {
createDeploymentBackup,
updateDeploymentStatus,
} from "@dokploy/server/services/deployment";
import { createWriteStream } from "node:fs";
export const runMongoBackup = async (mongo: Mongo, backup: BackupSchedule) => {
const { appName, databasePassword, databaseUser, projectId, name } = mongo;
const { projectId, name } = mongo;
const project = await findProjectById(projectId);
const { prefix, database } = backup;
const { prefix } = backup;
const destination = backup.destination;
const backupFileName = `${new Date().toISOString()}.dump.gz`;
const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
@@ -30,51 +24,18 @@ export const runMongoBackup = async (mongo: Mongo, backup: BackupSchedule) => {
try {
const rcloneFlags = getS3Credentials(destination);
const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
const { Id: containerId } = await getServiceContainer(
appName,
mongo.serverId,
);
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
const command = getMongoBackupCommand(
containerId,
database,
databaseUser || "",
databasePassword || "",
const backupCommand = getBackupCommand(
backup,
rcloneCommand,
deployment.logPath,
);
if (mongo.serverId) {
await execAsyncRemote(
mongo.serverId,
`
set -e;
echo "Running command." >> ${deployment.logPath};
export RCLONE_LOG_LEVEL=DEBUG;
${command} | ${rcloneCommand} >> ${deployment.logPath} 2>> ${deployment.logPath} || {
echo "❌ Command failed" >> ${deployment.logPath};
exit 1;
}
echo "✅ Command executed successfully" >> ${deployment.logPath};
`,
);
await execAsyncRemote(mongo.serverId, backupCommand);
} else {
const writeStream = createWriteStream(deployment.logPath, { flags: "a" });
await execAsyncStream(
`${command} | ${rcloneCommand}`,
(data) => {
if (writeStream.writable) {
writeStream.write(data);
}
},
{
env: {
...process.env,
RCLONE_LOG_LEVEL: "DEBUG",
},
},
);
writeStream.write("Backup done✅");
writeStream.end();
await execAsync(backupCommand);
}
await sendDatabaseBackupNotifications({

View File

@@ -1,24 +1,18 @@
import type { BackupSchedule } from "@dokploy/server/services/backup";
import type { MySql } from "@dokploy/server/services/mysql";
import { findProjectById } from "@dokploy/server/services/project";
import { getServiceContainer } from "../docker/utils";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsyncRemote, execAsyncStream } from "../process/execAsync";
import {
getMysqlBackupCommand,
getS3Credentials,
normalizeS3Path,
} from "./utils";
import { createWriteStream } from "node:fs";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils";
import {
createDeploymentBackup,
updateDeploymentStatus,
} from "@dokploy/server/services/deployment";
export const runMySqlBackup = async (mysql: MySql, backup: BackupSchedule) => {
const { appName, databaseRootPassword, projectId, name } = mysql;
const { projectId, name } = mysql;
const project = await findProjectById(projectId);
const { prefix, database } = backup;
const { prefix } = backup;
const destination = backup.destination;
const backupFileName = `${new Date().toISOString()}.sql.gz`;
const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
@@ -27,53 +21,23 @@ export const runMySqlBackup = async (mysql: MySql, backup: BackupSchedule) => {
title: "MySQL Backup",
description: "MySQL Backup",
});
try {
const rcloneFlags = getS3Credentials(destination);
const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
const { Id: containerId } = await getServiceContainer(
appName,
mysql.serverId,
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
const backupCommand = getBackupCommand(
backup,
rcloneCommand,
deployment.logPath,
);
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
const command = getMysqlBackupCommand(
containerId,
database,
databaseRootPassword || "",
);
if (mysql.serverId) {
await execAsyncRemote(
mysql.serverId,
`
set -e;
echo "Running command." >> ${deployment.logPath};
export RCLONE_LOG_LEVEL=DEBUG;
${command} | ${rcloneCommand} >> ${deployment.logPath} 2>> ${deployment.logPath} || {
echo "❌ Command failed" >> ${deployment.logPath};
exit 1;
}
echo "✅ Command executed successfully" >> ${deployment.logPath};
`,
);
await execAsyncRemote(mysql.serverId, backupCommand);
} else {
const writeStream = createWriteStream(deployment.logPath, { flags: "a" });
await execAsyncStream(
`${command} | ${rcloneCommand}`,
(data) => {
if (writeStream.writable) {
writeStream.write(data);
}
},
{
env: {
...process.env,
RCLONE_LOG_LEVEL: "DEBUG",
},
},
);
writeStream.write("Backup done✅");
writeStream.end();
await execAsync(backupCommand);
}
await sendDatabaseBackupNotifications({
applicationName: name,

View File

@@ -1,33 +1,27 @@
import type { BackupSchedule } from "@dokploy/server/services/backup";
import type { Postgres } from "@dokploy/server/services/postgres";
import { findProjectById } from "@dokploy/server/services/project";
import { getServiceContainer } from "../docker/utils";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsyncRemote, execAsyncStream } from "../process/execAsync";
import {
getPostgresBackupCommand,
getS3Credentials,
normalizeS3Path,
} from "./utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils";
import {
createDeploymentBackup,
updateDeploymentStatus,
} from "@dokploy/server/services/deployment";
import { createWriteStream } from "node:fs";
export const runPostgresBackup = async (
postgres: Postgres,
backup: BackupSchedule,
) => {
const { appName, databaseUser, name, projectId } = postgres;
const { name, projectId } = postgres;
const project = await findProjectById(projectId);
const deployment = await createDeploymentBackup({
backupId: backup.backupId,
title: "Postgres Backup",
description: "Postgres Backup",
title: "Initializing Backup",
description: "Initializing Backup",
});
const { prefix, database } = backup;
const { prefix } = backup;
const destination = backup.destination;
const backupFileName = `${new Date().toISOString()}.sql.gz`;
const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
@@ -37,49 +31,15 @@ export const runPostgresBackup = async (
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
const { Id: containerId } = await getServiceContainer(
appName,
postgres.serverId,
const backupCommand = getBackupCommand(
backup,
rcloneCommand,
deployment.logPath,
);
const command = getPostgresBackupCommand(
containerId,
database,
databaseUser || "",
);
if (postgres.serverId) {
await execAsyncRemote(
postgres.serverId,
`
set -e;
echo "Running command." >> ${deployment.logPath};
export RCLONE_LOG_LEVEL=DEBUG;
${command} | ${rcloneCommand} >> ${deployment.logPath} 2>> ${deployment.logPath} || {
echo "❌ Command failed" >> ${deployment.logPath};
exit 1;
}
echo "✅ Command executed successfully" >> ${deployment.logPath};
`,
);
await execAsyncRemote(postgres.serverId, backupCommand);
} else {
const writeStream = createWriteStream(deployment.logPath, { flags: "a" });
await execAsyncStream(
`${command} | ${rcloneCommand}`,
(data) => {
if (writeStream.writable) {
writeStream.write(data);
}
},
{
env: {
...process.env,
RCLONE_LOG_LEVEL: "DEBUG",
},
},
);
writeStream.write("Backup done✅");
writeStream.end();
await execAsync(backupCommand);
}
await sendDatabaseBackupNotifications({

View File

@@ -77,35 +77,179 @@ export const getS3Credentials = (destination: Destination) => {
};
export const getPostgresBackupCommand = (
containerId: string,
database: string,
databaseUser: string,
) => {
return `docker exec ${containerId} sh -c "pg_dump -Fc --no-acl --no-owner -h localhost -U ${databaseUser} --no-password '${database}' | gzip"`;
return `docker exec -i $CONTAINER_ID bash -c "set -o pipefail; pg_dump -Fc --no-acl --no-owner -h localhost -U ${databaseUser} --no-password '${database}' | gzip"`;
};
export const getMariadbBackupCommand = (
containerId: string,
database: string,
databaseUser: string,
databasePassword: string,
) => {
return `docker exec ${containerId} sh -c "mariadb-dump --user='${databaseUser}' --password='${databasePassword}' --databases ${database} | gzip"`;
return `docker exec -i $CONTAINER_ID bash -c "set -o pipefail; mariadb-dump --user='${databaseUser}' --password='${databasePassword}' --databases ${database} | gzip"`;
};
export const getMysqlBackupCommand = (
containerId: string,
database: string,
databasePassword: string,
) => {
return `docker exec ${containerId} sh -c "mysqldump --default-character-set=utf8mb4 -u 'root' --password='${databasePassword}' --single-transaction --no-tablespaces --quick '${database}' | gzip"`;
return `docker exec -i $CONTAINER_ID bash -c "set -o pipefail; mysqldump --default-character-set=utf8mb4 -u 'root' --password='${databasePassword}' --single-transaction --no-tablespaces --quick '${database}' | gzip"`;
};
export const getMongoBackupCommand = (
containerId: string,
database: string,
databaseUser: string,
databasePassword: string,
) => {
return `docker exec ${containerId} sh -c "mongodump -d '${database}' -u '${databaseUser}' -p '${databasePassword}' --archive --authenticationDatabase admin --gzip"`;
return `docker exec -i $CONTAINER_ID bash -c "set -o pipefail; mongodump -d '${database}' -u '${databaseUser}' -p '${databasePassword}' --archive --authenticationDatabase admin --gzip"`;
};
const getServiceContainerCommand = (appName: string) => {
return `docker ps -q --filter "status=running" --filter "label=com.docker.swarm.service.name=${appName}" | head -n 1`;
};
const getComposeContainerCommand = (
appName: string,
serviceName: string,
composeType: "stack" | "docker-compose" | undefined,
) => {
if (composeType === "stack") {
return `docker ps -q --filter "status=running" --filter "label=com.docker.stack.namespace=${appName}" --filter "label=com.docker.swarm.service.name=${serviceName}" | head -n 1`;
}
return `docker ps -q --filter "status=running" --filter "label=com.docker.compose.project=${appName}" --filter "label=com.docker.compose.service=${serviceName}" | head -n 1`;
};
const getContainerSearchCommand = (backup: BackupSchedule) => {
const { backupType, postgres, mysql, mariadb, mongo, compose, serviceName } =
backup;
if (backupType === "database") {
const appName =
postgres?.appName || mysql?.appName || mariadb?.appName || mongo?.appName;
return getServiceContainerCommand(appName || "");
}
if (backupType === "compose") {
const { appName, composeType } = compose || {};
return getComposeContainerCommand(
appName || "",
serviceName || "",
composeType,
);
}
};
export const generateBackupCommand = (backup: BackupSchedule) => {
const { backupType, databaseType } = backup;
switch (databaseType) {
case "postgres": {
const postgres = backup.postgres;
if (backupType === "database" && postgres) {
return getPostgresBackupCommand(backup.database, postgres.databaseUser);
}
if (backupType === "compose" && backup.metadata?.postgres) {
return getPostgresBackupCommand(
backup.database,
backup.metadata.postgres.databaseUser,
);
}
break;
}
case "mysql": {
const mysql = backup.mysql;
if (backupType === "database" && mysql) {
return getMysqlBackupCommand(backup.database, mysql.databasePassword);
}
if (backupType === "compose" && backup.metadata?.mysql) {
return getMysqlBackupCommand(
backup.database,
backup.metadata?.mysql?.databaseRootPassword || "",
);
}
break;
}
case "mariadb": {
const mariadb = backup.mariadb;
if (backupType === "database" && mariadb) {
return getMariadbBackupCommand(
backup.database,
mariadb.databaseUser,
mariadb.databasePassword,
);
}
if (backupType === "compose" && backup.metadata?.mariadb) {
return getMariadbBackupCommand(
backup.database,
backup.metadata.mariadb.databaseUser,
backup.metadata.mariadb.databasePassword,
);
}
break;
}
case "mongo": {
const mongo = backup.mongo;
if (backupType === "database" && mongo) {
return getMongoBackupCommand(
backup.database,
mongo.databaseUser,
mongo.databasePassword,
);
}
if (backupType === "compose" && backup.metadata?.mongo) {
return getMongoBackupCommand(
backup.database,
backup.metadata.mongo.databaseUser,
backup.metadata.mongo.databasePassword,
);
}
break;
}
default:
throw new Error(`Database type not supported: ${databaseType}`);
}
return null;
};
export const getBackupCommand = (
backup: BackupSchedule,
rcloneCommand: string,
logPath: string,
) => {
const containerSearch = getContainerSearchCommand(backup);
const backupCommand = generateBackupCommand(backup);
return `
set -eo pipefail;
echo "[$(date)] Starting backup process..." >> ${logPath};
echo "[$(date)] Executing backup command..." >> ${logPath};
CONTAINER_ID=$(${containerSearch})
if [ -z "$CONTAINER_ID" ]; then
echo "[$(date)] ❌ Container not found" >> ${logPath};
exit 1;
fi
echo "[$(date)] Container Up: $CONTAINER_ID" >> ${logPath};
# Run the backup command and capture the exit status
BACKUP_OUTPUT=$(${backupCommand} 2>&1 >/dev/null) || {
echo "[$(date)] ❌ backup failed" >> ${logPath};
echo "Error: $BACKUP_OUTPUT" >> ${logPath};
exit 1;
}
echo "[$(date)] ✅ backup completed successfully" >> ${logPath};
echo "[$(date)] Starting upload to S3..." >> ${logPath};
# Run the upload command and capture the exit status
UPLOAD_OUTPUT=$(${backupCommand} | ${rcloneCommand} 2>&1 >/dev/null) || {
echo "[$(date)] ❌ Upload to S3 failed" >> ${logPath};
echo "Error: $UPLOAD_OUTPUT" >> ${logPath};
exit 1;
}
echo "[$(date)] ✅ Upload to S3 completed successfully" >> ${logPath};
echo "Backup done ✅" >> ${logPath};
`;
};
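
Taken together, a hypothetical call site for a scheduled Postgres database backup would look roughly like the following; the `backup` literal and the paths are illustrative, the real `BackupSchedule` carries more fields, and `rcloneFlags` is assumed to come from `getS3Credentials` as above:

import type { BackupSchedule } from "@dokploy/server/services/backup";
import { execAsync } from "../process/execAsync";
import { getBackupCommand } from "./utils";

// Illustrative only: field names inferred from this diff.
const backup = {
  backupType: "database",
  databaseType: "postgres",
  database: "shop",
  postgres: { appName: "shop-postgres", databaseUser: "postgres" },
} as BackupSchedule;

const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} ":s3:my-bucket/db-backups/shop.sql.gz"`;
const script = getBackupCommand(backup, rcloneCommand, "/etc/dokploy/logs/backup.log");
// The returned script resolves $CONTAINER_ID via the swarm service label,
// runs the dump once to surface errors (stdout discarded), then runs it
// again piped into rclone, appending progress markers to the log file.
await execAsync(script);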

View File

@@ -509,7 +509,7 @@ export const getServiceContainer = async (
});
if (containers.length === 0 || !containers[0]) {
throw new Error(`No container found with name: ${appName}`);
return null;
}
const container = containers[0];
@@ -549,7 +549,7 @@ export const getComposeContainer = async (
});
if (containers.length === 0 || !containers[0]) {
throw new Error(`No container found with name: ${appName}`);
return null;
}
const container = containers[0];
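
Because these helpers now return null instead of throwing, call sites must handle the missing-container case themselves; a hypothetical caller might look like:

import { getServiceContainer } from "../docker/utils";

// Hypothetical call site: getServiceContainer may now resolve to null.
const container = await getServiceContainer(appName, serverId);
if (!container) {
  // Previously this path threw; now the caller decides how to react.
  console.warn(`No running container found for ${appName}`);
  return;
}
console.log(`Found container ${container.Id}`);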

View File

@@ -21,7 +21,6 @@ export const execAsyncStream = (
const childProcess = exec(command, options, (error) => {
if (error) {
console.log(error);
reject(error);
return;
}