Refactor backup and restore utilities for improved container handling

- Removed the `getRemoteServiceContainer` function and updated the `getServiceContainer` function to handle both local and remote service containers more efficiently.
- Refactored backup and restore commands for MariaDB, MongoDB, MySQL, and PostgreSQL to utilize new utility functions for generating backup and restore commands, enhancing code clarity and maintainability.
- Streamlined command execution logic in backup and restore processes, ensuring consistent handling of container IDs across different database types.
This commit is contained in:
Mauricio Siu 2025-05-03 13:24:05 -06:00
parent 0690f07262
commit 89306a7619
9 changed files with 115 additions and 142 deletions

View File

@ -1,10 +1,7 @@
import type { BackupSchedule } from "@dokploy/server/services/backup";
import type { Mariadb } from "@dokploy/server/services/mariadb";
import { findProjectById } from "@dokploy/server/services/project";
import {
getRemoteServiceContainer,
getServiceContainer,
} from "../docker/utils";
import { getServiceContainer } from "../docker/utils";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getS3Credentials, normalizeS3Path } from "./utils";
@ -24,22 +21,20 @@ export const runMariadbBackup = async (
const rcloneFlags = getS3Credentials(destination);
const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
const { Id: containerId } = await getServiceContainer(
appName,
mariadb.serverId,
);
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
if (mariadb.serverId) {
const { Id: containerId } = await getRemoteServiceContainer(
mariadb.serverId,
appName,
);
const mariadbDumpCommand = `docker exec ${containerId} sh -c "mariadb-dump --user='${databaseUser}' --password='${databasePassword}' --databases ${database} | gzip"`;
await execAsyncRemote(
mariadb.serverId,
`${mariadbDumpCommand} | ${rcloneCommand}`,
);
} else {
const { Id: containerId } = await getServiceContainer(appName);
const mariadbDumpCommand = `docker exec ${containerId} sh -c "mariadb-dump --user='${databaseUser}' --password='${databasePassword}' --databases ${database} | gzip"`;
await execAsync(`${mariadbDumpCommand} | ${rcloneCommand}`);
}

View File

@ -1,15 +1,15 @@
import type { BackupSchedule } from "@dokploy/server/services/backup";
import type { Mongo } from "@dokploy/server/services/mongo";
import { findProjectById } from "@dokploy/server/services/project";
import {
getRemoteServiceContainer,
getServiceContainer,
} from "../docker/utils";
import { getServiceContainer } from "../docker/utils";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getS3Credentials, normalizeS3Path } from "./utils";
import {
getMongoBackupCommand,
getS3Credentials,
normalizeS3Path,
} from "./utils";
// mongodb://mongo:Bqh7AQl-PRbnBu@localhost:27017/?tls=false&directConnection=true
export const runMongoBackup = async (mongo: Mongo, backup: BackupSchedule) => {
const { appName, databasePassword, databaseUser, projectId, name } = mongo;
const project = await findProjectById(projectId);
@ -22,22 +22,22 @@ export const runMongoBackup = async (mongo: Mongo, backup: BackupSchedule) => {
const rcloneFlags = getS3Credentials(destination);
const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
if (mongo.serverId) {
const { Id: containerId } = await getRemoteServiceContainer(
mongo.serverId,
appName,
);
const mongoDumpCommand = `docker exec ${containerId} sh -c "mongodump -d '${database}' -u '${databaseUser}' -p '${databasePassword}' --archive --authenticationDatabase=admin --gzip"`;
const { Id: containerId } = await getServiceContainer(
appName,
mongo.serverId,
);
await execAsyncRemote(
mongo.serverId,
`${mongoDumpCommand} | ${rcloneCommand}`,
);
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
const command = getMongoBackupCommand(
containerId,
database,
databaseUser || "",
databasePassword || "",
);
if (mongo.serverId) {
await execAsyncRemote(mongo.serverId, `${command} | ${rcloneCommand}`);
} else {
const { Id: containerId } = await getServiceContainer(appName);
const mongoDumpCommand = `docker exec ${containerId} sh -c "mongodump -d '${database}' -u '${databaseUser}' -p '${databasePassword}' --archive --authenticationDatabase=admin --gzip"`;
await execAsync(`${mongoDumpCommand} | ${rcloneCommand}`);
await execAsync(`${command} | ${rcloneCommand}`);
}
await sendDatabaseBackupNotifications({
@ -61,4 +61,3 @@ export const runMongoBackup = async (mongo: Mongo, backup: BackupSchedule) => {
throw error;
}
};
// mongorestore -d monguito -u mongo -p Bqh7AQl-PRbnBu --authenticationDatabase admin --gzip --archive=2024-04-13T05:03:58.937Z.dump.gz

View File

@ -1,13 +1,14 @@
import type { BackupSchedule } from "@dokploy/server/services/backup";
import type { MySql } from "@dokploy/server/services/mysql";
import { findProjectById } from "@dokploy/server/services/project";
import {
getRemoteServiceContainer,
getServiceContainer,
} from "../docker/utils";
import { getServiceContainer } from "../docker/utils";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getS3Credentials, normalizeS3Path } from "./utils";
import {
getMysqlBackupCommand,
getS3Credentials,
normalizeS3Path,
} from "./utils";
export const runMySqlBackup = async (mysql: MySql, backup: BackupSchedule) => {
const { appName, databaseRootPassword, projectId, name } = mysql;
@ -21,23 +22,21 @@ export const runMySqlBackup = async (mysql: MySql, backup: BackupSchedule) => {
const rcloneFlags = getS3Credentials(destination);
const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
const { Id: containerId } = await getServiceContainer(
appName,
mysql.serverId,
);
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
const command = getMysqlBackupCommand(
containerId,
database,
databaseRootPassword || "",
);
if (mysql.serverId) {
const { Id: containerId } = await getRemoteServiceContainer(
mysql.serverId,
appName,
);
const mysqlDumpCommand = `docker exec ${containerId} sh -c "mysqldump --default-character-set=utf8mb4 -u 'root' --password='${databaseRootPassword}' --single-transaction --no-tablespaces --quick '${database}' | gzip"`;
await execAsyncRemote(
mysql.serverId,
`${mysqlDumpCommand} | ${rcloneCommand}`,
);
await execAsyncRemote(mysql.serverId, `${command} | ${rcloneCommand}`);
} else {
const { Id: containerId } = await getServiceContainer(appName);
const mysqlDumpCommand = `docker exec ${containerId} sh -c "mysqldump --default-character-set=utf8mb4 -u 'root' --password='${databaseRootPassword}' --single-transaction --no-tablespaces --quick '${database}' | gzip"`;
await execAsync(`${mysqlDumpCommand} | ${rcloneCommand}`);
await execAsync(`${command} | ${rcloneCommand}`);
}
await sendDatabaseBackupNotifications({
applicationName: name,

View File

@ -1,13 +1,14 @@
import type { BackupSchedule } from "@dokploy/server/services/backup";
import type { Postgres } from "@dokploy/server/services/postgres";
import { findProjectById } from "@dokploy/server/services/project";
import {
getRemoteServiceContainer,
getServiceContainer,
} from "../docker/utils";
import { getServiceContainer } from "../docker/utils";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getS3Credentials, normalizeS3Path } from "./utils";
import {
getPostgresBackupCommand,
getS3Credentials,
normalizeS3Path,
} from "./utils";
import { createDeploymentBackup } from "@dokploy/server/services/deployment";
export const runPostgresBackup = async (
@ -31,26 +32,22 @@ export const runPostgresBackup = async (
const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
const { Id: containerId } = await getServiceContainer(
appName,
postgres.serverId,
);
const command = getPostgresBackupCommand(
containerId,
database,
databaseUser || "",
);
if (postgres.serverId) {
const { Id: containerId } = await getRemoteServiceContainer(
postgres.serverId,
appName,
);
const pgDumpCommand = `docker exec ${containerId} sh -c "pg_dump -Fc --no-acl --no-owner -h localhost -U ${databaseUser} --no-password '${database}' | gzip"`;
await execAsyncRemote(
postgres.serverId,
`${pgDumpCommand} | ${rcloneCommand}`,
);
await execAsyncRemote(postgres.serverId, `${command} | ${rcloneCommand}`);
} else {
const { Id: containerId } = await getServiceContainer(appName);
const pgDumpCommand = `docker exec ${containerId} sh -c "pg_dump -Fc --no-acl --no-owner -h localhost -U ${databaseUser} --no-password '${database}' | gzip"`;
await execAsync(`${pgDumpCommand} | ${rcloneCommand}`, (data) => {
console.log(data);
});
// await execAsync(`${pgDumpCommand} | ${rcloneCommand}`);
await execAsync(`${command} | ${rcloneCommand}`);
}
await sendDatabaseBackupNotifications({
@ -75,6 +72,3 @@ export const runPostgresBackup = async (
} finally {
}
};
// Restore
// /Applications/pgAdmin 4.app/Contents/SharedSupport/pg_restore --host "localhost" --port "5432" --username "mauricio" --no-password --dbname "postgres" --verbose "/Users/mauricio/Downloads/_databases_2024-04-12T07_02_05.234Z.sql"

View File

@ -494,32 +494,9 @@ export const getCreateFileCommand = (
`;
};
/**
 * Find the running container that backs a Docker Swarm service on the
 * local Docker daemon.
 *
 * Containers are matched by the `com.docker.swarm.service.name` label
 * and restricted to `running` status.
 *
 * @param appName - Swarm service name to look up.
 * @returns The first matching container summary from `docker.listContainers`.
 * @throws Error when no running container carries the service-name label.
 */
export const getServiceContainer = async (appName: string) => {
	const filter = {
		status: ["running"],
		label: [`com.docker.swarm.service.name=${appName}`],
	};
	// NOTE(review): `docker` is a module-level client defined elsewhere in
	// this file — presumably a dockerode instance; its listContainers API
	// expects the filters map serialized as JSON.
	const containers = await docker.listContainers({
		filters: JSON.stringify(filter),
	});
	const container = containers[0];
	if (!container) {
		throw new Error(`No container found with name: ${appName}`);
	}
	return container;
};
export const getRemoteServiceContainer = async (
serverId: string,
export const getServiceContainer = async (
appName: string,
serverId?: string | null,
) => {
try {
const filter = {

View File

@ -1,11 +1,9 @@
import type { Destination } from "@dokploy/server/services/destination";
import type { Mariadb } from "@dokploy/server/services/mariadb";
import { getS3Credentials } from "../backups/utils";
import {
getRemoteServiceContainer,
getServiceContainer,
} from "../docker/utils";
import { getServiceContainer } from "../docker/utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getMariadbRestoreCommand } from "./utils";
export const restoreMariadbBackup = async (
mariadb: Mariadb,
@ -21,22 +19,27 @@ export const restoreMariadbBackup = async (
const bucketPath = `:s3:${destination.bucket}`;
const backupPath = `${bucketPath}/${backupFile}`;
const { Id: containerName } = serverId
? await getRemoteServiceContainer(serverId, appName)
: await getServiceContainer(appName);
const { Id: containerId } = await getServiceContainer(appName, serverId);
const restoreCommand = `
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | docker exec -i ${containerName} mariadb -u ${databaseUser} -p${databasePassword} ${database}
const restoreCommand = getMariadbRestoreCommand(
containerId,
database,
databaseUser,
databasePassword || "",
);
const command = `
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${restoreCommand}
`;
emit("Starting restore...");
emit(`Executing command: ${restoreCommand}`);
emit(`Executing command: ${command}`);
if (serverId) {
await execAsyncRemote(serverId, restoreCommand);
await execAsyncRemote(serverId, command);
} else {
await execAsync(restoreCommand);
await execAsync(command);
}
emit("Restore completed successfully!");

View File

@ -1,11 +1,9 @@
import type { Destination } from "@dokploy/server/services/destination";
import type { Mongo } from "@dokploy/server/services/mongo";
import { getS3Credentials } from "../backups/utils";
import {
getRemoteServiceContainer,
getServiceContainer,
} from "../docker/utils";
import { getServiceContainer } from "../docker/utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getMongoRestoreCommand } from "./utils";
export const restoreMongoBackup = async (
mongo: Mongo,
@ -21,14 +19,18 @@ export const restoreMongoBackup = async (
const bucketPath = `:s3:${destination.bucket}`;
const backupPath = `${bucketPath}/${backupFile}`;
const { Id: containerName } = serverId
? await getRemoteServiceContainer(serverId, appName)
: await getServiceContainer(appName);
const { Id: containerId } = await getServiceContainer(appName, serverId);
// For MongoDB, we need to first download the backup file since mongorestore expects a directory
const tempDir = "/tmp/dokploy-restore";
const fileName = backupFile.split("/").pop() || "backup.dump.gz";
const decompressedName = fileName.replace(".gz", "");
const restoreCommand = getMongoRestoreCommand(
containerId,
database,
databaseUser,
databasePassword || "",
);
const downloadCommand = `\
rm -rf ${tempDir} && \
@ -36,7 +38,7 @@ mkdir -p ${tempDir} && \
rclone copy ${rcloneFlags.join(" ")} "${backupPath}" ${tempDir} && \
cd ${tempDir} && \
gunzip -f "${fileName}" && \
docker exec -i ${containerName} mongorestore --username ${databaseUser} --password ${databasePassword} --authenticationDatabase admin --db ${database} --archive < "${decompressedName}" && \
${restoreCommand} < "${decompressedName}" && \
rm -rf ${tempDir}`;
emit("Starting restore...");

View File

@ -1,11 +1,9 @@
import type { Destination } from "@dokploy/server/services/destination";
import type { MySql } from "@dokploy/server/services/mysql";
import { getS3Credentials } from "../backups/utils";
import {
getRemoteServiceContainer,
getServiceContainer,
} from "../docker/utils";
import { getServiceContainer } from "../docker/utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getMysqlRestoreCommand } from "./utils";
export const restoreMySqlBackup = async (
mysql: MySql,
@ -21,22 +19,26 @@ export const restoreMySqlBackup = async (
const bucketPath = `:s3:${destination.bucket}`;
const backupPath = `${bucketPath}/${backupFile}`;
const { Id: containerName } = serverId
? await getRemoteServiceContainer(serverId, appName)
: await getServiceContainer(appName);
const { Id: containerId } = await getServiceContainer(appName, serverId);
const restoreCommand = `
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | docker exec -i ${containerName} mysql -u root -p${databaseRootPassword} ${database}
const restoreCommand = getMysqlRestoreCommand(
containerId,
database,
databaseRootPassword || "",
);
const command = `
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${restoreCommand}
`;
emit("Starting restore...");
emit(`Executing command: ${restoreCommand}`);
emit(`Executing command: ${command}`);
if (serverId) {
await execAsyncRemote(serverId, restoreCommand);
await execAsyncRemote(serverId, command);
} else {
await execAsync(restoreCommand);
await execAsync(command);
}
emit("Restore completed successfully!");

View File

@ -1,11 +1,9 @@
import type { Destination } from "@dokploy/server/services/destination";
import type { Postgres } from "@dokploy/server/services/postgres";
import { getS3Credentials } from "../backups/utils";
import {
getRemoteServiceContainer,
getServiceContainer,
} from "../docker/utils";
import { getServiceContainer } from "../docker/utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getPostgresRestoreCommand } from "./utils";
export const restorePostgresBackup = async (
postgres: Postgres,
@ -22,15 +20,19 @@ export const restorePostgresBackup = async (
const backupPath = `${bucketPath}/${backupFile}`;
const { Id: containerName } = serverId
? await getRemoteServiceContainer(serverId, appName)
: await getServiceContainer(appName);
const { Id: containerId } = await getServiceContainer(appName, serverId);
emit("Starting restore...");
emit(`Backup path: ${backupPath}`);
const restoreCommand = getPostgresRestoreCommand(
containerId,
database,
databaseUser,
);
const command = `\
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | docker exec -i ${containerName} pg_restore -U ${databaseUser} -d ${database} --clean --if-exists`;
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${restoreCommand}`;
emit(`Executing command: ${command}`);