Implement metadata handling for database and compose backups. Update backup schemas to include metadata fields for various database types. Enhance backup creation and update processes to accommodate new metadata requirements. Modify UI components to support metadata input for different database types during backup operations.

This commit is contained in:
Mauricio Siu
2025-04-27 22:14:06 -06:00
parent 2ea2605ab1
commit 7c2eb63625
15 changed files with 6010 additions and 77 deletions

View File

@@ -0,0 +1,91 @@
import type { BackupSchedule } from "@dokploy/server/services/backup";
import type { Compose } from "@dokploy/server/services/compose";
import { findProjectById } from "@dokploy/server/services/project";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getS3Credentials, normalizeS3Path } from "./utils";
/**
 * Builds the `docker exec … | gzip` dump command for the backup's engine.
 * Credentials come from `backup.metadata`, filled per engine by the UI.
 * Throws for engines without compose support so we never pipe an empty
 * command into rclone (which would silently upload an empty object).
 *
 * NOTE(review): user/password values are interpolated unescaped into a shell
 * string; credentials containing quotes will break the command — consider
 * escaping upstream.
 */
const buildDumpCommand = (
	backup: BackupSchedule,
	database: string,
	containerId: string,
): string => {
	if (backup.databaseType === "postgres") {
		return `docker exec ${containerId} sh -c "pg_dump -Fc --no-acl --no-owner -h localhost -U ${backup.metadata?.postgres?.databaseUser} --no-password '${database}' | gzip"`;
	}
	if (backup.databaseType === "mariadb") {
		return `docker exec ${containerId} sh -c "mariadb-dump --user='${backup.metadata?.mariadb?.databaseUser}' --password='${backup.metadata?.mariadb?.databasePassword}' --databases ${database} | gzip"`;
	}
	if (backup.databaseType === "mysql") {
		return `docker exec ${containerId} sh -c "mysqldump --default-character-set=utf8mb4 -u 'root' --password='${backup.metadata?.mysql?.databaseRootPassword}' --single-transaction --no-tablespaces --quick '${database}' | gzip"`;
	}
	if (backup.databaseType === "mongo") {
		return `docker exec ${containerId} sh -c "mongodump -d '${database}' -u '${backup.metadata?.mongo?.databaseUser}' -p '${backup.metadata?.mongo?.databasePassword}' --archive --authenticationDatabase admin --gzip"`;
	}
	throw new Error(`Unsupported database type: ${backup.databaseType}`);
};

/**
 * Maps our internal `databaseType` discriminator to the label used by the
 * notification payload ("mongo" is reported as "mongodb" there).
 * NOTE(review): assumes the other engine names match 1:1 — confirm against
 * the notification schema.
 */
const notificationDatabaseType = (
	databaseType: BackupSchedule["databaseType"],
) => (databaseType === "mongo" ? "mongodb" : databaseType);

/**
 * Runs a scheduled backup for a database running inside a compose stack.
 *
 * Locates the running container via the `dokploy.backup.id` label, produces a
 * gzipped dump for the configured engine and streams it directly to the S3
 * destination with `rclone rcat` (no temp file on disk). Sends a
 * success/error notification either way and rethrows on failure.
 */
export const runComposeBackup = async (
	compose: Compose,
	backup: BackupSchedule,
) => {
	const { projectId, name } = compose;
	const project = await findProjectById(projectId);
	const { prefix, database } = backup;
	const destination = backup.destination;
	const backupFileName = `${new Date().toISOString()}.dump.gz`;
	const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
	try {
		const rcloneFlags = getS3Credentials(destination);
		const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
		const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
		// Pick the first running container labeled with this backup's id.
		const findContainerCommand = `docker ps --filter "status=running" --filter "label=dokploy.backup.id=${backup.backupId}" --format "{{.ID}}" | head -n 1`;

		// Same flow on a remote server or locally — only the executor differs.
		const serverId = compose.serverId;
		const exec = serverId
			? (cmd: string) => execAsyncRemote(serverId, cmd)
			: (cmd: string) => execAsync(cmd);

		const { stdout } = await exec(findContainerCommand);
		if (!stdout) {
			throw new Error("Container not found");
		}
		const containerId = stdout.trim();
		const backupCommand = buildDumpCommand(backup, database, containerId);
		await exec(`${backupCommand} | ${rcloneCommand}`);

		await sendDatabaseBackupNotifications({
			applicationName: name,
			projectName: project.name,
			// Report the actual engine; the original hard-coded "mongodb" for
			// every database type (copy/paste from the mongo runner).
			databaseType: notificationDatabaseType(backup.databaseType),
			type: "success",
			organizationId: project.organizationId,
		});
	} catch (error) {
		console.log(error);
		await sendDatabaseBackupNotifications({
			applicationName: name,
			projectName: project.name,
			databaseType: notificationDatabaseType(backup.databaseType),
			type: "error",
			// Narrow instead of @ts-ignore: `error` is `unknown` in strict TS.
			errorMessage:
				error instanceof Error ? error.message : "Error message not provided",
			organizationId: project.organizationId,
		});
		throw error;
	}
};
// mongorestore -d monguito -u mongo -p Bqh7AQl-PRbnBu --authenticationDatabase admin --gzip --archive=2024-04-13T05:03:58.937Z.dump.gz

View File

@@ -7,26 +7,39 @@ import { runMongoBackup } from "./mongo";
import { runMySqlBackup } from "./mysql";
import { runPostgresBackup } from "./postgres";
import { runWebServerBackup } from "./web-server";
import { runComposeBackup } from "./compose";
export const scheduleBackup = (backup: BackupSchedule) => {
const { schedule, backupId, databaseType, postgres, mysql, mongo, mariadb } =
backup;
const {
schedule,
backupId,
databaseType,
postgres,
mysql,
mongo,
mariadb,
compose,
} = backup;
scheduleJob(backupId, schedule, async () => {
if (databaseType === "postgres" && postgres) {
await runPostgresBackup(postgres, backup);
await keepLatestNBackups(backup, postgres.serverId);
} else if (databaseType === "mysql" && mysql) {
await runMySqlBackup(mysql, backup);
await keepLatestNBackups(backup, mysql.serverId);
} else if (databaseType === "mongo" && mongo) {
await runMongoBackup(mongo, backup);
await keepLatestNBackups(backup, mongo.serverId);
} else if (databaseType === "mariadb" && mariadb) {
await runMariadbBackup(mariadb, backup);
await keepLatestNBackups(backup, mariadb.serverId);
} else if (databaseType === "web-server") {
await runWebServerBackup(backup);
await keepLatestNBackups(backup);
if (backup.backupType === "database") {
if (databaseType === "postgres" && postgres) {
await runPostgresBackup(postgres, backup);
await keepLatestNBackups(backup, postgres.serverId);
} else if (databaseType === "mysql" && mysql) {
await runMySqlBackup(mysql, backup);
await keepLatestNBackups(backup, mysql.serverId);
} else if (databaseType === "mongo" && mongo) {
await runMongoBackup(mongo, backup);
await keepLatestNBackups(backup, mongo.serverId);
} else if (databaseType === "mariadb" && mariadb) {
await runMariadbBackup(mariadb, backup);
await keepLatestNBackups(backup, mariadb.serverId);
} else if (databaseType === "web-server") {
await runWebServerBackup(backup);
await keepLatestNBackups(backup);
}
} else if (backup.backupType === "compose" && compose) {
await runComposeBackup(compose, backup);
}
});
};

View File

@@ -22,15 +22,15 @@ import { spawnAsync } from "../process/spawnAsync";
export type ComposeNested = InferResultType<
"compose",
{ project: true; mounts: true; domains: true }
{ project: true; mounts: true; domains: true; backups: true }
>;
export const buildCompose = async (compose: ComposeNested, logPath: string) => {
const writeStream = createWriteStream(logPath, { flags: "a" });
const { sourceType, appName, mounts, composeType, domains } = compose;
const { sourceType, appName, mounts, composeType } = compose;
try {
const { COMPOSE_PATH } = paths();
const command = createCommand(compose);
await writeDomainsToCompose(compose, domains);
await writeDomainsToCompose(compose);
createEnvFile(compose);
if (compose.isolatedDeployment) {

View File

@@ -0,0 +1,4 @@
/**
 * Builds the docker labels used to tag a compose service for backups.
 * The backup runner later locates the running container by filtering on
 * `dokploy.backup.id=<backupId>`.
 */
export const createBackupLabels = (backupId: string): string[] => [
	`dokploy.backup.id=${backupId}`,
];

View File

@@ -38,6 +38,8 @@ import type {
PropertiesNetworks,
} from "./types";
import { encodeBase64 } from "./utils";
import type { Backup } from "@dokploy/server/services/backup";
import { createBackupLabels } from "./backup";
export const cloneCompose = async (compose: Compose) => {
if (compose.sourceType === "github") {
@@ -132,13 +134,13 @@ export const readComposeFile = async (compose: Compose) => {
};
export const writeDomainsToCompose = async (
compose: Compose,
domains: Domain[],
compose: Compose & { domains: Domain[]; backups: Backup[] },
) => {
if (!domains.length) {
const { domains, backups } = compose;
if (!domains.length && !backups.length) {
return;
}
const composeConverted = await addDomainToCompose(compose, domains);
const composeConverted = await addDomainToCompose(compose);
const path = getComposePath(compose);
const composeString = dump(composeConverted, { lineWidth: 1000 });
@@ -150,7 +152,7 @@ export const writeDomainsToCompose = async (
};
export const writeDomainsToComposeRemote = async (
compose: Compose,
compose: Compose & { domains: Domain[]; backups: Backup[] },
domains: Domain[],
logPath: string,
) => {
@@ -159,7 +161,7 @@ export const writeDomainsToComposeRemote = async (
}
try {
const composeConverted = await addDomainToCompose(compose, domains);
const composeConverted = await addDomainToCompose(compose);
const path = getComposePath(compose);
if (!composeConverted) {
@@ -180,22 +182,20 @@ exit 1;
`;
}
};
// (node:59875) MaxListenersExceededWarning: Possible EventEmitter memory leak detected. 11 SIGTERM listeners added to [process]. Use emitter.setMaxListeners() to increase limit
export const addDomainToCompose = async (
compose: Compose,
domains: Domain[],
compose: Compose & { domains: Domain[]; backups: Backup[] },
) => {
const { appName } = compose;
const { appName, domains, backups } = compose;
let result: ComposeSpecification | null;
if (compose.serverId) {
result = await loadDockerComposeRemote(compose); // aca hay que ir al servidor e ir a traer el compose file al servidor
result = await loadDockerComposeRemote(compose);
} else {
result = await loadDockerCompose(compose);
}
if (!result || domains.length === 0) {
if (!result || (domains.length === 0 && backups.length === 0)) {
return null;
}
@@ -264,6 +264,37 @@ export const addDomainToCompose = async (
}
}
for (const backup of backups) {
const { backupId, serviceName, enabled } = backup;
if (!enabled) {
continue;
}
if (!serviceName) {
throw new Error(
"Service name not found, please check the backups to use a valid service name",
);
}
if (!result?.services?.[serviceName]) {
throw new Error(`The service ${serviceName} not found in the compose`);
}
const backupLabels = createBackupLabels(backupId);
if (!result.services[serviceName].labels) {
result.services[serviceName].labels = [];
}
result.services[serviceName].labels = [
...(Array.isArray(result.services[serviceName].labels)
? result.services[serviceName].labels
: []),
...backupLabels,
];
}
// Add dokploy-network to the root of the compose file
if (!compose.isolatedDeployment) {
result.networks = addDokployNetworkToRoot(result.networks);