Implement restore functionality for various database types

- Added `apiRestoreBackup` schema to define input requirements for restore operations.
- Refactored restore utilities for PostgreSQL, MySQL, MariaDB, and MongoDB to utilize a unified command generation approach, enhancing maintainability.
- Improved logging during restore processes to provide clearer feedback on command execution and success/failure states.
- Streamlined the handling of database credentials and backup file paths across different database types, ensuring consistency and reducing redundancy.
This commit is contained in:
Mauricio Siu
2025-05-04 03:25:58 -06:00
parent 66dd890448
commit 614b9d25a8
9 changed files with 295 additions and 207 deletions

View File

@@ -3,6 +3,7 @@ import {
apiCreateBackup,
apiFindOneBackup,
apiRemoveBackup,
apiRestoreBackup,
apiUpdateBackup,
} from "@/server/db/schema";
import { removeJob, schedule, updateJob } from "@/server/utils/backup";
@@ -366,23 +367,7 @@ export const backupRouter = createTRPCRouter({
override: true,
},
})
.input(
z.object({
databaseId: z.string(),
databaseType: z.enum([
"postgres",
"mysql",
"mariadb",
"mongo",
"web-server",
]),
backupType: z.enum(["database", "compose"]),
databaseName: z.string().min(1),
backupFile: z.string().min(1),
destinationId: z.string().min(1),
metadata: z.any(),
}),
)
.input(apiRestoreBackup)
.subscription(async ({ input }) => {
const destination = await findDestinationById(input.destinationId);
if (input.backupType === "database") {
@@ -390,57 +375,33 @@ export const backupRouter = createTRPCRouter({
const postgres = await findPostgresById(input.databaseId);
return observable<string>((emit) => {
restorePostgresBackup(
postgres,
destination,
input.databaseName,
input.backupFile,
(log) => {
emit.next(log);
},
);
restorePostgresBackup(postgres, destination, input, (log) => {
emit.next(log);
});
});
}
if (input.databaseType === "mysql") {
const mysql = await findMySqlById(input.databaseId);
return observable<string>((emit) => {
restoreMySqlBackup(
mysql,
destination,
input.databaseName,
input.backupFile,
(log) => {
emit.next(log);
},
);
restoreMySqlBackup(mysql, destination, input, (log) => {
emit.next(log);
});
});
}
if (input.databaseType === "mariadb") {
const mariadb = await findMariadbById(input.databaseId);
return observable<string>((emit) => {
restoreMariadbBackup(
mariadb,
destination,
input.databaseName,
input.backupFile,
(log) => {
emit.next(log);
},
);
restoreMariadbBackup(mariadb, destination, input, (log) => {
emit.next(log);
});
});
}
if (input.databaseType === "mongo") {
const mongo = await findMongoById(input.databaseId);
return observable<string>((emit) => {
restoreMongoBackup(
mongo,
destination,
input.databaseName,
input.backupFile,
(log) => {
emit.next(log);
},
);
restoreMongoBackup(mongo, destination, input, (log) => {
emit.next(log);
});
});
}
if (input.databaseType === "web-server") {
@@ -454,16 +415,9 @@ export const backupRouter = createTRPCRouter({
if (input.backupType === "compose") {
const compose = await findComposeById(input.databaseId);
return observable<string>((emit) => {
restoreComposeBackup(
compose,
destination,
input.databaseName,
input.backupFile,
input.metadata,
(log) => {
emit.next(log);
},
);
restoreComposeBackup(compose, destination, input, (log) => {
emit.next(log);
});
});
}
return true;

View File

@@ -191,3 +191,39 @@ export const apiUpdateBackup = createSchema
databaseType: true,
})
.required();
// Per-engine credentials needed to run a restore inside the database
// container; only the entry matching the target database type is expected
// to be present.
const restoreMetadataSchema = z.object({
  serviceName: z.string().optional(),
  postgres: z.object({ databaseUser: z.string() }).optional(),
  mariadb: z
    .object({ databaseUser: z.string(), databasePassword: z.string() })
    .optional(),
  mongo: z
    .object({ databaseUser: z.string(), databasePassword: z.string() })
    .optional(),
  mysql: z.object({ databaseRootPassword: z.string() }).optional(),
});

// Input contract for the backup-restore endpoint: which database/compose
// service to restore into, which backup file to pull from which destination,
// and (for compose restores) the engine credentials in `metadata`.
export const apiRestoreBackup = z.object({
  databaseId: z.string(),
  databaseType: z.enum(["postgres", "mysql", "mariadb", "mongo", "web-server"]),
  backupType: z.enum(["database", "compose"]),
  databaseName: z.string().min(1),
  backupFile: z.string().min(1),
  destinationId: z.string().min(1),
  metadata: restoreMetadataSchema.optional(),
});

View File

@@ -106,11 +106,11 @@ export const getMongoBackupCommand = (
return `docker exec -i $CONTAINER_ID bash -c "set -o pipefail; mongodump -d '${database}' -u '${databaseUser}' -p '${databasePassword}' --archive --authenticationDatabase admin --gzip"`;
};
const getServiceContainerCommand = (appName: string) => {
export const getServiceContainerCommand = (appName: string) => {
return `docker ps -q --filter "status=running" --filter "label=com.docker.swarm.service.name=${appName}" | head -n 1`;
};
const getComposeContainerCommand = (
export const getComposeContainerCommand = (
appName: string,
serviceName: string,
composeType: "stack" | "docker-compose" | undefined,
@@ -226,7 +226,7 @@ export const getBackupCommand = (
CONTAINER_ID=$(${containerSearch})
if [ -z "$CONTAINER_ID" ]; then
echo "[$(date)] ❌ Container not found" >> ${logPath};
echo "[$(date)] ❌ Error: Container not found" >> ${logPath};
exit 1;
fi
@@ -234,7 +234,7 @@ export const getBackupCommand = (
# Run the backup command and capture the exit status
BACKUP_OUTPUT=$(${backupCommand} 2>&1 >/dev/null) || {
echo "[$(date)] ❌ backup failed" >> ${logPath};
echo "[$(date)] ❌ Error: Backup failed" >> ${logPath};
echo "Error: $BACKUP_OUTPUT" >> ${logPath};
exit 1;
}
@@ -244,7 +244,7 @@ export const getBackupCommand = (
# Run the upload command and capture the exit status
UPLOAD_OUTPUT=$(${backupCommand} | ${rcloneCommand} 2>&1 >/dev/null) || {
echo "[$(date)] ❌ Upload to S3 failed" >> ${logPath};
echo "[$(date)] ❌ Error: Upload to S3 failed" >> ${logPath};
echo "Error: $UPLOAD_OUTPUT" >> ${logPath};
exit 1;
}

View File

@@ -2,76 +2,84 @@ import type { Destination } from "@dokploy/server/services/destination";
import type { Compose } from "@dokploy/server/services/compose";
import { getS3Credentials } from "../backups/utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import type { Backup } from "@dokploy/server/services/backup";
import { getComposeContainer } from "../docker/utils";
import {
getMariadbRestoreCommand,
getMongoRestoreCommand,
getMysqlRestoreCommand,
getPostgresRestoreCommand,
} from "./utils";
import { getRestoreCommand } from "./utils";
import type { apiRestoreBackup } from "@dokploy/server/db/schema";
import type { z } from "zod";
interface DatabaseCredentials {
databaseUser?: string;
databasePassword?: string;
}
export const restoreComposeBackup = async (
compose: Compose,
destination: Destination,
database: string,
backupFile: string,
metadata: Backup["metadata"] & { serviceName: string },
backupInput: z.infer<typeof apiRestoreBackup>,
emit: (log: string) => void,
) => {
try {
const { serverId } = compose;
if (backupInput.databaseType === "web-server") {
return;
}
const { serverId, appName, composeType } = compose;
const rcloneFlags = getS3Credentials(destination);
const bucketPath = `:s3:${destination.bucket}`;
const backupPath = `${bucketPath}/${backupFile}`;
const backupPath = `${bucketPath}/${backupInput.backupFile}`;
let rcloneCommand = `rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip`;
const { Id: containerId } = await getComposeContainer(
compose,
metadata.serviceName || "",
);
let restoreCommand = "";
if (metadata.postgres) {
restoreCommand = `rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${getPostgresRestoreCommand(containerId, database, metadata.postgres.databaseUser)}`;
} else if (metadata.mariadb) {
restoreCommand = `
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${getMariadbRestoreCommand(containerId, database, metadata.mariadb.databaseUser, metadata.mariadb.databasePassword)}
`;
} else if (metadata.mysql) {
restoreCommand = `
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${getMysqlRestoreCommand(containerId, database, metadata.mysql.databaseRootPassword)}
`;
} else if (metadata.mongo) {
const tempDir = "/tmp/dokploy-restore";
const fileName = backupFile.split("/").pop() || "backup.dump.gz";
const decompressedName = fileName.replace(".gz", "");
restoreCommand = `\
rm -rf ${tempDir} && \
mkdir -p ${tempDir} && \
rclone copy ${rcloneFlags.join(" ")} "${backupPath}" ${tempDir} && \
cd ${tempDir} && \
gunzip -f "${fileName}" && \
${getMongoRestoreCommand(containerId, database, metadata.mongo.databaseUser, metadata.mongo.databasePassword)} < "${decompressedName}" && \
rm -rf ${tempDir}`;
if (backupInput.metadata?.mongo) {
rcloneCommand = `rclone copy ${rcloneFlags.join(" ")} "${backupPath}"`;
}
let credentials: DatabaseCredentials;
switch (backupInput.databaseType) {
case "postgres":
credentials = {
databaseUser: backupInput.metadata?.postgres?.databaseUser,
};
break;
case "mariadb":
credentials = {
databaseUser: backupInput.metadata?.mariadb?.databaseUser,
databasePassword: backupInput.metadata?.mariadb?.databasePassword,
};
break;
case "mysql":
credentials = {
databasePassword: backupInput.metadata?.mysql?.databaseRootPassword,
};
break;
case "mongo":
credentials = {
databaseUser: backupInput.metadata?.mongo?.databaseUser,
databasePassword: backupInput.metadata?.mongo?.databasePassword,
};
break;
}
const restoreCommand = getRestoreCommand({
appName: appName,
serviceName: backupInput.metadata?.serviceName,
type: backupInput.databaseType,
credentials: {
database: backupInput.databaseName,
...credentials,
},
restoreType: composeType,
rcloneCommand,
});
emit("Starting restore...");
emit(`Backup path: ${backupPath}`);
emit(`Executing command: ${restoreCommand}`);
if (serverId) {
const { stdout, stderr } = await execAsyncRemote(
serverId,
restoreCommand,
);
emit(stdout);
emit(stderr);
await execAsyncRemote(serverId, restoreCommand);
} else {
const { stdout, stderr } = await execAsync(restoreCommand);
emit(stdout);
emit(stderr);
await execAsync(restoreCommand);
}
emit("Restore completed successfully!");

View File

@@ -1,36 +1,37 @@
import type { Destination } from "@dokploy/server/services/destination";
import type { Mariadb } from "@dokploy/server/services/mariadb";
import { getS3Credentials } from "../backups/utils";
import { getServiceContainer } from "../docker/utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getMariadbRestoreCommand } from "./utils";
import { getRestoreCommand } from "./utils";
import type { apiRestoreBackup } from "@dokploy/server/db/schema";
import type { z } from "zod";
export const restoreMariadbBackup = async (
mariadb: Mariadb,
destination: Destination,
database: string,
backupFile: string,
backupInput: z.infer<typeof apiRestoreBackup>,
emit: (log: string) => void,
) => {
try {
const { appName, databasePassword, databaseUser, serverId } = mariadb;
const { appName, serverId, databaseUser, databasePassword } = mariadb;
const rcloneFlags = getS3Credentials(destination);
const bucketPath = `:s3:${destination.bucket}`;
const backupPath = `${bucketPath}/${backupFile}`;
const backupPath = `${bucketPath}/${backupInput.backupFile}`;
const { Id: containerId } = await getServiceContainer(appName, serverId);
const rcloneCommand = `rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip`;
const restoreCommand = getMariadbRestoreCommand(
containerId,
database,
databaseUser,
databasePassword || "",
);
const command = `
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${restoreCommand}
`;
const command = getRestoreCommand({
appName,
credentials: {
database: backupInput.databaseName,
databaseUser,
databasePassword,
},
type: "mariadb",
rcloneCommand,
restoreType: "database",
});
emit("Starting restore...");

View File

@@ -1,15 +1,15 @@
import type { Destination } from "@dokploy/server/services/destination";
import type { Mongo } from "@dokploy/server/services/mongo";
import { getS3Credentials } from "../backups/utils";
import { getServiceContainer } from "../docker/utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getMongoRestoreCommand } from "./utils";
import { getRestoreCommand } from "./utils";
import type { apiRestoreBackup } from "@dokploy/server/db/schema";
import type { z } from "zod";
export const restoreMongoBackup = async (
mongo: Mongo,
destination: Destination,
database: string,
backupFile: string,
backupInput: z.infer<typeof apiRestoreBackup>,
emit: (log: string) => void,
) => {
try {
@@ -17,38 +17,30 @@ export const restoreMongoBackup = async (
const rcloneFlags = getS3Credentials(destination);
const bucketPath = `:s3:${destination.bucket}`;
const backupPath = `${bucketPath}/${backupFile}`;
const backupPath = `${bucketPath}/${backupInput.backupFile}`;
const rcloneCommand = `rclone copy ${rcloneFlags.join(" ")} "${backupPath}"`;
const { Id: containerId } = await getServiceContainer(appName, serverId);
// For MongoDB, we need to first download the backup file since mongorestore expects a directory
const tempDir = "/tmp/dokploy-restore";
const fileName = backupFile.split("/").pop() || "backup.dump.gz";
const decompressedName = fileName.replace(".gz", "");
const restoreCommand = getMongoRestoreCommand(
containerId,
database,
databaseUser,
databasePassword || "",
);
const downloadCommand = `\
rm -rf ${tempDir} && \
mkdir -p ${tempDir} && \
rclone copy ${rcloneFlags.join(" ")} "${backupPath}" ${tempDir} && \
cd ${tempDir} && \
gunzip -f "${fileName}" && \
${restoreCommand} < "${decompressedName}" && \
rm -rf ${tempDir}`;
const command = getRestoreCommand({
appName,
type: "mongo",
credentials: {
database: backupInput.databaseName,
databaseUser,
databasePassword,
},
restoreType: "database",
rcloneCommand,
backupFile: backupInput.backupFile,
});
emit("Starting restore...");
emit(`Executing command: ${downloadCommand}`);
emit(`Executing command: ${command}`);
if (serverId) {
await execAsyncRemote(serverId, downloadCommand);
await execAsyncRemote(serverId, command);
} else {
await execAsync(downloadCommand);
await execAsync(command);
}
emit("Restore completed successfully!");

View File

@@ -1,15 +1,15 @@
import type { Destination } from "@dokploy/server/services/destination";
import type { MySql } from "@dokploy/server/services/mysql";
import { getS3Credentials } from "../backups/utils";
import { getServiceContainer } from "../docker/utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getMysqlRestoreCommand } from "./utils";
import { getRestoreCommand } from "./utils";
import type { apiRestoreBackup } from "@dokploy/server/db/schema";
import type { z } from "zod";
export const restoreMySqlBackup = async (
mysql: MySql,
destination: Destination,
database: string,
backupFile: string,
backupInput: z.infer<typeof apiRestoreBackup>,
emit: (log: string) => void,
) => {
try {
@@ -17,19 +17,20 @@ export const restoreMySqlBackup = async (
const rcloneFlags = getS3Credentials(destination);
const bucketPath = `:s3:${destination.bucket}`;
const backupPath = `${bucketPath}/${backupFile}`;
const backupPath = `${bucketPath}/${backupInput.backupFile}`;
const { Id: containerId } = await getServiceContainer(appName, serverId);
const rcloneCommand = `rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip`;
const restoreCommand = getMysqlRestoreCommand(
containerId,
database,
databaseRootPassword || "",
);
const command = `
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${restoreCommand}
`;
const command = getRestoreCommand({
appName,
type: "mysql",
credentials: {
database: backupInput.databaseName,
databasePassword: databaseRootPassword,
},
restoreType: "database",
rcloneCommand,
});
emit("Starting restore...");

View File

@@ -1,15 +1,15 @@
import type { Destination } from "@dokploy/server/services/destination";
import type { Postgres } from "@dokploy/server/services/postgres";
import { getS3Credentials } from "../backups/utils";
import { getServiceContainer } from "../docker/utils";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getPostgresRestoreCommand } from "./utils";
import { getRestoreCommand } from "./utils";
import type { apiRestoreBackup } from "@dokploy/server/db/schema";
import type { z } from "zod";
export const restorePostgresBackup = async (
postgres: Postgres,
destination: Destination,
database: string,
backupFile: string,
backupInput: z.infer<typeof apiRestoreBackup>,
emit: (log: string) => void,
) => {
try {
@@ -18,32 +18,30 @@ export const restorePostgresBackup = async (
const rcloneFlags = getS3Credentials(destination);
const bucketPath = `:s3:${destination.bucket}`;
const backupPath = `${bucketPath}/${backupFile}`;
const backupPath = `${bucketPath}/${backupInput.backupFile}`;
const { Id: containerId } = await getServiceContainer(appName, serverId);
const rcloneCommand = `rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip`;
emit("Starting restore...");
emit(`Backup path: ${backupPath}`);
const restoreCommand = getPostgresRestoreCommand(
containerId,
database,
databaseUser,
);
const command = `\
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${restoreCommand}`;
const command = getRestoreCommand({
appName,
credentials: {
database: backupInput.databaseName,
databaseUser,
},
type: "postgres",
rcloneCommand,
restoreType: "database",
});
emit(`Executing command: ${command}`);
if (serverId) {
const { stdout, stderr } = await execAsyncRemote(serverId, command);
emit(stdout);
emit(stderr);
await execAsyncRemote(serverId, command);
} else {
const { stdout, stderr } = await execAsync(command);
emit(stdout);
emit(stderr);
await execAsync(command);
}
emit("Restore completed successfully!");

View File

@@ -1,33 +1,131 @@
import {
getComposeContainerCommand,
getServiceContainerCommand,
} from "../backups/utils";
/**
 * Builds the `pg_restore` invocation for a postgres container.
 *
 * Relies on the shell variable $CONTAINER_ID being set by the container
 * search command that `getRestoreCommand` prefixes; the decompressed dump
 * is streamed in over stdin (`docker exec -i`).
 *
 * NOTE(review): `databaseUser` and `database` are interpolated into a shell
 * command without escaping — values containing quotes would break or inject
 * into the command. Confirm upstream validation covers this.
 */
export const getPostgresRestoreCommand = (
  database: string,
  databaseUser: string,
) => {
  return `docker exec -i $CONTAINER_ID sh -c "pg_restore -U ${databaseUser} -d ${database} --clean --if-exists"`;
};
/**
 * Builds the `mariadb` client invocation used to replay a SQL dump piped in
 * over stdin. Expects $CONTAINER_ID to be set by the caller's container
 * search command (see `getRestoreCommand`).
 *
 * NOTE(review): credentials are interpolated unescaped into the shell
 * string; confirm they cannot contain quotes/metacharacters.
 */
export const getMariadbRestoreCommand = (
  database: string,
  databaseUser: string,
  databasePassword: string,
) => {
  return `docker exec -i $CONTAINER_ID sh -c "mariadb -u ${databaseUser} -p${databasePassword} ${database}"`;
};
/**
 * Builds the `mysql` client invocation (always as root) used to replay a
 * SQL dump piped in over stdin. Expects $CONTAINER_ID to be set by the
 * caller's container search command (see `getRestoreCommand`).
 *
 * NOTE(review): the root password is interpolated unescaped into the shell
 * string; confirm it cannot contain quotes/metacharacters.
 */
export const getMysqlRestoreCommand = (
  database: string,
  databasePassword: string,
) => {
  return `docker exec -i $CONTAINER_ID sh -c "mysql -u root -p${databasePassword} ${database}"`;
};
/**
 * Builds the `mongorestore` invocation reading an archive from stdin.
 * Expects $CONTAINER_ID to be set by the caller's container search command
 * (see `getRestoreCommand`); for mongo the archive is first staged on disk
 * by `getMongoSpecificCommand` and redirected in.
 *
 * NOTE(review): credentials are interpolated unescaped into the shell
 * string; confirm they cannot contain quotes/metacharacters.
 */
export const getMongoRestoreCommand = (
  database: string,
  databaseUser: string,
  databasePassword: string,
) => {
  return `docker exec -i $CONTAINER_ID sh -c "mongorestore --username ${databaseUser} --password ${databasePassword} --authenticationDatabase admin --db ${database} --archive"`;
};
/**
 * Returns the shell snippet that resolves the target container's id.
 * Plain database services are located by their swarm service name; compose
 * deployments additionally need the service name within the stack/project.
 */
export const getComposeSearchCommand = (
  appName: string,
  type: "stack" | "docker-compose" | "database",
  serviceName?: string,
) => {
  return type === "database"
    ? getServiceContainerCommand(appName || "")
    : getComposeContainerCommand(appName || "", serviceName || "", type);
};
// Connection identity used when generating a restore command. Which optional
// fields are required depends on the engine: postgres uses only the user,
// mysql only the (root) password, mariadb/mongo use both.
interface DatabaseCredentials {
  database: string;
  databaseUser?: string;
  databasePassword?: string;
}
/**
 * Dispatches to the engine-specific restore command builder, defaulting any
 * missing credential to the empty string.
 */
const generateRestoreCommand = (
  type: "postgres" | "mariadb" | "mysql" | "mongo",
  credentials: DatabaseCredentials,
) => {
  const db = credentials.database;
  const user = credentials.databaseUser || "";
  const password = credentials.databasePassword || "";
  if (type === "postgres") {
    return getPostgresRestoreCommand(db, user);
  }
  if (type === "mariadb") {
    return getMariadbRestoreCommand(db, user, password);
  }
  if (type === "mysql") {
    return getMysqlRestoreCommand(db, password);
  }
  return getMongoRestoreCommand(db, user, password);
};
// MongoDB restores cannot be streamed the way the SQL engines are: the
// archive is first copied to a staging directory, decompressed, fed to
// mongorestore via stdin redirection, and the staging dir is removed again.
const getMongoSpecificCommand = (
  rcloneCommand: string,
  restoreCommand: string,
  backupFile: string,
): string => {
  const stagingDir = "/tmp/dokploy-restore";
  const archiveName = backupFile.split("/").pop() || "backup.dump.gz";
  const dumpName = archiveName.replace(".gz", "");
  return `
rm -rf ${stagingDir} && \
mkdir -p ${stagingDir} && \
${rcloneCommand} ${stagingDir} && \
cd ${stagingDir} && \
gunzip -f "${archiveName}" && \
${restoreCommand} < "${dumpName}" && \
rm -rf ${stagingDir}
`;
};
// Options for building a full restore pipeline.
interface RestoreOptions {
  appName: string;
  // Database engine being restored.
  type: "postgres" | "mariadb" | "mysql" | "mongo";
  // How the target container is located: swarm stack, docker-compose
  // project, or a plain database service.
  restoreType: "stack" | "docker-compose" | "database";
  credentials: DatabaseCredentials;
  // Service within the compose project; unused for restoreType "database".
  serviceName?: string;
  // Shell snippet that fetches (and, for streaming, decompresses) the backup.
  rcloneCommand: string;
  // Backup file path; used by the mongo staging flow to derive file names.
  backupFile?: string;
}
/**
 * Builds the complete shell command for a restore: resolve the container id
 * into $CONTAINER_ID, then either stream the decompressed dump into the
 * engine client (postgres/mariadb/mysql) or run the staged mongo flow.
 */
export const getRestoreCommand = ({
  appName,
  type,
  restoreType,
  credentials,
  serviceName,
  rcloneCommand,
  backupFile,
}: RestoreOptions) => {
  const findContainer = getComposeSearchCommand(appName, restoreType, serviceName);
  const engineCommand = generateRestoreCommand(type, credentials);
  const pipeline =
    type === "mongo"
      ? getMongoSpecificCommand(rcloneCommand, engineCommand, backupFile || "")
      : `${rcloneCommand} | ${engineCommand}`;
  return `CONTAINER_ID=$(${findContainer}) && ${pipeline}`;
};