Mirror of https://github.com/Dokploy/dokploy (synced 2025-06-26 18:27:59 +00:00)
Update restore-backup component and backup router for web server support: set default database name based on type, disable input for web server, and streamline backup restoration process with improved logging and error handling.
parent ff3e067866
commit 297439a348
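In short: when the restore targets the Dokploy web server itself, the form now pre-fills the database name with "dokploy" and disables the field, and the router delegates the backup/restore work to dedicated helpers. A rough sketch of the form-side pattern, using only names that appear in the diff below (the surrounding react-hook-form wiring is assumed boilerplate, not part of this commit):

	// Sketch only: default and lock the database name for web-server restores.
	// "databaseType", "RestoreBackupSchema" and the Input props come from the diff below.
	const form = useForm({
		defaultValues: {
			destinationId: "",
			backupFile: "",
			databaseName: databaseType === "web-server" ? "dokploy" : "",
		},
		resolver: zodResolver(RestoreBackupSchema),
	});

	// In the field render, the input is read-only when restoring the web server:
	<Input
		disabled={databaseType === "web-server"}
		{...field}
		placeholder="Enter database name"
	/>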
@@ -91,7 +91,7 @@ export const RestoreBackup = ({
 		defaultValues: {
 			destinationId: "",
 			backupFile: "",
-			databaseName: "",
+			databaseName: databaseType === "web-server" ? "dokploy" : "",
 		},
 		resolver: zodResolver(RestoreBackupSchema),
 	});
@@ -340,7 +340,11 @@ export const RestoreBackup = ({
 					<FormItem className="">
 						<FormLabel>Database Name</FormLabel>
 						<FormControl>
-							<Input {...field} placeholder="Enter database name" />
+							<Input
+								disabled={databaseType === "web-server"}
+								{...field}
+								placeholder="Enter database name"
+							/>
 						</FormControl>
 						<FormMessage />
 					</FormItem>
@@ -19,13 +19,13 @@ import {
 	findPostgresByBackupId,
 	findPostgresById,
 	findServerById,
-	paths,
 	removeBackupById,
 	removeScheduleBackup,
 	runMariadbBackup,
 	runMongoBackup,
 	runMySqlBackup,
 	runPostgresBackup,
+	runWebServerBackup,
 	scheduleBackup,
 	updateBackupById,
 } from "@dokploy/server";
@@ -41,6 +41,7 @@ import {
 	restoreMongoBackup,
 	restoreMySqlBackup,
 	restorePostgresBackup,
+	restoreWebServerBackup,
 } from "@dokploy/server/utils/restore";
 import { TRPCError } from "@trpc/server";
 import { observable } from "@trpc/server/observable";
@@ -235,50 +236,9 @@ export const backupRouter = createTRPCRouter({
 	manualBackupWebServer: protectedProcedure
 		.input(apiFindOneBackup)
 		.mutation(async ({ input }) => {
-			try {
-				const backup = await findBackupById(input.backupId);
-				const destination = await findDestinationById(backup.destinationId);
-				const rcloneFlags = getS3Credentials(destination);
-				const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
-				const { BASE_PATH } = paths();
-				const tempDir = `${BASE_PATH}/temp-backup-${timestamp}`;
-				const backupFileName = `webserver-backup-${timestamp}.zip`;
-				const s3Path = `:s3:${destination.bucket}/${backup.prefix}${backupFileName}`;
-
-				try {
-					await execAsync(`mkdir -p ${tempDir}/filesystem`);
-
-					const postgresCommand = `docker exec $(docker ps --filter "name=dokploy-postgres" -q) pg_dump -v -Fc -U dokploy -d dokploy > ${tempDir}/database.sql`;
-					await execAsync(postgresCommand);
-
-					await execAsync(`cp -r ${BASE_PATH}/* ${tempDir}/filesystem/`);
-
-					await execAsync(
-						`cd ${tempDir} && zip -r ${backupFileName} database.sql filesystem/`,
-					);
-
-					// // Show zip contents and size
-					// console.log("Zip file contents:");
-					// await execAsync(`unzip -l ${tempDir}/${backupFileName}`);
-					// await execAsync(`du -sh ${tempDir}/${backupFileName}`);
-
-					// Upload to S3
-					const uploadCommand = `rclone copyto ${rcloneFlags.join(" ")} "${tempDir}/${backupFileName}" "${s3Path}"`;
-					await execAsync(uploadCommand);
-					return true;
-				} finally {
-					// Cleanup temporary files
-					console.log("Cleaning up temporary files...");
-					await execAsync(`rm -rf ${tempDir}`);
-				}
-			} catch (error) {
-				console.error("Backup error:", error);
-				throw new TRPCError({
-					code: "BAD_REQUEST",
-					message: "Error running manual Web Server backup",
-					cause: error,
-				});
-			}
+			const backup = await findBackupById(input.backupId);
+			await runWebServerBackup(backup);
+			return true;
 		}),
 	listBackupFiles: protectedProcedure
 		.input(
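Net effect of the hunk above: the mutation no longer shells out inline; the pg_dump, filesystem copy, zip and rclone upload now live behind runWebServerBackup, imported from @dokploy/server in an earlier hunk. Going by the removed lines alone, that helper plausibly wraps a flow like the sketch below; this is a reconstruction from the deleted code, not the actual runWebServerBackup source:

	// Reconstruction of the removed inline flow (paths and names taken from the
	// deleted lines above; the shipped helper may differ).
	const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
	const tempDir = `${BASE_PATH}/temp-backup-${timestamp}`;
	const backupFileName = `webserver-backup-${timestamp}.zip`;

	await execAsync(`mkdir -p ${tempDir}/filesystem`);
	// 1. Dump the internal "dokploy" Postgres database from the dokploy-postgres container.
	await execAsync(
		`docker exec $(docker ps --filter "name=dokploy-postgres" -q) pg_dump -v -Fc -U dokploy -d dokploy > ${tempDir}/database.sql`,
	);
	// 2. Snapshot the Dokploy filesystem and bundle both into a single zip archive.
	await execAsync(`cp -r ${BASE_PATH}/* ${tempDir}/filesystem/`);
	await execAsync(`cd ${tempDir} && zip -r ${backupFileName} database.sql filesystem/`);
	// 3. Upload the archive to the configured S3 destination, then remove the temp dir.
	await execAsync(
		`rclone copyto ${rcloneFlags.join(" ")} "${tempDir}/${backupFileName}" "${s3Path}"`,
	);
	await execAsync(`rm -rf ${tempDir}`);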
@@ -424,16 +384,12 @@ export const backupRouter = createTRPCRouter({
 					},
 				);
 			});
-		} else if (input.databaseType === "web-server") {
+		}
+		if (input.databaseType === "web-server") {
 			return observable<string>((emit) => {
-				restoreWebServerBackup(
-					webServer,
-					destination,
-					input.backupFile,
-					(log) => {
-						emit.next(log);
-					},
-				);
+				restoreWebServerBackup(destination, input.backupFile, (log) => {
+					emit.next(log);
+				});
 			});
 		}

@@ -48,6 +48,7 @@ export * from "./utils/backups/mongo";
 export * from "./utils/backups/mysql";
 export * from "./utils/backups/postgres";
 export * from "./utils/backups/utils";
+export * from "./utils/backups/web-server";
 export * from "./templates/processors";

 export * from "./utils/notifications/build-error";
@@ -2,3 +2,4 @@ export { restorePostgresBackup } from "./postgres";
 export { restoreMySqlBackup } from "./mysql";
 export { restoreMariadbBackup } from "./mariadb";
 export { restoreMongoBackup } from "./mongo";
+export { restoreWebServerBackup } from "./web-server";
@@ -1,10 +1,7 @@
 import type { Destination } from "@dokploy/server/services/destination";
 import { getS3Credentials } from "../backups/utils";
-import {
-	getRemoteServiceContainer,
-	getServiceContainer,
-} from "../docker/utils";
-import { execAsync, execAsyncRemote } from "../process/execAsync";
+import { execAsync } from "../process/execAsync";
+import { paths } from "@dokploy/server";

 export const restoreWebServerBackup = async (
 	destination: Destination,
@@ -12,46 +9,117 @@ export const restoreWebServerBackup = async (
 	emit: (log: string) => void,
 ) => {
 	try {
-		const { appName, databaseUser, serverId } = postgres;
-
 		const rcloneFlags = getS3Credentials(destination);
 		const bucketPath = `:s3:${destination.bucket}`;

 		const backupPath = `${bucketPath}/${backupFile}`;
+		const { BASE_PATH } = paths();
+		const tempDir = `${BASE_PATH}/temp-restore-${new Date().toISOString().replace(/[:.]/g, "-")}`;

-		const { Id: containerName } = serverId
-			? await getRemoteServiceContainer(serverId, appName)
-			: await getServiceContainer(appName);
-		emit("Starting restore...");
-		emit(`Backup path: ${backupPath}`);
+		try {
+			emit("Starting restore...");
+			emit(`Backup path: ${backupPath}`);
+			emit(`Temp directory: ${tempDir}`);

-		const command = `\
-rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | docker exec -i ${containerName} pg_restore -U ${databaseUser} -d ${database} --clean --if-exists`;
+			// Create temp directory
+			emit("Creating temporary directory...");
+			await execAsync(`mkdir -p ${tempDir}`);

-		emit(`Executing command: ${command}`);
+			// Download backup from S3
+			emit("Downloading backup from S3...");
+			await execAsync(
+				`rclone copyto ${rcloneFlags.join(" ")} "${backupPath}" "${tempDir}/${backupFile}"`,
+			);

-		if (serverId) {
-			const { stdout, stderr } = await execAsyncRemote(serverId, command);
-			emit(stdout);
-			emit(stderr);
-		} else {
-			const { stdout, stderr } = await execAsync(command);
-			console.log("stdout", stdout);
-			console.log("stderr", stderr);
-			emit(stdout);
-			emit(stderr);
+			// List files before extraction
+			emit("Listing files before extraction...");
+			const { stdout: beforeFiles } = await execAsync(`ls -la ${tempDir}`);
+			emit(`Files before extraction: ${beforeFiles}`);
+
+			// Extract backup
+			emit("Extracting backup...");
+			await execAsync(`cd ${tempDir} && unzip ${backupFile}`);
+
+			// Check if database.sql.gz exists and decompress it
+			const { stdout: hasGzFile } = await execAsync(
+				`ls ${tempDir}/database.sql.gz || true`,
+			);
+			if (hasGzFile.includes("database.sql.gz")) {
+				emit("Found compressed database file, decompressing...");
+				await execAsync(`cd ${tempDir} && gunzip database.sql.gz`);
+			}
+
+			// Verify database file exists
+			const { stdout: hasSqlFile } = await execAsync(
+				`ls ${tempDir}/database.sql || true`,
+			);
+			if (!hasSqlFile.includes("database.sql")) {
+				throw new Error("Database file not found after extraction");
+			}
+
+			// Restore database
+			emit("Restoring database...");
+
+			// Drop and recreate database
+			emit("Disconnecting all users from database...");
+			await execAsync(
+				`docker exec $(docker ps --filter "name=dokploy-postgres" -q) psql -U dokploy postgres -c "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = 'dokploy' AND pid <> pg_backend_pid();"`,
+			);
+
+			emit("Dropping existing database...");
+			await execAsync(
+				`docker exec $(docker ps --filter "name=dokploy-postgres" -q) psql -U dokploy postgres -c "DROP DATABASE IF EXISTS dokploy;"`,
+			);
+
+			emit("Creating fresh database...");
+			await execAsync(
+				`docker exec $(docker ps --filter "name=dokploy-postgres" -q) psql -U dokploy postgres -c "CREATE DATABASE dokploy;"`,
+			);
+
+			// Copy the backup file into the container
+			emit("Copying backup file into container...");
+			await execAsync(
+				`docker cp ${tempDir}/database.sql $(docker ps --filter "name=dokploy-postgres" -q):/tmp/database.sql`,
+			);
+
+			// Verify file in container
+			emit("Verifying file in container...");
+			await execAsync(
+				`docker exec $(docker ps --filter "name=dokploy-postgres" -q) ls -l /tmp/database.sql`,
+			);
+
+			// Restore from the copied file
+			emit("Running database restore...");
+			await execAsync(
+				`docker exec $(docker ps --filter "name=dokploy-postgres" -q) pg_restore -v -U dokploy -d dokploy /tmp/database.sql`,
+			);
+
+			// Cleanup the temporary file in the container
+			emit("Cleaning up container temp file...");
+			await execAsync(
+				`docker exec $(docker ps --filter "name=dokploy-postgres" -q) rm /tmp/database.sql`,
+			);
+
+			// Restore filesystem
+			emit("Restoring filesystem...");
+			await execAsync(`cp -r ${tempDir}/filesystem/* ${BASE_PATH}/`);
+
+			emit("Restore completed successfully!");
+		} finally {
+			// Cleanup
+			emit("Cleaning up temporary files...");
+			await execAsync(`rm -rf ${tempDir}`);
 		}
-
-		emit("Restore completed successfully!");
 	} catch (error) {
 		console.error(error);
 		emit(
 			`Error: ${
 				error instanceof Error
 					? error.message
-					: "Error restoring postgres backup"
+					: "Error restoring web server backup"
 			}`,
 		);
 		throw error;
 	}
 };
+
+// docker exec $(docker ps --filter "name=dokploy-postgres" -q) pg_restore -v -U dokploy -d dokploy /Users/mauricio/Documents/Github/Personal/dokploy/apps/dokploy/.docker/temp-restore-2025-03-30T01-09-27-203Z/database.sql
+// server/webserver-backup-2025-03-30T00-38-08-836Z.zip
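With this change restoreWebServerBackup no longer needs a postgres service record or a container lookup; it takes only the destination, the backup file key and a log callback, and always operates on the local dokploy-postgres container and BASE_PATH. A minimal, hypothetical invocation (the file name is taken from the stray comment above; in the app this is driven from the backup router's observable):

	// Hypothetical direct call; real callers stream these logs to the UI via tRPC.
	await restoreWebServerBackup(
		destination,                                      // S3 destination record
		"webserver-backup-2025-03-30T00-38-08-836Z.zip",  // object key inside the bucket
		(log) => console.log(log),                        // progress lines emitted by the helper
	);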