Enhance backup and deployment features

- Updated the RestoreBackupSchema to require serviceName for compose backups, improving validation and user feedback.
- Refactored the ShowBackups component to include deployment information, enhancing the user interface and experience.
- Introduced new SQL migration files to add backupId to the deployment table and appName to the backup table, improving data relationships and integrity.
- Enhanced deployment creation logic to support backup deployments, ensuring better tracking and management of backup processes.
- Improved backup and restore utility functions to streamline command execution and error handling during backup operations.
This commit is contained in:
Mauricio Siu 2025-05-03 12:39:52 -06:00
parent 50aeeb2fb8
commit e437903ef8
23 changed files with 11785 additions and 92 deletions

View File

@ -103,7 +103,6 @@ const RestoreBackupSchema = z
.enum(["postgres", "mariadb", "mysql", "mongo", "web-server"]) .enum(["postgres", "mariadb", "mysql", "mongo", "web-server"])
.optional(), .optional(),
backupType: z.enum(["database", "compose"]).default("database"), backupType: z.enum(["database", "compose"]).default("database"),
serviceName: z.string().nullable().optional(),
metadata: z metadata: z
.object({ .object({
postgres: z postgres: z
@ -128,6 +127,7 @@ const RestoreBackupSchema = z
databaseRootPassword: z.string(), databaseRootPassword: z.string(),
}) })
.optional(), .optional(),
serviceName: z.string().optional(),
}) })
.optional(), .optional(),
}) })
@ -139,6 +139,17 @@ const RestoreBackupSchema = z
path: ["databaseType"], path: ["databaseType"],
}); });
} }
console.log(data.backupType, { metadata: data.metadata });
if (data.backupType === "compose" && !data.metadata?.serviceName) {
ctx.addIssue({
code: z.ZodIssueCode.custom,
message: "Service name is required for compose backups",
path: ["metadata", "serviceName"],
});
}
if (data.backupType === "compose" && data.databaseType) { if (data.backupType === "compose" && data.databaseType) {
if (data.databaseType === "postgres") { if (data.databaseType === "postgres") {
if (!data.metadata?.postgres?.databaseUser) { if (!data.metadata?.postgres?.databaseUser) {
@ -217,6 +228,7 @@ export const RestoreBackup = ({
databaseName: databaseType === "web-server" ? "dokploy" : "", databaseName: databaseType === "web-server" ? "dokploy" : "",
databaseType: databaseType:
backupType === "compose" ? ("postgres" as DatabaseType) : databaseType, backupType === "compose" ? ("postgres" as DatabaseType) : databaseType,
backupType: backupType,
metadata: {}, metadata: {},
}, },
resolver: zodResolver(RestoreBackupSchema), resolver: zodResolver(RestoreBackupSchema),
@ -559,7 +571,7 @@ export const RestoreBackup = ({
<FormField <FormField
control={form.control} control={form.control}
name="serviceName" name="metadata.serviceName"
render={({ field }) => ( render={({ field }) => (
<FormItem className="w-full"> <FormItem className="w-full">
<FormLabel>Service Name</FormLabel> <FormLabel>Service Name</FormLabel>

View File

@ -14,7 +14,13 @@ import {
TooltipTrigger, TooltipTrigger,
} from "@/components/ui/tooltip"; } from "@/components/ui/tooltip";
import { api } from "@/utils/api"; import { api } from "@/utils/api";
import { Database, DatabaseBackup, Play, Trash2 } from "lucide-react"; import {
ClipboardList,
Database,
DatabaseBackup,
Play,
Trash2,
} from "lucide-react";
import Link from "next/link"; import Link from "next/link";
import { useState } from "react"; import { useState } from "react";
import { toast } from "sonner"; import { toast } from "sonner";
@ -29,6 +35,7 @@ import {
PostgresqlIcon, PostgresqlIcon,
} from "@/components/icons/data-tools-icons"; } from "@/components/icons/data-tools-icons";
import { AlertBlock } from "@/components/shared/alert-block"; import { AlertBlock } from "@/components/shared/alert-block";
import { ShowSchedulesLogs } from "../../application/schedules/show-schedules-logs";
interface Props { interface Props {
id: string; id: string;
@ -156,6 +163,7 @@ export const ShowBackups = ({
<RestoreBackup <RestoreBackup
id={id} id={id}
databaseType={databaseType} databaseType={databaseType}
backupType={backupType}
serverId={ serverId={
"serverId" in postgres ? postgres.serverId : undefined "serverId" in postgres ? postgres.serverId : undefined
} }
@ -267,6 +275,15 @@ export const ShowBackups = ({
</div> </div>
<div className="flex flex-row md:flex-col gap-1.5"> <div className="flex flex-row md:flex-col gap-1.5">
<ShowSchedulesLogs deployments={backup.deployments}>
<Button
variant="ghost"
size="icon"
className="size-8"
>
<ClipboardList className="size-4 transition-colors " />
</Button>
</ShowSchedulesLogs>
<TooltipProvider delayDuration={0}> <TooltipProvider delayDuration={0}>
<Tooltip> <Tooltip>
<TooltipTrigger asChild> <TooltipTrigger asChild>

View File

@ -0,0 +1,2 @@
-- Link deployment-history rows to the backup run that produced them.
-- Column is nullable: existing deployments (app/compose/schedule) have no backup.
ALTER TABLE "deployment" ADD COLUMN "backupId" text;--> statement-breakpoint
-- ON DELETE cascade: deleting a backup also removes its deployment/log history rows.
ALTER TABLE "deployment" ADD CONSTRAINT "deployment_backupId_backup_backupId_fk" FOREIGN KEY ("backupId") REFERENCES "public"."backup"("backupId") ON DELETE cascade ON UPDATE no action;

View File

@ -0,0 +1,2 @@
-- Add a unique appName to each backup (used for log directories and deployment
-- tracking). The column is added nullable first and backfilled so this migration
-- also succeeds on databases that already contain backup rows; a plain
-- ADD COLUMN ... NOT NULL with no default would fail there.
ALTER TABLE "backup" ADD COLUMN "appName" text;--> statement-breakpoint
-- Backfill existing rows with a deterministic, unique value derived from the PK.
UPDATE "backup" SET "appName" = 'backup-' || "backupId" WHERE "appName" IS NULL;--> statement-breakpoint
ALTER TABLE "backup" ALTER COLUMN "appName" SET NOT NULL;--> statement-breakpoint
ALTER TABLE "backup" ADD CONSTRAINT "backup_appName_unique" UNIQUE("appName");

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -631,6 +631,20 @@
"when": 1746287354535, "when": 1746287354535,
"tag": "0089_eminent_winter_soldier", "tag": "0089_eminent_winter_soldier",
"breakpoints": true "breakpoints": true
},
{
"idx": 90,
"version": "7",
"when": 1746287994297,
"tag": "0090_lame_gressill",
"breakpoints": true
},
{
"idx": 91,
"version": "7",
"when": 1746289884571,
"tag": "0091_colossal_lifeguard",
"breakpoints": true
} }
] ]
} }

View File

@ -103,6 +103,7 @@ export const userRouter = createTRPCRouter({
backups: { backups: {
with: { with: {
destination: true, destination: true,
deployments: true,
}, },
}, },
apiKeys: true, apiKeys: true,

View File

@ -18,7 +18,8 @@ import { mysql } from "./mysql";
import { postgres } from "./postgres"; import { postgres } from "./postgres";
import { users_temp } from "./user"; import { users_temp } from "./user";
import { compose } from "./compose"; import { compose } from "./compose";
import { deployments } from "./deployment";
import { generateAppName } from ".";
export const databaseType = pgEnum("databaseType", [ export const databaseType = pgEnum("databaseType", [
"postgres", "postgres",
"mariadb", "mariadb",
@ -34,6 +35,10 @@ export const backups = pgTable("backup", {
.notNull() .notNull()
.primaryKey() .primaryKey()
.$defaultFn(() => nanoid()), .$defaultFn(() => nanoid()),
appName: text("appName")
.notNull()
.$defaultFn(() => generateAppName("backup"))
.unique(),
schedule: text("schedule").notNull(), schedule: text("schedule").notNull(),
enabled: boolean("enabled"), enabled: boolean("enabled"),
database: text("database").notNull(), database: text("database").notNull(),
@ -92,7 +97,7 @@ export const backups = pgTable("backup", {
>(), >(),
}); });
export const backupsRelations = relations(backups, ({ one }) => ({ export const backupsRelations = relations(backups, ({ one, many }) => ({
destination: one(destinations, { destination: one(destinations, {
fields: [backups.destinationId], fields: [backups.destinationId],
references: [destinations.destinationId], references: [destinations.destinationId],
@ -121,6 +126,7 @@ export const backupsRelations = relations(backups, ({ one }) => ({
fields: [backups.composeId], fields: [backups.composeId],
references: [compose.composeId], references: [compose.composeId],
}), }),
deployments: many(deployments),
})); }));
const createSchema = createInsertSchema(backups, { const createSchema = createInsertSchema(backups, {

View File

@ -14,6 +14,7 @@ import { compose } from "./compose";
import { previewDeployments } from "./preview-deployments"; import { previewDeployments } from "./preview-deployments";
import { server } from "./server"; import { server } from "./server";
import { schedules } from "./schedule"; import { schedules } from "./schedule";
import { backups } from "./backups";
export const deploymentStatus = pgEnum("deploymentStatus", [ export const deploymentStatus = pgEnum("deploymentStatus", [
"running", "running",
"done", "done",
@ -54,6 +55,9 @@ export const deployments = pgTable("deployment", {
(): AnyPgColumn => schedules.scheduleId, (): AnyPgColumn => schedules.scheduleId,
{ onDelete: "cascade" }, { onDelete: "cascade" },
), ),
backupId: text("backupId").references((): AnyPgColumn => backups.backupId, {
onDelete: "cascade",
}),
}); });
export const deploymentsRelations = relations(deployments, ({ one }) => ({ export const deploymentsRelations = relations(deployments, ({ one }) => ({
@ -77,6 +81,10 @@ export const deploymentsRelations = relations(deployments, ({ one }) => ({
fields: [deployments.scheduleId], fields: [deployments.scheduleId],
references: [schedules.scheduleId], references: [schedules.scheduleId],
}), }),
backup: one(backups, {
fields: [deployments.backupId],
references: [backups.backupId],
}),
})); }));
const schema = createInsertSchema(deployments, { const schema = createInsertSchema(deployments, {
@ -126,6 +134,18 @@ export const apiCreateDeploymentCompose = schema
composeId: z.string().min(1), composeId: z.string().min(1),
}); });
// Input schema for creating a deployment record that tracks a backup run.
// Mirrors the sibling apiCreateDeploymentCompose/-Schedule schemas: pick the
// shared deployment fields, then tighten backupId to a required non-empty
// string (the .extend overrides the picked, nullable column type).
export const apiCreateDeploymentBackup = schema
	.pick({
		title: true,
		status: true,
		logPath: true,
		backupId: true,
		description: true,
	})
	.extend({
		backupId: z.string().min(1),
	});
export const apiCreateDeploymentServer = schema export const apiCreateDeploymentServer = schema
.pick({ .pick({
title: true, title: true,

View File

@ -134,6 +134,7 @@ export const findComposeById = async (composeId: string) => {
backups: { backups: {
with: { with: {
destination: true, destination: true,
deployments: true,
}, },
}, },
}, },

View File

@ -4,6 +4,7 @@ import { paths } from "@dokploy/server/constants";
import { db } from "@dokploy/server/db"; import { db } from "@dokploy/server/db";
import { import {
type apiCreateDeployment, type apiCreateDeployment,
type apiCreateDeploymentBackup,
type apiCreateDeploymentCompose, type apiCreateDeploymentCompose,
type apiCreateDeploymentPreview, type apiCreateDeploymentPreview,
type apiCreateDeploymentSchedule, type apiCreateDeploymentSchedule,
@ -29,6 +30,7 @@ import {
updatePreviewDeployment, updatePreviewDeployment,
} from "./preview-deployment"; } from "./preview-deployment";
import { findScheduleById } from "./schedule"; import { findScheduleById } from "./schedule";
import { findBackupById } from "./backup";
export type Deployment = typeof deployments.$inferSelect; export type Deployment = typeof deployments.$inferSelect;
@ -284,6 +286,86 @@ echo "Initializing deployment" >> ${logFilePath};
} }
}; };
/**
 * Creates a deployment record that tracks a single backup run and prepares
 * the log file the run will append to.
 *
 * Flow: resolve the server hosting the backed-up service, prune old
 * deployment rows for this backup, create the log directory/file (locally
 * or over SSH on the remote server), then insert a "running" deployment row
 * pointing at that log. On any failure an "error" deployment row is
 * inserted instead and a TRPCError is thrown.
 */
export const createDeploymentBackup = async (
	deployment: Omit<
		typeof apiCreateDeploymentBackup._type,
		"deploymentId" | "createdAt" | "status" | "logPath"
	>,
) => {
	const backup = await findBackupById(deployment.backupId);

	// Determine which remote server (if any) hosts the service being backed
	// up so the log file is created on the right machine. Stays undefined
	// for services running on the Dokploy host itself.
	let serverId: string | null | undefined;
	if (backup.backupType === "database") {
		serverId =
			backup.postgres?.serverId ||
			backup.mariadb?.serverId ||
			backup.mysql?.serverId ||
			backup.mongo?.serverId;
	} else if (backup.backupType === "compose") {
		serverId = backup.compose?.serverId;
	}
	try {
		// Cap history for this backup (helper prunes older deployment rows/logs).
		await removeLastTenDeployments(deployment.backupId, "backup", serverId);
		// paths(!!serverId) — presumably selects remote vs. local path set
		// when a serverId exists; confirm against constants.
		const { LOGS_PATH } = paths(!!serverId);
		const formattedDateTime = format(new Date(), "yyyy-MM-dd:HH:mm:ss");
		const fileName = `${backup.appName}-${formattedDateTime}.log`;
		const logFilePath = path.join(LOGS_PATH, backup.appName, fileName);
		if (serverId) {
			const server = await findServerById(serverId);
			// Create the log directory and seed the log file on the remote
			// host over SSH.
			const command = `
mkdir -p ${LOGS_PATH}/${backup.appName};
echo "Initializing backup" >> ${logFilePath};
`;
			await execAsyncRemote(server.serverId, command);
		} else {
			await fsPromises.mkdir(path.join(LOGS_PATH, backup.appName), {
				recursive: true,
			});
			await fsPromises.writeFile(logFilePath, "Initializing backup");
		}
		// Insert the "running" deployment row; .returning() yields the
		// created record (empty array means the insert silently failed).
		const deploymentCreate = await db
			.insert(deployments)
			.values({
				backupId: deployment.backupId,
				title: deployment.title || "Backup",
				description: deployment.description || "",
				status: "running",
				logPath: logFilePath,
				startedAt: new Date().toISOString(),
			})
			.returning();
		if (deploymentCreate.length === 0 || !deploymentCreate[0]) {
			throw new TRPCError({
				code: "BAD_REQUEST",
				message: "Error creating the backup",
			});
		}
		return deploymentCreate[0];
	} catch (error) {
		// Record the failure as an "error" deployment so it still shows up in
		// the UI history, then surface the failure to the caller.
		// NOTE(review): "An error have occured" is a typo in the stored
		// message; left untouched here (doc-only change).
		await db
			.insert(deployments)
			.values({
				backupId: deployment.backupId,
				title: deployment.title || "Backup",
				status: "error",
				logPath: "",
				description: deployment.description || "",
				errorMessage: `An error have occured: ${error instanceof Error ? error.message : error}`,
				startedAt: new Date().toISOString(),
				finishedAt: new Date().toISOString(),
			})
			.returning();
		throw new TRPCError({
			code: "BAD_REQUEST",
			message: "Error creating the backup",
		});
	}
};
export const createDeploymentSchedule = async ( export const createDeploymentSchedule = async (
deployment: Omit< deployment: Omit<
typeof apiCreateDeploymentSchedule._type, typeof apiCreateDeploymentSchedule._type,
@ -388,7 +470,13 @@ export const removeDeploymentsByApplicationId = async (
const getDeploymentsByType = async ( const getDeploymentsByType = async (
id: string, id: string,
type: "application" | "compose" | "server" | "schedule" | "previewDeployment", type:
| "application"
| "compose"
| "server"
| "schedule"
| "previewDeployment"
| "backup",
) => { ) => {
const deploymentList = await db.query.deployments.findMany({ const deploymentList = await db.query.deployments.findMany({
where: eq(deployments[`${type}Id`], id), where: eq(deployments[`${type}Id`], id),
@ -411,7 +499,13 @@ export const removeDeployments = async (application: Application) => {
const removeLastTenDeployments = async ( const removeLastTenDeployments = async (
id: string, id: string,
type: "application" | "compose" | "server" | "schedule" | "previewDeployment", type:
| "application"
| "compose"
| "server"
| "schedule"
| "previewDeployment"
| "backup",
serverId?: string | null, serverId?: string | null,
) => { ) => {
const deploymentList = await getDeploymentsByType(id, type); const deploymentList = await getDeploymentsByType(id, type);

View File

@ -63,6 +63,7 @@ export const findMariadbById = async (mariadbId: string) => {
backups: { backups: {
with: { with: {
destination: true, destination: true,
deployments: true,
}, },
}, },
}, },

View File

@ -60,6 +60,7 @@ export const findMongoById = async (mongoId: string) => {
backups: { backups: {
with: { with: {
destination: true, destination: true,
deployments: true,
}, },
}, },
}, },

View File

@ -59,6 +59,7 @@ export const findMySqlById = async (mysqlId: string) => {
backups: { backups: {
with: { with: {
destination: true, destination: true,
deployments: true,
}, },
}, },
}, },

View File

@ -58,6 +58,7 @@ export const findPostgresById = async (postgresId: string) => {
backups: { backups: {
with: { with: {
destination: true, destination: true,
deployments: true,
}, },
}, },
}, },

View File

@ -40,6 +40,8 @@ export const getServiceImageDigest = async () => {
"docker service inspect dokploy --format '{{.Spec.TaskTemplate.ContainerSpec.Image}}'", "docker service inspect dokploy --format '{{.Spec.TaskTemplate.ContainerSpec.Image}}'",
); );
console.log("stdout", stdout);
const currentDigest = stdout.trim().split("@")[1]; const currentDigest = stdout.trim().split("@")[1];
if (!currentDigest) { if (!currentDigest) {

View File

@ -2,8 +2,21 @@ import type { BackupSchedule } from "@dokploy/server/services/backup";
import type { Compose } from "@dokploy/server/services/compose"; import type { Compose } from "@dokploy/server/services/compose";
import { findProjectById } from "@dokploy/server/services/project"; import { findProjectById } from "@dokploy/server/services/project";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup"; import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsync, execAsyncRemote } from "../process/execAsync"; import { execAsyncRemote, execAsyncStream } from "../process/execAsync";
import { getS3Credentials, normalizeS3Path } from "./utils"; import {
getMariadbBackupCommand,
getMysqlBackupCommand,
getMongoBackupCommand,
getPostgresBackupCommand,
getS3Credentials,
normalizeS3Path,
} from "./utils";
import {
createDeploymentBackup,
updateDeploymentStatus,
} from "@dokploy/server/services/deployment";
import { createWriteStream } from "node:fs";
import { getComposeContainer } from "../docker/utils";
export const runComposeBackup = async ( export const runComposeBackup = async (
compose: Compose, compose: Compose,
@ -15,56 +28,81 @@ export const runComposeBackup = async (
const destination = backup.destination; const destination = backup.destination;
const backupFileName = `${new Date().toISOString()}.dump.gz`; const backupFileName = `${new Date().toISOString()}.dump.gz`;
const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`; const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
const deployment = await createDeploymentBackup({
backupId: backup.backupId,
title: "Compose Backup",
description: "Compose Backup",
});
try { try {
const rcloneFlags = getS3Credentials(destination); const rcloneFlags = getS3Credentials(destination);
const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`; const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
const { Id: containerId } = await getComposeContainer(
compose,
backup.serviceName || "",
);
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`; const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
const command = getFindContainerCommand(compose, backup.serviceName || ""); let backupCommand = "";
if (backup.databaseType === "postgres") {
backupCommand = getPostgresBackupCommand(
containerId,
database,
backup.metadata?.postgres?.databaseUser || "",
);
} else if (backup.databaseType === "mariadb") {
backupCommand = getMariadbBackupCommand(
containerId,
database,
backup.metadata?.mariadb?.databaseUser || "",
backup.metadata?.mariadb?.databasePassword || "",
);
} else if (backup.databaseType === "mysql") {
backupCommand = getMysqlBackupCommand(
containerId,
database,
backup.metadata?.mysql?.databaseRootPassword || "",
);
} else if (backup.databaseType === "mongo") {
backupCommand = getMongoBackupCommand(
containerId,
database,
backup.metadata?.mongo?.databaseUser || "",
backup.metadata?.mongo?.databasePassword || "",
);
}
if (compose.serverId) { if (compose.serverId) {
const { stdout } = await execAsyncRemote(compose.serverId, command);
if (!stdout) {
throw new Error("Container not found");
}
const containerId = stdout.trim();
let backupCommand = "";
if (backup.databaseType === "postgres") {
backupCommand = `docker exec ${containerId} sh -c "pg_dump -Fc --no-acl --no-owner -h localhost -U ${backup.metadata?.postgres?.databaseUser} --no-password '${database}' | gzip"`;
} else if (backup.databaseType === "mariadb") {
backupCommand = `docker exec ${containerId} sh -c "mariadb-dump --user='${backup.metadata?.mariadb?.databaseUser}' --password='${backup.metadata?.mariadb?.databasePassword}' --databases ${database} | gzip"`;
} else if (backup.databaseType === "mysql") {
backupCommand = `docker exec ${containerId} sh -c "mysqldump --default-character-set=utf8mb4 -u 'root' --password='${backup.metadata?.mysql?.databaseRootPassword}' --single-transaction --no-tablespaces --quick '${database}' | gzip"`;
} else if (backup.databaseType === "mongo") {
backupCommand = `docker exec ${containerId} sh -c "mongodump -d '${database}' -u '${backup.metadata?.mongo?.databaseUser}' -p '${backup.metadata?.mongo?.databasePassword}' --archive --authenticationDatabase admin --gzip"`;
}
await execAsyncRemote( await execAsyncRemote(
compose.serverId, compose.serverId,
`${backupCommand} | ${rcloneCommand}`, `
set -e;
echo "Running command." >> ${deployment.logPath};
${backupCommand} | ${rcloneCommand} >> ${deployment.logPath} 2>> ${deployment.logPath} || {
echo "❌ Command failed" >> ${deployment.logPath};
exit 1;
}
echo "✅ Command executed successfully" >> ${deployment.logPath};
`,
); );
} else { } else {
const { stdout } = await execAsync(command); const writeStream = createWriteStream(deployment.logPath, { flags: "a" });
if (!stdout) { await execAsyncStream(
throw new Error("Container not found"); `${backupCommand} | ${rcloneCommand}`,
} (data) => {
const containerId = stdout.trim(); if (writeStream.write(data)) {
console.log(data);
let backupCommand = ""; }
},
if (backup.databaseType === "postgres") { {
backupCommand = `docker exec ${containerId} sh -c "pg_dump -Fc --no-acl --no-owner -h localhost -U ${backup.metadata?.postgres?.databaseUser} --no-password '${database}' | gzip"`; env: {
} else if (backup.databaseType === "mariadb") { ...process.env,
backupCommand = `docker exec ${containerId} sh -c "mariadb-dump --user='${backup.metadata?.mariadb?.databaseUser}' --password='${backup.metadata?.mariadb?.databasePassword}' --databases ${database} | gzip"`; RCLONE_LOG_LEVEL: "DEBUG",
} else if (backup.databaseType === "mysql") { },
backupCommand = `docker exec ${containerId} sh -c "mysqldump --default-character-set=utf8mb4 -u 'root' --password='${backup.metadata?.mysql?.databaseRootPassword}' --single-transaction --no-tablespaces --quick '${database}' | gzip"`; },
} else if (backup.databaseType === "mongo") { );
backupCommand = `docker exec ${containerId} sh -c "mongodump -d '${database}' -u '${backup.metadata?.mongo?.databaseUser}' -p '${backup.metadata?.mongo?.databasePassword}' --archive --authenticationDatabase admin --gzip"`; writeStream.write("Backup done✅");
} writeStream.end();
await execAsync(`${backupCommand} | ${rcloneCommand}`);
} }
await sendDatabaseBackupNotifications({ await sendDatabaseBackupNotifications({
@ -74,6 +112,8 @@ export const runComposeBackup = async (
type: "success", type: "success",
organizationId: project.organizationId, organizationId: project.organizationId,
}); });
await updateDeploymentStatus(deployment.deploymentId, "done");
} catch (error) { } catch (error) {
console.log(error); console.log(error);
await sendDatabaseBackupNotifications({ await sendDatabaseBackupNotifications({
@ -85,29 +125,8 @@ export const runComposeBackup = async (
errorMessage: error?.message || "Error message not provided", errorMessage: error?.message || "Error message not provided",
organizationId: project.organizationId, organizationId: project.organizationId,
}); });
await updateDeploymentStatus(deployment.deploymentId, "error");
throw error; throw error;
} }
}; };
export const getFindContainerCommand = (
compose: Compose,
serviceName: string,
) => {
const { appName, composeType } = compose;
const labels =
composeType === "stack"
? {
namespace: `label=com.docker.stack.namespace=${appName}`,
service: `label=com.docker.swarm.service.name=${appName}_${serviceName}`,
}
: {
project: `label=com.docker.compose.project=${appName}`,
service: `label=com.docker.compose.service=${serviceName}`,
};
const command = `docker ps --filter "status=running" \
--filter "${Object.values(labels).join('" --filter "')}" \
--format "{{.ID}}" | head -n 1`;
return command.trim();
};

View File

@ -8,6 +8,7 @@ import {
import { sendDatabaseBackupNotifications } from "../notifications/database-backup"; import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsync, execAsyncRemote } from "../process/execAsync"; import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getS3Credentials, normalizeS3Path } from "./utils"; import { getS3Credentials, normalizeS3Path } from "./utils";
import { createDeploymentBackup } from "@dokploy/server/services/deployment";
export const runPostgresBackup = async ( export const runPostgresBackup = async (
postgres: Postgres, postgres: Postgres,
@ -16,6 +17,11 @@ export const runPostgresBackup = async (
const { appName, databaseUser, name, projectId } = postgres; const { appName, databaseUser, name, projectId } = postgres;
const project = await findProjectById(projectId); const project = await findProjectById(projectId);
const deployment = await createDeploymentBackup({
backupId: backup.backupId,
title: "Postgres Backup",
description: "Postgres Backup",
});
const { prefix, database } = backup; const { prefix, database } = backup;
const destination = backup.destination; const destination = backup.destination;
const backupFileName = `${new Date().toISOString()}.sql.gz`; const backupFileName = `${new Date().toISOString()}.sql.gz`;
@ -40,7 +46,11 @@ export const runPostgresBackup = async (
const { Id: containerId } = await getServiceContainer(appName); const { Id: containerId } = await getServiceContainer(appName);
const pgDumpCommand = `docker exec ${containerId} sh -c "pg_dump -Fc --no-acl --no-owner -h localhost -U ${databaseUser} --no-password '${database}' | gzip"`; const pgDumpCommand = `docker exec ${containerId} sh -c "pg_dump -Fc --no-acl --no-owner -h localhost -U ${databaseUser} --no-password '${database}' | gzip"`;
await execAsync(`${pgDumpCommand} | ${rcloneCommand}`);
await execAsync(`${pgDumpCommand} | ${rcloneCommand}`);
} }
await sendDatabaseBackupNotifications({ await sendDatabaseBackupNotifications({

View File

@ -75,3 +75,37 @@ export const getS3Credentials = (destination: Destination) => {
return rcloneFlags; return rcloneFlags;
}; };
/**
 * Build the shell command that dumps a Postgres database from inside a
 * container (custom format, no ACLs/owner) and gzips the result to stdout.
 * NOTE(review): arguments are interpolated unescaped — quotes in the
 * database/user names would break the generated command; assumes trusted input.
 */
export const getPostgresBackupCommand = (
	containerId: string,
	database: string,
	databaseUser: string,
) => {
	const dump = `pg_dump -Fc --no-acl --no-owner -h localhost -U ${databaseUser} --no-password '${database}' | gzip`;
	return `docker exec ${containerId} sh -c "${dump}"`;
};
/**
 * Build the shell command that dumps a MariaDB database from inside a
 * container and gzips the result to stdout.
 * NOTE(review): credentials are interpolated unescaped; assumes trusted input.
 */
export const getMariadbBackupCommand = (
	containerId: string,
	database: string,
	databaseUser: string,
	databasePassword: string,
) => {
	const dumpArgs = [
		"mariadb-dump",
		`--user='${databaseUser}'`,
		`--password='${databasePassword}'`,
		"--databases",
		database,
	].join(" ");
	return `docker exec ${containerId} sh -c "${dumpArgs} | gzip"`;
};
/**
 * Build the shell command that dumps a MySQL database from inside a
 * container (as root, single transaction) and gzips the result to stdout.
 * NOTE(review): the root password is interpolated unescaped; assumes trusted input.
 */
export const getMysqlBackupCommand = (
	containerId: string,
	database: string,
	databasePassword: string,
) => {
	const flags = [
		"--default-character-set=utf8mb4",
		"-u 'root'",
		`--password='${databasePassword}'`,
		"--single-transaction",
		"--no-tablespaces",
		"--quick",
	].join(" ");
	return `docker exec ${containerId} sh -c "mysqldump ${flags} '${database}' | gzip"`;
};
/**
 * Build the shell command that dumps a MongoDB database from inside a
 * container as a gzipped archive streamed to stdout.
 * NOTE(review): credentials are interpolated unescaped; assumes trusted input.
 */
export const getMongoBackupCommand = (
	containerId: string,
	database: string,
	databaseUser: string,
	databasePassword: string,
) => {
	const dump = [
		"mongodump",
		`-d '${database}'`,
		`-u '${databaseUser}'`,
		`-p '${databasePassword}'`,
		"--archive",
		"--authenticationDatabase admin",
		"--gzip",
	].join(" ");
	return `docker exec ${containerId} sh -c "${dump}"`;
};

View File

@ -5,6 +5,52 @@ import { Client } from "ssh2";
export const execAsync = util.promisify(exec); export const execAsync = util.promisify(exec);
/** Options accepted by {@link execAsyncStream}. */
interface ExecOptions {
	// Working directory for the spawned shell.
	cwd?: string;
	// Environment for the spawned shell (replaces process.env when provided).
	env?: NodeJS.ProcessEnv;
}

/**
 * Run a shell command, invoking `onData` with every chunk of stdout/stderr
 * as it arrives, and resolve with the full accumulated output once the
 * command exits successfully. Rejects with the exec error on failure.
 *
 * Fixes vs. the naive version: `exec` buffers output with a default
 * maxBuffer of 1 MiB, which would abort exactly the long-running streamed
 * commands this helper exists for (e.g. rclone with DEBUG logging), so a
 * much larger cap is applied unless the caller supplies their own options.
 * Debug `console.log` calls are removed; callers observe errors via the
 * rejected promise.
 */
export const execAsyncStream = (
	command: string,
	onData?: (data: string) => void,
	options: ExecOptions = {},
): Promise<{ stdout: string; stderr: string }> => {
	return new Promise((resolve, reject) => {
		let stdoutComplete = "";
		let stderrComplete = "";

		const childProcess = exec(
			command,
			// 100 MiB cap; caller-provided options (cwd/env) take precedence.
			{ maxBuffer: 100 * 1024 * 1024, ...options },
			(error) => {
				if (error) {
					reject(error);
					return;
				}
				resolve({ stdout: stdoutComplete, stderr: stderrComplete });
			},
		);

		childProcess.stdout?.on("data", (data: Buffer | string) => {
			const stringData = data.toString();
			stdoutComplete += stringData;
			onData?.(stringData);
		});

		childProcess.stderr?.on("data", (data: Buffer | string) => {
			const stringData = data.toString();
			stderrComplete += stringData;
			onData?.(stringData);
		});

		// Spawn failures (e.g. shell missing) emit "error" without ever
		// invoking the exec completion callback.
		childProcess.on("error", reject);
	});
};
export const execFileAsync = async ( export const execFileAsync = async (
command: string, command: string,
args: string[], args: string[],

View File

@ -3,7 +3,13 @@ import type { Compose } from "@dokploy/server/services/compose";
import { getS3Credentials } from "../backups/utils"; import { getS3Credentials } from "../backups/utils";
import { execAsync, execAsyncRemote } from "../process/execAsync"; import { execAsync, execAsyncRemote } from "../process/execAsync";
import type { Backup } from "@dokploy/server/services/backup"; import type { Backup } from "@dokploy/server/services/backup";
import { getFindContainerCommand } from "../backups/compose"; import { getComposeContainer } from "../docker/utils";
import {
getMariadbRestoreCommand,
getMongoRestoreCommand,
getMysqlRestoreCommand,
getPostgresRestoreCommand,
} from "./utils";
export const restoreComposeBackup = async ( export const restoreComposeBackup = async (
compose: Compose, compose: Compose,
@ -20,31 +26,21 @@ export const restoreComposeBackup = async (
const bucketPath = `:s3:${destination.bucket}`; const bucketPath = `:s3:${destination.bucket}`;
const backupPath = `${bucketPath}/${backupFile}`; const backupPath = `${bucketPath}/${backupFile}`;
const command = getFindContainerCommand(compose, metadata.serviceName); const { Id: containerId } = await getComposeContainer(
compose,
let containerId = ""; metadata.serviceName || "",
if (serverId) { );
const { stdout, stderr } = await execAsyncRemote(serverId, command);
emit(stdout);
emit(stderr);
containerId = stdout.trim();
} else {
const { stdout, stderr } = await execAsync(command);
emit(stdout);
emit(stderr);
containerId = stdout.trim();
}
let restoreCommand = ""; let restoreCommand = "";
if (metadata.postgres) { if (metadata.postgres) {
restoreCommand = `rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | docker exec -i ${containerId} pg_restore -U ${metadata.postgres.databaseUser} -d ${database} --clean --if-exists`; restoreCommand = `rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${getPostgresRestoreCommand(containerId, database, metadata.postgres.databaseUser)}`;
} else if (metadata.mariadb) { } else if (metadata.mariadb) {
restoreCommand = ` restoreCommand = `
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | docker exec -i ${containerId} mariadb -u ${metadata.mariadb.databaseUser} -p${metadata.mariadb.databasePassword} ${database} rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${getMariadbRestoreCommand(containerId, database, metadata.mariadb.databaseUser, metadata.mariadb.databasePassword)}
`; `;
} else if (metadata.mysql) { } else if (metadata.mysql) {
restoreCommand = ` restoreCommand = `
rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | docker exec -i ${containerId} mysql -u root -p${metadata.mysql.databaseRootPassword} ${database} rclone cat ${rcloneFlags.join(" ")} "${backupPath}" | gunzip | ${getMysqlRestoreCommand(containerId, database, metadata.mysql.databaseRootPassword)}
`; `;
} else if (metadata.mongo) { } else if (metadata.mongo) {
const tempDir = "/tmp/dokploy-restore"; const tempDir = "/tmp/dokploy-restore";
@ -56,7 +52,7 @@ export const restoreComposeBackup = async (
rclone copy ${rcloneFlags.join(" ")} "${backupPath}" ${tempDir} && \ rclone copy ${rcloneFlags.join(" ")} "${backupPath}" ${tempDir} && \
cd ${tempDir} && \ cd ${tempDir} && \
gunzip -f "${fileName}" && \ gunzip -f "${fileName}" && \
docker exec -i ${containerId} mongorestore --username ${metadata.mongo.databaseUser} --password ${metadata.mongo.databasePassword} --authenticationDatabase admin --db ${database} --archive < "${decompressedName}" && \ ${getMongoRestoreCommand(containerId, database, metadata.mongo.databaseUser, metadata.mongo.databasePassword)} < "${decompressedName}" && \
rm -rf ${tempDir}`; rm -rf ${tempDir}`;
} }

View File

@ -0,0 +1,33 @@
/**
 * Build the shell command that restores a Postgres dump read from stdin
 * into a container (`docker exec -i` keeps stdin attached for the pipe).
 * NOTE(review): arguments are interpolated unescaped; assumes trusted input.
 */
export const getPostgresRestoreCommand = (
	containerId: string,
	database: string,
	databaseUser: string,
) => {
	const restore = `pg_restore -U ${databaseUser} -d ${database} --clean --if-exists`;
	return `docker exec -i ${containerId} sh -c "${restore}"`;
};
/**
 * Build the shell command that feeds a MariaDB SQL dump from stdin into a
 * container's `mariadb` client (`docker exec -i` keeps stdin attached).
 * NOTE(review): credentials are interpolated unescaped; assumes trusted input.
 */
export const getMariadbRestoreCommand = (
	containerId: string,
	database: string,
	databaseUser: string,
	databasePassword: string,
) => {
	const client = `mariadb -u ${databaseUser} -p${databasePassword} ${database}`;
	return `docker exec -i ${containerId} sh -c "${client}"`;
};
/**
 * Build the shell command that feeds a MySQL SQL dump from stdin into a
 * container's `mysql` client as root (`docker exec -i` keeps stdin attached).
 * NOTE(review): the root password is interpolated unescaped; assumes trusted input.
 */
export const getMysqlRestoreCommand = (
	containerId: string,
	database: string,
	databasePassword: string,
) => {
	const client = ["mysql", "-u root", `-p${databasePassword}`, database].join(" ");
	return `docker exec -i ${containerId} sh -c "${client}"`;
};
/**
 * Build the shell command that restores a MongoDB archive read from stdin
 * into a container (`docker exec -i` keeps stdin attached for `< file`).
 * NOTE(review): credentials are interpolated unescaped; assumes trusted input.
 */
export const getMongoRestoreCommand = (
	containerId: string,
	database: string,
	databaseUser: string,
	databasePassword: string,
) => {
	const restore = [
		"mongorestore",
		`--username ${databaseUser}`,
		`--password ${databasePassword}`,
		"--authenticationDatabase admin",
		`--db ${database}`,
		"--archive",
	].join(" ");
	return `docker exec -i ${containerId} sh -c "${restore}"`;
};