Mirror of https://github.com/Dokploy/dokploy
Enhance backup command generation and logging for database backups
- Refactored backup utilities for PostgreSQL, MySQL, MariaDB, and MongoDB to streamline command generation and improve logging.
- Introduced a unified `getBackupCommand` function to encapsulate backup command logic, enhancing maintainability.
- Improved error handling and logging during backup processes, providing clearer feedback on command execution and success/failure states.
- Updated container retrieval methods to return null instead of throwing errors when no containers are found, improving robustness.
parent 557c89ac6d · commit 66dd890448
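Taken together, the five runners (Postgres, MySQL, MariaDB, MongoDB, and Compose) now share a single call shape. A minimal sketch of that pattern, assembled from the hunks below (`db` stands for whichever service entity a runner receives; `backup`, `deployment`, and `bucketDestination` are the runner's own locals):

import { execAsync, execAsyncRemote } from "../process/execAsync";
import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils";

const rcloneFlags = getS3Credentials(backup.destination);
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} ":s3:${backup.destination.bucket}/${bucketDestination}"`;

// getBackupCommand() returns one self-contained shell script that locates
// the running container, validates the dump, and streams it into rclone.
const backupCommand = getBackupCommand(backup, rcloneCommand, deployment.logPath);

if (db.serverId) {
	await execAsyncRemote(db.serverId, backupCommand); // schedule targets a remote server
} else {
	await execAsync(backupCommand); // schedule runs on the Dokploy host itself
}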
@@ -80,6 +80,7 @@ import {
 	Loader2,
 	PlusIcon,
 	Search,
+	ServerIcon,
 	Trash2,
 	X,
 } from "lucide-react";
@@ -968,6 +969,11 @@ const Project = (
 	}}
 	className="flex flex-col group relative cursor-pointer bg-transparent transition-colors hover:bg-border"
 >
+	{service.serverId && (
+		<div className="absolute -left-1 -top-2">
+			<ServerIcon className="size-4 text-muted-foreground" />
+		</div>
+	)}
 	<div className="absolute -right-1 -top-2">
 		<StatusTooltip status={service.status} />
 	</div>
@@ -316,7 +316,7 @@ export const createDeploymentBackup = async (

 		const command = `
 mkdir -p ${LOGS_PATH}/${backup.appName};
-echo "Initializing backup" >> ${logFilePath};
+echo "Initializing backup\n" >> ${logFilePath};
 `;

 		await execAsyncRemote(server.serverId, command);
@@ -324,7 +324,7 @@ echo "Initializing backup" >> ${logFilePath};
 		await fsPromises.mkdir(path.join(LOGS_PATH, backup.appName), {
 			recursive: true,
 		});
-		await fsPromises.writeFile(logFilePath, "Initializing backup");
+		await fsPromises.writeFile(logFilePath, "Initializing backup\n");
 	}

 	const deploymentCreate = await db
@@ -2,21 +2,12 @@ import type { BackupSchedule } from "@dokploy/server/services/backup";
 import type { Compose } from "@dokploy/server/services/compose";
 import { findProjectById } from "@dokploy/server/services/project";
 import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
-import { execAsyncRemote, execAsyncStream } from "../process/execAsync";
-import {
-	getMariadbBackupCommand,
-	getMysqlBackupCommand,
-	getMongoBackupCommand,
-	getPostgresBackupCommand,
-	getS3Credentials,
-	normalizeS3Path,
-} from "./utils";
+import { execAsync, execAsyncRemote } from "../process/execAsync";
+import { getS3Credentials, normalizeS3Path, getBackupCommand } from "./utils";
 import {
 	createDeploymentBackup,
 	updateDeploymentStatus,
 } from "@dokploy/server/services/deployment";
-import { createWriteStream } from "node:fs";
-import { getComposeContainer } from "../docker/utils";

 export const runComposeBackup = async (
 	compose: Compose,
@@ -24,7 +15,7 @@ export const runComposeBackup = async (
 ) => {
 	const { projectId, name } = compose;
 	const project = await findProjectById(projectId);
-	const { prefix, database } = backup;
+	const { prefix } = backup;
 	const destination = backup.destination;
 	const backupFileName = `${new Date().toISOString()}.dump.gz`;
 	const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
@@ -37,74 +28,17 @@ export const runComposeBackup = async (
 	try {
 		const rcloneFlags = getS3Credentials(destination);
 		const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
-
-		const { Id: containerId } = await getComposeContainer(
-			compose,
-			backup.serviceName || "",
-		);
-
 		const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
-		let backupCommand = "";
-
-		if (backup.databaseType === "postgres") {
-			backupCommand = getPostgresBackupCommand(
-				containerId,
-				database,
-				backup.metadata?.postgres?.databaseUser || "",
-			);
-		} else if (backup.databaseType === "mariadb") {
-			backupCommand = getMariadbBackupCommand(
-				containerId,
-				database,
-				backup.metadata?.mariadb?.databaseUser || "",
-				backup.metadata?.mariadb?.databasePassword || "",
-			);
-		} else if (backup.databaseType === "mysql") {
-			backupCommand = getMysqlBackupCommand(
-				containerId,
-				database,
-				backup.metadata?.mysql?.databaseRootPassword || "",
-			);
-		} else if (backup.databaseType === "mongo") {
-			backupCommand = getMongoBackupCommand(
-				containerId,
-				database,
-				backup.metadata?.mongo?.databaseUser || "",
-				backup.metadata?.mongo?.databasePassword || "",
-			);
-		}
+		const backupCommand = getBackupCommand(
+			backup,
+			rcloneCommand,
+			deployment.logPath,
+		);
 		if (compose.serverId) {
-			await execAsyncRemote(
-				compose.serverId,
-				`
-set -e;
-echo "Running command." >> ${deployment.logPath};
-export RCLONE_LOG_LEVEL=DEBUG;
-${backupCommand} | ${rcloneCommand} >> ${deployment.logPath} 2>> ${deployment.logPath} || {
-	echo "❌ Command failed" >> ${deployment.logPath};
-	exit 1;
-}
-echo "✅ Command executed successfully" >> ${deployment.logPath};
-`,
-			);
+			await execAsyncRemote(compose.serverId, backupCommand);
 		} else {
-			const writeStream = createWriteStream(deployment.logPath, { flags: "a" });
-			await execAsyncStream(
-				`${backupCommand} | ${rcloneCommand}`,
-				(data) => {
-					if (writeStream.writable) {
-						writeStream.write(data);
-					}
-				},
-				{
-					env: {
-						...process.env,
-						RCLONE_LOG_LEVEL: "DEBUG",
-					},
-				},
-			);
-			writeStream.write("Backup done✅");
-			writeStream.end();
+			await execAsync(backupCommand);
 		}

 		await sendDatabaseBackupNotifications({
@@ -1,27 +1,21 @@
 import type { BackupSchedule } from "@dokploy/server/services/backup";
 import type { Mariadb } from "@dokploy/server/services/mariadb";
 import { findProjectById } from "@dokploy/server/services/project";
-import { getServiceContainer } from "../docker/utils";
 import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
-import { execAsyncRemote, execAsyncStream } from "../process/execAsync";
-import {
-	getMariadbBackupCommand,
-	getS3Credentials,
-	normalizeS3Path,
-} from "./utils";
+import { execAsync, execAsyncRemote } from "../process/execAsync";
+import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils";
 import {
 	createDeploymentBackup,
 	updateDeploymentStatus,
 } from "@dokploy/server/services/deployment";
-import { createWriteStream } from "node:fs";

 export const runMariadbBackup = async (
 	mariadb: Mariadb,
 	backup: BackupSchedule,
 ) => {
-	const { appName, databasePassword, databaseUser, projectId, name } = mariadb;
+	const { projectId, name } = mariadb;
 	const project = await findProjectById(projectId);
-	const { prefix, database } = backup;
+	const { prefix } = backup;
 	const destination = backup.destination;
 	const backupFileName = `${new Date().toISOString()}.sql.gz`;
 	const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
@@ -33,50 +27,17 @@ export const runMariadbBackup = async (
 	try {
 		const rcloneFlags = getS3Credentials(destination);
 		const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
-
-		const { Id: containerId } = await getServiceContainer(
-			appName,
-			mariadb.serverId,
-		);
-
 		const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;

-		const command = getMariadbBackupCommand(
-			containerId,
-			database,
-			databaseUser,
-			databasePassword || "",
+		const backupCommand = getBackupCommand(
+			backup,
+			rcloneCommand,
+			deployment.logPath,
 		);
 		if (mariadb.serverId) {
-			await execAsyncRemote(
-				mariadb.serverId,
-				`
-set -e;
-echo "Running command." >> ${deployment.logPath};
-export RCLONE_LOG_LEVEL=DEBUG;
-${command} | ${rcloneCommand} >> ${deployment.logPath} 2>> ${deployment.logPath} || {
-	echo "❌ Command failed" >> ${deployment.logPath};
-	exit 1;
-}
-echo "✅ Command executed successfully" >> ${deployment.logPath};
-`,
-			);
+			await execAsyncRemote(mariadb.serverId, backupCommand);
 		} else {
-			const writeStream = createWriteStream(deployment.logPath, { flags: "a" });
-			await execAsyncStream(
-				`${command} | ${rcloneCommand}`,
-				(data) => {
-					if (writeStream.writable) {
-						writeStream.write(data);
-					}
-				},
-				{
-					env: {
-						...process.env,
-						RCLONE_LOG_LEVEL: "DEBUG",
-					},
-				},
-			);
+			await execAsync(backupCommand);
 		}

 		await sendDatabaseBackupNotifications({
@@ -1,24 +1,18 @@
 import type { BackupSchedule } from "@dokploy/server/services/backup";
 import type { Mongo } from "@dokploy/server/services/mongo";
 import { findProjectById } from "@dokploy/server/services/project";
-import { getServiceContainer } from "../docker/utils";
 import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
-import { execAsyncRemote, execAsyncStream } from "../process/execAsync";
-import {
-	getMongoBackupCommand,
-	getS3Credentials,
-	normalizeS3Path,
-} from "./utils";
+import { execAsync, execAsyncRemote } from "../process/execAsync";
+import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils";
 import {
 	createDeploymentBackup,
 	updateDeploymentStatus,
 } from "@dokploy/server/services/deployment";
-import { createWriteStream } from "node:fs";

 export const runMongoBackup = async (mongo: Mongo, backup: BackupSchedule) => {
-	const { appName, databasePassword, databaseUser, projectId, name } = mongo;
+	const { projectId, name } = mongo;
 	const project = await findProjectById(projectId);
-	const { prefix, database } = backup;
+	const { prefix } = backup;
 	const destination = backup.destination;
 	const backupFileName = `${new Date().toISOString()}.dump.gz`;
 	const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
@@ -30,51 +24,18 @@ export const runMongoBackup = async (mongo: Mongo, backup: BackupSchedule) => {
 	try {
 		const rcloneFlags = getS3Credentials(destination);
 		const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;
-
-		const { Id: containerId } = await getServiceContainer(
-			appName,
-			mongo.serverId,
-		);
-
 		const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
-		const command = getMongoBackupCommand(
-			containerId,
-			database,
-			databaseUser || "",
-			databasePassword || "",
+		const backupCommand = getBackupCommand(
+			backup,
+			rcloneCommand,
+			deployment.logPath,
 		);

 		if (mongo.serverId) {
-			await execAsyncRemote(
-				mongo.serverId,
-				`
-set -e;
-echo "Running command." >> ${deployment.logPath};
-export RCLONE_LOG_LEVEL=DEBUG;
-${command} | ${rcloneCommand} >> ${deployment.logPath} 2>> ${deployment.logPath} || {
-	echo "❌ Command failed" >> ${deployment.logPath};
-	exit 1;
-}
-echo "✅ Command executed successfully" >> ${deployment.logPath};
-`,
-			);
+			await execAsyncRemote(mongo.serverId, backupCommand);
 		} else {
-			const writeStream = createWriteStream(deployment.logPath, { flags: "a" });
-			await execAsyncStream(
-				`${command} | ${rcloneCommand}`,
-				(data) => {
-					if (writeStream.writable) {
-						writeStream.write(data);
-					}
-				},
-				{
-					env: {
-						...process.env,
-						RCLONE_LOG_LEVEL: "DEBUG",
-					},
-				},
-			);
-			writeStream.write("Backup done✅");
-			writeStream.end();
+			await execAsync(backupCommand);
 		}

 		await sendDatabaseBackupNotifications({
@@ -1,24 +1,18 @@
 import type { BackupSchedule } from "@dokploy/server/services/backup";
 import type { MySql } from "@dokploy/server/services/mysql";
 import { findProjectById } from "@dokploy/server/services/project";
-import { getServiceContainer } from "../docker/utils";
 import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
-import { execAsyncRemote, execAsyncStream } from "../process/execAsync";
-import {
-	getMysqlBackupCommand,
-	getS3Credentials,
-	normalizeS3Path,
-} from "./utils";
-import { createWriteStream } from "node:fs";
+import { execAsync, execAsyncRemote } from "../process/execAsync";
+import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils";
 import {
 	createDeploymentBackup,
 	updateDeploymentStatus,
 } from "@dokploy/server/services/deployment";

 export const runMySqlBackup = async (mysql: MySql, backup: BackupSchedule) => {
-	const { appName, databaseRootPassword, projectId, name } = mysql;
+	const { projectId, name } = mysql;
 	const project = await findProjectById(projectId);
-	const { prefix, database } = backup;
+	const { prefix } = backup;
 	const destination = backup.destination;
 	const backupFileName = `${new Date().toISOString()}.sql.gz`;
 	const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
@@ -27,53 +21,23 @@ export const runMySqlBackup = async (mysql: MySql, backup: BackupSchedule) => {
 		title: "MySQL Backup",
 		description: "MySQL Backup",
 	});

 	try {
 		const rcloneFlags = getS3Credentials(destination);
 		const rcloneDestination = `:s3:${destination.bucket}/${bucketDestination}`;

-		const { Id: containerId } = await getServiceContainer(
-			appName,
-			mysql.serverId,
+		const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
+		const backupCommand = getBackupCommand(
+			backup,
+			rcloneCommand,
+			deployment.logPath,
 		);
-
-		const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
-		const command = getMysqlBackupCommand(
-			containerId,
-			database,
-			databaseRootPassword || "",
-		);
 		if (mysql.serverId) {
-			await execAsyncRemote(
-				mysql.serverId,
-				`
-set -e;
-echo "Running command." >> ${deployment.logPath};
-export RCLONE_LOG_LEVEL=DEBUG;
-${command} | ${rcloneCommand} >> ${deployment.logPath} 2>> ${deployment.logPath} || {
-	echo "❌ Command failed" >> ${deployment.logPath};
-	exit 1;
-}
-echo "✅ Command executed successfully" >> ${deployment.logPath};
-`,
-			);
+			await execAsyncRemote(mysql.serverId, backupCommand);
 		} else {
-			const writeStream = createWriteStream(deployment.logPath, { flags: "a" });
-			await execAsyncStream(
-				`${command} | ${rcloneCommand}`,
-				(data) => {
-					if (writeStream.writable) {
-						writeStream.write(data);
-					}
-				},
-				{
-					env: {
-						...process.env,
-						RCLONE_LOG_LEVEL: "DEBUG",
-					},
-				},
-			);
-			writeStream.write("Backup done✅");
-			writeStream.end();
+			await execAsync(backupCommand);
 		}
 		await sendDatabaseBackupNotifications({
 			applicationName: name,
@@ -1,33 +1,27 @@
 import type { BackupSchedule } from "@dokploy/server/services/backup";
 import type { Postgres } from "@dokploy/server/services/postgres";
 import { findProjectById } from "@dokploy/server/services/project";
-import { getServiceContainer } from "../docker/utils";
 import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
-import { execAsyncRemote, execAsyncStream } from "../process/execAsync";
-import {
-	getPostgresBackupCommand,
-	getS3Credentials,
-	normalizeS3Path,
-} from "./utils";
+import { execAsync, execAsyncRemote } from "../process/execAsync";
+import { getBackupCommand, getS3Credentials, normalizeS3Path } from "./utils";
 import {
 	createDeploymentBackup,
 	updateDeploymentStatus,
 } from "@dokploy/server/services/deployment";
-import { createWriteStream } from "node:fs";

 export const runPostgresBackup = async (
 	postgres: Postgres,
 	backup: BackupSchedule,
 ) => {
-	const { appName, databaseUser, name, projectId } = postgres;
+	const { name, projectId } = postgres;
 	const project = await findProjectById(projectId);

 	const deployment = await createDeploymentBackup({
 		backupId: backup.backupId,
-		title: "Postgres Backup",
-		description: "Postgres Backup",
+		title: "Initializing Backup",
+		description: "Initializing Backup",
 	});
-	const { prefix, database } = backup;
+	const { prefix } = backup;
 	const destination = backup.destination;
 	const backupFileName = `${new Date().toISOString()}.sql.gz`;
 	const bucketDestination = `${normalizeS3Path(prefix)}${backupFileName}`;
@@ -37,49 +31,15 @@ export const runPostgresBackup = async (

 	const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
-
-	const { Id: containerId } = await getServiceContainer(
-		appName,
-		postgres.serverId,
+	const backupCommand = getBackupCommand(
+		backup,
+		rcloneCommand,
+		deployment.logPath,
 	);
-
-	const command = getPostgresBackupCommand(
-		containerId,
-		database,
-		databaseUser || "",
-	);
-
 	if (postgres.serverId) {
-		await execAsyncRemote(
-			postgres.serverId,
-			`
-set -e;
-echo "Running command." >> ${deployment.logPath};
-export RCLONE_LOG_LEVEL=DEBUG;
-${command} | ${rcloneCommand} >> ${deployment.logPath} 2>> ${deployment.logPath} || {
-	echo "❌ Command failed" >> ${deployment.logPath};
-	exit 1;
-}
-echo "✅ Command executed successfully" >> ${deployment.logPath};
-`,
-		);
+		await execAsyncRemote(postgres.serverId, backupCommand);
 	} else {
-		const writeStream = createWriteStream(deployment.logPath, { flags: "a" });
-		await execAsyncStream(
-			`${command} | ${rcloneCommand}`,
-			(data) => {
-				if (writeStream.writable) {
-					writeStream.write(data);
-				}
-			},
-			{
-				env: {
-					...process.env,
-					RCLONE_LOG_LEVEL: "DEBUG",
-				},
-			},
-		);
-		writeStream.write("Backup done✅");
-		writeStream.end();
+		await execAsync(backupCommand);
 	}

 	await sendDatabaseBackupNotifications({
@@ -77,35 +77,179 @@ export const getS3Credentials = (destination: Destination) => {
 };

 export const getPostgresBackupCommand = (
-	containerId: string,
 	database: string,
 	databaseUser: string,
 ) => {
-	return `docker exec ${containerId} sh -c "pg_dump -Fc --no-acl --no-owner -h localhost -U ${databaseUser} --no-password '${database}' | gzip"`;
+	return `docker exec -i $CONTAINER_ID bash -c "set -o pipefail; pg_dump -Fc --no-acl --no-owner -h localhost -U ${databaseUser} --no-password '${database}' | gzip"`;
 };

 export const getMariadbBackupCommand = (
-	containerId: string,
 	database: string,
 	databaseUser: string,
 	databasePassword: string,
 ) => {
-	return `docker exec ${containerId} sh -c "mariadb-dump --user='${databaseUser}' --password='${databasePassword}' --databases ${database} | gzip"`;
+	return `docker exec -i $CONTAINER_ID bash -c "set -o pipefail; mariadb-dump --user='${databaseUser}' --password='${databasePassword}' --databases ${database} | gzip"`;
 };

 export const getMysqlBackupCommand = (
-	containerId: string,
 	database: string,
 	databasePassword: string,
 ) => {
-	return `docker exec ${containerId} sh -c "mysqldump --default-character-set=utf8mb4 -u 'root' --password='${databasePassword}' --single-transaction --no-tablespaces --quick '${database}' | gzip"`;
+	return `docker exec -i $CONTAINER_ID bash -c "set -o pipefail; mysqldump --default-character-set=utf8mb4 -u 'root' --password='${databasePassword}' --single-transaction --no-tablespaces --quick '${database}' | gzip"`;
 };

 export const getMongoBackupCommand = (
-	containerId: string,
 	database: string,
 	databaseUser: string,
 	databasePassword: string,
 ) => {
-	return `docker exec ${containerId} sh -c "mongodump -d '${database}' -u '${databaseUser}' -p '${databasePassword}' --archive --authenticationDatabase admin --gzip"`;
+	return `docker exec -i $CONTAINER_ID bash -c "set -o pipefail; mongodump -d '${database}' -u '${databaseUser}' -p '${databasePassword}' --archive --authenticationDatabase admin --gzip"`;
+};
+
+const getServiceContainerCommand = (appName: string) => {
+	return `docker ps -q --filter "status=running" --filter "label=com.docker.swarm.service.name=${appName}" | head -n 1`;
+};
+
+const getComposeContainerCommand = (
+	appName: string,
+	serviceName: string,
+	composeType: "stack" | "docker-compose" | undefined,
+) => {
+	if (composeType === "stack") {
+		return `docker ps -q --filter "status=running" --filter "label=com.docker.stack.namespace=${appName}" --filter "label=com.docker.swarm.service.name=${serviceName}" | head -n 1`;
+	}
+	return `docker ps -q --filter "status=running" --filter "label=com.docker.compose.project=${appName}" --filter "label=com.docker.compose.service=${serviceName}" | head -n 1`;
+};
+
+const getContainerSearchCommand = (backup: BackupSchedule) => {
+	const { backupType, postgres, mysql, mariadb, mongo, compose, serviceName } =
+		backup;
+
+	if (backupType === "database") {
+		const appName =
+			postgres?.appName || mysql?.appName || mariadb?.appName || mongo?.appName;
+		return getServiceContainerCommand(appName || "");
+	}
+	if (backupType === "compose") {
+		const { appName, composeType } = compose || {};
+		return getComposeContainerCommand(
+			appName || "",
+			serviceName || "",
+			composeType,
+		);
+	}
+};
+
+export const generateBackupCommand = (backup: BackupSchedule) => {
+	const { backupType, databaseType } = backup;
+	switch (databaseType) {
+		case "postgres": {
+			const postgres = backup.postgres;
+			if (backupType === "database" && postgres) {
+				return getPostgresBackupCommand(backup.database, postgres.databaseUser);
+			}
+			if (backupType === "compose" && backup.metadata?.postgres) {
+				return getPostgresBackupCommand(
+					backup.database,
+					backup.metadata.postgres.databaseUser,
+				);
+			}
+			break;
+		}
+		case "mysql": {
+			const mysql = backup.mysql;
+			if (backupType === "database" && mysql) {
+				return getMysqlBackupCommand(backup.database, mysql.databasePassword);
+			}
+			if (backupType === "compose" && backup.metadata?.mysql) {
+				return getMysqlBackupCommand(
+					backup.database,
+					backup.metadata?.mysql?.databaseRootPassword || "",
+				);
+			}
+			break;
+		}
+		case "mariadb": {
+			const mariadb = backup.mariadb;
+			if (backupType === "database" && mariadb) {
+				return getMariadbBackupCommand(
+					backup.database,
+					mariadb.databaseUser,
+					mariadb.databasePassword,
+				);
+			}
+			if (backupType === "compose" && backup.metadata?.mariadb) {
+				return getMariadbBackupCommand(
+					backup.database,
+					backup.metadata.mariadb.databaseUser,
+					backup.metadata.mariadb.databasePassword,
+				);
+			}
+			break;
+		}
+		case "mongo": {
+			const mongo = backup.mongo;
+			if (backupType === "database" && mongo) {
+				return getMongoBackupCommand(
+					backup.database,
+					mongo.databaseUser,
+					mongo.databasePassword,
+				);
+			}
+			if (backupType === "compose" && backup.metadata?.mongo) {
+				return getMongoBackupCommand(
+					backup.database,
+					backup.metadata.mongo.databaseUser,
+					backup.metadata.mongo.databasePassword,
+				);
+			}
+			break;
+		}
+		default:
+			throw new Error(`Database type not supported: ${databaseType}`);
+	}
+
+	return null;
+};
+
+export const getBackupCommand = (
+	backup: BackupSchedule,
+	rcloneCommand: string,
+	logPath: string,
+) => {
+	const containerSearch = getContainerSearchCommand(backup);
+	const backupCommand = generateBackupCommand(backup);
+	return `
+set -eo pipefail;
+echo "[$(date)] Starting backup process..." >> ${logPath};
+echo "[$(date)] Executing backup command..." >> ${logPath};
+CONTAINER_ID=$(${containerSearch})
+
+if [ -z "$CONTAINER_ID" ]; then
+	echo "[$(date)] ❌ Container not found" >> ${logPath};
+	exit 1;
+fi
+
+echo "[$(date)] Container Up: $CONTAINER_ID" >> ${logPath};
+
+# Run the backup command and capture the exit status
+BACKUP_OUTPUT=$(${backupCommand} 2>&1 >/dev/null) || {
+	echo "[$(date)] ❌ backup failed" >> ${logPath};
+	echo "Error: $BACKUP_OUTPUT" >> ${logPath};
+	exit 1;
+}
+
+echo "[$(date)] ✅ backup completed successfully" >> ${logPath};
+echo "[$(date)] Starting upload to S3..." >> ${logPath};
+
+# Run the upload command and capture the exit status
+UPLOAD_OUTPUT=$(${backupCommand} | ${rcloneCommand} 2>&1 >/dev/null) || {
+	echo "[$(date)] ❌ Upload to S3 failed" >> ${logPath};
+	echo "Error: $UPLOAD_OUTPUT" >> ${logPath};
+	exit 1;
+}
+
+echo "[$(date)] ✅ Upload to S3 completed successfully" >> ${logPath};
+echo "Backup done ✅" >> ${logPath};
+`;
 };
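One detail of the template above worth calling out: the dump command runs twice, first as a validation pass whose stderr is captured, then again piped into `rclone rcat` for the actual upload. A hypothetical rendering call, useful for inspecting what lands on the host (every field value below is made up for illustration; a real `BackupSchedule` carries many more fields):

const script = getBackupCommand(
	{
		backupType: "database",
		databaseType: "postgres",
		database: "shop",
		postgres: { appName: "shop-db", databaseUser: "postgres" },
	} as unknown as BackupSchedule, // double assertion: the sketch omits most fields
	'rclone rcat ":s3:my-bucket/db-backups/2025-06-26.sql.gz"', // illustrative upload command
	"/etc/dokploy/logs/shop-db/backup.log", // illustrative log path
);
console.log(script);
// The printed script resolves CONTAINER_ID by swarm service label, exits
// with a logged error if nothing is running, and otherwise appends
// timestamped progress markers to the log as it dumps and uploads.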
@@ -509,7 +509,7 @@ export const getServiceContainer = async (
 	});

 	if (containers.length === 0 || !containers[0]) {
-		throw new Error(`No container found with name: ${appName}`);
+		return null;
 	}

 	const container = containers[0];
|
|||||||
});
|
});
|
||||||
|
|
||||||
if (containers.length === 0 || !containers[0]) {
|
if (containers.length === 0 || !containers[0]) {
|
||||||
throw new Error(`No container found with name: ${appName}`);
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
const container = containers[0];
|
const container = containers[0];
|
||||||
|
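Since the lookups now return null instead of throwing, any remaining caller of `getServiceContainer` or `getComposeContainer` has to branch on the result. A hypothetical caller, not part of this diff:

const container = await getServiceContainer(appName, serverId);
if (!container) {
	// A missing container is now an ordinary outcome rather than an
	// exception that aborts the whole operation.
	console.warn(`No container found with name: ${appName}`);
	return;
}
const { Id: containerId } = container; // shape unchanged when a container is found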
@@ -21,7 +21,6 @@ export const execAsyncStream = (

 	const childProcess = exec(command, options, (error) => {
 		if (error) {
-			console.log(error);
 			reject(error);
 			return;
 		}