feat(multi-server): add backups remote server

This commit is contained in:
Mauricio Siu
2024-09-19 21:20:25 -06:00
parent 54f855e738
commit 6159786dfe
6 changed files with 239 additions and 42 deletions

View File

@@ -5,9 +5,18 @@ import {
apiRemoveBackup,
apiUpdateBackup,
} from "@/server/db/schema";
import { runMariadbBackup } from "@/server/utils/backups/mariadb";
import { runMongoBackup } from "@/server/utils/backups/mongo";
import { runMySqlBackup } from "@/server/utils/backups/mysql";
import {
runMariadbBackup,
runRemoteMariadbBackup,
} from "@/server/utils/backups/mariadb";
import {
runMongoBackup,
runRemoteMongoBackup,
} from "@/server/utils/backups/mongo";
import {
runMySqlBackup,
runRemoteMySqlBackup,
} from "@/server/utils/backups/mysql";
import {
runPostgresBackup,
runRemotePostgresBackup,
@@ -116,7 +125,11 @@ export const backupRouter = createTRPCRouter({
try {
const backup = await findBackupById(input.backupId);
const mysql = await findMySqlByBackupId(backup.backupId);
await runMySqlBackup(mysql, backup);
if (mysql.serverId) {
await runRemoteMySqlBackup(mysql, backup);
} else {
await runMySqlBackup(mysql, backup);
}
return true;
} catch (error) {
throw new TRPCError({
@@ -132,7 +145,12 @@ export const backupRouter = createTRPCRouter({
try {
const backup = await findBackupById(input.backupId);
const mariadb = await findMariadbByBackupId(backup.backupId);
await runMariadbBackup(mariadb, backup);
if (mariadb.serverId) {
await runRemoteMariadbBackup(mariadb, backup);
} else {
await runMariadbBackup(mariadb, backup);
}
return true;
} catch (error) {
throw new TRPCError({
@@ -148,7 +166,12 @@ export const backupRouter = createTRPCRouter({
try {
const backup = await findBackupById(input.backupId);
const mongo = await findMongoByBackupId(backup.backupId);
await runMongoBackup(mongo, backup);
if (mongo.serverId) {
await runRemoteMongoBackup(mongo, backup);
} else {
await runMongoBackup(mongo, backup);
}
return true;
} catch (error) {
throw new TRPCError({

View File

@@ -3,9 +3,12 @@ import path from "node:path";
import type { BackupSchedule } from "@/server/api/services/backup";
import type { Mariadb } from "@/server/api/services/mariadb";
import { findProjectById } from "@/server/api/services/project";
import { getServiceContainer } from "../docker/utils";
import {
getRemoteServiceContainer,
getServiceContainer,
} from "../docker/utils";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsync } from "../process/execAsync";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { uploadToS3 } from "./utils";
export const runMariadbBackup = async (
@@ -56,3 +59,59 @@ export const runMariadbBackup = async (
await unlink(hostPath);
}
};
/**
 * Runs a logical backup of a MariaDB service that lives on a remote server.
 *
 * The dump is produced inside the service container and streamed directly
 * into an S3-compatible bucket via `rclone rcat`, so nothing is written to
 * the remote host's disk.
 *
 * Success/failure is reported through database-backup notifications; on
 * failure the error is rethrown after notifying.
 *
 * @throws Error when the MariaDB record has no `serverId`.
 */
export const runRemoteMariadbBackup = async (
	mariadb: Mariadb,
	backup: BackupSchedule,
) => {
	const { appName, databasePassword, databaseUser, projectId, name, serverId } =
		mariadb;

	// This entry point is only valid for remote deployments.
	if (!serverId) {
		throw new Error("Server ID not provided");
	}

	const project = await findProjectById(projectId);
	const { prefix, database, destination } = backup;
	const { accessKey, secretAccessKey, bucket, region, endpoint } = destination;

	// Timestamped object key under the schedule's prefix.
	const backupFileName = `${new Date().toISOString()}.sql.gz`;
	const bucketDestination = path.join(prefix, backupFileName);

	try {
		const container = await getRemoteServiceContainer(serverId, appName);

		// Dump + compress inside the container; output goes to stdout.
		const dumpCommand = `docker exec ${container.Id} sh -c "mariadb-dump --user='${databaseUser}' --password='${databasePassword}' --databases ${database} | gzip"`;

		// Credentials passed as flags so no rclone config file is needed on
		// the remote host.
		const rcloneFlags = [
			`--s3-access-key-id=${accessKey}`,
			`--s3-secret-access-key=${secretAccessKey}`,
			`--s3-region=${region}`,
			`--s3-endpoint=${endpoint}`,
			"--s3-no-check-bucket",
			"--s3-force-path-style",
		].join(" ");

		const uploadCommand = `rclone rcat ${rcloneFlags} ":s3:${bucket}/${bucketDestination}"`;

		// Stream the gzipped dump straight into the bucket.
		await execAsyncRemote(serverId, `${dumpCommand} | ${uploadCommand}`);

		await sendDatabaseBackupNotifications({
			applicationName: name,
			projectName: project.name,
			databaseType: "mariadb",
			type: "success",
		});
	} catch (error) {
		console.log(error);
		await sendDatabaseBackupNotifications({
			applicationName: name,
			projectName: project.name,
			databaseType: "mariadb",
			type: "error",
			// @ts-ignore
			errorMessage: error?.message || "Error message not provided",
		});
		throw error;
	}
};

View File

@@ -3,9 +3,12 @@ import path from "node:path";
import type { BackupSchedule } from "@/server/api/services/backup";
import type { Mongo } from "@/server/api/services/mongo";
import { findProjectById } from "@/server/api/services/project";
import { getServiceContainer } from "../docker/utils";
import {
getRemoteServiceContainer,
getServiceContainer,
} from "../docker/utils";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsync } from "../process/execAsync";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { uploadToS3 } from "./utils";
// mongodb://mongo:Bqh7AQl-PRbnBu@localhost:27017/?tls=false&directConnection=true
@@ -53,3 +56,59 @@ export const runMongoBackup = async (mongo: Mongo, backup: BackupSchedule) => {
}
};
// mongorestore -d monguito -u mongo -p Bqh7AQl-PRbnBu --authenticationDatabase admin --gzip --archive=2024-04-13T05:03:58.937Z.dump.gz
/**
 * Runs a backup of a MongoDB service that lives on a remote server.
 *
 * The dump is produced inside the service container and piped straight into
 * an S3-compatible bucket via `rclone rcat`, avoiding any temp files on the
 * remote host.
 *
 * Success/failure is reported through database-backup notifications; on
 * failure the error is rethrown after notifying.
 *
 * @throws Error when the Mongo record has no `serverId`.
 */
export const runRemoteMongoBackup = async (
	mongo: Mongo,
	backup: BackupSchedule,
) => {
	const { appName, databasePassword, databaseUser, projectId, name, serverId } =
		mongo;
	// This entry point is only valid for remote deployments.
	if (!serverId) {
		throw new Error("Server ID not provided");
	}
	const project = await findProjectById(projectId);
	const { prefix, database } = backup;
	const destination = backup.destination;
	// Timestamped object key under the schedule's prefix; `.dump.gz` matches
	// the archive format mongorestore expects (see restore example above).
	const backupFileName = `${new Date().toISOString()}.dump.gz`;
	const bucketDestination = path.join(prefix, backupFileName);
	const { accessKey, secretAccessKey, bucket, region, endpoint } = destination;
	try {
		const { Id: containerId } = await getRemoteServiceContainer(
			serverId,
			appName,
		);
		// BUGFIX: `--archive` is required so mongodump writes a single gzipped
		// archive to stdout. Without it, mongodump writes BSON files into a
		// `dump/` directory inside the container and the pipe below receives
		// nothing, uploading an empty object to the bucket.
		const mongoDumpCommand = `docker exec ${containerId} sh -c "mongodump -d '${database}' -u '${databaseUser}' -p '${databasePassword}' --authenticationDatabase=admin --archive --gzip"`;
		// Credentials passed as flags so no rclone config file is needed on
		// the remote host.
		const rcloneFlags = [
			`--s3-access-key-id=${accessKey}`,
			`--s3-secret-access-key=${secretAccessKey}`,
			`--s3-region=${region}`,
			`--s3-endpoint=${endpoint}`,
			"--s3-no-check-bucket",
			"--s3-force-path-style",
		];
		const rcloneDestination = `:s3:${bucket}/${bucketDestination}`;
		const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
		// Stream the gzipped archive straight into the bucket.
		await execAsyncRemote(serverId, `${mongoDumpCommand} | ${rcloneCommand}`);
		await sendDatabaseBackupNotifications({
			applicationName: name,
			projectName: project.name,
			databaseType: "mongodb",
			type: "success",
		});
	} catch (error) {
		console.log(error);
		await sendDatabaseBackupNotifications({
			applicationName: name,
			projectName: project.name,
			databaseType: "mongodb",
			type: "error",
			// @ts-ignore
			errorMessage: error?.message || "Error message not provided",
		});
		throw error;
	}
};

View File

@@ -3,9 +3,12 @@ import path from "node:path";
import type { BackupSchedule } from "@/server/api/services/backup";
import type { MySql } from "@/server/api/services/mysql";
import { findProjectById } from "@/server/api/services/project";
import { getServiceContainer } from "../docker/utils";
import {
getRemoteServiceContainer,
getServiceContainer,
} from "../docker/utils";
import { sendDatabaseBackupNotifications } from "../notifications/database-backup";
import { execAsync } from "../process/execAsync";
import { execAsync, execAsyncRemote } from "../process/execAsync";
import { uploadToS3 } from "./utils";
export const runMySqlBackup = async (mysql: MySql, backup: BackupSchedule) => {
@@ -53,3 +56,59 @@ export const runMySqlBackup = async (mysql: MySql, backup: BackupSchedule) => {
await unlink(hostPath);
}
};
/**
 * Runs a logical backup of a MySQL service that lives on a remote server.
 *
 * `mysqldump` runs inside the service container as root and its gzipped
 * output is streamed directly into an S3-compatible bucket with
 * `rclone rcat`, so the remote host's disk is never used.
 *
 * Success/failure is reported through database-backup notifications; on
 * failure the error is rethrown after notifying.
 *
 * @throws Error when the MySQL record has no `serverId`.
 */
export const runRemoteMySqlBackup = async (
	mysql: MySql,
	backup: BackupSchedule,
) => {
	const { appName, databaseRootPassword, projectId, name, serverId } = mysql;

	// This entry point is only valid for remote deployments.
	if (!serverId) {
		throw new Error("Server ID not provided");
	}

	const project = await findProjectById(projectId);
	const { prefix, database, destination } = backup;
	const { accessKey, secretAccessKey, bucket, region, endpoint } = destination;

	// Timestamped object key under the schedule's prefix.
	const backupFileName = `${new Date().toISOString()}.sql.gz`;
	const bucketDestination = path.join(prefix, backupFileName);

	try {
		const container = await getRemoteServiceContainer(serverId, appName);

		// Dump + compress inside the container; output goes to stdout.
		const dumpCommand = `docker exec ${container.Id} sh -c "mysqldump --default-character-set=utf8mb4 -u 'root' --password='${databaseRootPassword}' --single-transaction --no-tablespaces --quick '${database}' | gzip"`;

		// Credentials passed as flags so no rclone config file is needed on
		// the remote host.
		const rcloneFlags = [
			`--s3-access-key-id=${accessKey}`,
			`--s3-secret-access-key=${secretAccessKey}`,
			`--s3-region=${region}`,
			`--s3-endpoint=${endpoint}`,
			"--s3-no-check-bucket",
			"--s3-force-path-style",
		].join(" ");

		const uploadCommand = `rclone rcat ${rcloneFlags} ":s3:${bucket}/${bucketDestination}"`;

		// Stream the gzipped dump straight into the bucket.
		await execAsyncRemote(serverId, `${dumpCommand} | ${uploadCommand}`);

		await sendDatabaseBackupNotifications({
			applicationName: name,
			projectName: project.name,
			databaseType: "mysql",
			type: "success",
		});
	} catch (error) {
		console.log(error);
		await sendDatabaseBackupNotifications({
			applicationName: name,
			projectName: project.name,
			databaseType: "mysql",
			type: "error",
			// @ts-ignore
			errorMessage: error?.message || "Error message not provided",
		});
		throw error;
	}
};

View File

@@ -65,50 +65,38 @@ export const runRemotePostgresBackup = async (
postgres: Postgres,
backup: BackupSchedule,
) => {
const { appName, databaseUser, name, projectId } = postgres;
const { appName, databaseUser, name, projectId, serverId } = postgres;
if (!serverId) {
throw new Error("Server ID not provided");
}
const project = await findProjectById(projectId);
const { prefix, database } = backup;
const destination = backup.destination;
const backupFileName = `${new Date().toISOString()}.sql.gz`;
const bucketDestination = path.join(prefix, backupFileName);
const containerPath = `/backup/${backupFileName}`;
const hostPath = `./${backupFileName}`;
const { accessKey, secretAccessKey, bucket, region, endpoint } = destination;
const rcloneDestination = `s3:${bucket}:${prefix}/${backupFileName}`;
try {
const { Id: containerId } = await getRemoteServiceContainer(
postgres.serverId,
serverId,
appName,
);
const pgDumpCommand = `docker exec ${containerId} sh -c "pg_dump -Fc --no-acl --no-owner -h localhost -U ${databaseUser} --no-password '${database}' | gzip"`;
// const rcloneConfig = `:s3,provider=Cloudflare,access_key_id=${accessKey},secret_access_key=${secretAccessKey},endpoint=${endpoint},acl=private,region=${region},bucket_name=${bucket},force_path_style=true`;
const rcloneFlags = [
// `--s3-provider=Cloudflare`,
`--s3-access-key-id=${accessKey}`,
`--s3-secret-access-key=${secretAccessKey}`,
`--s3-region=${region}`,
`--s3-endpoint=${endpoint}`, // Aquí puedes incluir 'https://'
`--s3-endpoint=${endpoint}`,
"--s3-no-check-bucket",
"--s3-force-path-style",
];
const rcloneDestination = `:s3:${bucket}/jinza/${backupFileName}`;
const rcloneDestination = `:s3:${bucket}/${bucketDestination}`;
const rcloneCommand = `rclone rcat ${rcloneFlags.join(" ")} "${rcloneDestination}"`;
// const pgDumpCommand = `pg_dump -Fc --no-acl --no-owner -h localhost -U ${databaseUser} --no-password '${database}' | gzip`;
// const rcloneCommand = `rclone rcat --s3-provider Other \
// --s3-access-key-id ${accessKey} \
// --s3-secret-access-key ${secretAccessKey} \
// --s3-region ${region} \
// --s3-endpoint ${endpoint} \
// --buffer-size 16M ${rcloneDestination}`;
// const rcloneCommand = `rclone rcat --buffer-size 16M ${rcloneDestination}`;
// const command = `
// // docker exec ${containerId} /bin/bash -c "${pgDumpCommand} | ${rcloneCommand}"
// `;
console.log(`${pgDumpCommand} | ${rcloneCommand}`);
await execAsyncRemote(
postgres.serverId,

View File

@@ -3,9 +3,9 @@ import type { BackupSchedule } from "@/server/api/services/backup";
import type { Destination } from "@/server/api/services/destination";
import { PutObjectCommand, S3Client } from "@aws-sdk/client-s3";
import { scheduleJob, scheduledJobs } from "node-schedule";
import { runMariadbBackup } from "./mariadb";
import { runMongoBackup } from "./mongo";
import { runMySqlBackup } from "./mysql";
import { runMariadbBackup, runRemoteMariadbBackup } from "./mariadb";
import { runMongoBackup, runRemoteMongoBackup } from "./mongo";
import { runMySqlBackup, runRemoteMySqlBackup } from "./mysql";
import { runPostgresBackup, runRemotePostgresBackup } from "./postgres";
export const uploadToS3 = async (
@@ -17,9 +17,7 @@ export const uploadToS3 = async (
const s3Client = new S3Client({
region: region,
...(endpoint && {
endpoint: endpoint,
}),
endpoint: endpoint,
credentials: {
accessKeyId: accessKey,
secretAccessKey: secretAccessKey,
@@ -36,7 +34,6 @@ export const uploadToS3 = async (
await s3Client.send(command);
};
export const scheduleBackup = (backup: BackupSchedule) => {
const { schedule, backupId, databaseType, postgres, mysql, mongo, mariadb } =
backup;
@@ -48,11 +45,23 @@ export const scheduleBackup = (backup: BackupSchedule) => {
await runPostgresBackup(postgres, backup);
}
} else if (databaseType === "mysql" && mysql) {
await runMySqlBackup(mysql, backup);
if (mysql.serverId) {
await runRemoteMySqlBackup(mysql, backup);
} else {
await runMySqlBackup(mysql, backup);
}
} else if (databaseType === "mongo" && mongo) {
await runMongoBackup(mongo, backup);
if (mongo.serverId) {
await runRemoteMongoBackup(mongo, backup);
} else {
await runMongoBackup(mongo, backup);
}
} else if (databaseType === "mariadb" && mariadb) {
await runMariadbBackup(mariadb, backup);
if (mariadb.serverId) {
await runRemoteMariadbBackup(mariadb, backup);
} else {
await runMariadbBackup(mariadb, backup);
}
}
});
};