Add backup and deployment schema updates for improved data handling

- Introduced a new SQL file `0089_noisy_sandman.sql` that creates the enum type `backupType` and adds the relevant columns and constraints to the `backup` and `deployment` tables, enhancing the data structure for backup management.
- Removed the superseded SQL files `0090_lame_gressill.sql` and `0091_colossal_lifeguard.sql`, whose statements were consolidated into the new migration, streamlining the database schema history.
- Updated journal and snapshot JSON files to reflect the latest schema changes, ensuring consistency across the database structure.
This commit is contained in:
Mauricio Siu
2025-05-04 15:13:49 -06:00
parent 9aa56870b0
commit e3ec8f1589
9 changed files with 76 additions and 11407 deletions

View File

@@ -1,6 +1,10 @@
CREATE TYPE "public"."backupType" AS ENUM('database', 'compose');--> statement-breakpoint
ALTER TABLE "backup" ADD COLUMN "appName" text NOT NULL;--> statement-breakpoint
ALTER TABLE "backup" ADD COLUMN "serviceName" text;--> statement-breakpoint
ALTER TABLE "backup" ADD COLUMN "backupType" "backupType" DEFAULT 'database' NOT NULL;--> statement-breakpoint
ALTER TABLE "backup" ADD COLUMN "composeId" text;--> statement-breakpoint
ALTER TABLE "backup" ADD COLUMN "metadata" jsonb;--> statement-breakpoint
ALTER TABLE "backup" ADD CONSTRAINT "backup_composeId_compose_composeId_fk" FOREIGN KEY ("composeId") REFERENCES "public"."compose"("composeId") ON DELETE cascade ON UPDATE no action;
ALTER TABLE "deployment" ADD COLUMN "backupId" text;--> statement-breakpoint
ALTER TABLE "backup" ADD CONSTRAINT "backup_composeId_compose_composeId_fk" FOREIGN KEY ("composeId") REFERENCES "public"."compose"("composeId") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "deployment" ADD CONSTRAINT "deployment_backupId_backup_backupId_fk" FOREIGN KEY ("backupId") REFERENCES "public"."backup"("backupId") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
ALTER TABLE "backup" ADD CONSTRAINT "backup_appName_unique" UNIQUE("appName");

View File

@@ -1,2 +0,0 @@
ALTER TABLE "deployment" ADD COLUMN "backupId" text;--> statement-breakpoint
ALTER TABLE "deployment" ADD CONSTRAINT "deployment_backupId_backup_backupId_fk" FOREIGN KEY ("backupId") REFERENCES "public"."backup"("backupId") ON DELETE cascade ON UPDATE no action;

View File

@@ -1,2 +0,0 @@
ALTER TABLE "backup" ADD COLUMN "appName" text NOT NULL;--> statement-breakpoint
ALTER TABLE "backup" ADD CONSTRAINT "backup_appName_unique" UNIQUE("appName");

View File

@@ -1,5 +1,5 @@
{
"id": "12ebb86a-87e3-4023-a64d-0c5df96507fb",
"id": "3ec09926-2da4-41c9-8eae-8ef6b023035e",
"prevId": "c7eae4ce-5acc-439b-962f-bb2ef8922187",
"version": "7",
"dialect": "postgresql",
@@ -1746,6 +1746,12 @@
"primaryKey": true,
"notNull": true
},
"appName": {
"name": "appName",
"type": "text",
"primaryKey": false,
"notNull": true
},
"schedule": {
"name": "schedule",
"type": "text",
@@ -1941,7 +1947,15 @@
}
},
"compositePrimaryKeys": {},
"uniqueConstraints": {},
"uniqueConstraints": {
"backup_appName_unique": {
"name": "backup_appName_unique",
"nullsNotDistinct": false,
"columns": [
"appName"
]
}
},
"policies": {},
"checkConstraints": {},
"isRLSEnabled": false
@@ -2130,6 +2144,12 @@
"type": "text",
"primaryKey": false,
"notNull": false
},
"backupId": {
"name": "backupId",
"type": "text",
"primaryKey": false,
"notNull": false
}
},
"indexes": {},
@@ -2198,6 +2218,19 @@
],
"onDelete": "cascade",
"onUpdate": "no action"
},
"deployment_backupId_backup_backupId_fk": {
"name": "deployment_backupId_backup_backupId_fk",
"tableFrom": "deployment",
"tableTo": "backup",
"columnsFrom": [
"backupId"
],
"columnsTo": [
"backupId"
],
"onDelete": "cascade",
"onUpdate": "no action"
}
},
"compositePrimaryKeys": {},

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -628,22 +628,8 @@
{
"idx": 89,
"version": "7",
"when": 1746287354535,
"tag": "0089_eminent_winter_soldier",
"breakpoints": true
},
{
"idx": 90,
"version": "7",
"when": 1746287994297,
"tag": "0090_lame_gressill",
"breakpoints": true
},
{
"idx": 91,
"version": "7",
"when": 1746289884571,
"tag": "0091_colossal_lifeguard",
"when": 1746392564463,
"tag": "0089_noisy_sandman",
"breakpoints": true
}
]

View File

@@ -144,14 +144,44 @@ export const initializeJobs = async () => {
const schedulesResult = await db.query.schedules.findMany({
where: eq(schedules.enabled, true),
with: {
application: {
with: {
server: true,
},
},
compose: {
with: {
server: true,
},
},
server: true,
},
});
for (const schedule of schedulesResult) {
const filteredSchedulesBasedOnServerStatus = schedulesResult.filter(
(schedule) => {
if (schedule.server) {
return schedule.server.serverStatus === "active";
}
if (schedule.application) {
return schedule.application.server?.serverStatus === "active";
}
if (schedule.compose) {
return schedule.compose.server?.serverStatus === "active";
}
},
);
for (const schedule of filteredSchedulesBasedOnServerStatus) {
scheduleJob({
scheduleId: schedule.scheduleId,
type: "schedule",
cronSchedule: schedule.cronExpression,
});
}
logger.info({ Quantity: schedulesResult.length }, "Schedules Initialized");
logger.info(
{ Quantity: filteredSchedulesBasedOnServerStatus.length },
"Schedules Initialized",
);
};

View File

@@ -7,7 +7,7 @@ import { runJobs } from "./utils.js";
export const firstWorker = new Worker(
"backupQueue",
async (job: Job<QueueJob>) => {
logger.info({ data: job.data }, "Job received");
logger.info({ data: job.data }, "Running job");
await runJobs(job.data);
},
{
@@ -18,7 +18,7 @@ export const firstWorker = new Worker(
export const secondWorker = new Worker(
"backupQueue",
async (job: Job<QueueJob>) => {
logger.info({ data: job.data }, "Job received");
logger.info({ data: job.data }, "Running job");
await runJobs(job.data);
},
{