Mirror of https://github.com/Dokploy/dokploy (synced 2025-06-26 18:27:59 +00:00)
feat(monitoring): add date range filtering and log cleanup scheduling
- Implement date range filtering for access logs and request statistics
- Add log cleanup scheduling with configurable cron expression
- Update UI components to support date range selection
- Refactor log processing and parsing to handle date filtering
- Add new database migration for log cleanup cron configuration
- Remove deprecated log rotation management logic
@@ -53,7 +53,7 @@ export const users_temp = pgTable("user_temp", {
 	letsEncryptEmail: text("letsEncryptEmail"),
 	sshPrivateKey: text("sshPrivateKey"),
 	enableDockerCleanup: boolean("enableDockerCleanup").notNull().default(false),
-	enableLogRotation: boolean("enableLogRotation").notNull().default(false),
+	logCleanupCron: text("logCleanupCron"),
 	// Metrics
 	enablePaidFeatures: boolean("enablePaidFeatures").notNull().default(false),
 	metricsConfig: jsonb("metricsConfig")
@@ -250,6 +250,12 @@ export const apiReadStatsLogs = z.object({
 	status: z.string().array().optional(),
 	search: z.string().optional(),
 	sort: z.object({ id: z.string(), desc: z.boolean() }).optional(),
+	dateRange: z
+		.object({
+			start: z.string().optional(),
+			end: z.string().optional(),
+		})
+		.optional(),
 });

 export const apiUpdateWebServerMonitoring = z.object({
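
As a side note, here is a minimal standalone sketch of how the added dateRange shape behaves under zod parsing; the sample values are illustrative only:

import { z } from "zod";

// Same shape as the dateRange field added to apiReadStatsLogs above.
const dateRange = z
	.object({
		start: z.string().optional(),
		end: z.string().optional(),
	})
	.optional();

dateRange.parse(undefined); // field omitted entirely, no filtering
dateRange.parse({ start: "2025-06-01T00:00:00Z" }); // open-ended range (no end bound)
dateRange.parse({ start: "2025-06-01", end: "2025-06-07" }); // both bounds

Since start and end are plain optional strings, malformed dates are not rejected here; they would only surface later, when new Date(...) is applied in the log filtering code further down.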
@@ -305,4 +311,5 @@ export const apiUpdateUser = createSchema.partial().extend({
 		}),
 	})
 	.optional(),
+	logCleanupCron: z.string().optional().nullable(),
 });
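
A small hedged illustration of what the optional-and-nullable field accepts (standalone zod, values illustrative):

import { z } from "zod";

const logCleanupCron = z.string().optional().nullable();

logCleanupCron.parse("0 0 * * *"); // a cron expression: enable or update the cleanup schedule
logCleanupCron.parse(null); // null: cleanup disabled (mirrors stopLogCleanup below)
logCleanupCron.parse(undefined); // omitted: the field is simply not part of the update

Note that the schema does not validate cron syntax itself; a malformed expression would only surface when node-schedule tries to register the job.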
@@ -116,3 +116,9 @@ export * from "./db/validations/index";
 export * from "./utils/gpu-setup";

 export * from "./lib/auth";
+
+export {
+	startLogCleanup,
+	stopLogCleanup,
+	getLogCleanupStatus,
+} from "./utils/access-log/handler";
@@ -1,121 +1,77 @@
-import { IS_CLOUD, paths } from "@dokploy/server/constants";
-import { type RotatingFileStream, createStream } from "rotating-file-stream";
+import { paths } from "@dokploy/server/constants";
 import { execAsync } from "../process/execAsync";
 import { findAdmin } from "@dokploy/server/services/admin";
 import { updateUser } from "@dokploy/server/services/user";
+import { scheduleJob, scheduledJobs } from "node-schedule";

-class LogRotationManager {
-	private static instance: LogRotationManager;
-	private stream: RotatingFileStream | null = null;
+const LOG_CLEANUP_JOB_NAME = "access-log-cleanup";

-	private constructor() {
-		if (IS_CLOUD) {
-			return;
-		}
-		this.initialize().catch(console.error);
-	}

-	public static getInstance(): LogRotationManager {
-		if (!LogRotationManager.instance) {
-			LogRotationManager.instance = new LogRotationManager();
-		}
-		return LogRotationManager.instance;
-	}

-	private async initialize(): Promise<void> {
-		const isActive = await this.getStateFromDB();
-		if (isActive) {
-			await this.activateStream();
-		}
-	}

-	private async getStateFromDB(): Promise<boolean> {
-		const admin = await findAdmin();
-		return admin?.user.enableLogRotation ?? false;
-	}

-	private async setStateInDB(active: boolean): Promise<void> {
-		const admin = await findAdmin();
-		if (!admin) {
-			return;
-		}
-		await updateUser(admin.user.id, {
-			enableLogRotation: active,
-		});
-	}

-	private async activateStream(): Promise<void> {
+export const startLogCleanup = async (
+	cronExpression = "0 0 * * *",
+): Promise<boolean> => {
+	try {
 		const { DYNAMIC_TRAEFIK_PATH } = paths();
-		if (this.stream) {
-			await this.deactivateStream();

+		const existingJob = scheduledJobs[LOG_CLEANUP_JOB_NAME];
+		if (existingJob) {
+			existingJob.cancel();
 		}

-		this.stream = createStream("access.log", {
-			size: "100M",
-			interval: "1d",
-			path: DYNAMIC_TRAEFIK_PATH,
-			rotate: 6,
-			compress: "gzip",
-		});
+		scheduleJob(LOG_CLEANUP_JOB_NAME, cronExpression, async () => {
+			try {
+				await execAsync(
+					`tail -n 1000 ${DYNAMIC_TRAEFIK_PATH}/access.log > ${DYNAMIC_TRAEFIK_PATH}/access.log.tmp && mv ${DYNAMIC_TRAEFIK_PATH}/access.log.tmp ${DYNAMIC_TRAEFIK_PATH}/access.log`,
+				);

-		this.stream.on("rotation", this.handleRotation.bind(this));
-	}

-	private async deactivateStream(): Promise<void> {
-		return new Promise<void>((resolve) => {
-			if (this.stream) {
-				this.stream.end(() => {
-					this.stream = null;
-					resolve();
-				});
-			} else {
-				resolve();
+				await execAsync("docker exec dokploy-traefik kill -USR1 1");
+			} catch (error) {
+				console.error("Error during log cleanup:", error);
 			}
 		});
 	}

-	public async activate(): Promise<boolean> {
-		const currentState = await this.getStateFromDB();
-		if (currentState) {
-			return true;
+		const admin = await findAdmin();
+		if (admin) {
+			await updateUser(admin.user.id, {
+				logCleanupCron: cronExpression,
+			});
 		}

-		await this.setStateInDB(true);
-		await this.activateStream();
 		return true;
+	} catch (_) {
+		return false;
 	}
+};

-	public async deactivate(): Promise<boolean> {
-		console.log("Deactivating log rotation...");
-		const currentState = await this.getStateFromDB();
-		if (!currentState) {
-			console.log("Log rotation is already inactive in DB");
-			return true;
+export const stopLogCleanup = async (): Promise<boolean> => {
+	try {
+		const existingJob = scheduledJobs[LOG_CLEANUP_JOB_NAME];
+		if (existingJob) {
+			existingJob.cancel();
 		}

+		// Update database
+		const admin = await findAdmin();
+		if (admin) {
+			await updateUser(admin.user.id, {
+				logCleanupCron: null,
+			});
 		}

-		await this.setStateInDB(false);
-		await this.deactivateStream();
-		console.log("Log rotation deactivated successfully");
 		return true;
+	} catch (error) {
+		console.error("Error stopping log cleanup:", error);
+		return false;
 	}
+};

-	private async handleRotation() {
-		try {
-			const status = await this.getStatus();
-			if (!status) {
-				await this.deactivateStream();
-			}
-			await execAsync(
-				"docker kill -s USR1 $(docker ps -q --filter name=dokploy-traefik)",
-			);
-			console.log("USR1 Signal send to Traefik");
-		} catch (error) {
-			console.error("Error sending USR1 Signal to Traefik:", error);
-		}
-	}
-	public async getStatus(): Promise<boolean> {
-		const dbState = await this.getStateFromDB();
-		return dbState;
-	}
-}
-export const logRotationManager = LogRotationManager.getInstance();
+export const getLogCleanupStatus = async (): Promise<{
+	enabled: boolean;
+	cronExpression: string | null;
+}> => {
+	const admin = await findAdmin();
+	const cronExpression = admin?.user.logCleanupCron ?? null;
+	return {
+		enabled: cronExpression !== null,
+		cronExpression,
+	};
+};
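
For orientation, a hedged usage sketch of the new exports; the import path assumes the package-level re-export added above (the exact alias may differ), and the cron expression is only an example:

import {
	startLogCleanup,
	stopLogCleanup,
	getLogCleanupStatus,
} from "@dokploy/server";

const example = async () => {
	// Schedule cleanup daily at 03:00 and persist the expression on the admin user.
	await startLogCleanup("0 3 * * *");

	// Status is derived purely from the stored logCleanupCron value.
	const status = await getLogCleanupStatus();
	console.log(status); // { enabled: true, cronExpression: "0 3 * * *" }

	// Stopping cancels the node-schedule job and clears logCleanupCron in the database.
	await stopLogCleanup();
};

Unlike the removed LogRotationManager singleton, nothing is instantiated at import time: the job only exists after startLogCleanup is called, or after initCronJobs restores it at boot, as shown further down.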
@@ -6,14 +6,21 @@ interface HourlyData {
 	count: number;
 }

-export function processLogs(logString: string): HourlyData[] {
+export function processLogs(
+	logString: string,
+	dateRange?: { start?: string; end?: string },
+): HourlyData[] {
 	if (_.isEmpty(logString)) {
 		return [];
 	}

 	const hourlyData = _(logString)
 		.split("\n")
 		.compact()
+		.filter((line) => {
+			const trimmed = line.trim();
+			// Check if the line starts with { and ends with } to ensure it's a potential JSON object
+			return trimmed !== "" && trimmed.startsWith("{") && trimmed.endsWith("}");
+		})
 		.map((entry) => {
 			try {
 				const log: LogEntry = JSON.parse(entry);
@@ -21,6 +28,20 @@ export function processLogs(logString: string): HourlyData[] {
 					return null;
 				}
 				const date = new Date(log.StartUTC);
+
+				if (dateRange?.start || dateRange?.end) {
+					const logDate = date.getTime();
+					const start = dateRange?.start
+						? new Date(dateRange.start).getTime()
+						: 0;
+					const end = dateRange?.end
+						? new Date(dateRange.end).getTime()
+						: Number.POSITIVE_INFINITY;
+					if (logDate < start || logDate > end) {
+						return null;
+					}
+				}
+
 				return `${date.toISOString().slice(0, 13)}:00:00Z`;
 			} catch (error) {
 				console.error("Error parsing log entry:", error);
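
To make the filtering concrete, here is a self-contained sketch of the same bounds logic used above (the sample timestamps are made up; the real function additionally groups surviving entries per hour):

// Missing start defaults to 0 and missing end to +Infinity, so either side can be open-ended.
const inRange = (
	startUTC: string,
	dateRange?: { start?: string; end?: string },
): boolean => {
	const logDate = new Date(startUTC).getTime();
	const start = dateRange?.start ? new Date(dateRange.start).getTime() : 0;
	const end = dateRange?.end
		? new Date(dateRange.end).getTime()
		: Number.POSITIVE_INFINITY;
	return logDate >= start && logDate <= end;
};

console.log(inRange("2025-06-01T10:15:00Z", { start: "2025-06-01", end: "2025-06-02" })); // true
console.log(inRange("2025-05-30T23:59:00Z", { start: "2025-06-01" })); // false, before the open-ended range
console.log(inRange("2025-05-30T23:59:00Z", undefined)); // true, no range means no filtering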
@@ -51,21 +72,46 @@ export function parseRawConfig(
 	sort?: SortInfo,
 	search?: string,
 	status?: string[],
+	dateRange?: { start?: string; end?: string },
 ): { data: LogEntry[]; totalCount: number } {
 	try {
 		if (_.isEmpty(rawConfig)) {
 			return { data: [], totalCount: 0 };
 		}

 		// Split logs into chunks to avoid memory issues
 		let parsedLogs = _(rawConfig)
 			.split("\n")
+			.filter((line) => {
+				const trimmed = line.trim();
+				return (
+					trimmed !== "" && trimmed.startsWith("{") && trimmed.endsWith("}")
+				);
+			})
+			.map((line) => {
+				try {
+					return JSON.parse(line) as LogEntry;
+				} catch (error) {
+					console.error("Error parsing log line:", error);
+					return null;
+				}
+			})
 			.compact()
-			.map((line) => JSON.parse(line) as LogEntry)
 			.value();

 		parsedLogs = parsedLogs.filter(
 			(log) => log.ServiceName !== "dokploy-service-app@file",
 		);
+		// Apply date range filter if provided
+		if (dateRange?.start || dateRange?.end) {
+			parsedLogs = parsedLogs.filter((log) => {
+				const logDate = new Date(log.StartUTC).getTime();
+				const start = dateRange?.start
+					? new Date(dateRange.start).getTime()
+					: 0;
+				const end = dateRange?.end
+					? new Date(dateRange.end).getTime()
+					: Number.POSITIVE_INFINITY;
+				return logDate >= start && logDate <= end;
+			});
+		}

 		if (search) {
 			parsedLogs = parsedLogs.filter((log) =>
@@ -78,6 +124,7 @@ export function parseRawConfig(
 				status.some((range) => isStatusInRange(log.DownstreamStatus, range)),
 			);
 		}

 		const totalCount = parsedLogs.length;

 		if (sort) {
@@ -101,6 +148,7 @@ export function parseRawConfig(
 		throw new Error("Failed to parse rawConfig");
 	}
 }

 const isStatusInRange = (status: number, range: string) => {
 	switch (range) {
 		case "info":
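
The defensive parsing pattern introduced here (pre-filter JSON-looking lines, parse each one in a try/catch, drop failures) can be exercised in isolation; the log lines below are fabricated:

import _ from "lodash";

const rawConfig = [
	'{"StartUTC":"2025-06-10T08:00:00Z","DownstreamStatus":200,"ServiceName":"my-app@docker"}',
	"not valid json, e.g. a partially written line at the end of the file",
	'{"StartUTC":"2025-06-12T09:30:00Z","DownstreamStatus":502,"ServiceName":"my-app@docker"}',
].join("\n");

const parsed = _(rawConfig)
	.split("\n")
	.filter((line) => {
		const trimmed = line.trim();
		return trimmed !== "" && trimmed.startsWith("{") && trimmed.endsWith("}");
	})
	.map((line) => {
		try {
			return JSON.parse(line);
		} catch {
			return null;
		}
	})
	.compact()
	.value();

console.log(parsed.length); // 2, the malformed line is skipped instead of aborting the whole parse

The date-range, search, and status filters then run over this parsed array, and totalCount is taken after filtering but before sorting.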
@@ -12,6 +12,7 @@ import { runMongoBackup } from "./mongo";
 import { runMySqlBackup } from "./mysql";
 import { runPostgresBackup } from "./postgres";
 import { findAdmin } from "../../services/admin";
+import { startLogCleanup } from "../access-log/handler";

 export const initCronJobs = async () => {
 	console.log("Setting up cron jobs....");
@@ -168,4 +169,8 @@ export const initCronJobs = async () => {
 			}
 		}
 	}
+
+	if (admin?.user.logCleanupCron) {
+		await startLogCleanup(admin.user.logCleanupCron);
+	}
 };
@@ -137,12 +137,44 @@ export const readRemoteConfig = async (serverId: string, appName: string) => {
 	}
 };

-export const readMonitoringConfig = () => {
+export const readMonitoringConfig = (readAll = false) => {
 	const { DYNAMIC_TRAEFIK_PATH } = paths();
 	const configPath = path.join(DYNAMIC_TRAEFIK_PATH, "access.log");
 	if (fs.existsSync(configPath)) {
-		const yamlStr = fs.readFileSync(configPath, "utf8");
-		return yamlStr;
+		if (!readAll) {
+			// Read first 500 lines
+			let content = "";
+			let chunk = "";
+			let validCount = 0;
+
+			for (const char of fs.readFileSync(configPath, "utf8")) {
+				chunk += char;
+				if (char === "\n") {
+					try {
+						const trimmed = chunk.trim();
+						if (
+							trimmed !== "" &&
+							trimmed.startsWith("{") &&
+							trimmed.endsWith("}")
+						) {
+							const log = JSON.parse(trimmed);
+							if (log.ServiceName !== "dokploy-service-app@file") {
+								content += chunk;
+								validCount++;
+								if (validCount >= 500) {
+									break;
+								}
+							}
+						}
+					} catch {
+						// Ignore invalid JSON
+					}
+					chunk = "";
+				}
+			}
+			return content;
+		}
+		return fs.readFileSync(configPath, "utf8");
 	}
 	return null;
 };
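
Finally, a hedged sketch of how the readAll flag composes with the date-range processing above; the wiring (where these functions are imported from, and the chosen range) is illustrative:

// Default: only the first ~500 valid entries are materialised, keeping the dashboard responsive.
const preview = readMonitoringConfig();

// Full read, e.g. when a date range filter needs to reach older entries.
const everything = readMonitoringConfig(true);

// readMonitoringConfig returns null when access.log does not exist yet.
const hourly = processLogs(everything ?? "", {
	start: "2025-06-01T00:00:00Z",
	end: "2025-06-07T23:59:59Z",
});

One trade-off worth noting: the non-readAll path walks the file character by character, so it still loads the whole file into memory via fs.readFileSync before stopping at 500 valid entries; the saving is in parsing and in the payload handed to the UI, not in file I/O.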