Mirror of https://github.com/Dokploy/dokploy, synced 2025-06-26 18:27:59 +00:00
* feat: add start monitoring remote servers * refactor: update * refactor: update * refactor: update * refactor: update * refactor: update * refactor: update * refactor: update * refactor: * refactor: add metrics * feat: add disk monitoring * refactor: translate to english * refactor: add stats * refactor: remove color * feat: add log server metrics * refactor: remove unused deps * refactor: add origin * refactor: add logs * refactor: update * feat: add series monitoring * refactor: add system monitoring * feat: add benchmark to optimize data * refactor: update fn * refactor: remove comments * refactor: update * refactor: exclude items * feat: add refresh rate * feat: add monitoring remote servers * refactor: update * refactor: remove unused volumes * refactor: update monitoring * refactor: add more presets * feat: add container metrics * feat: add docker monitoring * refactor: update conversion * refactor: remove unused code * refactor: update * refactor: add docker compose logs * refactor: add docker cli * refactor: add install curl * refactor: add get update * refactor: add monitoring remote servers * refactor: add containers config * feat: add container specification * refactor: update path * refactor: add server filter * refactor: simplify logic * fix: verify if file exists before getting stats * refactor: update * refactor: remove unused deps * test: add test for containers * refactor: update * refactor: add memory collector * refactor: update * refactor: update * refactor: update * refactor: remove * refactor: add memory * refactor: add server memory usage * refactor: change memory * refactor: update * refactor: update * refactor: add container metrics * refactor: comment code * refactor: mount proc bind * refactor: change interval with node cron * refactor: remove opening file * refactor: use streams * refactor: remove unused ws * refactor: disable live when is all * refactor: add sqlite * refactor: update * feat: add golang benchmark * refactor: update go * refactor: update dockerfile * refactor: update db * refactor: add env * refactor: separate logic * refactor: split logic * refactor: update logs * refactor: update dockerfile * refactor: hide .env * refactor: update * chore: hide .env * refactor: add end angle * refactor: update * refactor: update * refactor: update * refactor: update * refactor: update * refactor: update monitoring * refactor: add mount db * refactor: add metrics and url callback * refactor: add middleware * refactor: add threshold property * feat: add memory and cpu threshold notification * feat: send notifications to the server * feat: add metrics for dokploy server * refactor: add dokploy server to monitoring * refactor: update methods * refactor: add admin to useEffect * refactor: stop monitoring containers if elements are 0 * refactor: cancel request if appName is empty * refactor: reuse methods * chore: add feat monitoring * refactor: set base url * refactor: adjust monitoring * refactor: delete migrations * feat: add columns * fix: add missing flag * refactor: add free metrics * refactor: add paid monitoring * refactor: update methods * feat: improve ui * feat: add container stats * refactor: add all container metrics * refactor: add color primary * refactor: change default rate limiting refresher * refactor: update retention days * refactor: use json instead of individual properties * refactor: lint * refactor: pass json env * refactor: update * refactor: delete * refactor: update * refactor: fix types * refactor: add retention days * chore: add license * refactor: create db * refactor: update path * refactor: update setup * refactor: update * refactor: create files * refactor: update * refactor: delete * refactor: update * refactor: update token metrics * fix: typechecks * refactor: setup web server * refactor: update error handling and add monitoring * refactor: add local storage save * refactor: add spacing * refactor: update * refactor: upgrade drizzle * refactor: delete * refactor: upgrade drizzle kit * refactor: update search with jsonB * chore: upgrade drizzle * chore: update packages * refactor: add missing type * refactor: add serverType * refactor: update url * refactor: update * refactor: update * refactor: hide monitoring on self hosted * refactor: update server * refactor: update * refactor: update * refactor: pin node version
271 lines
6.6 KiB
Go
package containers

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/mauriciogm/dokploy/apps/monitoring/config"
	"github.com/mauriciogm/dokploy/apps/monitoring/database"
)

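// ContainerMonitor periodically collects per-container metrics from
// `docker stats` and persists them through the database layer.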
type ContainerMonitor struct {
	db        *database.DB
	isRunning bool
	mu        sync.Mutex
	stopChan  chan struct{}
}

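// NewContainerMonitor ensures the container metrics table exists and returns
// a monitor that is ready to be started.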
func NewContainerMonitor(db *database.DB) (*ContainerMonitor, error) {
	if err := db.InitContainerMetricsTable(); err != nil {
		return nil, fmt.Errorf("failed to initialize container metrics table: %v", err)
	}

	return &ContainerMonitor{
		db:       db,
		stopChan: make(chan struct{}),
	}, nil
}

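// Start loads the monitoring configuration and, when at least one service is
// configured, launches a background goroutine that collects container metrics
// every RefreshRate seconds (60 by default) until Stop is called or the
// service list becomes empty.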
func (cm *ContainerMonitor) Start() error {
	if err := LoadConfig(); err != nil {
		return fmt.Errorf("error loading config: %v", err)
	}

	// Check if there are services to monitor
	if len(monitorConfig.IncludeServices) == 0 {
		log.Printf("No services to monitor. Skipping container metrics collection")
		return nil
	}

	metricsConfig := config.GetMetricsConfig()
	refreshRate := metricsConfig.Containers.RefreshRate
	if refreshRate == 0 {
		refreshRate = 60 // default refresh rate
	}
	duration := time.Duration(refreshRate) * time.Second

	// log.Printf("Container metrics collection will run every %d seconds for services: %v", refreshRate, monitorConfig.IncludeServices)

	ticker := time.NewTicker(duration)
	go func() {
		for {
			select {
			case <-ticker.C:
				// Check again in case the configuration has changed
				if len(monitorConfig.IncludeServices) == 0 {
					log.Printf("No services to monitor. Stopping metrics collection")
					ticker.Stop()
					return
				}
				cm.collectMetrics()
			case <-cm.stopChan:
				ticker.Stop()
				return
			}
		}
	}()

	return nil
}

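// Stop signals the collection goroutine to exit.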
func (cm *ContainerMonitor) Stop() {
	close(cm.stopChan)
}

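// collectMetrics runs `docker stats --no-stream` with a JSON format template,
// parses one line per container, skips containers that are not configured for
// monitoring, keeps only the first container seen per service, and saves the
// resulting metrics. A mutex-guarded flag prevents overlapping collections.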
func (cm *ContainerMonitor) collectMetrics() {
	cm.mu.Lock()
	if cm.isRunning {
		cm.mu.Unlock()
		log.Println("Previous collection still running, skipping...")
		return
	}
	cm.isRunning = true
	cm.mu.Unlock()

	defer func() {
		cm.mu.Lock()
		cm.isRunning = false
		cm.mu.Unlock()
	}()

	cmd := exec.Command("docker", "stats", "--no-stream", "--format",
		`{"BlockIO":"{{.BlockIO}}","CPUPerc":"{{.CPUPerc}}","ID":"{{.ID}}","MemPerc":"{{.MemPerc}}","MemUsage":"{{.MemUsage}}","Name":"{{.Name}}","NetIO":"{{.NetIO}}"}`)

	output, err := cmd.CombinedOutput()

	// log.Printf("Output: %s", string(output))
	if err != nil {
		log.Printf("Error getting docker stats: %v", err)
		return
	}

	lines := string(output)
	if lines == "" {
		return
	}

	seenServices := make(map[string]bool)
	for _, line := range strings.Split(lines, "\n") {
		if line == "" {
			continue
		}

		var container Container
		if err := json.Unmarshal([]byte(line), &container); err != nil {
			log.Printf("Error parsing container data: %v", err)
			continue
		}

		if !ShouldMonitorContainer(container.Name) {
			continue
		}

		serviceName := GetServiceName(container.Name)

		if seenServices[serviceName] {
			continue
		}

		seenServices[serviceName] = true

		// log.Printf("Container: %+v", container)

		// Process metrics
		metric := processContainerMetrics(container)

		// log.Printf("Saving metrics for %s: %+v", serviceName, metric)

		if err := cm.db.SaveContainerMetric(metric); err != nil {
			log.Printf("Error saving metrics for %s: %v", serviceName, err)
		}
	}
}

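// processContainerMetrics converts the raw docker stats strings (percentages
// and "used / total" pairs) into a structured ContainerMetric, normalizing
// MiB/GiB memory units to MB/GB.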
func processContainerMetrics(container Container) *database.ContainerMetric {

	// Process CPU
	cpu, _ := strconv.ParseFloat(strings.TrimSuffix(container.CPUPerc, "%"), 64)

	// Process Memory
	memPerc, _ := strconv.ParseFloat(strings.TrimSuffix(container.MemPerc, "%"), 64)
	memParts := strings.Split(container.MemUsage, " / ")

	var usedValue, totalValue float64
	var usedUnit, totalUnit string

	if len(memParts) == 2 {
		// Process used memory
		usedParts := strings.Fields(memParts[0])
		if len(usedParts) > 0 {
			usedValue, _ = strconv.ParseFloat(strings.TrimRight(usedParts[0], "MiBGiB"), 64)
			usedUnit = strings.TrimLeft(usedParts[0], "0123456789.")
			// Convert MiB to MB and GiB to GB
			if usedUnit == "MiB" {
				usedUnit = "MB"
			} else if usedUnit == "GiB" {
				usedUnit = "GB"
			}
		}

		// Process total memory
		totalParts := strings.Fields(memParts[1])
		if len(totalParts) > 0 {
			totalValue, _ = strconv.ParseFloat(strings.TrimRight(totalParts[0], "MiBGiB"), 64)
			totalUnit = strings.TrimLeft(totalParts[0], "0123456789.")
			// Convert MiB to MB and GiB to GB
			if totalUnit == "MiB" {
				totalUnit = "MB"
			} else if totalUnit == "GiB" {
				totalUnit = "GB"
			}
		}
	}

	// Process Network I/O
	netParts := strings.Split(container.NetIO, " / ")

	var netInValue, netOutValue float64
	var netInUnit, netOutUnit string

	if len(netParts) == 2 {
		// Process input
		inParts := strings.Fields(netParts[0])
		if len(inParts) > 0 {
			netInValue, _ = strconv.ParseFloat(strings.TrimRight(inParts[0], "kMGTB"), 64)
			netInUnit = strings.TrimLeft(inParts[0], "0123456789.")
		}

		// Process output
		outParts := strings.Fields(netParts[1])
		if len(outParts) > 0 {
			netOutValue, _ = strconv.ParseFloat(strings.TrimRight(outParts[0], "kMGTB"), 64)
			netOutUnit = strings.TrimLeft(outParts[0], "0123456789.")
		}
	}

	// Process Block I/O
	blockParts := strings.Split(container.BlockIO, " / ")

	var blockReadValue, blockWriteValue float64
	var blockReadUnit, blockWriteUnit string

	if len(blockParts) == 2 {
		// Process read
		readParts := strings.Fields(blockParts[0])
		if len(readParts) > 0 {
			blockReadValue, _ = strconv.ParseFloat(strings.TrimRight(readParts[0], "kMGTB"), 64)
			blockReadUnit = strings.TrimLeft(readParts[0], "0123456789.")
		}

		// Process write
		writeParts := strings.Fields(blockParts[1])
		if len(writeParts) > 0 {
			blockWriteValue, _ = strconv.ParseFloat(strings.TrimRight(writeParts[0], "kMGTB"), 64)
			blockWriteUnit = strings.TrimLeft(writeParts[0], "0123456789.")
		}
	}

	return &database.ContainerMetric{
		Timestamp: time.Now().UTC().Format(time.RFC3339Nano),
		CPU:       cpu,
		Memory: database.MemoryMetric{
			Percentage: memPerc,
			Used:       usedValue,
			Total:      totalValue,
			UsedUnit:   usedUnit,
			TotalUnit:  totalUnit,
		},
		Network: database.NetworkMetric{
			Input:      netInValue,
			Output:     netOutValue,
			InputUnit:  netInUnit,
			OutputUnit: netOutUnit,
		},
		BlockIO: database.BlockIOMetric{
			Read:      blockReadValue,
			Write:     blockWriteValue,
			ReadUnit:  blockReadUnit,
			WriteUnit: blockWriteUnit,
		},
		Container: container.ID,
		ID:        container.ID,
		Name:      container.Name,
	}
}

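// parseValue splits a stat string on whitespace and parses its first field as
// a number; it returns 0 and "B" when the string is empty, and the unit is
// whatever remains of the string after its leading digits.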
func parseValue(value string) (float64, string) {
	parts := strings.Fields(value)
	if len(parts) < 1 {
		return 0, "B"
	}
	v, _ := strconv.ParseFloat(parts[0], 64)
	unit := strings.TrimLeft(value, "0123456789.")
	return v, unit
}
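For reference, here is a minimal sketch of how a single `docker stats` line flows through processContainerMetrics. It is not part of the repository: it assumes the Container struct defined elsewhere in this package exposes the string fields named in the format template above (BlockIO, CPUPerc, ID, MemPerc, MemUsage, Name, NetIO), and the sample values are invented for illustration.

package containers

import (
	"encoding/json"
	"fmt"
	"log"
)

// Hypothetical example (it would live in a _test.go file in this package):
// decode one docker stats line and convert it, mirroring collectMetrics.
func ExampleProcessContainerMetrics() {
	line := `{"BlockIO":"4.5MB / 0B","CPUPerc":"12.34%","ID":"abc123","MemPerc":"45.67%","MemUsage":"512MiB / 2GiB","Name":"web-app.1.xyz","NetIO":"1.2MB / 800kB"}`

	var container Container
	if err := json.Unmarshal([]byte(line), &container); err != nil {
		log.Printf("error parsing sample line: %v", err)
		return
	}

	m := processContainerMetrics(container)
	fmt.Printf("%s cpu=%.2f%% mem=%.0f%s of %.0f%s\n",
		m.Name, m.CPU, m.Memory.Used, m.Memory.UsedUnit, m.Memory.Total, m.Memory.TotalUnit)
	// Output: web-app.1.xyz cpu=12.34% mem=512MB of 2GB
}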