refactor(agent): clean up records package and add tests
@@ -3,11 +3,8 @@ package records

import (
    "encoding/json"
    "fmt"
    "log"
    "log/slog"
    "math"
    "strings"
    "time"

    "github.com/henrygd/beszel/internal/entities/container"

@@ -40,16 +37,6 @@ type StatsRecord struct {
    Stats []byte `db:"stats"`
}

// global variables for reusing allocations
var (
    statsRecord    StatsRecord
    containerStats []container.Stats
    sumStats       system.Stats
    tempStats      system.Stats
    queryParams    = make(dbx.Params, 1)
    containerSums  = make(map[string]*container.Stats)
)

// Create longer records by averaging shorter records
func (rm *RecordManager) CreateLongerRecords() {
    // start := time.Now()

@@ -164,41 +151,47 @@ func (rm *RecordManager) CreateLongerRecords() {
        return nil
    })

    statsRecord.Stats = statsRecord.Stats[:0]

    // log.Println("finished creating longer records", "time (ms)", time.Since(start).Milliseconds())
}

// Calculate the average stats of a list of system_stats records without reflect
func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *system.Stats {
    // Clear/reset global structs for reuse
    sumStats = system.Stats{}
    tempStats = system.Stats{}
    sum := &sumStats
    stats := &tempStats
    stats := make([]system.Stats, 0, len(records))
    var row StatsRecord
    params := make(dbx.Params, 1)
    for _, rec := range records {
        row.Stats = row.Stats[:0]
        params["id"] = rec.Id
        db.NewQuery("SELECT stats FROM system_stats WHERE id = {:id}").Bind(params).One(&row)
        var s system.Stats
        if err := json.Unmarshal(row.Stats, &s); err != nil {
            continue
        }
        stats = append(stats, s)
    }
    result := AverageSystemStatsSlice(stats)
    return &result
}
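
The averaging logic now lives in an exported, database-free helper, so it can be exercised directly in tests. A minimal sketch (illustrative only; the values mirror the new test cases added below):

    samples := []system.Stats{
        {Cpu: 20.0, MemUsed: 6.0},
        {Cpu: 40.0, MemUsed: 10.0},
    }
    avg := records.AverageSystemStatsSlice(samples)
    // avg.Cpu == 30.0 and avg.MemUsed == 8.0 after rounding to two decimals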

// AverageSystemStatsSlice computes the average of a slice of system stats.
func AverageSystemStatsSlice(records []system.Stats) system.Stats {
    var sum system.Stats
    count := float64(len(records))
    if count == 0 {
        return sum
    }

    // necessary because uint8 is not big enough for the sum
    batterySum := 0
    // accumulate per-core usage across records
    var cpuCoresSums []uint64
    // accumulate cpu breakdown [user, system, iowait, steal, idle]
    var cpuBreakdownSums []float64

    count := float64(len(records))
    tempCount := float64(0)

    // Accumulate totals
    for _, record := range records {
|
||||
id := record.Id
|
||||
// clear global statsRecord for reuse
|
||||
statsRecord.Stats = statsRecord.Stats[:0]
|
||||
// reset tempStats each iteration to avoid omitzero fields retaining stale values
|
||||
*stats = system.Stats{}
|
||||
|
||||
queryParams["id"] = id
|
||||
db.NewQuery("SELECT stats FROM system_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
|
||||
if err := json.Unmarshal(statsRecord.Stats, stats); err != nil {
|
||||
continue
|
||||
}
|
||||
for i := range records {
|
||||
stats := &records[i]
|
||||
|
||||
sum.Cpu += stats.Cpu
|
||||
// accumulate cpu time breakdowns if present
|
||||
@@ -206,8 +199,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
if len(cpuBreakdownSums) < len(stats.CpuBreakdown) {
|
||||
cpuBreakdownSums = append(cpuBreakdownSums, make([]float64, len(stats.CpuBreakdown)-len(cpuBreakdownSums))...)
|
||||
}
|
||||
for i, v := range stats.CpuBreakdown {
|
||||
cpuBreakdownSums[i] += v
|
||||
for j, v := range stats.CpuBreakdown {
|
||||
cpuBreakdownSums[j] += v
|
||||
}
|
||||
}
|
||||
sum.Mem += stats.Mem
|
||||
@@ -243,8 +236,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
// extend slices to accommodate core count
|
||||
cpuCoresSums = append(cpuCoresSums, make([]uint64, len(stats.CpuCoresUsage)-len(cpuCoresSums))...)
|
||||
}
|
||||
for i, v := range stats.CpuCoresUsage {
|
||||
cpuCoresSums[i] += uint64(v)
|
||||
for j, v := range stats.CpuCoresUsage {
|
||||
cpuCoresSums[j] += uint64(v)
|
||||
}
|
||||
}
|
||||
// Set peak values
|
||||
@@ -344,109 +337,107 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
}
|
||||
}
|
||||
|
||||
// Compute averages in place
|
||||
if count > 0 {
|
||||
sum.Cpu = twoDecimals(sum.Cpu / count)
|
||||
sum.Mem = twoDecimals(sum.Mem / count)
|
||||
sum.MemUsed = twoDecimals(sum.MemUsed / count)
|
||||
sum.MemPct = twoDecimals(sum.MemPct / count)
|
||||
sum.MemBuffCache = twoDecimals(sum.MemBuffCache / count)
|
||||
sum.MemZfsArc = twoDecimals(sum.MemZfsArc / count)
|
||||
sum.Swap = twoDecimals(sum.Swap / count)
|
||||
sum.SwapUsed = twoDecimals(sum.SwapUsed / count)
|
||||
sum.DiskTotal = twoDecimals(sum.DiskTotal / count)
|
||||
sum.DiskUsed = twoDecimals(sum.DiskUsed / count)
|
||||
sum.DiskPct = twoDecimals(sum.DiskPct / count)
|
||||
sum.DiskReadPs = twoDecimals(sum.DiskReadPs / count)
|
||||
sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
|
||||
sum.DiskIO[0] = sum.DiskIO[0] / uint64(count)
|
||||
sum.DiskIO[1] = sum.DiskIO[1] / uint64(count)
|
||||
for i := range sum.DiskIoStats {
|
||||
sum.DiskIoStats[i] = twoDecimals(sum.DiskIoStats[i] / count)
|
||||
}
|
||||
sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
|
||||
sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
|
||||
sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
|
||||
sum.LoadAvg[1] = twoDecimals(sum.LoadAvg[1] / count)
|
||||
sum.LoadAvg[2] = twoDecimals(sum.LoadAvg[2] / count)
|
||||
sum.Bandwidth[0] = sum.Bandwidth[0] / uint64(count)
|
||||
sum.Bandwidth[1] = sum.Bandwidth[1] / uint64(count)
|
||||
sum.Battery[0] = uint8(batterySum / int(count))
|
||||
// Compute averages
|
||||
sum.Cpu = twoDecimals(sum.Cpu / count)
|
||||
sum.Mem = twoDecimals(sum.Mem / count)
|
||||
sum.MemUsed = twoDecimals(sum.MemUsed / count)
|
||||
sum.MemPct = twoDecimals(sum.MemPct / count)
|
||||
sum.MemBuffCache = twoDecimals(sum.MemBuffCache / count)
|
||||
sum.MemZfsArc = twoDecimals(sum.MemZfsArc / count)
|
||||
sum.Swap = twoDecimals(sum.Swap / count)
|
||||
sum.SwapUsed = twoDecimals(sum.SwapUsed / count)
|
||||
sum.DiskTotal = twoDecimals(sum.DiskTotal / count)
|
||||
sum.DiskUsed = twoDecimals(sum.DiskUsed / count)
|
||||
sum.DiskPct = twoDecimals(sum.DiskPct / count)
|
||||
sum.DiskReadPs = twoDecimals(sum.DiskReadPs / count)
|
||||
sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
|
||||
sum.DiskIO[0] = sum.DiskIO[0] / uint64(count)
|
||||
sum.DiskIO[1] = sum.DiskIO[1] / uint64(count)
|
||||
for i := range sum.DiskIoStats {
|
||||
sum.DiskIoStats[i] = twoDecimals(sum.DiskIoStats[i] / count)
|
||||
}
|
||||
sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
|
||||
sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
|
||||
sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
|
||||
sum.LoadAvg[1] = twoDecimals(sum.LoadAvg[1] / count)
|
||||
sum.LoadAvg[2] = twoDecimals(sum.LoadAvg[2] / count)
|
||||
sum.Bandwidth[0] = sum.Bandwidth[0] / uint64(count)
|
||||
sum.Bandwidth[1] = sum.Bandwidth[1] / uint64(count)
|
||||
sum.Battery[0] = uint8(batterySum / int(count))
|
||||
|
||||
// Average network interfaces
|
||||
if sum.NetworkInterfaces != nil {
|
||||
for key := range sum.NetworkInterfaces {
|
||||
sum.NetworkInterfaces[key] = [4]uint64{
|
||||
sum.NetworkInterfaces[key][0] / uint64(count),
|
||||
sum.NetworkInterfaces[key][1] / uint64(count),
|
||||
sum.NetworkInterfaces[key][2],
|
||||
sum.NetworkInterfaces[key][3],
|
||||
// Average network interfaces
|
||||
if sum.NetworkInterfaces != nil {
|
||||
for key := range sum.NetworkInterfaces {
|
||||
sum.NetworkInterfaces[key] = [4]uint64{
|
||||
sum.NetworkInterfaces[key][0] / uint64(count),
|
||||
sum.NetworkInterfaces[key][1] / uint64(count),
|
||||
sum.NetworkInterfaces[key][2],
|
||||
sum.NetworkInterfaces[key][3],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Average temperatures
|
||||
if sum.Temperatures != nil && tempCount > 0 {
|
||||
for key := range sum.Temperatures {
|
||||
sum.Temperatures[key] = twoDecimals(sum.Temperatures[key] / tempCount)
|
||||
}
|
||||
}
|
||||
|
||||
// Average extra filesystem stats
|
||||
if sum.ExtraFs != nil {
|
||||
for key := range sum.ExtraFs {
|
||||
fs := sum.ExtraFs[key]
|
||||
fs.DiskTotal = twoDecimals(fs.DiskTotal / count)
|
||||
fs.DiskUsed = twoDecimals(fs.DiskUsed / count)
|
||||
fs.DiskWritePs = twoDecimals(fs.DiskWritePs / count)
|
||||
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
|
||||
fs.DiskReadBytes = fs.DiskReadBytes / uint64(count)
|
||||
fs.DiskWriteBytes = fs.DiskWriteBytes / uint64(count)
|
||||
for i := range fs.DiskIoStats {
|
||||
fs.DiskIoStats[i] = twoDecimals(fs.DiskIoStats[i] / count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Average GPU data
|
||||
if sum.GPUData != nil {
|
||||
for id := range sum.GPUData {
|
||||
gpu := sum.GPUData[id]
|
||||
gpu.Temperature = twoDecimals(gpu.Temperature / count)
|
||||
gpu.MemoryUsed = twoDecimals(gpu.MemoryUsed / count)
|
||||
gpu.MemoryTotal = twoDecimals(gpu.MemoryTotal / count)
|
||||
gpu.Usage = twoDecimals(gpu.Usage / count)
|
||||
gpu.Power = twoDecimals(gpu.Power / count)
|
||||
gpu.Count = twoDecimals(gpu.Count / count)
|
||||
|
||||
if gpu.Engines != nil {
|
||||
for engineKey := range gpu.Engines {
|
||||
gpu.Engines[engineKey] = twoDecimals(gpu.Engines[engineKey] / count)
|
||||
}
|
||||
}
|
||||
|
||||
sum.GPUData[id] = gpu
|
||||
}
|
||||
}
|
||||
|
||||
// Average temperatures
|
||||
if sum.Temperatures != nil && tempCount > 0 {
|
||||
for key := range sum.Temperatures {
|
||||
sum.Temperatures[key] = twoDecimals(sum.Temperatures[key] / tempCount)
|
||||
}
|
||||
// Average per-core usage
|
||||
if len(cpuCoresSums) > 0 {
|
||||
avg := make(system.Uint8Slice, len(cpuCoresSums))
|
||||
for i := range cpuCoresSums {
|
||||
v := math.Round(float64(cpuCoresSums[i]) / count)
|
||||
avg[i] = uint8(v)
|
||||
}
|
||||
sum.CpuCoresUsage = avg
|
||||
}
|
||||
|
||||
// Average extra filesystem stats
|
||||
if sum.ExtraFs != nil {
|
||||
for key := range sum.ExtraFs {
|
||||
fs := sum.ExtraFs[key]
|
||||
fs.DiskTotal = twoDecimals(fs.DiskTotal / count)
|
||||
fs.DiskUsed = twoDecimals(fs.DiskUsed / count)
|
||||
fs.DiskWritePs = twoDecimals(fs.DiskWritePs / count)
|
||||
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
|
||||
fs.DiskReadBytes = fs.DiskReadBytes / uint64(count)
|
||||
fs.DiskWriteBytes = fs.DiskWriteBytes / uint64(count)
|
||||
for i := range fs.DiskIoStats {
|
||||
fs.DiskIoStats[i] = twoDecimals(fs.DiskIoStats[i] / count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Average GPU data
|
||||
if sum.GPUData != nil {
|
||||
for id := range sum.GPUData {
|
||||
gpu := sum.GPUData[id]
|
||||
gpu.Temperature = twoDecimals(gpu.Temperature / count)
|
||||
gpu.MemoryUsed = twoDecimals(gpu.MemoryUsed / count)
|
||||
gpu.MemoryTotal = twoDecimals(gpu.MemoryTotal / count)
|
||||
gpu.Usage = twoDecimals(gpu.Usage / count)
|
||||
gpu.Power = twoDecimals(gpu.Power / count)
|
||||
gpu.Count = twoDecimals(gpu.Count / count)
|
||||
|
||||
if gpu.Engines != nil {
|
||||
for engineKey := range gpu.Engines {
|
||||
gpu.Engines[engineKey] = twoDecimals(gpu.Engines[engineKey] / count)
|
||||
}
|
||||
}
|
||||
|
||||
sum.GPUData[id] = gpu
|
||||
}
|
||||
}
|
||||
|
||||
// Average per-core usage
|
||||
if len(cpuCoresSums) > 0 {
|
||||
avg := make(system.Uint8Slice, len(cpuCoresSums))
|
||||
for i := range cpuCoresSums {
|
||||
v := math.Round(float64(cpuCoresSums[i]) / count)
|
||||
avg[i] = uint8(v)
|
||||
}
|
||||
sum.CpuCoresUsage = avg
|
||||
}
|
||||
|
||||
// Average CPU breakdown
|
||||
if len(cpuBreakdownSums) > 0 {
|
||||
avg := make([]float64, len(cpuBreakdownSums))
|
||||
for i := range cpuBreakdownSums {
|
||||
avg[i] = twoDecimals(cpuBreakdownSums[i] / count)
|
||||
}
|
||||
sum.CpuBreakdown = avg
|
||||
// Average CPU breakdown
|
||||
if len(cpuBreakdownSums) > 0 {
|
||||
avg := make([]float64, len(cpuBreakdownSums))
|
||||
for i := range cpuBreakdownSums {
|
||||
avg[i] = twoDecimals(cpuBreakdownSums[i] / count)
|
||||
}
|
||||
sum.CpuBreakdown = avg
|
||||
}
|
||||
|
||||
return sum
|
||||
@@ -454,29 +445,33 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
|
||||
// Calculate the average stats of a list of container_stats records
|
||||
func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds) []container.Stats {
|
||||
// Clear global map for reuse
|
||||
for k := range containerSums {
|
||||
delete(containerSums, k)
|
||||
}
|
||||
sums := containerSums
|
||||
count := float64(len(records))
|
||||
|
||||
for i := range records {
|
||||
id := records[i].Id
|
||||
// clear global statsRecord for reuse
|
||||
statsRecord.Stats = statsRecord.Stats[:0]
|
||||
// must set to nil (not [:0]) to avoid json.Unmarshal reusing backing array
|
||||
// which causes omitzero fields to inherit stale values from previous iterations
|
||||
containerStats = nil
|
||||
|
||||
queryParams["id"] = id
|
||||
db.NewQuery("SELECT stats FROM container_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
|
||||
|
||||
if err := json.Unmarshal(statsRecord.Stats, &containerStats); err != nil {
|
||||
allStats := make([][]container.Stats, 0, len(records))
|
||||
var row StatsRecord
|
||||
params := make(dbx.Params, 1)
|
||||
for _, rec := range records {
|
||||
row.Stats = row.Stats[:0]
|
||||
params["id"] = rec.Id
|
||||
db.NewQuery("SELECT stats FROM container_stats WHERE id = {:id}").Bind(params).One(&row)
|
||||
var cs []container.Stats
|
||||
if err := json.Unmarshal(row.Stats, &cs); err != nil {
|
||||
return []container.Stats{}
|
||||
}
|
||||
allStats = append(allStats, cs)
|
||||
}
|
||||
return AverageContainerStatsSlice(allStats)
|
||||
}

// AverageContainerStatsSlice computes the average of container stats across multiple time periods.
func AverageContainerStatsSlice(records [][]container.Stats) []container.Stats {
    if len(records) == 0 {
        return []container.Stats{}
    }
    sums := make(map[string]*container.Stats)
    count := float64(len(records))

    for _, containerStats := range records {
        for i := range containerStats {
            stat := containerStats[i]
            stat := &containerStats[i]
            if _, ok := sums[stat.Name]; !ok {
                sums[stat.Name] = &container.Stats{Name: stat.Name}
            }
@@ -505,133 +500,6 @@ func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds
    return result
}
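
Container averaging is likewise exposed as a pure helper keyed by container name. A small illustrative call (matching the basic-averaging test case below):

    snapshots := [][]container.Stats{
        {{Name: "nginx", Cpu: 10.0, Mem: 100.0}},
        {{Name: "nginx", Cpu: 20.0, Mem: 200.0}},
    }
    avg := records.AverageContainerStatsSlice(snapshots)
    // one entry for "nginx" with Cpu == 15.0 and Mem == 150.0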
|
||||
|
||||
// Delete old records
|
||||
func (rm *RecordManager) DeleteOldRecords() {
|
||||
rm.app.RunInTransaction(func(txApp core.App) error {
|
||||
err := deleteOldSystemStats(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old system stats", "err", err)
|
||||
}
|
||||
err = deleteOldContainerRecords(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old container records", "err", err)
|
||||
}
|
||||
err = deleteOldSystemdServiceRecords(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old systemd service records", "err", err)
|
||||
}
|
||||
err = deleteOldAlertsHistory(txApp, 200, 250)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old alerts history", "err", err)
|
||||
}
|
||||
err = deleteOldQuietHours(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old quiet hours", "err", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Delete old alerts history records
|
||||
func deleteOldAlertsHistory(app core.App, countToKeep, countBeforeDeletion int) error {
|
||||
db := app.DB()
|
||||
var users []struct {
|
||||
Id string `db:"user"`
|
||||
}
|
||||
err := db.NewQuery("SELECT user, COUNT(*) as count FROM alerts_history GROUP BY user HAVING count > {:countBeforeDeletion}").Bind(dbx.Params{"countBeforeDeletion": countBeforeDeletion}).All(&users)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, user := range users {
|
||||
_, err = db.NewQuery("DELETE FROM alerts_history WHERE user = {:user} AND id NOT IN (SELECT id FROM alerts_history WHERE user = {:user} ORDER BY created DESC LIMIT {:countToKeep})").Bind(dbx.Params{"user": user.Id, "countToKeep": countToKeep}).Execute()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes system_stats records older than what is displayed in the UI
|
||||
func deleteOldSystemStats(app core.App) error {
|
||||
// Collections to process
|
||||
collections := [2]string{"system_stats", "container_stats"}
|
||||
|
||||
// Record types and their retention periods
|
||||
type RecordDeletionData struct {
|
||||
recordType string
|
||||
retention time.Duration
|
||||
}
|
||||
recordData := []RecordDeletionData{
|
||||
{recordType: "1m", retention: time.Hour}, // 1 hour
|
||||
{recordType: "10m", retention: 12 * time.Hour}, // 12 hours
|
||||
{recordType: "20m", retention: 24 * time.Hour}, // 1 day
|
||||
{recordType: "120m", retention: 7 * 24 * time.Hour}, // 7 days
|
||||
{recordType: "480m", retention: 30 * 24 * time.Hour}, // 30 days
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
for _, collection := range collections {
|
||||
// Build the WHERE clause
|
||||
var conditionParts []string
|
||||
var params dbx.Params = make(map[string]any)
|
||||
for i := range recordData {
|
||||
rd := recordData[i]
|
||||
// Create parameterized condition for this record type
|
||||
dateParam := fmt.Sprintf("date%d", i)
|
||||
conditionParts = append(conditionParts, fmt.Sprintf("(type = '%s' AND created < {:%s})", rd.recordType, dateParam))
|
||||
params[dateParam] = now.Add(-rd.retention)
|
||||
}
|
||||
// Combine conditions with OR
|
||||
conditionStr := strings.Join(conditionParts, " OR ")
|
||||
// Construct and execute the full raw query
|
||||
rawQuery := fmt.Sprintf("DELETE FROM %s WHERE %s", collection, conditionStr)
|
||||
if _, err := app.DB().NewQuery(rawQuery).Bind(params).Execute(); err != nil {
|
||||
return fmt.Errorf("failed to delete from %s: %v", collection, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes systemd service records that haven't been updated in the last 20 minutes
|
||||
func deleteOldSystemdServiceRecords(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
twentyMinutesAgo := now.Add(-20 * time.Minute)
|
||||
|
||||
// Delete systemd service records where updated < twentyMinutesAgo
|
||||
_, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete old systemd service records: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes container records that haven't been updated in the last 10 minutes
|
||||
func deleteOldContainerRecords(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
tenMinutesAgo := now.Add(-10 * time.Minute)
|
||||
|
||||
// Delete container records where updated < tenMinutesAgo
|
||||
_, err := app.DB().NewQuery("DELETE FROM containers WHERE updated < {:updated}").Bind(dbx.Params{"updated": tenMinutesAgo.UnixMilli()}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete old container records: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes old quiet hours records where end date has passed
|
||||
func deleteOldQuietHours(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
_, err := app.DB().NewQuery("DELETE FROM quiet_hours WHERE type = 'one-time' AND end < {:now}").Bind(dbx.Params{"now": now}).Execute()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
/* Round float to two decimals */
|
||||
func twoDecimals(value float64) float64 {
|
||||
return math.Round(value*100) / 100
|
||||
|
||||
internal/records/records_averaging_test.go (new file, 820 lines)
@@ -0,0 +1,820 @@
//go:build testing

package records_test

import (
    "sort"
    "testing"

    "github.com/henrygd/beszel/internal/entities/container"
    "github.com/henrygd/beszel/internal/entities/system"
    "github.com/henrygd/beszel/internal/records"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)
|
||||
|
||||
func TestAverageSystemStatsSlice_Empty(t *testing.T) {
|
||||
result := records.AverageSystemStatsSlice(nil)
|
||||
assert.Equal(t, system.Stats{}, result)
|
||||
|
||||
result = records.AverageSystemStatsSlice([]system.Stats{})
|
||||
assert.Equal(t, system.Stats{}, result)
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_SingleRecord(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 45.67,
|
||||
Mem: 16.0,
|
||||
MemUsed: 8.5,
|
||||
MemPct: 53.12,
|
||||
MemBuffCache: 2.0,
|
||||
Swap: 4.0,
|
||||
SwapUsed: 1.0,
|
||||
DiskTotal: 500.0,
|
||||
DiskUsed: 250.0,
|
||||
DiskPct: 50.0,
|
||||
DiskReadPs: 100.5,
|
||||
DiskWritePs: 200.75,
|
||||
NetworkSent: 10.5,
|
||||
NetworkRecv: 20.25,
|
||||
LoadAvg: [3]float64{1.5, 2.0, 3.5},
|
||||
Bandwidth: [2]uint64{1000, 2000},
|
||||
DiskIO: [2]uint64{500, 600},
|
||||
Battery: [2]uint8{80, 1},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 45.67, result.Cpu)
|
||||
assert.Equal(t, 16.0, result.Mem)
|
||||
assert.Equal(t, 8.5, result.MemUsed)
|
||||
assert.Equal(t, 53.12, result.MemPct)
|
||||
assert.Equal(t, 2.0, result.MemBuffCache)
|
||||
assert.Equal(t, 4.0, result.Swap)
|
||||
assert.Equal(t, 1.0, result.SwapUsed)
|
||||
assert.Equal(t, 500.0, result.DiskTotal)
|
||||
assert.Equal(t, 250.0, result.DiskUsed)
|
||||
assert.Equal(t, 50.0, result.DiskPct)
|
||||
assert.Equal(t, 100.5, result.DiskReadPs)
|
||||
assert.Equal(t, 200.75, result.DiskWritePs)
|
||||
assert.Equal(t, 10.5, result.NetworkSent)
|
||||
assert.Equal(t, 20.25, result.NetworkRecv)
|
||||
assert.Equal(t, [3]float64{1.5, 2.0, 3.5}, result.LoadAvg)
|
||||
assert.Equal(t, [2]uint64{1000, 2000}, result.Bandwidth)
|
||||
assert.Equal(t, [2]uint64{500, 600}, result.DiskIO)
|
||||
assert.Equal(t, uint8(80), result.Battery[0])
|
||||
assert.Equal(t, uint8(1), result.Battery[1])
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_BasicAveraging(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 20.0,
|
||||
Mem: 16.0,
|
||||
MemUsed: 6.0,
|
||||
MemPct: 37.5,
|
||||
MemBuffCache: 1.0,
|
||||
MemZfsArc: 0.5,
|
||||
Swap: 4.0,
|
||||
SwapUsed: 1.0,
|
||||
DiskTotal: 500.0,
|
||||
DiskUsed: 200.0,
|
||||
DiskPct: 40.0,
|
||||
DiskReadPs: 100.0,
|
||||
DiskWritePs: 200.0,
|
||||
NetworkSent: 10.0,
|
||||
NetworkRecv: 20.0,
|
||||
LoadAvg: [3]float64{1.0, 2.0, 3.0},
|
||||
Bandwidth: [2]uint64{1000, 2000},
|
||||
DiskIO: [2]uint64{400, 600},
|
||||
Battery: [2]uint8{80, 1},
|
||||
},
|
||||
{
|
||||
Cpu: 40.0,
|
||||
Mem: 16.0,
|
||||
MemUsed: 10.0,
|
||||
MemPct: 62.5,
|
||||
MemBuffCache: 3.0,
|
||||
MemZfsArc: 1.5,
|
||||
Swap: 4.0,
|
||||
SwapUsed: 3.0,
|
||||
DiskTotal: 500.0,
|
||||
DiskUsed: 300.0,
|
||||
DiskPct: 60.0,
|
||||
DiskReadPs: 200.0,
|
||||
DiskWritePs: 400.0,
|
||||
NetworkSent: 30.0,
|
||||
NetworkRecv: 40.0,
|
||||
LoadAvg: [3]float64{3.0, 4.0, 5.0},
|
||||
Bandwidth: [2]uint64{3000, 4000},
|
||||
DiskIO: [2]uint64{600, 800},
|
||||
Battery: [2]uint8{60, 1},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 30.0, result.Cpu)
|
||||
assert.Equal(t, 16.0, result.Mem)
|
||||
assert.Equal(t, 8.0, result.MemUsed)
|
||||
assert.Equal(t, 50.0, result.MemPct)
|
||||
assert.Equal(t, 2.0, result.MemBuffCache)
|
||||
assert.Equal(t, 1.0, result.MemZfsArc)
|
||||
assert.Equal(t, 4.0, result.Swap)
|
||||
assert.Equal(t, 2.0, result.SwapUsed)
|
||||
assert.Equal(t, 500.0, result.DiskTotal)
|
||||
assert.Equal(t, 250.0, result.DiskUsed)
|
||||
assert.Equal(t, 50.0, result.DiskPct)
|
||||
assert.Equal(t, 150.0, result.DiskReadPs)
|
||||
assert.Equal(t, 300.0, result.DiskWritePs)
|
||||
assert.Equal(t, 20.0, result.NetworkSent)
|
||||
assert.Equal(t, 30.0, result.NetworkRecv)
|
||||
assert.Equal(t, [3]float64{2.0, 3.0, 4.0}, result.LoadAvg)
|
||||
assert.Equal(t, [2]uint64{2000, 3000}, result.Bandwidth)
|
||||
assert.Equal(t, [2]uint64{500, 700}, result.DiskIO)
|
||||
assert.Equal(t, uint8(70), result.Battery[0])
|
||||
assert.Equal(t, uint8(1), result.Battery[1])
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_PeakValues(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 20.0,
|
||||
MaxCpu: 25.0,
|
||||
MemUsed: 6.0,
|
||||
MaxMem: 7.0,
|
||||
NetworkSent: 10.0,
|
||||
MaxNetworkSent: 15.0,
|
||||
NetworkRecv: 20.0,
|
||||
MaxNetworkRecv: 25.0,
|
||||
DiskReadPs: 100.0,
|
||||
MaxDiskReadPs: 120.0,
|
||||
DiskWritePs: 200.0,
|
||||
MaxDiskWritePs: 220.0,
|
||||
Bandwidth: [2]uint64{1000, 2000},
|
||||
MaxBandwidth: [2]uint64{1500, 2500},
|
||||
DiskIO: [2]uint64{400, 600},
|
||||
MaxDiskIO: [2]uint64{500, 700},
|
||||
DiskIoStats: [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
|
||||
MaxDiskIoStats: [6]float64{15.0, 25.0, 35.0, 6.0, 9.0, 14.0},
|
||||
},
|
||||
{
|
||||
Cpu: 40.0,
|
||||
MaxCpu: 50.0,
|
||||
MemUsed: 10.0,
|
||||
MaxMem: 12.0,
|
||||
NetworkSent: 30.0,
|
||||
MaxNetworkSent: 35.0,
|
||||
NetworkRecv: 40.0,
|
||||
MaxNetworkRecv: 45.0,
|
||||
DiskReadPs: 200.0,
|
||||
MaxDiskReadPs: 210.0,
|
||||
DiskWritePs: 400.0,
|
||||
MaxDiskWritePs: 410.0,
|
||||
Bandwidth: [2]uint64{3000, 4000},
|
||||
MaxBandwidth: [2]uint64{3500, 4500},
|
||||
DiskIO: [2]uint64{600, 800},
|
||||
MaxDiskIO: [2]uint64{650, 850},
|
||||
DiskIoStats: [6]float64{50.0, 60.0, 70.0, 15.0, 18.0, 22.0},
|
||||
MaxDiskIoStats: [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 50.0, result.MaxCpu)
|
||||
assert.Equal(t, 12.0, result.MaxMem)
|
||||
assert.Equal(t, 35.0, result.MaxNetworkSent)
|
||||
assert.Equal(t, 45.0, result.MaxNetworkRecv)
|
||||
assert.Equal(t, 210.0, result.MaxDiskReadPs)
|
||||
assert.Equal(t, 410.0, result.MaxDiskWritePs)
|
||||
assert.Equal(t, [2]uint64{3500, 4500}, result.MaxBandwidth)
|
||||
assert.Equal(t, [2]uint64{650, 850}, result.MaxDiskIO)
|
||||
assert.Equal(t, [6]float64{30.0, 40.0, 50.0, 10.0, 13.0, 17.0}, result.DiskIoStats)
|
||||
assert.Equal(t, [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0}, result.MaxDiskIoStats)
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_DiskIoStats(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
DiskIoStats: [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
|
||||
MaxDiskIoStats: [6]float64{12.0, 22.0, 32.0, 6.0, 9.0, 13.0},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
DiskIoStats: [6]float64{30.0, 40.0, 50.0, 15.0, 18.0, 22.0},
|
||||
MaxDiskIoStats: [6]float64{28.0, 38.0, 48.0, 14.0, 17.0, 21.0},
|
||||
},
|
||||
{
|
||||
Cpu: 30.0,
|
||||
DiskIoStats: [6]float64{20.0, 30.0, 40.0, 10.0, 12.0, 16.0},
|
||||
MaxDiskIoStats: [6]float64{25.0, 35.0, 45.0, 11.0, 13.0, 17.0},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
// Average: (10+30+20)/3=20, (20+40+30)/3=30, (30+50+40)/3=40, (5+15+10)/3=10, (8+18+12)/3≈12.67, (12+22+16)/3≈16.67
|
||||
assert.Equal(t, 20.0, result.DiskIoStats[0])
|
||||
assert.Equal(t, 30.0, result.DiskIoStats[1])
|
||||
assert.Equal(t, 40.0, result.DiskIoStats[2])
|
||||
assert.Equal(t, 10.0, result.DiskIoStats[3])
|
||||
assert.Equal(t, 12.67, result.DiskIoStats[4])
|
||||
assert.Equal(t, 16.67, result.DiskIoStats[5])
|
||||
// Max: current DiskIoStats[0] wins for record 2 (30 > MaxDiskIoStats 28)
|
||||
assert.Equal(t, 30.0, result.MaxDiskIoStats[0])
|
||||
assert.Equal(t, 40.0, result.MaxDiskIoStats[1])
|
||||
assert.Equal(t, 50.0, result.MaxDiskIoStats[2])
|
||||
assert.Equal(t, 15.0, result.MaxDiskIoStats[3])
|
||||
assert.Equal(t, 18.0, result.MaxDiskIoStats[4])
|
||||
assert.Equal(t, 22.0, result.MaxDiskIoStats[5])
|
||||
}
|
||||
|
||||
// Tests that current DiskIoStats values are considered when computing MaxDiskIoStats.
|
||||
func TestAverageSystemStatsSlice_DiskIoStatsPeakFromCurrentValues(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, DiskIoStats: [6]float64{95.0, 90.0, 85.0, 50.0, 60.0, 80.0}, MaxDiskIoStats: [6]float64{80.0, 80.0, 80.0, 40.0, 50.0, 70.0}},
|
||||
{Cpu: 20.0, DiskIoStats: [6]float64{10.0, 10.0, 10.0, 5.0, 6.0, 8.0}, MaxDiskIoStats: [6]float64{20.0, 20.0, 20.0, 10.0, 12.0, 16.0}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
// Current value from first record (95, 90, 85, 50, 60, 80) beats MaxDiskIoStats in both records
|
||||
assert.Equal(t, 95.0, result.MaxDiskIoStats[0])
|
||||
assert.Equal(t, 90.0, result.MaxDiskIoStats[1])
|
||||
assert.Equal(t, 85.0, result.MaxDiskIoStats[2])
|
||||
assert.Equal(t, 50.0, result.MaxDiskIoStats[3])
|
||||
assert.Equal(t, 60.0, result.MaxDiskIoStats[4])
|
||||
assert.Equal(t, 80.0, result.MaxDiskIoStats[5])
|
||||
}
|
||||
|
||||
// Tests that current values are considered when computing peaks
|
||||
// (i.e., current cpu > MaxCpu should still win).
|
||||
func TestAverageSystemStatsSlice_PeakFromCurrentValues(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 95.0, MaxCpu: 80.0, MemUsed: 15.0, MaxMem: 10.0},
|
||||
{Cpu: 10.0, MaxCpu: 20.0, MemUsed: 5.0, MaxMem: 8.0},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 95.0, result.MaxCpu)
|
||||
assert.Equal(t, 15.0, result.MaxMem)
|
||||
}
|
||||
|
||||
// Tests that records without temperature data are excluded from the temperature average.
|
||||
func TestAverageSystemStatsSlice_Temperatures(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
Temperatures: map[string]float64{"cpu": 60.0, "gpu": 70.0},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
Temperatures: map[string]float64{"cpu": 80.0, "gpu": 90.0},
|
||||
},
|
||||
{
|
||||
// No temperatures - should not affect temp averaging
|
||||
Cpu: 30.0,
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.Temperatures)
|
||||
// Average over 2 records that had temps, not 3
|
||||
assert.Equal(t, 70.0, result.Temperatures["cpu"])
|
||||
assert.Equal(t, 80.0, result.Temperatures["gpu"])
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_NetworkInterfaces(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
NetworkInterfaces: map[string][4]uint64{
|
||||
"eth0": {100, 200, 150, 250},
|
||||
"eth1": {50, 60, 70, 80},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
NetworkInterfaces: map[string][4]uint64{
|
||||
"eth0": {200, 400, 300, 500},
|
||||
"eth1": {150, 160, 170, 180},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.NetworkInterfaces)
|
||||
// [0] and [1] are averaged, [2] and [3] are max
|
||||
assert.Equal(t, [4]uint64{150, 300, 300, 500}, result.NetworkInterfaces["eth0"])
|
||||
assert.Equal(t, [4]uint64{100, 110, 170, 180}, result.NetworkInterfaces["eth1"])
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_ExtraFs(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
ExtraFs: map[string]*system.FsStats{
|
||||
"/data": {
|
||||
DiskTotal: 1000.0,
|
||||
DiskUsed: 400.0,
|
||||
DiskReadPs: 50.0,
|
||||
DiskWritePs: 100.0,
|
||||
MaxDiskReadPS: 60.0,
|
||||
MaxDiskWritePS: 110.0,
|
||||
DiskReadBytes: 5000,
|
||||
DiskWriteBytes: 10000,
|
||||
MaxDiskReadBytes: 6000,
|
||||
MaxDiskWriteBytes: 11000,
|
||||
DiskIoStats: [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
|
||||
MaxDiskIoStats: [6]float64{12.0, 22.0, 32.0, 6.0, 9.0, 13.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
ExtraFs: map[string]*system.FsStats{
|
||||
"/data": {
|
||||
DiskTotal: 1000.0,
|
||||
DiskUsed: 600.0,
|
||||
DiskReadPs: 150.0,
|
||||
DiskWritePs: 200.0,
|
||||
MaxDiskReadPS: 160.0,
|
||||
MaxDiskWritePS: 210.0,
|
||||
DiskReadBytes: 15000,
|
||||
DiskWriteBytes: 20000,
|
||||
MaxDiskReadBytes: 16000,
|
||||
MaxDiskWriteBytes: 21000,
|
||||
DiskIoStats: [6]float64{50.0, 60.0, 70.0, 15.0, 18.0, 22.0},
|
||||
MaxDiskIoStats: [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.ExtraFs)
|
||||
require.NotNil(t, result.ExtraFs["/data"])
|
||||
fs := result.ExtraFs["/data"]
|
||||
assert.Equal(t, 1000.0, fs.DiskTotal)
|
||||
assert.Equal(t, 500.0, fs.DiskUsed)
|
||||
assert.Equal(t, 100.0, fs.DiskReadPs)
|
||||
assert.Equal(t, 150.0, fs.DiskWritePs)
|
||||
assert.Equal(t, 160.0, fs.MaxDiskReadPS)
|
||||
assert.Equal(t, 210.0, fs.MaxDiskWritePS)
|
||||
assert.Equal(t, uint64(10000), fs.DiskReadBytes)
|
||||
assert.Equal(t, uint64(15000), fs.DiskWriteBytes)
|
||||
assert.Equal(t, uint64(16000), fs.MaxDiskReadBytes)
|
||||
assert.Equal(t, uint64(21000), fs.MaxDiskWriteBytes)
|
||||
assert.Equal(t, [6]float64{30.0, 40.0, 50.0, 10.0, 13.0, 17.0}, fs.DiskIoStats)
|
||||
assert.Equal(t, [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0}, fs.MaxDiskIoStats)
|
||||
}
|
||||
|
||||
// Tests that ExtraFs DiskIoStats peak considers current values, not just previous peaks.
|
||||
func TestAverageSystemStatsSlice_ExtraFsDiskIoStatsPeakFromCurrentValues(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
ExtraFs: map[string]*system.FsStats{
|
||||
"/data": {
|
||||
DiskIoStats: [6]float64{95.0, 90.0, 85.0, 50.0, 60.0, 80.0}, // exceeds MaxDiskIoStats
|
||||
MaxDiskIoStats: [6]float64{80.0, 80.0, 80.0, 40.0, 50.0, 70.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
ExtraFs: map[string]*system.FsStats{
|
||||
"/data": {
|
||||
DiskIoStats: [6]float64{10.0, 10.0, 10.0, 5.0, 6.0, 8.0},
|
||||
MaxDiskIoStats: [6]float64{20.0, 20.0, 20.0, 10.0, 12.0, 16.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
fs := result.ExtraFs["/data"]
|
||||
assert.Equal(t, 95.0, fs.MaxDiskIoStats[0])
|
||||
assert.Equal(t, 90.0, fs.MaxDiskIoStats[1])
|
||||
assert.Equal(t, 85.0, fs.MaxDiskIoStats[2])
|
||||
assert.Equal(t, 50.0, fs.MaxDiskIoStats[3])
|
||||
assert.Equal(t, 60.0, fs.MaxDiskIoStats[4])
|
||||
assert.Equal(t, 80.0, fs.MaxDiskIoStats[5])
|
||||
}
|
||||
|
||||
// Tests that extra FS peak values consider current values, not just previous peaks.
|
||||
func TestAverageSystemStatsSlice_ExtraFsPeaksFromCurrentValues(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
ExtraFs: map[string]*system.FsStats{
|
||||
"/data": {
|
||||
DiskReadPs: 500.0, // exceeds MaxDiskReadPS
|
||||
MaxDiskReadPS: 100.0,
|
||||
DiskReadBytes: 50000,
|
||||
MaxDiskReadBytes: 10000,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
ExtraFs: map[string]*system.FsStats{
|
||||
"/data": {
|
||||
DiskReadPs: 50.0,
|
||||
MaxDiskReadPS: 200.0,
|
||||
DiskReadBytes: 5000,
|
||||
MaxDiskReadBytes: 20000,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
fs := result.ExtraFs["/data"]
|
||||
assert.Equal(t, 500.0, fs.MaxDiskReadPS)
|
||||
assert.Equal(t, uint64(50000), fs.MaxDiskReadBytes)
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_GPUData(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
GPUData: map[string]system.GPUData{
|
||||
"gpu0": {
|
||||
Name: "RTX 4090",
|
||||
Temperature: 60.0,
|
||||
MemoryUsed: 4.0,
|
||||
MemoryTotal: 24.0,
|
||||
Usage: 30.0,
|
||||
Power: 200.0,
|
||||
Count: 1.0,
|
||||
Engines: map[string]float64{
|
||||
"3D": 50.0,
|
||||
"Video": 10.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
GPUData: map[string]system.GPUData{
|
||||
"gpu0": {
|
||||
Name: "RTX 4090",
|
||||
Temperature: 80.0,
|
||||
MemoryUsed: 8.0,
|
||||
MemoryTotal: 24.0,
|
||||
Usage: 70.0,
|
||||
Power: 300.0,
|
||||
Count: 1.0,
|
||||
Engines: map[string]float64{
|
||||
"3D": 90.0,
|
||||
"Video": 30.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.GPUData)
|
||||
gpu := result.GPUData["gpu0"]
|
||||
assert.Equal(t, "RTX 4090", gpu.Name)
|
||||
assert.Equal(t, 70.0, gpu.Temperature)
|
||||
assert.Equal(t, 6.0, gpu.MemoryUsed)
|
||||
assert.Equal(t, 24.0, gpu.MemoryTotal)
|
||||
assert.Equal(t, 50.0, gpu.Usage)
|
||||
assert.Equal(t, 250.0, gpu.Power)
|
||||
assert.Equal(t, 1.0, gpu.Count)
|
||||
require.NotNil(t, gpu.Engines)
|
||||
assert.Equal(t, 70.0, gpu.Engines["3D"])
|
||||
assert.Equal(t, 20.0, gpu.Engines["Video"])
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_MultipleGPUs(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
GPUData: map[string]system.GPUData{
|
||||
"gpu0": {Name: "GPU A", Usage: 20.0, Temperature: 50.0},
|
||||
"gpu1": {Name: "GPU B", Usage: 60.0, Temperature: 70.0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
GPUData: map[string]system.GPUData{
|
||||
"gpu0": {Name: "GPU A", Usage: 40.0, Temperature: 60.0},
|
||||
"gpu1": {Name: "GPU B", Usage: 80.0, Temperature: 80.0},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.GPUData)
|
||||
assert.Equal(t, 30.0, result.GPUData["gpu0"].Usage)
|
||||
assert.Equal(t, 55.0, result.GPUData["gpu0"].Temperature)
|
||||
assert.Equal(t, 70.0, result.GPUData["gpu1"].Usage)
|
||||
assert.Equal(t, 75.0, result.GPUData["gpu1"].Temperature)
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_CpuCoresUsage(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, CpuCoresUsage: system.Uint8Slice{10, 20, 30, 40}},
|
||||
{Cpu: 20.0, CpuCoresUsage: system.Uint8Slice{30, 40, 50, 60}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.CpuCoresUsage)
|
||||
assert.Equal(t, system.Uint8Slice{20, 30, 40, 50}, result.CpuCoresUsage)
|
||||
}
|
||||
|
||||
// Tests that per-core usage rounds correctly (e.g., 15.5 -> 16 via math.Round).
|
||||
func TestAverageSystemStatsSlice_CpuCoresUsageRounding(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, CpuCoresUsage: system.Uint8Slice{11}},
|
||||
{Cpu: 20.0, CpuCoresUsage: system.Uint8Slice{20}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.CpuCoresUsage)
|
||||
// (11+20)/2 = 15.5, rounds to 16
|
||||
assert.Equal(t, uint8(16), result.CpuCoresUsage[0])
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_CpuBreakdown(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, CpuBreakdown: []float64{5.0, 3.0, 1.0, 0.5, 90.5}},
|
||||
{Cpu: 20.0, CpuBreakdown: []float64{15.0, 7.0, 3.0, 1.5, 73.5}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.CpuBreakdown)
|
||||
assert.Equal(t, []float64{10.0, 5.0, 2.0, 1.0, 82.0}, result.CpuBreakdown)
|
||||
}
|
||||
|
||||
// Tests that Battery[1] (charge state) uses the last record's value.
|
||||
func TestAverageSystemStatsSlice_BatteryLastChargeState(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, Battery: [2]uint8{100, 1}}, // charging
|
||||
{Cpu: 20.0, Battery: [2]uint8{90, 0}}, // not charging
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, uint8(95), result.Battery[0])
|
||||
assert.Equal(t, uint8(0), result.Battery[1]) // last record's charge state
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_ThreeRecordsRounding(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, Mem: 8.0},
|
||||
{Cpu: 20.0, Mem: 8.0},
|
||||
{Cpu: 30.0, Mem: 8.0},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 20.0, result.Cpu)
|
||||
assert.Equal(t, 8.0, result.Mem)
|
||||
}
|
||||
|
||||
// Tests records where some have optional fields and others don't.
|
||||
func TestAverageSystemStatsSlice_MixedOptionalFields(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
CpuCoresUsage: system.Uint8Slice{50, 60},
|
||||
CpuBreakdown: []float64{5.0, 3.0, 1.0, 0.5, 90.5},
|
||||
GPUData: map[string]system.GPUData{
|
||||
"gpu0": {Name: "GPU", Usage: 40.0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
// No CpuCoresUsage, CpuBreakdown, or GPUData
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 15.0, result.Cpu)
|
||||
// CpuCoresUsage: only 1 record had it, so sum/2
|
||||
require.NotNil(t, result.CpuCoresUsage)
|
||||
assert.Equal(t, uint8(25), result.CpuCoresUsage[0])
|
||||
assert.Equal(t, uint8(30), result.CpuCoresUsage[1])
|
||||
// CpuBreakdown: only 1 record had it, so sum/2
|
||||
require.NotNil(t, result.CpuBreakdown)
|
||||
assert.Equal(t, 2.5, result.CpuBreakdown[0])
|
||||
// GPUData: only 1 record had it, so sum/2
|
||||
require.NotNil(t, result.GPUData)
|
||||
assert.Equal(t, 20.0, result.GPUData["gpu0"].Usage)
|
||||
}
|
||||
|
||||
// Tests with 10 records matching the common real-world case (10 x 1m -> 1 x 10m).
|
||||
func TestAverageSystemStatsSlice_TenRecords(t *testing.T) {
|
||||
input := make([]system.Stats, 10)
|
||||
for i := range input {
|
||||
input[i] = system.Stats{
|
||||
Cpu: float64(i * 10), // 0, 10, 20, ..., 90
|
||||
Mem: 16.0,
|
||||
MemUsed: float64(4 + i), // 4, 5, 6, ..., 13
|
||||
MemPct: float64(25 + i), // 25, 26, ..., 34
|
||||
DiskTotal: 500.0,
|
||||
DiskUsed: 250.0,
|
||||
DiskPct: 50.0,
|
||||
NetworkSent: float64(i),
|
||||
NetworkRecv: float64(i * 2),
|
||||
Bandwidth: [2]uint64{uint64(i * 1000), uint64(i * 2000)},
|
||||
LoadAvg: [3]float64{float64(i), float64(i) * 0.5, float64(i) * 0.25},
|
||||
}
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 45.0, result.Cpu) // avg of 0..90
|
||||
assert.Equal(t, 16.0, result.Mem) // constant
|
||||
assert.Equal(t, 8.5, result.MemUsed) // avg of 4..13
|
||||
assert.Equal(t, 29.5, result.MemPct) // avg of 25..34
|
||||
assert.Equal(t, 500.0, result.DiskTotal)
|
||||
assert.Equal(t, 250.0, result.DiskUsed)
|
||||
assert.Equal(t, 50.0, result.DiskPct)
|
||||
assert.Equal(t, 4.5, result.NetworkSent)
|
||||
assert.Equal(t, 9.0, result.NetworkRecv)
|
||||
assert.Equal(t, [2]uint64{4500, 9000}, result.Bandwidth)
|
||||
}
|
||||
|
||||
// --- Container Stats Tests ---
|
||||
|
||||
func TestAverageContainerStatsSlice_Empty(t *testing.T) {
|
||||
result := records.AverageContainerStatsSlice(nil)
|
||||
assert.Equal(t, []container.Stats{}, result)
|
||||
|
||||
result = records.AverageContainerStatsSlice([][]container.Stats{})
|
||||
assert.Equal(t, []container.Stats{}, result)
|
||||
}
|
||||
|
||||
func TestAverageContainerStatsSlice_SingleRecord(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 5.0, Mem: 128.0, Bandwidth: [2]uint64{1000, 2000}},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
|
||||
require.Len(t, result, 1)
|
||||
assert.Equal(t, "nginx", result[0].Name)
|
||||
assert.Equal(t, 5.0, result[0].Cpu)
|
||||
assert.Equal(t, 128.0, result[0].Mem)
|
||||
assert.Equal(t, [2]uint64{1000, 2000}, result[0].Bandwidth)
|
||||
}
|
||||
|
||||
func TestAverageContainerStatsSlice_BasicAveraging(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 10.0, Mem: 100.0, Bandwidth: [2]uint64{1000, 2000}},
|
||||
{Name: "redis", Cpu: 5.0, Mem: 64.0, Bandwidth: [2]uint64{500, 1000}},
|
||||
},
|
||||
{
|
||||
{Name: "nginx", Cpu: 20.0, Mem: 200.0, Bandwidth: [2]uint64{3000, 4000}},
|
||||
{Name: "redis", Cpu: 15.0, Mem: 128.0, Bandwidth: [2]uint64{1500, 2000}},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||
|
||||
require.Len(t, result, 2)
|
||||
|
||||
assert.Equal(t, "nginx", result[0].Name)
|
||||
assert.Equal(t, 15.0, result[0].Cpu)
|
||||
assert.Equal(t, 150.0, result[0].Mem)
|
||||
assert.Equal(t, [2]uint64{2000, 3000}, result[0].Bandwidth)
|
||||
|
||||
assert.Equal(t, "redis", result[1].Name)
|
||||
assert.Equal(t, 10.0, result[1].Cpu)
|
||||
assert.Equal(t, 96.0, result[1].Mem)
|
||||
assert.Equal(t, [2]uint64{1000, 1500}, result[1].Bandwidth)
|
||||
}
|
||||
|
||||
// Tests containers that appear in some records but not all.
|
||||
func TestAverageContainerStatsSlice_ContainerAppearsInSomeRecords(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 10.0, Mem: 100.0},
|
||||
{Name: "redis", Cpu: 5.0, Mem: 64.0},
|
||||
},
|
||||
{
|
||||
{Name: "nginx", Cpu: 20.0, Mem: 200.0},
|
||||
// redis not present
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||
|
||||
require.Len(t, result, 2)
|
||||
|
||||
assert.Equal(t, "nginx", result[0].Name)
|
||||
assert.Equal(t, 15.0, result[0].Cpu)
|
||||
assert.Equal(t, 150.0, result[0].Mem)
|
||||
|
||||
// redis: sum / count where count = total records (2), not records containing redis
|
||||
assert.Equal(t, "redis", result[1].Name)
|
||||
assert.Equal(t, 2.5, result[1].Cpu)
|
||||
assert.Equal(t, 32.0, result[1].Mem)
|
||||
}
|
||||
|
||||
// Tests backward compatibility with deprecated NetworkSent/NetworkRecv (MB) when Bandwidth is zero.
|
||||
func TestAverageContainerStatsSlice_DeprecatedNetworkFields(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 10.0, Mem: 100.0, NetworkSent: 1.0, NetworkRecv: 2.0}, // 1 MB, 2 MB
|
||||
},
|
||||
{
|
||||
{Name: "nginx", Cpu: 20.0, Mem: 200.0, NetworkSent: 3.0, NetworkRecv: 4.0}, // 3 MB, 4 MB
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
|
||||
require.Len(t, result, 1)
|
||||
assert.Equal(t, "nginx", result[0].Name)
|
||||
// avg sent = (1*1048576 + 3*1048576) / 2 = 2*1048576
|
||||
assert.Equal(t, uint64(2*1048576), result[0].Bandwidth[0])
|
||||
// avg recv = (2*1048576 + 4*1048576) / 2 = 3*1048576
|
||||
assert.Equal(t, uint64(3*1048576), result[0].Bandwidth[1])
|
||||
}
|
||||
|
||||
// Tests that when Bandwidth is set, deprecated NetworkSent/NetworkRecv are ignored.
|
||||
func TestAverageContainerStatsSlice_MixedBandwidthAndDeprecated(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 10.0, Mem: 100.0, Bandwidth: [2]uint64{5000, 6000}, NetworkSent: 99.0, NetworkRecv: 99.0},
|
||||
},
|
||||
{
|
||||
{Name: "nginx", Cpu: 20.0, Mem: 200.0, Bandwidth: [2]uint64{7000, 8000}},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
|
||||
require.Len(t, result, 1)
|
||||
assert.Equal(t, uint64(6000), result[0].Bandwidth[0])
|
||||
assert.Equal(t, uint64(7000), result[0].Bandwidth[1])
|
||||
}
|
||||
|
||||
func TestAverageContainerStatsSlice_ThreeRecords(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{{Name: "app", Cpu: 1.0, Mem: 100.0}},
|
||||
{{Name: "app", Cpu: 2.0, Mem: 200.0}},
|
||||
{{Name: "app", Cpu: 3.0, Mem: 300.0}},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
|
||||
require.Len(t, result, 1)
|
||||
assert.Equal(t, 2.0, result[0].Cpu)
|
||||
assert.Equal(t, 200.0, result[0].Mem)
|
||||
}
|
||||
|
||||
func TestAverageContainerStatsSlice_ManyContainers(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "a", Cpu: 10.0, Mem: 100.0},
|
||||
{Name: "b", Cpu: 20.0, Mem: 200.0},
|
||||
{Name: "c", Cpu: 30.0, Mem: 300.0},
|
||||
{Name: "d", Cpu: 40.0, Mem: 400.0},
|
||||
},
|
||||
{
|
||||
{Name: "a", Cpu: 20.0, Mem: 200.0},
|
||||
{Name: "b", Cpu: 30.0, Mem: 300.0},
|
||||
{Name: "c", Cpu: 40.0, Mem: 400.0},
|
||||
{Name: "d", Cpu: 50.0, Mem: 500.0},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||
|
||||
require.Len(t, result, 4)
|
||||
assert.Equal(t, 15.0, result[0].Cpu)
|
||||
assert.Equal(t, 25.0, result[1].Cpu)
|
||||
assert.Equal(t, 35.0, result[2].Cpu)
|
||||
assert.Equal(t, 45.0, result[3].Cpu)
|
||||
}
|
||||
internal/records/records_deletion.go (new file, 138 lines)
@@ -0,0 +1,138 @@
package records

import (
    "fmt"
    "log/slog"
    "strings"
    "time"

    "github.com/pocketbase/dbx"
    "github.com/pocketbase/pocketbase/core"
)

// Delete old records
func (rm *RecordManager) DeleteOldRecords() {
    rm.app.RunInTransaction(func(txApp core.App) error {
        err := deleteOldSystemStats(txApp)
        if err != nil {
            slog.Error("Error deleting old system stats", "err", err)
        }
        err = deleteOldContainerRecords(txApp)
        if err != nil {
            slog.Error("Error deleting old container records", "err", err)
        }
        err = deleteOldSystemdServiceRecords(txApp)
        if err != nil {
            slog.Error("Error deleting old systemd service records", "err", err)
        }
        err = deleteOldAlertsHistory(txApp, 200, 250)
        if err != nil {
            slog.Error("Error deleting old alerts history", "err", err)
        }
        err = deleteOldQuietHours(txApp)
        if err != nil {
            slog.Error("Error deleting old quiet hours", "err", err)
        }
        return nil
    })
}
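
DeleteOldRecords logs each cleanup error but always returns nil from the transaction closure, so a failure in one step does not abort the others. The hub is expected to call it on a schedule; a minimal sketch of such wiring (hypothetical, not part of this commit — the actual scheduling lives elsewhere in the hub):

    ticker := time.NewTicker(10 * time.Minute) // interval chosen only for illustration
    defer ticker.Stop()
    for range ticker.C {
        rm.DeleteOldRecords()
    }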

// Delete old alerts history records
func deleteOldAlertsHistory(app core.App, countToKeep, countBeforeDeletion int) error {
    db := app.DB()
    var users []struct {
        Id string `db:"user"`
    }
    err := db.NewQuery("SELECT user, COUNT(*) as count FROM alerts_history GROUP BY user HAVING count > {:countBeforeDeletion}").Bind(dbx.Params{"countBeforeDeletion": countBeforeDeletion}).All(&users)
    if err != nil {
        return err
    }
    for _, user := range users {
        _, err = db.NewQuery("DELETE FROM alerts_history WHERE user = {:user} AND id NOT IN (SELECT id FROM alerts_history WHERE user = {:user} ORDER BY created DESC LIMIT {:countToKeep})").Bind(dbx.Params{"user": user.Id, "countToKeep": countToKeep}).Execute()
        if err != nil {
            return err
        }
    }
    return nil
}
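
With the thresholds DeleteOldRecords passes in (countToKeep = 200, countBeforeDeletion = 250), only users holding more than 250 alert rows are touched, and those are trimmed to their 200 most recent rows: a user with 260 rows ends up with 200, while a user with 240 rows keeps all of them, which is the behaviour the new deletion test asserts.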

// Deletes system_stats records older than what is displayed in the UI
func deleteOldSystemStats(app core.App) error {
    // Collections to process
    collections := [2]string{"system_stats", "container_stats"}

    // Record types and their retention periods
    type RecordDeletionData struct {
        recordType string
        retention  time.Duration
    }
    recordData := []RecordDeletionData{
        {recordType: "1m", retention: time.Hour},             // 1 hour
        {recordType: "10m", retention: 12 * time.Hour},       // 12 hours
        {recordType: "20m", retention: 24 * time.Hour},       // 1 day
        {recordType: "120m", retention: 7 * 24 * time.Hour},  // 7 days
        {recordType: "480m", retention: 30 * 24 * time.Hour}, // 30 days
    }

    now := time.Now().UTC()

    for _, collection := range collections {
        // Build the WHERE clause
        var conditionParts []string
        var params dbx.Params = make(map[string]any)
        for i := range recordData {
            rd := recordData[i]
            // Create parameterized condition for this record type
            dateParam := fmt.Sprintf("date%d", i)
            conditionParts = append(conditionParts, fmt.Sprintf("(type = '%s' AND created < {:%s})", rd.recordType, dateParam))
            params[dateParam] = now.Add(-rd.retention)
        }
        // Combine conditions with OR
        conditionStr := strings.Join(conditionParts, " OR ")
        // Construct and execute the full raw query
        rawQuery := fmt.Sprintf("DELETE FROM %s WHERE %s", collection, conditionStr)
        if _, err := app.DB().NewQuery(rawQuery).Bind(params).Execute(); err != nil {
            return fmt.Errorf("failed to delete from %s: %v", collection, err)
        }
    }
    return nil
}
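
For each collection this builds a single parameterized DELETE. With the retention table above, the generated statement has the following shape (shown for system_stats; each {:dateN} placeholder is bound to now minus the corresponding retention period):

    DELETE FROM system_stats WHERE (type = '1m' AND created < {:date0}) OR (type = '10m' AND created < {:date1}) OR (type = '20m' AND created < {:date2}) OR (type = '120m' AND created < {:date3}) OR (type = '480m' AND created < {:date4})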

// Deletes systemd service records that haven't been updated in the last 20 minutes
func deleteOldSystemdServiceRecords(app core.App) error {
    now := time.Now().UTC()
    twentyMinutesAgo := now.Add(-20 * time.Minute)

    // Delete systemd service records where updated < twentyMinutesAgo
    _, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
    if err != nil {
        return fmt.Errorf("failed to delete old systemd service records: %v", err)
    }

    return nil
}

// Deletes container records that haven't been updated in the last 10 minutes
func deleteOldContainerRecords(app core.App) error {
    now := time.Now().UTC()
    tenMinutesAgo := now.Add(-10 * time.Minute)

    // Delete container records where updated < tenMinutesAgo
    _, err := app.DB().NewQuery("DELETE FROM containers WHERE updated < {:updated}").Bind(dbx.Params{"updated": tenMinutesAgo.UnixMilli()}).Execute()
    if err != nil {
        return fmt.Errorf("failed to delete old container records: %v", err)
    }

    return nil
}

// Deletes old quiet hours records where end date has passed
func deleteOldQuietHours(app core.App) error {
    now := time.Now().UTC()
    _, err := app.DB().NewQuery("DELETE FROM quiet_hours WHERE type = 'one-time' AND end < {:now}").Bind(dbx.Params{"now": now}).Execute()
    if err != nil {
        return err
    }

    return nil
}
internal/records/records_deletion_test.go (new file, 428 lines)
@@ -0,0 +1,428 @@
|
||||
//go:build testing
|
||||
|
||||
package records_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/records"
|
||||
"github.com/henrygd/beszel/internal/tests"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)

// TestDeleteOldRecords tests the main DeleteOldRecords function
func TestDeleteOldRecords(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	rm := records.NewRecordManager(hub)

	// Create test user for alerts history
	user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
	require.NoError(t, err)

	// Create test system
	system, err := tests.CreateRecord(hub, "systems", map[string]any{
		"name":   "test-system",
		"host":   "localhost",
		"port":   "45876",
		"status": "up",
		"users":  []string{user.Id},
	})
	require.NoError(t, err)

	now := time.Now()

	// Create an old system_stats record that should be deleted
	var record *core.Record
	record, err = tests.CreateRecord(hub, "system_stats", map[string]any{
		"system": system.Id,
		"type":   "1m",
		"stats":  `{"cpu": 50.0, "mem": 1024}`,
	})
	require.NoError(t, err)
	// created is an autodate field, so we need to set it manually
	record.SetRaw("created", now.UTC().Add(-2*time.Hour).Format(types.DefaultDateLayout))
	err = hub.SaveNoValidate(record)
	require.NoError(t, err)
	require.NotNil(t, record)
	require.InDelta(t, record.GetDateTime("created").Time().UTC().Unix(), now.UTC().Add(-2*time.Hour).Unix(), 1)
	require.Equal(t, record.Get("system"), system.Id)
	require.Equal(t, record.Get("type"), "1m")

	// Create recent system_stats record that should be kept
	_, err = tests.CreateRecord(hub, "system_stats", map[string]any{
		"system":  system.Id,
		"type":    "1m",
		"stats":   `{"cpu": 30.0, "mem": 512}`,
		"created": now.Add(-30 * time.Minute), // 30 minutes old, should be kept
	})
	require.NoError(t, err)

	// Create many alerts history records to trigger deletion
	for i := range 260 { // More than countBeforeDeletion (250)
		_, err = tests.CreateRecord(hub, "alerts_history", map[string]any{
			"user":    user.Id,
			"name":    "CPU",
			"value":   i + 1,
			"system":  system.Id,
			"created": now.Add(-time.Duration(i) * time.Minute),
		})
		require.NoError(t, err)
	}

	// Count records before deletion
	systemStatsCountBefore, err := hub.CountRecords("system_stats")
	require.NoError(t, err)
	alertsCountBefore, err := hub.CountRecords("alerts_history")
	require.NoError(t, err)

	// Run deletion
	rm.DeleteOldRecords()

	// Count records after deletion
	systemStatsCountAfter, err := hub.CountRecords("system_stats")
	require.NoError(t, err)
	alertsCountAfter, err := hub.CountRecords("alerts_history")
	require.NoError(t, err)

	// Verify old system stats were deleted
	assert.Less(t, systemStatsCountAfter, systemStatsCountBefore, "Old system stats should be deleted")

	// Verify alerts history was trimmed
	assert.Less(t, alertsCountAfter, alertsCountBefore, "Excessive alerts history should be deleted")
	assert.Equal(t, alertsCountAfter, int64(200), "Alerts count should be equal to countToKeep (200)")
}

// TestDeleteOldSystemStats tests the deleteOldSystemStats function
func TestDeleteOldSystemStats(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	// Create test user and system
	user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
	require.NoError(t, err)

	system, err := tests.CreateRecord(hub, "systems", map[string]any{
		"name":   "test-system",
		"host":   "localhost",
		"port":   "45876",
		"status": "up",
		"users":  []string{user.Id},
	})
	require.NoError(t, err)

	now := time.Now().UTC()

	// Test data for different record types and their retention periods
	testCases := []struct {
		recordType   string
		retention    time.Duration
		shouldBeKept bool
		ageFromNow   time.Duration
		description  string
	}{
		{"1m", time.Hour, true, 30 * time.Minute, "1m record within 1 hour should be kept"},
		{"1m", time.Hour, false, 2 * time.Hour, "1m record older than 1 hour should be deleted"},
		{"10m", 12 * time.Hour, true, 6 * time.Hour, "10m record within 12 hours should be kept"},
		{"10m", 12 * time.Hour, false, 24 * time.Hour, "10m record older than 12 hours should be deleted"},
		{"20m", 24 * time.Hour, true, 12 * time.Hour, "20m record within 24 hours should be kept"},
		{"20m", 24 * time.Hour, false, 48 * time.Hour, "20m record older than 24 hours should be deleted"},
		{"120m", 7 * 24 * time.Hour, true, 3 * 24 * time.Hour, "120m record within 7 days should be kept"},
		{"120m", 7 * 24 * time.Hour, false, 10 * 24 * time.Hour, "120m record older than 7 days should be deleted"},
		{"480m", 30 * 24 * time.Hour, true, 15 * 24 * time.Hour, "480m record within 30 days should be kept"},
		{"480m", 30 * 24 * time.Hour, false, 45 * 24 * time.Hour, "480m record older than 30 days should be deleted"},
	}

	// Create test records for both system_stats and container_stats
	collections := []string{"system_stats", "container_stats"}
	recordIds := make(map[string][]string)

	for _, collection := range collections {
		recordIds[collection] = make([]string, 0)

		for i, tc := range testCases {
			recordTime := now.Add(-tc.ageFromNow)

			var stats string
			if collection == "system_stats" {
				stats = fmt.Sprintf(`{"cpu": %d.0, "mem": %d}`, i*10, i*100)
			} else {
				stats = fmt.Sprintf(`[{"name": "container%d", "cpu": %d.0, "mem": %d}]`, i, i*5, i*50)
			}

			record, err := tests.CreateRecord(hub, collection, map[string]any{
				"system": system.Id,
				"type":   tc.recordType,
				"stats":  stats,
			})
			require.NoError(t, err)
			record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
			err = hub.SaveNoValidate(record)
			require.NoError(t, err)
			recordIds[collection] = append(recordIds[collection], record.Id)
		}
	}

	// Run deletion
	err = records.DeleteOldSystemStats(hub)
	require.NoError(t, err)

	// Verify results
	for _, collection := range collections {
		for i, tc := range testCases {
			recordId := recordIds[collection][i]

			// Try to find the record
			_, err := hub.FindRecordById(collection, recordId)

			if tc.shouldBeKept {
				assert.NoError(t, err, "Record should exist: %s", tc.description)
			} else {
				assert.Error(t, err, "Record should be deleted: %s", tc.description)
			}
		}
	}
}

// TestDeleteOldAlertsHistory tests the deleteOldAlertsHistory function
func TestDeleteOldAlertsHistory(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	// Create test users
	user1, err := tests.CreateUser(hub, "user1@example.com", "testtesttest")
	require.NoError(t, err)

	user2, err := tests.CreateUser(hub, "user2@example.com", "testtesttest")
	require.NoError(t, err)

	system, err := tests.CreateRecord(hub, "systems", map[string]any{
		"name":   "test-system",
		"host":   "localhost",
		"port":   "45876",
		"status": "up",
		"users":  []string{user1.Id, user2.Id},
	})
	require.NoError(t, err)
	now := time.Now().UTC()

	testCases := []struct {
		name                  string
		user                  *core.Record
		alertCount            int
		countToKeep           int
		countBeforeDeletion   int
		expectedAfterDeletion int
		description           string
	}{
		{
			name:                  "User with few alerts (below threshold)",
			user:                  user1,
			alertCount:            100,
			countToKeep:           50,
			countBeforeDeletion:   150,
			expectedAfterDeletion: 100, // No deletion because below threshold
			description:           "User with alerts below countBeforeDeletion should not have any deleted",
		},
		{
			name:                  "User with many alerts (above threshold)",
			user:                  user2,
			alertCount:            300,
			countToKeep:           100,
			countBeforeDeletion:   200,
			expectedAfterDeletion: 100, // Should be trimmed to countToKeep
			description:           "User with alerts above countBeforeDeletion should be trimmed to countToKeep",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Create alerts for this user
			for i := 0; i < tc.alertCount; i++ {
				_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
					"user":    tc.user.Id,
					"name":    "CPU",
					"value":   i + 1,
					"system":  system.Id,
					"created": now.Add(-time.Duration(i) * time.Minute),
				})
				require.NoError(t, err)
			}

			// Count before deletion
			countBefore, err := hub.CountRecords("alerts_history",
				dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
			require.NoError(t, err)
			assert.Equal(t, int64(tc.alertCount), countBefore, "Initial count should match")

			// Run deletion
			err = records.DeleteOldAlertsHistory(hub, tc.countToKeep, tc.countBeforeDeletion)
			require.NoError(t, err)

			// Count after deletion
			countAfter, err := hub.CountRecords("alerts_history",
				dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
			require.NoError(t, err)

			assert.Equal(t, int64(tc.expectedAfterDeletion), countAfter, tc.description)

			// If deletion occurred, verify the most recent records were kept
			if tc.expectedAfterDeletion < tc.alertCount {
				records, err := hub.FindRecordsByFilter("alerts_history",
					"user = {:user}",
					"-created", // Order by created DESC
					tc.countToKeep,
					0,
					map[string]any{"user": tc.user.Id})
				require.NoError(t, err)
				assert.Len(t, records, tc.expectedAfterDeletion, "Should have exactly countToKeep records")

				// Verify records are in descending order by created time
				for i := 1; i < len(records); i++ {
					prev := records[i-1].GetDateTime("created").Time()
					curr := records[i].GetDateTime("created").Time()
					assert.True(t, prev.After(curr) || prev.Equal(curr),
						"Records should be ordered by created time (newest first)")
				}
			}
		})
	}
}
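
// TestDeleteOldAlertsHistory above pins down the trimming contract: once a
// user holds more than countBeforeDeletion rows, everything except that
// user's newest countToKeep rows is removed. A hedged sketch of a query
// satisfying that contract (the helper name and exact SQL are assumptions
// for illustration, not the repository's implementation):

func trimAlertsHistoryForUser(app core.App, userID string, countToKeep int) error {
	_, err := app.DB().NewQuery(`
		DELETE FROM alerts_history
		WHERE user = {:user} AND id NOT IN (
			SELECT id FROM alerts_history
			WHERE user = {:user}
			ORDER BY created DESC
			LIMIT {:keep}
		)`).
		Bind(dbx.Params{"user": userID, "keep": countToKeep}).
		Execute()
	return err
}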

// TestDeleteOldAlertsHistoryEdgeCases tests edge cases for alerts history deletion
func TestDeleteOldAlertsHistoryEdgeCases(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	t.Run("No users with excessive alerts", func(t *testing.T) {
		// Create user with few alerts
		user, err := tests.CreateUser(hub, "few@example.com", "testtesttest")
		require.NoError(t, err)

		system, err := tests.CreateRecord(hub, "systems", map[string]any{
			"name":   "test-system",
			"host":   "localhost",
			"port":   "45876",
			"status": "up",
			"users":  []string{user.Id},
		})
		require.NoError(t, err)

		// Create only 5 alerts (well below threshold)
		for i := range 5 {
			_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
				"user":   user.Id,
				"name":   "CPU",
				"value":  i + 1,
				"system": system.Id,
			})
			require.NoError(t, err)
		}

		// Should not error and should not delete anything
		err = records.DeleteOldAlertsHistory(hub, 10, 20)
		require.NoError(t, err)

		count, err := hub.CountRecords("alerts_history")
		require.NoError(t, err)
		assert.Equal(t, int64(5), count, "All alerts should remain")
	})

	t.Run("Empty alerts_history table", func(t *testing.T) {
		// Clear any existing alerts
		_, err := hub.DB().NewQuery("DELETE FROM alerts_history").Execute()
		require.NoError(t, err)

		// Should not error with empty table
		err = records.DeleteOldAlertsHistory(hub, 10, 20)
		require.NoError(t, err)
	})
}

// TestDeleteOldSystemdServiceRecords tests systemd service cleanup via DeleteOldRecords
func TestDeleteOldSystemdServiceRecords(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	rm := records.NewRecordManager(hub)

	// Create test user and system
	user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
	require.NoError(t, err)

	system, err := tests.CreateRecord(hub, "systems", map[string]any{
		"name":   "test-system",
		"host":   "localhost",
		"port":   "45876",
		"status": "up",
		"users":  []string{user.Id},
	})
	require.NoError(t, err)

	now := time.Now().UTC()

	// Create an old systemd service record that should be deleted (older than 20 minutes)
	oldRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
		"system":  system.Id,
		"name":    "nginx.service",
		"state":   0, // Active
		"sub":     1, // Running
		"cpu":     5.0,
		"cpuPeak": 10.0,
		"memory":  1024000,
		"memPeak": 2048000,
	})
	require.NoError(t, err)
	// Set updated time to 25 minutes ago (should be deleted)
	oldRecord.SetRaw("updated", now.Add(-25*time.Minute).UnixMilli())
	err = hub.SaveNoValidate(oldRecord)
	require.NoError(t, err)

	// Create a recent systemd service record that should be kept (within 20 minutes)
	recentRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
		"system":  system.Id,
		"name":    "apache.service",
		"state":   1, // Inactive
		"sub":     0, // Dead
		"cpu":     2.0,
		"cpuPeak": 3.0,
		"memory":  512000,
		"memPeak": 1024000,
	})
	require.NoError(t, err)
	// Set updated time to 10 minutes ago (should be kept)
	recentRecord.SetRaw("updated", now.Add(-10*time.Minute).UnixMilli())
	err = hub.SaveNoValidate(recentRecord)
	require.NoError(t, err)

	// Count records before deletion
	countBefore, err := hub.CountRecords("systemd_services")
	require.NoError(t, err)
	assert.Equal(t, int64(2), countBefore, "Should have 2 systemd service records initially")

	// Run deletion via RecordManager
	rm.DeleteOldRecords()

	// Count records after deletion
	countAfter, err := hub.CountRecords("systemd_services")
	require.NoError(t, err)
	assert.Equal(t, int64(1), countAfter, "Should have 1 systemd service record after deletion")

	// Verify the correct record was kept
	remainingRecords, err := hub.FindRecordsByFilter("systemd_services", "", "", 10, 0, nil)
	require.NoError(t, err)
	assert.Len(t, remainingRecords, 1, "Should have exactly 1 record remaining")
	assert.Equal(t, "apache.service", remainingRecords[0].Get("name"), "The recent record should be kept")
}

@@ -3,430 +3,15 @@
(This hunk removes the same test functions shown above in records_deletion_test.go.)

// TestRecordManagerCreation tests RecordManager creation
func TestRecordManagerCreation(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())