Compare commits

...

16 Commits

Author SHA1 Message Date
henrygd
8e2316f845 refactor: simplify/improve status alert handling (#1519)
also adds new functionality to restore any pending down alerts that were lost when the hub restarted before they were created
2026-03-12 15:53:40 -04:00
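As a rough illustration of that restore behavior (names and types here are assumed, not taken from the hub code): on startup the hub re-queues a pending down alert for any system that is still down, so a restart inside the alert's delay window can't silently drop the notification.

```go
package hub

import "time"

// Hypothetical, simplified types; the real hub stores these as database records.
type system struct {
	ID     string
	Status string // "up" or "down"
}

type statusAlert struct {
	SystemID string
	Delay    time.Duration // wait this long after going down before notifying
	Pending  bool          // down was seen but the notification has not fired yet
}

// restorePendingDownAlerts re-queues down alerts for systems that were already
// down when the hub restarted, so a restart can't silently drop a notification
// that was still inside its delay window.
func restorePendingDownAlerts(systems []system, alerts map[string]*statusAlert) {
	for _, s := range systems {
		if s.Status != "down" {
			continue
		}
		if a, ok := alerts[s.ID]; ok && !a.Pending {
			a.Pending = true
		}
	}
}
```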
Sven van Ginkel
0d3dfcb207 fix(hub): check if status alert is triggered before sending up alert (#1806) 2026-03-12 13:38:42 -04:00
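A minimal sketch of the guard this commit describes, with assumed field names: an "up" notification is only sent if the matching down alert actually fired, so an outage that recovers inside the delay window is resolved silently.

```go
package hub

// statusAlertState is an assumed, simplified shape of the per-system alert state.
type statusAlertState struct {
	Triggered bool // set once the "down" notification was actually sent
}

// handleSystemUp only notifies if the matching down alert fired; otherwise
// there is nothing to resolve and no "up" message is sent.
func handleSystemUp(alert *statusAlertState, notify func(msg string)) {
	if !alert.Triggered {
		return
	}
	notify("system is back up")
	alert.Triggered = false
}
```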
henrygd
b386ce5190 hub: add ExpiryMap.UpdateExpiration and sync SMART fetch intervals (#1800)
- Update smartFetchMap expiration when agent smart interval changes
- Prevent background SMART fetching before initial system details are
loaded
- Add buffer to SMART fetch timing check
- Get rid of unnecessary pointers in expirymap
2026-03-11 16:25:52 -04:00
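A simplified TTL-map sketch illustrating what an UpdateExpiration method does; this is a stand-in with assumed internals, not the repo's actual expirymap implementation.

```go
package expirymap

import (
	"sync"
	"time"
)

// Map is a tiny TTL map sketch: values are stored with a deadline and
// GetOk hides entries whose deadline has passed.
type Map[K comparable, V any] struct {
	mu      sync.Mutex
	entries map[K]entry[V]
}

type entry[V any] struct {
	val     V
	expires time.Time
}

func New[K comparable, V any]() *Map[K, V] {
	return &Map[K, V]{entries: make(map[K]entry[V])}
}

func (m *Map[K, V]) Set(k K, v V, ttl time.Duration) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.entries[k] = entry[V]{val: v, expires: time.Now().Add(ttl)}
}

func (m *Map[K, V]) GetOk(k K) (V, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	e, ok := m.entries[k]
	if !ok || time.Now().After(e.expires) {
		var zero V
		return zero, false
	}
	return e.val, true
}

// UpdateExpiration moves an existing entry's deadline without touching its
// value, e.g. when an agent reports a new SMART interval and the cached
// "last fetch" marker should follow the new interval.
func (m *Map[K, V]) UpdateExpiration(k K, ttl time.Duration) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	e, ok := m.entries[k]
	if !ok {
		return false
	}
	e.expires = time.Now().Add(ttl)
	m.entries[k] = e
	return true
}
```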
henrygd
e527534016 ensure deprecated system fields are migrated to newer structures
also removes references to the legacy load average fields (l1, l5, l15) that were only around for a very short period
2026-03-10 18:46:57 -04:00
Victor Eduardo
ec7ad632a9 fix: Use historical records to average disk usage for extra disk alerts (#1801)
- Introduced a new test file `alerts_disk_test.go` to validate the behavior of disk alerts using historical data for extra filesystems.
- Enhanced the `HandleSystemAlerts` function to correctly calculate disk usage for extra filesystems based on historical records.
- Updated the `SystemAlertStats` struct to include `ExtraFs` for tracking additional filesystem statistics.
2026-03-09 18:32:35 -04:00
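A sketch of the averaging approach, assuming a simplified record shape: alert on the mean used-percentage of each extra filesystem over recent history rather than on the latest sample alone.

```go
package alerts

// fsSample is a simplified stand-in for one historical record's stats for a
// single extra filesystem (values in GB, as in the agent's stats).
type fsSample struct {
	DiskUsed  float64
	DiskTotal float64
}

// averageExtraFsPct returns the mean used-percentage across the supplied
// historical samples, so a single spiky reading doesn't trip the alert.
func averageExtraFsPct(samples []fsSample) (float64, bool) {
	var sum float64
	var n int
	for _, s := range samples {
		if s.DiskTotal <= 0 {
			continue
		}
		sum += s.DiskUsed / s.DiskTotal * 100
		n++
	}
	if n == 0 {
		return 0, false
	}
	return sum / float64(n), true
}
```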
VACInc
963fce5a33 agent: mark mdraid rebuild as warning, not failed (#1797) 2026-03-09 17:54:53 -04:00
Sven van Ginkel
d38c0da06d fix: bypass NIC auto-filter when interface is explicitly whitelisted via NICS (#1805)
Co-authored-by: henrygd <hank@henrygd.me>
2026-03-09 17:47:59 -04:00
henrygd
cae6ac4626 update go version to 1.26.1 2026-03-09 16:10:38 -04:00
henrygd
6b1ff264f2 gpu(amd): add workaround for misreported sysfs filesize (#1799) 2026-03-09 14:53:52 -04:00
henrygd
35d0e792ad refactor(expirymap): optimize performance and add StopCleaner method 2026-03-08 19:09:41 -04:00
henrygd
654cd06b19 respect SMART_INTERVAL across agent reconnects (#1800)
Move tracking of the last SMART data fetch from individual System
instances to the SystemManager using a TTL-based ExpiryMap.

This ensures that the SMART_INTERVAL is respected even if an
agent connection is dropped and re-established, preventing
redundant data collection on every reconnect.
2026-03-08 19:03:50 -04:00
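A sketch of the fetch gate this enables, using an assumed minimal TTL-map interface: the manager keeps a per-system "last fetch" marker, and SMART data is only refetched once that marker expires, no matter how many times the agent reconnects.

```go
package hub

import "time"

// ttlMap is the minimal surface this sketch needs; the real code uses the
// repo's expirymap package.
type ttlMap interface {
	GetOk(key string) (time.Time, bool)
	Set(key string, v time.Time, ttl time.Duration)
}

// shouldFetchSmart returns true only when no unexpired "last fetch" marker
// exists for the system, so reconnects inside the interval don't refetch.
func shouldFetchSmart(m ttlMap, systemID string, interval time.Duration) bool {
	if _, ok := m.GetOk(systemID); ok {
		return false
	}
	m.Set(systemID, time.Now(), interval)
	return true
}
```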
henrygd
5e1b028130 refactor(smart): improve perf by skipping ata_device_statistics parsing if unnecessary 2026-03-08 15:19:50 -04:00
henrygd
638e7dc12a fix(smart): handle negative ATA device statistics values (#1791) 2026-03-08 13:34:16 -04:00
henrygd
73c262455d refactor(agent): move GetEnv to utils package 2026-03-07 14:12:17 -05:00
henrygd
0c4d2edd45 refactor(agent): add utils package; rm utils.go and fs_utils.go 2026-03-07 13:50:49 -05:00
henrygd
8f23fff1c9 refactor: mdraid comments and organization
also hide serial / firmware in smart details if empty, remove a few
unnecessary ops, and add a few more passed state values
2026-02-27 14:23:10 -05:00
52 changed files with 2140 additions and 731 deletions

View File

@@ -6,7 +6,6 @@ package agent
import (
"log/slog"
"os"
"strings"
"sync"
"time"
@@ -14,6 +13,7 @@ import (
"github.com/gliderlabs/ssh"
"github.com/henrygd/beszel"
"github.com/henrygd/beszel/agent/deltatracker"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/common"
"github.com/henrygd/beszel/internal/entities/system"
gossh "golang.org/x/crypto/ssh"
@@ -68,11 +68,11 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
slog.Info("Data directory", "path", agent.dataDir)
}
agent.memCalc, _ = GetEnv("MEM_CALC")
agent.memCalc, _ = utils.GetEnv("MEM_CALC")
agent.sensorConfig = agent.newSensorConfig()
// Parse disk usage cache duration (e.g., "15m", "1h") to avoid waking sleeping disks
if diskUsageCache, exists := GetEnv("DISK_USAGE_CACHE"); exists {
if diskUsageCache, exists := utils.GetEnv("DISK_USAGE_CACHE"); exists {
if duration, err := time.ParseDuration(diskUsageCache); err == nil {
agent.diskUsageCacheDuration = duration
slog.Info("DISK_USAGE_CACHE", "duration", duration)
@@ -82,7 +82,7 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
}
// Set up slog with a log level determined by the LOG_LEVEL env var
if logLevelStr, exists := GetEnv("LOG_LEVEL"); exists {
if logLevelStr, exists := utils.GetEnv("LOG_LEVEL"); exists {
switch strings.ToLower(logLevelStr) {
case "debug":
agent.debug = true
@@ -103,7 +103,7 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
agent.refreshSystemDetails()
// SMART_INTERVAL env var to update smart data at this interval
if smartIntervalEnv, exists := GetEnv("SMART_INTERVAL"); exists {
if smartIntervalEnv, exists := utils.GetEnv("SMART_INTERVAL"); exists {
if duration, err := time.ParseDuration(smartIntervalEnv); err == nil && duration > 0 {
agent.systemDetails.SmartInterval = duration
slog.Info("SMART_INTERVAL", "duration", duration)
@@ -148,15 +148,6 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
return agent, nil
}
// GetEnv retrieves an environment variable with a "BESZEL_AGENT_" prefix, or falls back to the unprefixed key.
func GetEnv(key string) (value string, exists bool) {
if value, exists = os.LookupEnv("BESZEL_AGENT_" + key); exists {
return value, exists
}
// Fallback to the old unprefixed key
return os.LookupEnv(key)
}
func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedData {
a.Lock()
defer a.Unlock()
@@ -213,7 +204,7 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
data.Stats.ExtraFs[key] = stats
// Add percentages to Info struct for dashboard
if stats.DiskTotal > 0 {
pct := twoDecimals((stats.DiskUsed / stats.DiskTotal) * 100)
pct := utils.TwoDecimals((stats.DiskUsed / stats.DiskTotal) * 100)
data.Info.ExtraFsPct[key] = pct
}
}

View File

@@ -14,6 +14,7 @@ import (
"time"
"github.com/henrygd/beszel"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/common"
"github.com/fxamacker/cbor/v2"
@@ -43,7 +44,7 @@ type WebSocketClient struct {
// newWebSocketClient creates a new WebSocket client for the given agent.
// It reads configuration from environment variables and validates the hub URL.
func newWebSocketClient(agent *Agent) (client *WebSocketClient, err error) {
hubURLStr, exists := GetEnv("HUB_URL")
hubURLStr, exists := utils.GetEnv("HUB_URL")
if !exists {
return nil, errors.New("HUB_URL environment variable not set")
}
@@ -72,12 +73,12 @@ func newWebSocketClient(agent *Agent) (client *WebSocketClient, err error) {
// If neither is set, it returns an error.
func getToken() (string, error) {
// get token from env var
token, _ := GetEnv("TOKEN")
token, _ := utils.GetEnv("TOKEN")
if token != "" {
return token, nil
}
// get token from file
tokenFile, _ := GetEnv("TOKEN_FILE")
tokenFile, _ := utils.GetEnv("TOKEN_FILE")
if tokenFile == "" {
return "", errors.New("must set TOKEN or TOKEN_FILE")
}
@@ -197,7 +198,7 @@ func (client *WebSocketClient) handleAuthChallenge(msg *common.HubRequest[cbor.R
}
if authRequest.NeedSysInfo {
response.Name, _ = GetEnv("SYSTEM_NAME")
response.Name, _ = utils.GetEnv("SYSTEM_NAME")
response.Hostname = client.agent.systemDetails.Hostname
serverAddr := client.agent.connectionManager.serverOptions.Addr
_, response.Port, _ = net.SplitHostPort(serverAddr)

View File

@@ -6,6 +6,8 @@ import (
"os"
"path/filepath"
"runtime"
"github.com/henrygd/beszel/agent/utils"
)
// GetDataDir returns the path to the data directory for the agent and an error
@@ -16,7 +18,7 @@ func GetDataDir(dataDirs ...string) (string, error) {
return testDataDirs(dataDirs)
}
dataDir, _ := GetEnv("DATA_DIR")
dataDir, _ := utils.GetEnv("DATA_DIR")
if dataDir != "" {
dataDirs = append(dataDirs, dataDir)
}

View File

@@ -8,6 +8,7 @@ import (
"strings"
"time"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/system"
"github.com/shirou/gopsutil/v4/disk"
@@ -37,7 +38,7 @@ func isDockerSpecialMountpoint(mountpoint string) bool {
// Sets up the filesystems to monitor for disk usage and I/O.
func (a *Agent) initializeDiskInfo() {
filesystem, _ := GetEnv("FILESYSTEM")
filesystem, _ := utils.GetEnv("FILESYSTEM")
efPath := "/extra-filesystems"
hasRoot := false
isWindows := runtime.GOOS == "windows"
@@ -141,7 +142,7 @@ func (a *Agent) initializeDiskInfo() {
}
// Add EXTRA_FILESYSTEMS env var values to fsStats
if extraFilesystems, exists := GetEnv("EXTRA_FILESYSTEMS"); exists {
if extraFilesystems, exists := utils.GetEnv("EXTRA_FILESYSTEMS"); exists {
for fsEntry := range strings.SplitSeq(extraFilesystems, ",") {
// Parse custom name from format: device__customname
fs, customName := parseFilesystemEntry(fsEntry)
@@ -412,12 +413,12 @@ func (a *Agent) updateDiskUsage(systemStats *system.Stats) {
continue
}
if d, err := disk.Usage(stats.Mountpoint); err == nil {
stats.DiskTotal = bytesToGigabytes(d.Total)
stats.DiskUsed = bytesToGigabytes(d.Used)
stats.DiskTotal = utils.BytesToGigabytes(d.Total)
stats.DiskUsed = utils.BytesToGigabytes(d.Used)
if stats.Root {
systemStats.DiskTotal = bytesToGigabytes(d.Total)
systemStats.DiskUsed = bytesToGigabytes(d.Used)
systemStats.DiskPct = twoDecimals(d.UsedPercent)
systemStats.DiskTotal = utils.BytesToGigabytes(d.Total)
systemStats.DiskUsed = utils.BytesToGigabytes(d.Used)
systemStats.DiskPct = utils.TwoDecimals(d.UsedPercent)
}
} else {
// reset stats if error (likely unmounted)
@@ -470,8 +471,8 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
diskIORead := (d.ReadBytes - prev.readBytes) * 1000 / msElapsed
diskIOWrite := (d.WriteBytes - prev.writeBytes) * 1000 / msElapsed
readMbPerSecond := bytesToMegabytes(float64(diskIORead))
writeMbPerSecond := bytesToMegabytes(float64(diskIOWrite))
readMbPerSecond := utils.BytesToMegabytes(float64(diskIORead))
writeMbPerSecond := utils.BytesToMegabytes(float64(diskIOWrite))
// validate values
if readMbPerSecond > 50_000 || writeMbPerSecond > 50_000 {

View File

@@ -21,6 +21,7 @@ import (
"time"
"github.com/henrygd/beszel/agent/deltatracker"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/container"
"github.com/blang/semver"
@@ -336,12 +337,12 @@ func validateCpuPercentage(cpuPct float64, containerName string) error {
// updateContainerStatsValues updates the final stats values
func updateContainerStatsValues(stats *container.Stats, cpuPct float64, usedMemory uint64, sent_delta, recv_delta uint64, readTime time.Time) {
stats.Cpu = twoDecimals(cpuPct)
stats.Mem = bytesToMegabytes(float64(usedMemory))
stats.Cpu = utils.TwoDecimals(cpuPct)
stats.Mem = utils.BytesToMegabytes(float64(usedMemory))
stats.Bandwidth = [2]uint64{sent_delta, recv_delta}
// TODO(0.19+): stop populating NetworkSent/NetworkRecv (deprecated in 0.18.3)
stats.NetworkSent = bytesToMegabytes(float64(sent_delta))
stats.NetworkRecv = bytesToMegabytes(float64(recv_delta))
stats.NetworkSent = utils.BytesToMegabytes(float64(sent_delta))
stats.NetworkRecv = utils.BytesToMegabytes(float64(recv_delta))
stats.PrevReadTime = readTime
}
@@ -487,7 +488,7 @@ func (dm *dockerManager) deleteContainerStatsSync(id string) {
// Creates a new http client for Docker or Podman API
func newDockerManager() *dockerManager {
dockerHost, exists := GetEnv("DOCKER_HOST")
dockerHost, exists := utils.GetEnv("DOCKER_HOST")
if exists {
// return nil if set to empty string
if dockerHost == "" {
@@ -523,7 +524,7 @@ func newDockerManager() *dockerManager {
// configurable timeout
timeout := time.Millisecond * time.Duration(dockerTimeoutMs)
if t, set := GetEnv("DOCKER_TIMEOUT"); set {
if t, set := utils.GetEnv("DOCKER_TIMEOUT"); set {
timeout, err = time.ParseDuration(t)
if err != nil {
slog.Error(err.Error())
@@ -540,7 +541,7 @@ func newDockerManager() *dockerManager {
// Read container exclusion patterns from environment variable
var excludeContainers []string
if excludeStr, set := GetEnv("EXCLUDE_CONTAINERS"); set && excludeStr != "" {
if excludeStr, set := utils.GetEnv("EXCLUDE_CONTAINERS"); set && excludeStr != "" {
parts := strings.SplitSeq(excludeStr, ",")
for part := range parts {
trimmed := strings.TrimSpace(part)

View File

@@ -18,6 +18,7 @@ import (
"time"
"github.com/henrygd/beszel/agent/deltatracker"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/container"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -298,48 +299,6 @@ func TestUpdateContainerStatsValues(t *testing.T) {
assert.Equal(t, testTime, stats.PrevReadTime)
}
func TestTwoDecimals(t *testing.T) {
tests := []struct {
name string
input float64
expected float64
}{
{"round down", 1.234, 1.23},
{"round half up", 1.235, 1.24}, // math.Round rounds half up
{"no rounding needed", 1.23, 1.23},
{"negative number", -1.235, -1.24}, // math.Round rounds half up (more negative)
{"zero", 0.0, 0.0},
{"large number", 123.456, 123.46}, // rounds 5 up
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := twoDecimals(tt.input)
assert.Equal(t, tt.expected, result)
})
}
}
func TestBytesToMegabytes(t *testing.T) {
tests := []struct {
name string
input float64
expected float64
}{
{"1 MB", 1048576, 1.0},
{"512 KB", 524288, 0.5},
{"zero", 0, 0},
{"large value", 1073741824, 1024}, // 1 GB = 1024 MB
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := bytesToMegabytes(tt.input)
assert.Equal(t, tt.expected, result)
})
}
}
func TestInitializeCpuTracking(t *testing.T) {
dm := &dockerManager{
lastCpuContainer: make(map[uint16]map[string]uint64),
@@ -905,11 +864,11 @@ func TestContainerStatsEndToEndWithRealData(t *testing.T) {
updateContainerStatsValues(testStats, cpuPct, usedMemory, 1000000, 500000, testTime)
assert.Equal(t, cpuPct, testStats.Cpu)
assert.Equal(t, bytesToMegabytes(float64(usedMemory)), testStats.Mem)
assert.Equal(t, utils.BytesToMegabytes(float64(usedMemory)), testStats.Mem)
assert.Equal(t, [2]uint64{1000000, 500000}, testStats.Bandwidth)
// Deprecated fields still populated for backward compatibility with older hubs
assert.Equal(t, bytesToMegabytes(1000000), testStats.NetworkSent)
assert.Equal(t, bytesToMegabytes(500000), testStats.NetworkRecv)
assert.Equal(t, utils.BytesToMegabytes(1000000), testStats.NetworkSent)
assert.Equal(t, utils.BytesToMegabytes(500000), testStats.NetworkRecv)
assert.Equal(t, testTime, testStats.PrevReadTime)
}
@@ -1190,13 +1149,13 @@ func TestConstantsAndUtilityFunctions(t *testing.T) {
assert.Equal(t, 5*1024*1024, maxTotalLogSize) // 5MB
// Test utility functions
assert.Equal(t, 1.5, twoDecimals(1.499))
assert.Equal(t, 1.5, twoDecimals(1.5))
assert.Equal(t, 1.5, twoDecimals(1.501))
assert.Equal(t, 1.5, utils.TwoDecimals(1.499))
assert.Equal(t, 1.5, utils.TwoDecimals(1.5))
assert.Equal(t, 1.5, utils.TwoDecimals(1.501))
assert.Equal(t, 1.0, bytesToMegabytes(1048576)) // 1 MB
assert.Equal(t, 0.5, bytesToMegabytes(524288)) // 512 KB
assert.Equal(t, 0.0, bytesToMegabytes(0))
assert.Equal(t, 1.0, utils.BytesToMegabytes(1048576)) // 1 MB
assert.Equal(t, 0.5, utils.BytesToMegabytes(524288)) // 512 KB
assert.Equal(t, 0.0, utils.BytesToMegabytes(0))
}
func TestDecodeDockerLogStream(t *testing.T) {

View File

@@ -8,6 +8,7 @@ import (
"strconv"
"strings"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/smart"
)
@@ -141,9 +142,9 @@ func readEmmcHealth(blockName string) (emmcHealth, bool) {
out.lifeA = lifeA
out.lifeB = lifeB
out.model = readStringFile(filepath.Join(deviceDir, "name"))
out.serial = readStringFile(filepath.Join(deviceDir, "serial"))
out.revision = readStringFile(filepath.Join(deviceDir, "prv"))
out.model = utils.ReadStringFile(filepath.Join(deviceDir, "name"))
out.serial = utils.ReadStringFile(filepath.Join(deviceDir, "serial"))
out.revision = utils.ReadStringFile(filepath.Join(deviceDir, "prv"))
if capBytes, ok := readBlockCapacityBytes(blockName); ok {
out.capacity = capBytes
@@ -153,7 +154,7 @@ func readEmmcHealth(blockName string) (emmcHealth, bool) {
}
func readLifeTime(deviceDir string) (uint8, uint8, bool) {
if content, ok := readStringFileOK(filepath.Join(deviceDir, "life_time")); ok {
if content, ok := utils.ReadStringFileOK(filepath.Join(deviceDir, "life_time")); ok {
a, b, ok := parseHexBytePair(content)
return a, b, ok
}
@@ -170,7 +171,7 @@ func readBlockCapacityBytes(blockName string) (uint64, bool) {
sizePath := filepath.Join(emmcSysfsRoot, "class", "block", blockName, "size")
lbsPath := filepath.Join(emmcSysfsRoot, "class", "block", blockName, "queue", "logical_block_size")
sizeStr, ok := readStringFileOK(sizePath)
sizeStr, ok := utils.ReadStringFileOK(sizePath)
if !ok {
return 0, false
}
@@ -179,7 +180,7 @@ func readBlockCapacityBytes(blockName string) (uint64, bool) {
return 0, false
}
lbsStr, ok := readStringFileOK(lbsPath)
lbsStr, ok := utils.ReadStringFileOK(lbsPath)
logicalBlockSize := uint64(512)
if ok {
if parsed, err := strconv.ParseUint(lbsStr, 10, 64); err == nil && parsed > 0 {
@@ -191,7 +192,7 @@ func readBlockCapacityBytes(blockName string) (uint64, bool) {
}
func readHexByteFile(path string) (uint8, bool) {
content, ok := readStringFileOK(path)
content, ok := utils.ReadStringFileOK(path)
if !ok {
return 0, false
}

View File

@@ -1,24 +0,0 @@
package agent
import (
"os"
"strings"
)
func readStringFile(path string) string {
content, _ := readStringFileOK(path)
return content
}
func readStringFileOK(path string) (string, bool) {
b, err := os.ReadFile(path)
if err != nil {
return "", false
}
return strings.TrimSpace(string(b)), true
}
func fileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}

View File

@@ -15,6 +15,7 @@ import (
"sync"
"time"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/system"
)
@@ -291,8 +292,8 @@ func (gm *GPUManager) parseAmdData(output []byte) bool {
}
gpu := gm.GpuDataMap[id]
gpu.Temperature, _ = strconv.ParseFloat(v.Temperature, 64)
gpu.MemoryUsed = bytesToMegabytes(memoryUsage)
gpu.MemoryTotal = bytesToMegabytes(totalMemory)
gpu.MemoryUsed = utils.BytesToMegabytes(memoryUsage)
gpu.MemoryTotal = utils.BytesToMegabytes(totalMemory)
gpu.Usage += usage
gpu.Power += power
gpu.Count++
@@ -366,16 +367,16 @@ func (gm *GPUManager) calculateGPUAverage(id string, gpu *system.GPUData, cacheK
gpuAvg := *gpu
deltaUsage, deltaPower, deltaPowerPkg := gm.calculateDeltas(gpu, lastSnapshot)
gpuAvg.Power = twoDecimals(deltaPower / float64(deltaCount))
gpuAvg.Power = utils.TwoDecimals(deltaPower / float64(deltaCount))
if gpu.Engines != nil {
// make fresh map for averaged engine metrics to avoid mutating
// the accumulator map stored in gm.GpuDataMap
gpuAvg.Engines = make(map[string]float64, len(gpu.Engines))
gpuAvg.Usage = gm.calculateIntelGPUUsage(&gpuAvg, gpu, lastSnapshot, deltaCount)
gpuAvg.PowerPkg = twoDecimals(deltaPowerPkg / float64(deltaCount))
gpuAvg.PowerPkg = utils.TwoDecimals(deltaPowerPkg / float64(deltaCount))
} else {
gpuAvg.Usage = twoDecimals(deltaUsage / float64(deltaCount))
gpuAvg.Usage = utils.TwoDecimals(deltaUsage / float64(deltaCount))
}
gm.lastAvgData[id] = gpuAvg
@@ -410,17 +411,17 @@ func (gm *GPUManager) calculateIntelGPUUsage(gpuAvg, gpu *system.GPUData, lastSn
} else {
deltaEngine = engine
}
gpuAvg.Engines[name] = twoDecimals(deltaEngine / float64(deltaCount))
gpuAvg.Engines[name] = utils.TwoDecimals(deltaEngine / float64(deltaCount))
maxEngineUsage = max(maxEngineUsage, deltaEngine/float64(deltaCount))
}
return twoDecimals(maxEngineUsage)
return utils.TwoDecimals(maxEngineUsage)
}
// updateInstantaneousValues updates values that should reflect current state, not averages
func (gm *GPUManager) updateInstantaneousValues(gpuAvg *system.GPUData, gpu *system.GPUData) {
gpuAvg.Temperature = twoDecimals(gpu.Temperature)
gpuAvg.MemoryUsed = twoDecimals(gpu.MemoryUsed)
gpuAvg.MemoryTotal = twoDecimals(gpu.MemoryTotal)
gpuAvg.Temperature = utils.TwoDecimals(gpu.Temperature)
gpuAvg.MemoryUsed = utils.TwoDecimals(gpu.MemoryUsed)
gpuAvg.MemoryTotal = utils.TwoDecimals(gpu.MemoryTotal)
}
// storeSnapshot saves the current GPU state for this cache key
@@ -687,7 +688,7 @@ func (gm *GPUManager) resolveLegacyCollectorPriority(caps gpuCapabilities) []col
priorities := make([]collectorSource, 0, 4)
if caps.hasNvidiaSmi && !caps.hasTegrastats {
if nvml, _ := GetEnv("NVML"); nvml == "true" {
if nvml, _ := utils.GetEnv("NVML"); nvml == "true" {
priorities = append(priorities, collectorSourceNVML, collectorSourceNvidiaSMI)
} else {
priorities = append(priorities, collectorSourceNvidiaSMI)
@@ -695,7 +696,7 @@ func (gm *GPUManager) resolveLegacyCollectorPriority(caps gpuCapabilities) []col
}
if caps.hasRocmSmi {
if val, _ := GetEnv("AMD_SYSFS"); val == "true" {
if val, _ := utils.GetEnv("AMD_SYSFS"); val == "true" {
priorities = append(priorities, collectorSourceAmdSysfs)
} else {
priorities = append(priorities, collectorSourceRocmSMI)
@@ -728,7 +729,7 @@ func (gm *GPUManager) resolveLegacyCollectorPriority(caps gpuCapabilities) []col
// NewGPUManager creates and initializes a new GPUManager
func NewGPUManager() (*GPUManager, error) {
if skipGPU, _ := GetEnv("SKIP_GPU"); skipGPU == "true" {
if skipGPU, _ := utils.GetEnv("SKIP_GPU"); skipGPU == "true" {
return nil, nil
}
var gm GPUManager
@@ -745,7 +746,7 @@ func NewGPUManager() (*GPUManager, error) {
}
// if GPU_COLLECTOR is set, start user-defined collectors.
if collectorConfig, ok := GetEnv("GPU_COLLECTOR"); ok && strings.TrimSpace(collectorConfig) != "" {
if collectorConfig, ok := utils.GetEnv("GPU_COLLECTOR"); ok && strings.TrimSpace(collectorConfig) != "" {
priorities := parseCollectorPriority(collectorConfig)
if gm.startCollectorsByPriority(priorities, caps) == 0 {
return nil, fmt.Errorf("no configured GPU collectors are available")

View File

@@ -13,6 +13,7 @@ import (
"sync"
"time"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/system"
)
@@ -32,8 +33,8 @@ func (gm *GPUManager) hasAmdSysfs() bool {
return false
}
for _, vendorPath := range cards {
vendor, err := os.ReadFile(vendorPath)
if err == nil && strings.TrimSpace(string(vendor)) == "0x1002" {
vendor, err := utils.ReadStringFileLimited(vendorPath, 64)
if err == nil && vendor == "0x1002" {
return true
}
}
@@ -87,12 +88,11 @@ func (gm *GPUManager) collectAmdStats() error {
// isAmdGpu checks whether a DRM card path belongs to AMD vendor ID 0x1002.
func isAmdGpu(cardPath string) bool {
vendorPath := filepath.Join(cardPath, "device/vendor")
vendor, err := os.ReadFile(vendorPath)
vendor, err := utils.ReadStringFileLimited(filepath.Join(cardPath, "device/vendor"), 64)
if err != nil {
return false
}
return strings.TrimSpace(string(vendor)) == "0x1002"
return vendor == "0x1002"
}
// updateAmdGpuData reads GPU metrics from sysfs and updates the GPU data map.
@@ -144,8 +144,8 @@ func (gm *GPUManager) updateAmdGpuData(cardPath string) bool {
if usageErr == nil {
gpu.Usage += usage
}
gpu.MemoryUsed = bytesToMegabytes(memUsed)
gpu.MemoryTotal = bytesToMegabytes(memTotal)
gpu.MemoryUsed = utils.BytesToMegabytes(memUsed)
gpu.MemoryTotal = utils.BytesToMegabytes(memTotal)
gpu.Temperature = temp
gpu.Power += power
gpu.Count++
@@ -154,11 +154,11 @@ func (gm *GPUManager) updateAmdGpuData(cardPath string) bool {
// readSysfsFloat reads and parses a numeric value from a sysfs file.
func readSysfsFloat(path string) (float64, error) {
val, err := os.ReadFile(path)
val, err := utils.ReadStringFileLimited(path, 64)
if err != nil {
return 0, err
}
return strconv.ParseFloat(strings.TrimSpace(string(val)), 64)
return strconv.ParseFloat(val, 64)
}
// normalizeHexID normalizes hex IDs by trimming spaces, lowercasing, and dropping 0x.
@@ -273,16 +273,16 @@ func cacheMissingAmdgpuName(deviceID, revisionID string) {
// Falls back to showing the raw device ID if not found in the lookup table.
func getAmdGpuName(devicePath string) string {
// Try product_name first (works for some enterprise GPUs)
if prod, err := os.ReadFile(filepath.Join(devicePath, "product_name")); err == nil {
return strings.TrimSpace(string(prod))
if prod, err := utils.ReadStringFileLimited(filepath.Join(devicePath, "product_name"), 128); err == nil {
return prod
}
// Read PCI device ID and look it up
if deviceID, err := os.ReadFile(filepath.Join(devicePath, "device")); err == nil {
id := normalizeHexID(string(deviceID))
if deviceID, err := utils.ReadStringFileLimited(filepath.Join(devicePath, "device"), 64); err == nil {
id := normalizeHexID(deviceID)
revision := ""
if revBytes, revErr := os.ReadFile(filepath.Join(devicePath, "revision")); revErr == nil {
revision = normalizeHexID(string(revBytes))
if rev, revErr := utils.ReadStringFileLimited(filepath.Join(devicePath, "revision"), 64); revErr == nil {
revision = normalizeHexID(rev)
}
if name, found, done := getCachedAmdgpuName(id, revision); found {

View File

@@ -7,6 +7,7 @@ import (
"path/filepath"
"testing"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/system"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -128,14 +129,14 @@ func TestUpdateAmdGpuDataWithFakeSysfs(t *testing.T) {
{
name: "sums vram and gtt when gtt is present",
writeGTT: true,
wantMemoryUsed: bytesToMegabytes(1073741824 + 536870912),
wantMemoryTotal: bytesToMegabytes(2147483648 + 4294967296),
wantMemoryUsed: utils.BytesToMegabytes(1073741824 + 536870912),
wantMemoryTotal: utils.BytesToMegabytes(2147483648 + 4294967296),
},
{
name: "falls back to vram when gtt is missing",
writeGTT: false,
wantMemoryUsed: bytesToMegabytes(1073741824),
wantMemoryTotal: bytesToMegabytes(2147483648),
wantMemoryUsed: utils.BytesToMegabytes(1073741824),
wantMemoryTotal: utils.BytesToMegabytes(2147483648),
},
}

View File

@@ -7,6 +7,7 @@ import (
"strconv"
"strings"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/system"
)
@@ -52,7 +53,7 @@ func (gm *GPUManager) updateIntelFromStats(sample *intelGpuStats) bool {
func (gm *GPUManager) collectIntelStats() (err error) {
// Build command arguments, optionally selecting a device via -d
args := []string{"-s", intelGpuStatsInterval, "-l"}
if dev, ok := GetEnv("INTEL_GPU_DEVICE"); ok && dev != "" {
if dev, ok := utils.GetEnv("INTEL_GPU_DEVICE"); ok && dev != "" {
args = append(args, "-d", dev)
}
cmd := exec.Command(intelGpuStatsCmd, args...)

View File

@@ -9,6 +9,7 @@ import (
"strings"
"time"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/system"
)
@@ -80,10 +81,10 @@ func (gm *GPUManager) updateNvtopSnapshots(snapshots []nvtopSnapshot) bool {
gpu.Temperature = parseNvtopNumber(*sample.Temp)
}
if sample.MemUsed != nil {
gpu.MemoryUsed = bytesToMegabytes(parseNvtopNumber(*sample.MemUsed))
gpu.MemoryUsed = utils.BytesToMegabytes(parseNvtopNumber(*sample.MemUsed))
}
if sample.MemTotal != nil {
gpu.MemoryTotal = bytesToMegabytes(parseNvtopNumber(*sample.MemTotal))
gpu.MemoryTotal = utils.BytesToMegabytes(parseNvtopNumber(*sample.MemTotal))
}
if sample.GpuUtil != nil {
gpu.Usage += parseNvtopNumber(*sample.GpuUtil)

View File

@@ -10,6 +10,7 @@ import (
"testing"
"time"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/system"
"github.com/stretchr/testify/assert"
@@ -265,8 +266,8 @@ func TestParseNvtopData(t *testing.T) {
assert.Equal(t, 48.0, g0.Temperature)
assert.Equal(t, 5.0, g0.Usage)
assert.Equal(t, 13.0, g0.Power)
assert.Equal(t, bytesToMegabytes(349372416), g0.MemoryUsed)
assert.Equal(t, bytesToMegabytes(4294967296), g0.MemoryTotal)
assert.Equal(t, utils.BytesToMegabytes(349372416), g0.MemoryUsed)
assert.Equal(t, utils.BytesToMegabytes(4294967296), g0.MemoryTotal)
assert.Equal(t, 1.0, g0.Count)
g1, ok := gm.GpuDataMap["n1"]
@@ -275,8 +276,8 @@ func TestParseNvtopData(t *testing.T) {
assert.Equal(t, 48.0, g1.Temperature)
assert.Equal(t, 12.0, g1.Usage)
assert.Equal(t, 9.0, g1.Power)
assert.Equal(t, bytesToMegabytes(1213784064), g1.MemoryUsed)
assert.Equal(t, bytesToMegabytes(16929173504), g1.MemoryTotal)
assert.Equal(t, utils.BytesToMegabytes(1213784064), g1.MemoryUsed)
assert.Equal(t, utils.BytesToMegabytes(16929173504), g1.MemoryTotal)
assert.Equal(t, 1.0, g1.Count)
}

View File

@@ -9,6 +9,7 @@ import (
"strconv"
"strings"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/smart"
)
@@ -16,7 +17,6 @@ import (
var mdraidSysfsRoot = "/sys"
type mdraidHealth struct {
name string
level string
arrayState string
degraded uint64
@@ -28,6 +28,7 @@ type mdraidHealth struct {
capacity uint64
}
// scanMdraidDevices discovers Linux md arrays exposed in sysfs.
func scanMdraidDevices() []*DeviceInfo {
blockDir := filepath.Join(mdraidSysfsRoot, "block")
entries, err := os.ReadDir(blockDir)
@@ -42,7 +43,7 @@ func scanMdraidDevices() []*DeviceInfo {
continue
}
mdDir := filepath.Join(blockDir, name, "md")
if !fileExists(filepath.Join(mdDir, "array_state")) {
if !utils.FileExists(filepath.Join(mdDir, "array_state")) {
continue
}
@@ -58,6 +59,7 @@ func scanMdraidDevices() []*DeviceInfo {
return devices
}
// collectMdraidHealth reads mdraid health and stores it in SmartDataMap.
func (sm *SmartManager) collectMdraidHealth(deviceInfo *DeviceInfo) (bool, error) {
if deviceInfo == nil || deviceInfo.Name == "" {
return false, nil
@@ -115,19 +117,16 @@ func (sm *SmartManager) collectMdraidHealth(deviceInfo *DeviceInfo) (bool, error
if health.level != "" {
data.ModelName = "Linux MD RAID (" + health.level + ")"
}
data.SerialNumber = ""
data.FirmwareVersion = ""
data.Capacity = health.capacity
data.Temperature = 0
data.SmartStatus = status
data.DiskName = filepath.Join("/dev", base)
data.DiskType = "mdraid"
data.Attributes = attrs
sm.SmartDataMap[key] = data
return true, nil
}
// readMdraidHealth reads md array health fields from sysfs.
func readMdraidHealth(blockName string) (mdraidHealth, bool) {
var out mdraidHealth
@@ -136,25 +135,24 @@ func readMdraidHealth(blockName string) (mdraidHealth, bool) {
}
mdDir := filepath.Join(mdraidSysfsRoot, "block", blockName, "md")
arrayState, okState := readStringFileOK(filepath.Join(mdDir, "array_state"))
arrayState, okState := utils.ReadStringFileOK(filepath.Join(mdDir, "array_state"))
if !okState {
return out, false
}
out.name = blockName
out.arrayState = arrayState
out.level = readStringFile(filepath.Join(mdDir, "level"))
out.syncAction = readStringFile(filepath.Join(mdDir, "sync_action"))
out.syncCompleted = readStringFile(filepath.Join(mdDir, "sync_completed"))
out.syncSpeed = readStringFile(filepath.Join(mdDir, "sync_speed"))
out.level = utils.ReadStringFile(filepath.Join(mdDir, "level"))
out.syncAction = utils.ReadStringFile(filepath.Join(mdDir, "sync_action"))
out.syncCompleted = utils.ReadStringFile(filepath.Join(mdDir, "sync_completed"))
out.syncSpeed = utils.ReadStringFile(filepath.Join(mdDir, "sync_speed"))
if val, ok := readUintFile(filepath.Join(mdDir, "raid_disks")); ok {
if val, ok := utils.ReadUintFile(filepath.Join(mdDir, "raid_disks")); ok {
out.raidDisks = val
}
if val, ok := readUintFile(filepath.Join(mdDir, "degraded")); ok {
if val, ok := utils.ReadUintFile(filepath.Join(mdDir, "degraded")); ok {
out.degraded = val
}
if val, ok := readUintFile(filepath.Join(mdDir, "mismatch_cnt")); ok {
if val, ok := utils.ReadUintFile(filepath.Join(mdDir, "mismatch_cnt")); ok {
out.mismatchCnt = val
}
@@ -165,27 +163,35 @@ func readMdraidHealth(blockName string) (mdraidHealth, bool) {
return out, true
}
// mdraidSmartStatus maps md state/sync signals to a SMART-like status.
func mdraidSmartStatus(health mdraidHealth) string {
state := strings.ToLower(strings.TrimSpace(health.arrayState))
switch state {
case "inactive", "faulty", "broken", "stopped":
return "FAILED"
}
// During rebuild/recovery, arrays are often temporarily degraded; report as
// warning instead of hard failure while synchronization is in progress.
syncAction := strings.ToLower(strings.TrimSpace(health.syncAction))
switch syncAction {
case "resync", "recover", "reshape":
return "WARNING"
}
if health.degraded > 0 {
return "FAILED"
}
switch strings.ToLower(strings.TrimSpace(health.syncAction)) {
case "resync", "recover", "reshape", "check", "repair":
switch syncAction {
case "check", "repair":
return "WARNING"
}
if state == "clean" || state == "active" || state == "readonly" {
switch state {
case "clean", "active", "active-idle", "write-pending", "read-auto", "readonly":
return "PASSED"
}
return "UNKNOWN"
}
// isMdraidBlockName matches /dev/mdN-style block device names.
func isMdraidBlockName(name string) bool {
if !strings.HasPrefix(name, "md") {
return false
@@ -202,35 +208,23 @@ func isMdraidBlockName(name string) bool {
return true
}
func readUintFile(path string) (uint64, bool) {
raw, ok := readStringFileOK(path)
if !ok {
return 0, false
}
parsed, err := strconv.ParseUint(strings.TrimSpace(raw), 10, 64)
if err != nil {
return 0, false
}
return parsed, true
}
// readMdraidBlockCapacityBytes converts block size metadata into bytes.
func readMdraidBlockCapacityBytes(blockName, root string) (uint64, bool) {
sizePath := filepath.Join(root, "block", blockName, "size")
lbsPath := filepath.Join(root, "block", blockName, "queue", "logical_block_size")
sizeStr, ok := readStringFileOK(sizePath)
sizeStr, ok := utils.ReadStringFileOK(sizePath)
if !ok {
return 0, false
}
sectors, err := strconv.ParseUint(strings.TrimSpace(sizeStr), 10, 64)
sectors, err := strconv.ParseUint(sizeStr, 10, 64)
if err != nil || sectors == 0 {
return 0, false
}
lbsStr, ok := readStringFileOK(lbsPath)
logicalBlockSize := uint64(512)
if ok {
if parsed, err := strconv.ParseUint(strings.TrimSpace(lbsStr), 10, 64); err == nil && parsed > 0 {
if lbsStr, ok := utils.ReadStringFileOK(lbsPath); ok {
if parsed, err := strconv.ParseUint(lbsStr, 10, 64); err == nil && parsed > 0 {
logicalBlockSize = parsed
}
}

View File

@@ -85,6 +85,9 @@ func TestMdraidSmartStatus(t *testing.T) {
if got := mdraidSmartStatus(mdraidHealth{arrayState: "inactive"}); got != "FAILED" {
t.Fatalf("mdraidSmartStatus(inactive) = %q, want FAILED", got)
}
if got := mdraidSmartStatus(mdraidHealth{arrayState: "active", degraded: 1, syncAction: "recover"}); got != "WARNING" {
t.Fatalf("mdraidSmartStatus(degraded+recover) = %q, want WARNING", got)
}
if got := mdraidSmartStatus(mdraidHealth{arrayState: "active", degraded: 1}); got != "FAILED" {
t.Fatalf("mdraidSmartStatus(degraded) = %q, want FAILED", got)
}

View File

@@ -8,6 +8,7 @@ import (
"time"
"github.com/henrygd/beszel/agent/deltatracker"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/system"
psutilNet "github.com/shirou/gopsutil/v4/net"
)
@@ -94,7 +95,7 @@ func (a *Agent) initializeNetIoStats() {
a.netInterfaces = make(map[string]struct{}, 0)
// parse NICS env var for whitelist / blacklist
nicsEnvVal, nicsEnvExists := GetEnv("NICS")
nicsEnvVal, nicsEnvExists := utils.GetEnv("NICS")
var nicCfg *NicConfig
if nicsEnvExists {
nicCfg = newNicConfig(nicsEnvVal)
@@ -103,10 +104,7 @@ func (a *Agent) initializeNetIoStats() {
// get current network I/O stats and record valid interfaces
if netIO, err := psutilNet.IOCounters(true); err == nil {
for _, v := range netIO {
if nicsEnvExists && !isValidNic(v.Name, nicCfg) {
continue
}
if a.skipNetworkInterface(v) {
if skipNetworkInterface(v, nicCfg) {
continue
}
slog.Info("Detected network interface", "name", v.Name, "sent", v.BytesSent, "recv", v.BytesRecv)
@@ -215,10 +213,8 @@ func (a *Agent) applyNetworkTotals(
totalBytesSent, totalBytesRecv uint64,
bytesSentPerSecond, bytesRecvPerSecond uint64,
) {
networkSentPs := bytesToMegabytes(float64(bytesSentPerSecond))
networkRecvPs := bytesToMegabytes(float64(bytesRecvPerSecond))
if networkSentPs > 10_000 || networkRecvPs > 10_000 {
slog.Warn("Invalid net stats. Resetting.", "sent", networkSentPs, "recv", networkRecvPs)
if bytesSentPerSecond > 10_000_000_000 || bytesRecvPerSecond > 10_000_000_000 {
slog.Warn("Invalid net stats. Resetting.", "sent", bytesSentPerSecond, "recv", bytesRecvPerSecond)
for _, v := range netIO {
if _, exists := a.netInterfaces[v.Name]; !exists {
continue
@@ -228,21 +224,29 @@ func (a *Agent) applyNetworkTotals(
a.initializeNetIoStats()
delete(a.netIoStats, cacheTimeMs)
delete(a.netInterfaceDeltaTrackers, cacheTimeMs)
systemStats.NetworkSent = 0
systemStats.NetworkRecv = 0
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = 0, 0
return
}
systemStats.NetworkSent = networkSentPs
systemStats.NetworkRecv = networkRecvPs
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = bytesSentPerSecond, bytesRecvPerSecond
nis.BytesSent = totalBytesSent
nis.BytesRecv = totalBytesRecv
a.netIoStats[cacheTimeMs] = nis
}
func (a *Agent) skipNetworkInterface(v psutilNet.IOCountersStat) bool {
// skipNetworkInterface returns true if the network interface should be ignored.
func skipNetworkInterface(v psutilNet.IOCountersStat, nicCfg *NicConfig) bool {
if nicCfg != nil {
if !isValidNic(v.Name, nicCfg) {
return true
}
// In whitelist mode, we honor explicit inclusion without auto-filtering.
if !nicCfg.isBlacklist {
return false
}
// In blacklist mode, still apply the auto-filter below.
}
switch {
case strings.HasPrefix(v.Name, "lo"),
strings.HasPrefix(v.Name, "docker"),

View File

@@ -261,6 +261,39 @@ func TestNewNicConfig(t *testing.T) {
})
}
}
func TestSkipNetworkInterface(t *testing.T) {
tests := []struct {
name string
nic psutilNet.IOCountersStat
nicCfg *NicConfig
expectSkip bool
}{
{"loopback lo", psutilNet.IOCountersStat{Name: "lo", BytesSent: 100, BytesRecv: 100}, nil, true},
{"loopback lo0", psutilNet.IOCountersStat{Name: "lo0", BytesSent: 100, BytesRecv: 100}, nil, true},
{"docker prefix", psutilNet.IOCountersStat{Name: "docker0", BytesSent: 100, BytesRecv: 100}, nil, true},
{"br- prefix", psutilNet.IOCountersStat{Name: "br-lan", BytesSent: 100, BytesRecv: 100}, nil, true},
{"veth prefix", psutilNet.IOCountersStat{Name: "veth0abc", BytesSent: 100, BytesRecv: 100}, nil, true},
{"bond prefix", psutilNet.IOCountersStat{Name: "bond0", BytesSent: 100, BytesRecv: 100}, nil, true},
{"cali prefix", psutilNet.IOCountersStat{Name: "cali1234", BytesSent: 100, BytesRecv: 100}, nil, true},
{"zero BytesRecv", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 100, BytesRecv: 0}, nil, true},
{"zero BytesSent", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 0, BytesRecv: 100}, nil, true},
{"both zero", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 0, BytesRecv: 0}, nil, true},
{"normal eth0", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 100, BytesRecv: 200}, nil, false},
{"normal wlan0", psutilNet.IOCountersStat{Name: "wlan0", BytesSent: 1, BytesRecv: 1}, nil, false},
{"whitelist overrides skip (docker)", psutilNet.IOCountersStat{Name: "docker0", BytesSent: 100, BytesRecv: 100}, newNicConfig("docker0"), false},
{"whitelist overrides skip (lo)", psutilNet.IOCountersStat{Name: "lo", BytesSent: 100, BytesRecv: 100}, newNicConfig("lo"), false},
{"whitelist exclusion", psutilNet.IOCountersStat{Name: "eth1", BytesSent: 100, BytesRecv: 100}, newNicConfig("eth0"), true},
{"blacklist skip lo", psutilNet.IOCountersStat{Name: "lo", BytesSent: 100, BytesRecv: 100}, newNicConfig("-eth0"), true},
{"blacklist explicit eth0", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 100, BytesRecv: 100}, newNicConfig("-eth0"), true},
{"blacklist allow eth1", psutilNet.IOCountersStat{Name: "eth1", BytesSent: 100, BytesRecv: 100}, newNicConfig("-eth0"), false},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.expectSkip, skipNetworkInterface(tt.nic, tt.nicCfg))
})
}
}
func TestEnsureNetworkInterfacesMap(t *testing.T) {
var a Agent
var stats system.Stats
@@ -383,8 +416,6 @@ func TestApplyNetworkTotals(t *testing.T) {
totalBytesSent uint64
totalBytesRecv uint64
expectReset bool
expectedNetworkSent float64
expectedNetworkRecv float64
expectedBandwidthSent uint64
expectedBandwidthRecv uint64
}{
@@ -395,8 +426,6 @@ func TestApplyNetworkTotals(t *testing.T) {
totalBytesSent: 10000000,
totalBytesRecv: 20000000,
expectReset: false,
expectedNetworkSent: 0.95, // ~1 MB/s rounded to 2 decimals
expectedNetworkRecv: 1.91, // ~2 MB/s rounded to 2 decimals
expectedBandwidthSent: 1000000,
expectedBandwidthRecv: 2000000,
},
@@ -424,18 +453,6 @@ func TestApplyNetworkTotals(t *testing.T) {
totalBytesRecv: 20000000,
expectReset: true,
},
{
name: "Valid network stats - at threshold boundary",
bytesSentPerSecond: 10485750000, // ~9999.99 MB/s (rounds to 9999.99)
bytesRecvPerSecond: 10485750000, // ~9999.99 MB/s (rounds to 9999.99)
totalBytesSent: 10000000,
totalBytesRecv: 20000000,
expectReset: false,
expectedNetworkSent: 9999.99,
expectedNetworkRecv: 9999.99,
expectedBandwidthSent: 10485750000,
expectedBandwidthRecv: 10485750000,
},
{
name: "Zero values",
bytesSentPerSecond: 0,
@@ -443,8 +460,6 @@ func TestApplyNetworkTotals(t *testing.T) {
totalBytesSent: 0,
totalBytesRecv: 0,
expectReset: false,
expectedNetworkSent: 0.0,
expectedNetworkRecv: 0.0,
expectedBandwidthSent: 0,
expectedBandwidthRecv: 0,
},
@@ -481,14 +496,10 @@ func TestApplyNetworkTotals(t *testing.T) {
// Should have reset network tracking state - maps cleared and stats zeroed
assert.NotContains(t, a.netIoStats, cacheTimeMs, "cache entry should be cleared after reset")
assert.NotContains(t, a.netInterfaceDeltaTrackers, cacheTimeMs, "tracker should be cleared on reset")
assert.Zero(t, systemStats.NetworkSent)
assert.Zero(t, systemStats.NetworkRecv)
assert.Zero(t, systemStats.Bandwidth[0])
assert.Zero(t, systemStats.Bandwidth[1])
} else {
// Should have applied stats
assert.Equal(t, tt.expectedNetworkSent, systemStats.NetworkSent)
assert.Equal(t, tt.expectedNetworkRecv, systemStats.NetworkRecv)
assert.Equal(t, tt.expectedBandwidthSent, systemStats.Bandwidth[0])
assert.Equal(t, tt.expectedBandwidthRecv, systemStats.Bandwidth[1])

View File

@@ -10,6 +10,7 @@ import (
"strings"
"unicode/utf8"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/system"
"github.com/shirou/gopsutil/v4/common"
@@ -26,9 +27,9 @@ type SensorConfig struct {
}
func (a *Agent) newSensorConfig() *SensorConfig {
primarySensor, _ := GetEnv("PRIMARY_SENSOR")
sysSensors, _ := GetEnv("SYS_SENSORS")
sensorsEnvVal, sensorsSet := GetEnv("SENSORS")
primarySensor, _ := utils.GetEnv("PRIMARY_SENSOR")
sysSensors, _ := utils.GetEnv("SYS_SENSORS")
sensorsEnvVal, sensorsSet := utils.GetEnv("SENSORS")
skipCollection := sensorsSet && sensorsEnvVal == ""
return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, skipCollection)
@@ -135,7 +136,7 @@ func (a *Agent) updateTemperatures(systemStats *system.Stats) {
case sensorName:
a.systemInfo.DashboardTemp = sensor.Temperature
}
systemStats.Temperatures[sensorName] = twoDecimals(sensor.Temperature)
systemStats.Temperatures[sensorName] = utils.TwoDecimals(sensor.Temperature)
}
}

View File

@@ -12,6 +12,7 @@ import (
"time"
"github.com/henrygd/beszel"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/common"
"github.com/henrygd/beszel/internal/entities/system"
@@ -36,7 +37,7 @@ var hubVersions map[string]semver.Version
// and begins listening for connections. Returns an error if the server
// is already running or if there's an issue starting the server.
func (a *Agent) StartServer(opts ServerOptions) error {
if disableSSH, _ := GetEnv("DISABLE_SSH"); disableSSH == "true" {
if disableSSH, _ := utils.GetEnv("DISABLE_SSH"); disableSSH == "true" {
return errors.New("SSH disabled")
}
if a.server != nil {
@@ -238,11 +239,11 @@ func ParseKeys(input string) ([]gossh.PublicKey, error) {
// and finally defaults to ":45876".
func GetAddress(addr string) string {
if addr == "" {
addr, _ = GetEnv("LISTEN")
addr, _ = utils.GetEnv("LISTEN")
}
if addr == "" {
// Legacy PORT environment variable support
addr, _ = GetEnv("PORT")
addr, _ = utils.GetEnv("PORT")
}
if addr == "" {
return ":45876"
@@ -258,7 +259,7 @@ func GetAddress(addr string) string {
// It checks the NETWORK environment variable first, then infers from
// the address format: addresses starting with "/" are "unix", others are "tcp".
func GetNetwork(addr string) string {
if network, ok := GetEnv("NETWORK"); ok && network != "" {
if network, ok := utils.GetEnv("NETWORK"); ok && network != "" {
return network
}
if strings.HasPrefix(addr, "/") {

View File

@@ -18,6 +18,7 @@ import (
"sync"
"time"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/smart"
)
@@ -156,7 +157,7 @@ func (sm *SmartManager) ScanDevices(force bool) error {
currentDevices := sm.devicesSnapshot()
var configuredDevices []*DeviceInfo
if configuredRaw, ok := GetEnv("SMART_DEVICES"); ok {
if configuredRaw, ok := utils.GetEnv("SMART_DEVICES"); ok {
slog.Info("SMART_DEVICES", "value", configuredRaw)
config := strings.TrimSpace(configuredRaw)
if config == "" {
@@ -222,7 +223,7 @@ func (sm *SmartManager) ScanDevices(force bool) error {
}
func (sm *SmartManager) parseConfiguredDevices(config string) ([]*DeviceInfo, error) {
splitChar := os.Getenv("SMART_DEVICES_SEPARATOR")
splitChar, _ := utils.GetEnv("SMART_DEVICES_SEPARATOR")
if splitChar == "" {
splitChar = ","
}
@@ -260,7 +261,7 @@ func (sm *SmartManager) parseConfiguredDevices(config string) ([]*DeviceInfo, er
}
func (sm *SmartManager) refreshExcludedDevices() {
rawValue, _ := GetEnv("EXCLUDE_SMART")
rawValue, _ := utils.GetEnv("EXCLUDE_SMART")
sm.excludedDevices = make(map[string]struct{})
for entry := range strings.SplitSeq(rawValue, ",") {
@@ -870,15 +871,18 @@ func (sm *SmartManager) parseSmartForSata(output []byte) (bool, int) {
smartData.FirmwareVersion = data.FirmwareVersion
smartData.Capacity = data.UserCapacity.Bytes
smartData.Temperature = data.Temperature.Current
if smartData.Temperature == 0 {
if temp, ok := temperatureFromAtaDeviceStatistics(data.AtaDeviceStatistics); ok {
smartData.Temperature = temp
}
}
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
smartData.DiskName = data.Device.Name
smartData.DiskType = data.Device.Type
// get values from ata_device_statistics if necessary
var ataDeviceStats smart.AtaDeviceStatistics
if smartData.Temperature == 0 {
if temp := findAtaDeviceStatisticsValue(&data, &ataDeviceStats, 5, "Current Temperature", 0, 255); temp != nil {
smartData.Temperature = uint8(*temp)
}
}
// update SmartAttributes
smartData.Attributes = make([]*smart.SmartAttribute, 0, len(data.AtaSmartAttributes.Table))
for _, attr := range data.AtaSmartAttributes.Table {
@@ -913,23 +917,20 @@ func getSmartStatus(temperature uint8, passed bool) string {
}
}
func temperatureFromAtaDeviceStatistics(stats smart.AtaDeviceStatistics) (uint8, bool) {
entry := findAtaDeviceStatisticsEntry(stats, 5, "Current Temperature")
if entry == nil || entry.Value == nil {
return 0, false
}
if *entry.Value > 255 {
return 0, false
}
return uint8(*entry.Value), true
}
// findAtaDeviceStatisticsEntry centralizes ATA devstat lookups so additional
// metrics can be pulled from the same structure in the future.
func findAtaDeviceStatisticsEntry(stats smart.AtaDeviceStatistics, pageNumber uint8, entryName string) *smart.AtaDeviceStatisticsEntry {
for pageIdx := range stats.Pages {
page := &stats.Pages[pageIdx]
if page.Number != pageNumber {
func findAtaDeviceStatisticsValue(data *smart.SmartInfoForSata, ataDeviceStats *smart.AtaDeviceStatistics, entryNumber uint8, entryName string, minValue, maxValue int64) *int64 {
if len(ataDeviceStats.Pages) == 0 {
if len(data.AtaDeviceStatistics) == 0 {
return nil
}
if err := json.Unmarshal(data.AtaDeviceStatistics, ataDeviceStats); err != nil {
return nil
}
}
for pageIdx := range ataDeviceStats.Pages {
page := &ataDeviceStats.Pages[pageIdx]
if page.Number != entryNumber {
continue
}
for entryIdx := range page.Table {
@@ -937,7 +938,10 @@ func findAtaDeviceStatisticsEntry(stats smart.AtaDeviceStatistics, pageNumber ui
if !strings.EqualFold(entry.Name, entryName) {
continue
}
return entry
if entry.Value == nil || *entry.Value < minValue || *entry.Value > maxValue {
return nil
}
return entry.Value
}
}
return nil

View File

@@ -121,6 +121,78 @@ func TestParseSmartForSataDeviceStatisticsTemperature(t *testing.T) {
assert.Equal(t, uint8(22), deviceData.Temperature)
}
func TestParseSmartForSataAtaDeviceStatistics(t *testing.T) {
// tests that ata_device_statistics values are parsed correctly
jsonPayload := []byte(`{
"smartctl": {"exit_status": 0},
"device": {"name": "/dev/sdb", "type": "sat"},
"model_name": "SanDisk SSD U110 16GB",
"serial_number": "lksjfh23lhj",
"firmware_version": "U21B001",
"user_capacity": {"bytes": 16013942784},
"smart_status": {"passed": true},
"ata_smart_attributes": {"table": []},
"ata_device_statistics": {
"pages": [
{
"number": 5,
"name": "Temperature Statistics",
"table": [
{"name": "Current Temperature", "value": 43, "flags": {"valid": true}},
{"name": "Specified Minimum Operating Temperature", "value": -20, "flags": {"valid": true}}
]
}
]
}
}`)
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
hasData, exitStatus := sm.parseSmartForSata(jsonPayload)
require.True(t, hasData)
assert.Equal(t, 0, exitStatus)
deviceData, ok := sm.SmartDataMap["lksjfh23lhj"]
require.True(t, ok, "expected smart data entry for serial lksjfh23lhj")
assert.Equal(t, uint8(43), deviceData.Temperature)
}
func TestParseSmartForSataNegativeDeviceStatistics(t *testing.T) {
// Tests that negative values in ata_device_statistics (e.g. min operating temp)
// do not cause the entire SAT parser to fail.
jsonPayload := []byte(`{
"smartctl": {"exit_status": 0},
"device": {"name": "/dev/sdb", "type": "sat"},
"model_name": "SanDisk SSD U110 16GB",
"serial_number": "NEGATIVE123",
"firmware_version": "U21B001",
"user_capacity": {"bytes": 16013942784},
"smart_status": {"passed": true},
"temperature": {"current": 38},
"ata_smart_attributes": {"table": []},
"ata_device_statistics": {
"pages": [
{
"number": 5,
"name": "Temperature Statistics",
"table": [
{"name": "Current Temperature", "value": 38, "flags": {"valid": true}},
{"name": "Specified Minimum Operating Temperature", "value": -20, "flags": {"valid": true}}
]
}
]
}
}`)
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
hasData, exitStatus := sm.parseSmartForSata(jsonPayload)
require.True(t, hasData)
assert.Equal(t, 0, exitStatus)
deviceData, ok := sm.SmartDataMap["NEGATIVE123"]
require.True(t, ok, "expected smart data entry for serial NEGATIVE123")
assert.Equal(t, uint8(38), deviceData.Temperature)
}
func TestParseSmartForSataParentheticalRawValue(t *testing.T) {
jsonPayload := []byte(`{
"smartctl": {"exit_status": 0},
@@ -727,6 +799,182 @@ func TestIsVirtualDeviceScsi(t *testing.T) {
}
}
func TestFindAtaDeviceStatisticsValue(t *testing.T) {
val42 := int64(42)
val100 := int64(100)
valMinus20 := int64(-20)
tests := []struct {
name string
data smart.SmartInfoForSata
ataDeviceStats smart.AtaDeviceStatistics
entryNumber uint8
entryName string
minValue int64
maxValue int64
expectedValue *int64
}{
{
name: "value in ataDeviceStats",
ataDeviceStats: smart.AtaDeviceStatistics{
Pages: []smart.AtaDeviceStatisticsPage{
{
Number: 5,
Table: []smart.AtaDeviceStatisticsEntry{
{Name: "Current Temperature", Value: &val42},
},
},
},
},
entryNumber: 5,
entryName: "Current Temperature",
minValue: 0,
maxValue: 100,
expectedValue: &val42,
},
{
name: "value unmarshaled from data",
data: smart.SmartInfoForSata{
AtaDeviceStatistics: []byte(`{"pages":[{"number":5,"table":[{"name":"Current Temperature","value":100}]}]}`),
},
entryNumber: 5,
entryName: "Current Temperature",
minValue: 0,
maxValue: 255,
expectedValue: &val100,
},
{
name: "value out of range (too high)",
ataDeviceStats: smart.AtaDeviceStatistics{
Pages: []smart.AtaDeviceStatisticsPage{
{
Number: 5,
Table: []smart.AtaDeviceStatisticsEntry{
{Name: "Current Temperature", Value: &val100},
},
},
},
},
entryNumber: 5,
entryName: "Current Temperature",
minValue: 0,
maxValue: 50,
expectedValue: nil,
},
{
name: "value out of range (too low)",
ataDeviceStats: smart.AtaDeviceStatistics{
Pages: []smart.AtaDeviceStatisticsPage{
{
Number: 5,
Table: []smart.AtaDeviceStatisticsEntry{
{Name: "Min Temp", Value: &valMinus20},
},
},
},
},
entryNumber: 5,
entryName: "Min Temp",
minValue: 0,
maxValue: 100,
expectedValue: nil,
},
{
name: "no statistics available",
data: smart.SmartInfoForSata{},
entryNumber: 5,
entryName: "Current Temperature",
minValue: 0,
maxValue: 255,
expectedValue: nil,
},
{
name: "wrong page number",
ataDeviceStats: smart.AtaDeviceStatistics{
Pages: []smart.AtaDeviceStatisticsPage{
{
Number: 1,
Table: []smart.AtaDeviceStatisticsEntry{
{Name: "Current Temperature", Value: &val42},
},
},
},
},
entryNumber: 5,
entryName: "Current Temperature",
minValue: 0,
maxValue: 100,
expectedValue: nil,
},
{
name: "wrong entry name",
ataDeviceStats: smart.AtaDeviceStatistics{
Pages: []smart.AtaDeviceStatisticsPage{
{
Number: 5,
Table: []smart.AtaDeviceStatisticsEntry{
{Name: "Other Stat", Value: &val42},
},
},
},
},
entryNumber: 5,
entryName: "Current Temperature",
minValue: 0,
maxValue: 100,
expectedValue: nil,
},
{
name: "case insensitive name match",
ataDeviceStats: smart.AtaDeviceStatistics{
Pages: []smart.AtaDeviceStatisticsPage{
{
Number: 5,
Table: []smart.AtaDeviceStatisticsEntry{
{Name: "CURRENT TEMPERATURE", Value: &val42},
},
},
},
},
entryNumber: 5,
entryName: "Current Temperature",
minValue: 0,
maxValue: 100,
expectedValue: &val42,
},
{
name: "entry value is nil",
ataDeviceStats: smart.AtaDeviceStatistics{
Pages: []smart.AtaDeviceStatisticsPage{
{
Number: 5,
Table: []smart.AtaDeviceStatisticsEntry{
{Name: "Current Temperature", Value: nil},
},
},
},
},
entryNumber: 5,
entryName: "Current Temperature",
minValue: 0,
maxValue: 100,
expectedValue: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := findAtaDeviceStatisticsValue(&tt.data, &tt.ataDeviceStats, tt.entryNumber, tt.entryName, tt.minValue, tt.maxValue)
if tt.expectedValue == nil {
assert.Nil(t, result)
} else {
require.NotNil(t, result)
assert.Equal(t, *tt.expectedValue, *result)
}
})
}
}
func TestRefreshExcludedDevices(t *testing.T) {
tests := []struct {
name string

View File

@@ -12,6 +12,7 @@ import (
"github.com/henrygd/beszel"
"github.com/henrygd/beszel/agent/battery"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/agent/zfs"
"github.com/henrygd/beszel/internal/entities/container"
"github.com/henrygd/beszel/internal/entities/system"
@@ -127,13 +128,13 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
// cpu metrics
cpuMetrics, err := getCpuMetrics(cacheTimeMs)
if err == nil {
systemStats.Cpu = twoDecimals(cpuMetrics.Total)
systemStats.Cpu = utils.TwoDecimals(cpuMetrics.Total)
systemStats.CpuBreakdown = []float64{
twoDecimals(cpuMetrics.User),
twoDecimals(cpuMetrics.System),
twoDecimals(cpuMetrics.Iowait),
twoDecimals(cpuMetrics.Steal),
twoDecimals(cpuMetrics.Idle),
utils.TwoDecimals(cpuMetrics.User),
utils.TwoDecimals(cpuMetrics.System),
utils.TwoDecimals(cpuMetrics.Iowait),
utils.TwoDecimals(cpuMetrics.Steal),
utils.TwoDecimals(cpuMetrics.Idle),
}
} else {
slog.Error("Error getting cpu metrics", "err", err)
@@ -157,8 +158,8 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
// memory
if v, err := mem.VirtualMemory(); err == nil {
// swap
systemStats.Swap = bytesToGigabytes(v.SwapTotal)
systemStats.SwapUsed = bytesToGigabytes(v.SwapTotal - v.SwapFree - v.SwapCached)
systemStats.Swap = utils.BytesToGigabytes(v.SwapTotal)
systemStats.SwapUsed = utils.BytesToGigabytes(v.SwapTotal - v.SwapFree - v.SwapCached)
// cache + buffers value for default mem calculation
// note: gopsutil automatically adds SReclaimable to v.Cached
cacheBuff := v.Cached + v.Buffers - v.Shared
@@ -181,13 +182,13 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
if arcSize, _ := zfs.ARCSize(); arcSize > 0 && arcSize < v.Used {
v.Used = v.Used - arcSize
v.UsedPercent = float64(v.Used) / float64(v.Total) * 100.0
systemStats.MemZfsArc = bytesToGigabytes(arcSize)
systemStats.MemZfsArc = utils.BytesToGigabytes(arcSize)
}
}
systemStats.Mem = bytesToGigabytes(v.Total)
systemStats.MemBuffCache = bytesToGigabytes(cacheBuff)
systemStats.MemUsed = bytesToGigabytes(v.Used)
systemStats.MemPct = twoDecimals(v.UsedPercent)
systemStats.Mem = utils.BytesToGigabytes(v.Total)
systemStats.MemBuffCache = utils.BytesToGigabytes(cacheBuff)
systemStats.MemUsed = utils.BytesToGigabytes(v.Used)
systemStats.MemPct = utils.TwoDecimals(v.UsedPercent)
}
// disk usage

View File

@@ -15,6 +15,7 @@ import (
"time"
"github.com/coreos/go-systemd/v22/dbus"
"github.com/henrygd/beszel/agent/utils"
"github.com/henrygd/beszel/internal/entities/systemd"
)
@@ -49,7 +50,7 @@ func isSystemdAvailable() bool {
// newSystemdManager creates a new systemdManager.
func newSystemdManager() (*systemdManager, error) {
if skipSystemd, _ := GetEnv("SKIP_SYSTEMD"); skipSystemd == "true" {
if skipSystemd, _ := utils.GetEnv("SKIP_SYSTEMD"); skipSystemd == "true" {
return nil, nil
}
@@ -294,7 +295,7 @@ func unescapeServiceName(name string) string {
// otherwise defaults to "*service".
func getServicePatterns() []string {
patterns := []string{}
if envPatterns, _ := GetEnv("SERVICE_PATTERNS"); envPatterns != "" {
if envPatterns, _ := utils.GetEnv("SERVICE_PATTERNS"); envPatterns != "" {
for pattern := range strings.SplitSeq(envPatterns, ",") {
pattern = strings.TrimSpace(pattern)
if pattern == "" {

View File

@@ -1,15 +0,0 @@
package agent
import "math"
func bytesToMegabytes(b float64) float64 {
return twoDecimals(b / 1048576)
}
func bytesToGigabytes(b uint64) float64 {
return twoDecimals(float64(b) / 1073741824)
}
func twoDecimals(value float64) float64 {
return math.Round(value*100) / 100
}

88
agent/utils/utils.go Normal file
View File

@@ -0,0 +1,88 @@
package utils
import (
"io"
"math"
"os"
"strconv"
"strings"
)
// GetEnv retrieves an environment variable with a "BESZEL_AGENT_" prefix, or falls back to the unprefixed key.
func GetEnv(key string) (value string, exists bool) {
if value, exists = os.LookupEnv("BESZEL_AGENT_" + key); exists {
return value, exists
}
return os.LookupEnv(key)
}
// BytesToMegabytes converts bytes to megabytes and rounds to two decimal places.
func BytesToMegabytes(b float64) float64 {
return TwoDecimals(b / 1048576)
}
// BytesToGigabytes converts bytes to gigabytes and rounds to two decimal places.
func BytesToGigabytes(b uint64) float64 {
return TwoDecimals(float64(b) / 1073741824)
}
// TwoDecimals rounds a float64 value to two decimal places.
func TwoDecimals(value float64) float64 {
return math.Round(value*100) / 100
}
// func RoundFloat(val float64, precision uint) float64 {
// ratio := math.Pow(10, float64(precision))
// return math.Round(val*ratio) / ratio
// }
// ReadStringFile returns trimmed file contents or empty string on error.
func ReadStringFile(path string) string {
content, _ := ReadStringFileOK(path)
return content
}
// ReadStringFileOK returns trimmed file contents and read success.
func ReadStringFileOK(path string) (string, bool) {
b, err := os.ReadFile(path)
if err != nil {
return "", false
}
return strings.TrimSpace(string(b)), true
}
// ReadStringFileLimited reads a file into a string with a maximum size (in bytes) to avoid
// allocating large buffers and potential panics with pseudo-files when the size is misreported.
func ReadStringFileLimited(path string, maxSize int) (string, error) {
f, err := os.Open(path)
if err != nil {
return "", err
}
defer f.Close()
buf := make([]byte, maxSize)
n, err := f.Read(buf)
if err != nil && err != io.EOF {
return "", err
}
return strings.TrimSpace(string(buf[:n])), nil
}
// FileExists reports whether the given path exists.
func FileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}
// ReadUintFile parses a decimal uint64 value from a file.
func ReadUintFile(path string) (uint64, bool) {
raw, ok := ReadStringFileOK(path)
if !ok {
return 0, false
}
parsed, err := strconv.ParseUint(raw, 10, 64)
if err != nil {
return 0, false
}
return parsed, true
}
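
Of the helpers above, ReadStringFileLimited is the only one not exercised by the test file that follows. Below is a minimal usage sketch, assuming the module import path shown earlier; the sysfs path and the 4 KiB cap are illustrative, not taken from the repo:

```go
package main

import (
	"fmt"

	"github.com/henrygd/beszel/agent/utils"
)

func main() {
	// Pseudo-files under /sys can misreport their size, so the read is capped
	// instead of trusting a size-based allocation (path is an example only).
	val, err := utils.ReadStringFileLimited("/sys/class/drm/card0/device/vendor", 4096)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("raw value:", val) // trimmed contents, at most 4096 bytes
}
```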

165
agent/utils/utils_test.go Normal file
View File

@@ -0,0 +1,165 @@
package utils
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
func TestTwoDecimals(t *testing.T) {
tests := []struct {
name string
input float64
expected float64
}{
{"round down", 1.234, 1.23},
{"round half up", 1.235, 1.24}, // math.Round rounds half up
{"no rounding needed", 1.23, 1.23},
{"negative number", -1.235, -1.24}, // math.Round rounds half up (more negative)
{"zero", 0.0, 0.0},
{"large number", 123.456, 123.46}, // rounds 5 up
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := TwoDecimals(tt.input)
assert.Equal(t, tt.expected, result)
})
}
}
func TestBytesToMegabytes(t *testing.T) {
tests := []struct {
name string
input float64
expected float64
}{
{"1 MB", 1048576, 1.0},
{"512 KB", 524288, 0.5},
{"zero", 0, 0},
{"large value", 1073741824, 1024}, // 1 GB = 1024 MB
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := BytesToMegabytes(tt.input)
assert.Equal(t, tt.expected, result)
})
}
}
func TestBytesToGigabytes(t *testing.T) {
tests := []struct {
name string
input uint64
expected float64
}{
{"1 GB", 1073741824, 1.0},
{"512 MB", 536870912, 0.5},
{"0 GB", 0, 0},
{"2 GB", 2147483648, 2.0},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := BytesToGigabytes(tt.input)
assert.Equal(t, tt.expected, result)
})
}
}
func TestFileFunctions(t *testing.T) {
tmpDir := t.TempDir()
testFilePath := filepath.Join(tmpDir, "test.txt")
testContent := "hello world"
// Test FileExists (false)
assert.False(t, FileExists(testFilePath))
// Test ReadStringFileOK (false)
content, ok := ReadStringFileOK(testFilePath)
assert.False(t, ok)
assert.Empty(t, content)
// Test ReadStringFile (empty)
assert.Empty(t, ReadStringFile(testFilePath))
// Write file
err := os.WriteFile(testFilePath, []byte(testContent+"\n "), 0644)
assert.NoError(t, err)
// Test FileExists (true)
assert.True(t, FileExists(testFilePath))
// Test ReadStringFileOK (true)
content, ok = ReadStringFileOK(testFilePath)
assert.True(t, ok)
assert.Equal(t, testContent, content)
// Test ReadStringFile (content)
assert.Equal(t, testContent, ReadStringFile(testFilePath))
}
func TestReadUintFile(t *testing.T) {
tmpDir := t.TempDir()
t.Run("valid uint", func(t *testing.T) {
path := filepath.Join(tmpDir, "uint.txt")
os.WriteFile(path, []byte(" 12345\n"), 0644)
val, ok := ReadUintFile(path)
assert.True(t, ok)
assert.Equal(t, uint64(12345), val)
})
t.Run("invalid uint", func(t *testing.T) {
path := filepath.Join(tmpDir, "invalid.txt")
os.WriteFile(path, []byte("abc"), 0644)
val, ok := ReadUintFile(path)
assert.False(t, ok)
assert.Equal(t, uint64(0), val)
})
t.Run("missing file", func(t *testing.T) {
path := filepath.Join(tmpDir, "missing.txt")
val, ok := ReadUintFile(path)
assert.False(t, ok)
assert.Equal(t, uint64(0), val)
})
}
func TestGetEnv(t *testing.T) {
key := "TEST_VAR"
prefixedKey := "BESZEL_AGENT_" + key
t.Run("prefixed variable exists", func(t *testing.T) {
os.Setenv(prefixedKey, "prefixed_val")
os.Setenv(key, "unprefixed_val")
defer os.Unsetenv(prefixedKey)
defer os.Unsetenv(key)
val, exists := GetEnv(key)
assert.True(t, exists)
assert.Equal(t, "prefixed_val", val)
})
t.Run("only unprefixed variable exists", func(t *testing.T) {
os.Unsetenv(prefixedKey)
os.Setenv(key, "unprefixed_val")
defer os.Unsetenv(key)
val, exists := GetEnv(key)
assert.True(t, exists)
assert.Equal(t, "unprefixed_val", val)
})
t.Run("neither variable exists", func(t *testing.T) {
os.Unsetenv(prefixedKey)
os.Unsetenv(key)
val, exists := GetEnv(key)
assert.False(t, exists)
assert.Empty(t, val)
})
}

2
go.mod
View File

@@ -1,6 +1,6 @@
module github.com/henrygd/beszel
go 1.26.0
go 1.26.1
require (
github.com/blang/semver v3.5.1+incompatible

View File

@@ -21,8 +21,7 @@ type hubLike interface {
type AlertManager struct {
hub hubLike
alertQueue chan alertTask
stopChan chan struct{}
stopOnce sync.Once
pendingAlerts sync.Map
}
@@ -40,16 +39,22 @@ type UserNotificationSettings struct {
Webhooks []string `json:"webhooks"`
}
type SystemAlertFsStats struct {
DiskTotal float64 `json:"d"`
DiskUsed float64 `json:"du"`
}
// Values pulled from system_stats.stats that are relevant to alerts.
type SystemAlertStats struct {
Cpu float64 `json:"cpu"`
Mem float64 `json:"mp"`
Disk float64 `json:"dp"`
NetSent float64 `json:"ns"`
NetRecv float64 `json:"nr"`
Bandwidth [2]uint64 `json:"b"`
GPU map[string]SystemAlertGPUData `json:"g"`
Temperatures map[string]float32 `json:"t"`
LoadAvg [3]float64 `json:"la"`
Battery [2]uint8 `json:"bat"`
ExtraFs map[string]SystemAlertFsStats `json:"efs"`
}
type SystemAlertGPUData struct {
@@ -92,12 +97,9 @@ var supportsTitle = map[string]struct{}{
// NewAlertManager creates a new AlertManager instance.
func NewAlertManager(app hubLike) *AlertManager {
am := &AlertManager{
hub: app,
alertQueue: make(chan alertTask, 5),
stopChan: make(chan struct{}),
hub: app,
}
am.bindEvents()
go am.startWorker()
return am
}
@@ -106,6 +108,16 @@ func (am *AlertManager) bindEvents() {
am.hub.OnRecordAfterUpdateSuccess("alerts").BindFunc(updateHistoryOnAlertUpdate)
am.hub.OnRecordAfterDeleteSuccess("alerts").BindFunc(resolveHistoryOnAlertDelete)
am.hub.OnRecordAfterUpdateSuccess("smart_devices").BindFunc(am.handleSmartDeviceAlert)
am.hub.OnServe().BindFunc(func(e *core.ServeEvent) error {
if err := resolveStatusAlerts(e.App); err != nil {
e.App.Logger().Error("Failed to resolve stale status alerts", "err", err)
}
if err := am.restorePendingStatusAlerts(); err != nil {
e.App.Logger().Error("Failed to restore pending status alerts", "err", err)
}
return e.Next()
})
}
// IsNotificationSilenced checks if a notification should be silenced based on configured quiet hours
@@ -259,13 +271,14 @@ func (am *AlertManager) SendShoutrrrAlert(notificationUrl, title, message, link,
}
// Add link
if scheme == "ntfy" {
switch scheme {
case "ntfy":
queryParams.Add("Actions", fmt.Sprintf("view, %s, %s", linkText, link))
} else if scheme == "lark" {
case "lark":
queryParams.Add("link", link)
} else if scheme == "bark" {
case "bark":
queryParams.Add("url", link)
} else {
default:
message += "\n\n" + link
}

View File

@@ -0,0 +1,155 @@
//go:build testing
package alerts_test
import (
"encoding/json"
"testing"
"time"
"github.com/henrygd/beszel/internal/entities/system"
beszelTests "github.com/henrygd/beszel/internal/tests"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/tools/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDiskAlertExtraFsMultiMinute tests that multi-minute disk alerts correctly use
// historical per-minute values for extra (non-root) filesystems, not the current live snapshot.
func TestDiskAlertExtraFsMultiMinute(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
require.NoError(t, err)
systemRecord := systems[0]
// Disk alert: threshold 80%, min=2 (requires historical averaging)
diskAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
"name": "Disk",
"system": systemRecord.Id,
"user": user.Id,
"value": 80, // threshold: 80%
"min": 2, // 2 minutes - requires historical averaging
})
require.NoError(t, err)
assert.False(t, diskAlert.GetBool("triggered"), "Alert should not be triggered initially")
am := hub.GetAlertManager()
now := time.Now().UTC()
extraFsHigh := map[string]*system.FsStats{
"/mnt/data": {DiskTotal: 1000, DiskUsed: 920}, // 92% - above threshold
}
// Insert 4 historical records spread over 3 minutes (same pattern as battery tests).
// The oldest record must predate (now - 2min) so the alert time window is valid.
recordTimes := []time.Duration{
-180 * time.Second, // 3 min ago - anchors oldest record before alert.time
-90 * time.Second,
-60 * time.Second,
-30 * time.Second,
}
for _, offset := range recordTimes {
stats := system.Stats{
DiskPct: 30, // root disk at 30% - below threshold
ExtraFs: extraFsHigh,
}
statsJSON, _ := json.Marshal(stats)
recordTime := now.Add(offset)
record, err := beszelTests.CreateRecord(hub, "system_stats", map[string]any{
"system": systemRecord.Id,
"type": "1m",
"stats": string(statsJSON),
})
require.NoError(t, err)
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
err = hub.SaveNoValidate(record)
require.NoError(t, err)
}
combinedDataHigh := &system.CombinedData{
Stats: system.Stats{
DiskPct: 30,
ExtraFs: extraFsHigh,
},
Info: system.Info{
DiskPct: 30,
},
}
systemRecord.Set("updated", now)
err = hub.SaveNoValidate(systemRecord)
require.NoError(t, err)
err = am.HandleSystemAlerts(systemRecord, combinedDataHigh)
require.NoError(t, err)
time.Sleep(20 * time.Millisecond)
diskAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": diskAlert.Id})
require.NoError(t, err)
assert.True(t, diskAlert.GetBool("triggered"),
"Alert SHOULD be triggered when extra disk average (92%%) exceeds threshold (80%%)")
// --- Resolution: extra disk drops to 50%, alert should resolve ---
extraFsLow := map[string]*system.FsStats{
"/mnt/data": {DiskTotal: 1000, DiskUsed: 500}, // 50% - below threshold
}
newNow := now.Add(2 * time.Minute)
recordTimesLow := []time.Duration{
-180 * time.Second,
-90 * time.Second,
-60 * time.Second,
-30 * time.Second,
}
for _, offset := range recordTimesLow {
stats := system.Stats{
DiskPct: 30,
ExtraFs: extraFsLow,
}
statsJSON, _ := json.Marshal(stats)
recordTime := newNow.Add(offset)
record, err := beszelTests.CreateRecord(hub, "system_stats", map[string]any{
"system": systemRecord.Id,
"type": "1m",
"stats": string(statsJSON),
})
require.NoError(t, err)
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
err = hub.SaveNoValidate(record)
require.NoError(t, err)
}
combinedDataLow := &system.CombinedData{
Stats: system.Stats{
DiskPct: 30,
ExtraFs: extraFsLow,
},
Info: system.Info{
DiskPct: 30,
},
}
systemRecord.Set("updated", newNow)
err = hub.SaveNoValidate(systemRecord)
require.NoError(t, err)
err = am.HandleSystemAlerts(systemRecord, combinedDataLow)
require.NoError(t, err)
time.Sleep(20 * time.Millisecond)
diskAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": diskAlert.Id})
require.NoError(t, err)
assert.False(t, diskAlert.GetBool("triggered"),
"Alert should be resolved when extra disk average (50%%) drops below threshold (80%%)")
}

View File

@@ -49,7 +49,7 @@ func TestAlertSilencedOneTime(t *testing.T) {
// Get alert manager
am := alerts.NewAlertManager(hub)
defer am.StopWorker()
defer am.Stop()
// Test that alert is silenced
silenced := am.IsNotificationSilenced(user.Id, system.Id)
@@ -106,7 +106,7 @@ func TestAlertSilencedDaily(t *testing.T) {
// Get alert manager
am := alerts.NewAlertManager(hub)
defer am.StopWorker()
defer am.Stop()
// Get current hour and create a window that includes current time
now := time.Now().UTC()
@@ -170,7 +170,7 @@ func TestAlertSilencedDailyMidnightCrossing(t *testing.T) {
// Get alert manager
am := alerts.NewAlertManager(hub)
defer am.StopWorker()
defer am.Stop()
// Create a window that crosses midnight: 22:00 - 02:00
startTime := time.Date(2000, 1, 1, 22, 0, 0, 0, time.UTC)
@@ -211,7 +211,7 @@ func TestAlertSilencedGlobal(t *testing.T) {
// Get alert manager
am := alerts.NewAlertManager(hub)
defer am.StopWorker()
defer am.Stop()
// Create a global quiet hours window (no system specified)
now := time.Now().UTC()
@@ -250,7 +250,7 @@ func TestAlertSilencedSystemSpecific(t *testing.T) {
// Get alert manager
am := alerts.NewAlertManager(hub)
defer am.StopWorker()
defer am.Stop()
// Create a system-specific quiet hours window for system1 only
now := time.Now().UTC()
@@ -296,7 +296,7 @@ func TestAlertSilencedMultiUser(t *testing.T) {
// Get alert manager
am := alerts.NewAlertManager(hub)
defer am.StopWorker()
defer am.Stop()
// Create a quiet hours window for user1 only
now := time.Now().UTC()
@@ -417,7 +417,7 @@ func TestAlertSilencedNoWindows(t *testing.T) {
// Get alert manager
am := alerts.NewAlertManager(hub)
defer am.StopWorker()
defer am.Stop()
// Without any quiet hours windows, alert should NOT be silenced
silenced := am.IsNotificationSilenced(user.Id, system.Id)

View File

@@ -9,63 +9,25 @@ import (
"github.com/pocketbase/pocketbase/core"
)
type alertTask struct {
action string // "schedule" or "cancel"
systemName string
alertRecord *core.Record
delay time.Duration
}
type alertInfo struct {
systemName string
alertRecord *core.Record
expireTime time.Time
timer *time.Timer
}
// startWorker is a long-running goroutine that processes alert tasks
// every x seconds. It must be running to process status alerts.
func (am *AlertManager) startWorker() {
processPendingAlerts := time.Tick(15 * time.Second)
// check for status alerts that are not resolved when system comes up
// (can be removed if we figure out core bug in #1052)
checkStatusAlerts := time.Tick(561 * time.Second)
for {
select {
case <-am.stopChan:
return
case task := <-am.alertQueue:
switch task.action {
case "schedule":
am.pendingAlerts.Store(task.alertRecord.Id, &alertInfo{
systemName: task.systemName,
alertRecord: task.alertRecord,
expireTime: time.Now().Add(task.delay),
})
case "cancel":
am.pendingAlerts.Delete(task.alertRecord.Id)
// Stop cancels all pending status alert timers.
func (am *AlertManager) Stop() {
am.stopOnce.Do(func() {
am.pendingAlerts.Range(func(key, value any) bool {
info := value.(*alertInfo)
if info.timer != nil {
info.timer.Stop()
}
case <-checkStatusAlerts:
resolveStatusAlerts(am.hub)
case <-processPendingAlerts:
// Check for expired alerts every tick
now := time.Now()
for key, value := range am.pendingAlerts.Range {
info := value.(*alertInfo)
if now.After(info.expireTime) {
// Downtime delay has passed, process alert
am.sendStatusAlert("down", info.systemName, info.alertRecord)
am.pendingAlerts.Delete(key)
}
}
}
}
}
// StopWorker shuts down the AlertManager.worker goroutine
func (am *AlertManager) StopWorker() {
close(am.stopChan)
am.pendingAlerts.Delete(key)
return true
})
})
}
// HandleStatusAlerts manages the logic when system status changes.
@@ -103,44 +65,82 @@ func (am *AlertManager) getSystemStatusAlerts(systemID string) ([]*core.Record,
return alertRecords, nil
}
// Schedules delayed "down" alerts for each alert record.
// handleSystemDown manages the logic when a system status changes to "down". It schedules pending alerts for each alert record.
func (am *AlertManager) handleSystemDown(systemName string, alertRecords []*core.Record) {
for _, alertRecord := range alertRecords {
// Continue if alert is already scheduled
if _, exists := am.pendingAlerts.Load(alertRecord.Id); exists {
continue
}
// Schedule by adding to queue
min := max(1, alertRecord.GetInt("min"))
am.alertQueue <- alertTask{
action: "schedule",
systemName: systemName,
alertRecord: alertRecord,
delay: time.Duration(min) * time.Minute,
}
am.schedulePendingStatusAlert(systemName, alertRecord, time.Duration(min)*time.Minute)
}
}
// schedulePendingStatusAlert sets up a timer to send a "down" alert after the specified delay if the system is still down.
// It returns true if the alert was scheduled, or false if an alert was already pending for the given alert record.
func (am *AlertManager) schedulePendingStatusAlert(systemName string, alertRecord *core.Record, delay time.Duration) bool {
alert := &alertInfo{
systemName: systemName,
alertRecord: alertRecord,
expireTime: time.Now().Add(delay),
}
storedAlert, loaded := am.pendingAlerts.LoadOrStore(alertRecord.Id, alert)
if loaded {
return false
}
stored := storedAlert.(*alertInfo)
stored.timer = time.AfterFunc(time.Until(stored.expireTime), func() {
am.processPendingAlert(alertRecord.Id)
})
return true
}
// handleSystemUp manages the logic when a system status changes to "up".
// It cancels any pending alerts and sends "up" alerts.
func (am *AlertManager) handleSystemUp(systemName string, alertRecords []*core.Record) {
for _, alertRecord := range alertRecords {
alertRecordID := alertRecord.Id
// If alert exists for record, delete and continue (down alert not sent)
if _, exists := am.pendingAlerts.Load(alertRecordID); exists {
am.alertQueue <- alertTask{
action: "cancel",
alertRecord: alertRecord,
}
if am.cancelPendingAlert(alertRecord.Id) {
continue
}
if !alertRecord.GetBool("triggered") {
continue
}
// No alert scheduled for this record, send "up" alert
if err := am.sendStatusAlert("up", systemName, alertRecord); err != nil {
am.hub.Logger().Error("Failed to send alert", "err", err)
}
}
}
// cancelPendingAlert stops the timer and removes the pending alert for the given alert ID. Returns true if a pending alert was found and cancelled.
func (am *AlertManager) cancelPendingAlert(alertID string) bool {
value, loaded := am.pendingAlerts.LoadAndDelete(alertID)
if !loaded {
return false
}
info := value.(*alertInfo)
if info.timer != nil {
info.timer.Stop()
}
return true
}
// processPendingAlert sends a "down" alert if the pending alert has expired and the system is still down.
func (am *AlertManager) processPendingAlert(alertID string) {
value, loaded := am.pendingAlerts.LoadAndDelete(alertID)
if !loaded {
return
}
info := value.(*alertInfo)
if info.alertRecord.GetBool("triggered") {
return
}
if err := am.sendStatusAlert("down", info.systemName, info.alertRecord); err != nil {
am.hub.Logger().Error("Failed to send alert", "err", err)
}
}
// sendStatusAlert sends a status alert ("up" or "down") to the users associated with the alert records.
func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, alertRecord *core.Record) error {
switch alertStatus {
@@ -174,8 +174,8 @@ func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, a
})
}
// resolveStatusAlerts resolves any status alerts that weren't resolved
// when system came up (https://github.com/henrygd/beszel/issues/1052)
// resolveStatusAlerts resolves any triggered status alerts that weren't resolved
// when system came up (https://github.com/henrygd/beszel/issues/1052).
func resolveStatusAlerts(app core.App) error {
db := app.DB()
// Find all active status alerts where the system is actually up
@@ -205,3 +205,36 @@ func resolveStatusAlerts(app core.App) error {
}
return nil
}
// restorePendingStatusAlerts re-queues untriggered status alerts for systems that
// are still down after a hub restart. This rebuilds the lost in-memory timer state.
func (am *AlertManager) restorePendingStatusAlerts() error {
type pendingStatusAlert struct {
AlertID string `db:"alert_id"`
SystemName string `db:"system_name"`
}
var pending []pendingStatusAlert
err := am.hub.DB().NewQuery(`
SELECT a.id AS alert_id, s.name AS system_name
FROM alerts a
JOIN systems s ON a.system = s.id
WHERE a.name = 'Status'
AND a.triggered = false
AND s.status = 'down'
`).All(&pending)
if err != nil {
return err
}
for _, item := range pending {
alertRecord, err := am.hub.FindRecordById("alerts", item.AlertID)
if err != nil {
return err
}
min := max(1, alertRecord.GetInt("min"))
am.schedulePendingStatusAlert(item.SystemName, alertRecord, time.Duration(min)*time.Minute)
}
return nil
}
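
Taken together, schedulePendingStatusAlert, cancelPendingAlert, and processPendingAlert reduce to a LoadOrStore-guarded sync.Map entry plus one time.AfterFunc timer per alert. A self-contained sketch of that pattern follows; the key names, delay, and callbacks are illustrative and are not hub code:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// pending mirrors the idea of alertInfo: the map entry owns the timer.
type pending struct {
	timer *time.Timer
}

var pendingTimers sync.Map

// schedule registers a delayed callback once per key; later calls are no-ops.
func schedule(key string, delay time.Duration, fire func()) bool {
	p := &pending{}
	if _, loaded := pendingTimers.LoadOrStore(key, p); loaded {
		return false // a down alert is already pending for this key
	}
	p.timer = time.AfterFunc(delay, func() {
		if _, ok := pendingTimers.LoadAndDelete(key); ok {
			fire() // still pending after the delay, so deliver
		}
	})
	return true
}

// cancel stops the timer if the callback has not claimed the entry yet.
func cancel(key string) bool {
	v, ok := pendingTimers.LoadAndDelete(key)
	if !ok {
		return false
	}
	if t := v.(*pending).timer; t != nil {
		t.Stop()
	}
	return true
}

func main() {
	schedule("sys-1", 50*time.Millisecond, func() { fmt.Println("down alert for sys-1") })
	schedule("sys-2", 50*time.Millisecond, func() { fmt.Println("down alert for sys-2") })
	cancel("sys-2") // recovered before the delay expired; no alert is sent
	time.Sleep(100 * time.Millisecond)
	// Output: down alert for sys-1
}
```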

View File

@@ -0,0 +1,628 @@
//go:build testing
package alerts_test
import (
"testing"
"testing/synctest"
"time"
"github.com/henrygd/beszel/internal/alerts"
beszelTests "github.com/henrygd/beszel/internal/tests"
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/core"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestStatusAlerts(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
systems, err := beszelTests.CreateSystems(hub, 4, user.Id, "paused")
assert.NoError(t, err)
var alerts []*core.Record
for i, system := range systems {
alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
"name": "Status",
"system": system.Id,
"user": user.Id,
"min": i + 1,
})
assert.NoError(t, err)
alerts = append(alerts, alert)
}
time.Sleep(10 * time.Millisecond)
for _, alert := range alerts {
assert.False(t, alert.GetBool("triggered"), "Alert should not be triggered immediately")
}
if hub.TestMailer.TotalSend() != 0 {
assert.Zero(t, hub.TestMailer.TotalSend(), "Expected 0 messages, got %d", hub.TestMailer.TotalSend())
}
for _, system := range systems {
assert.EqualValues(t, "paused", system.GetString("status"), "System should be paused")
}
for _, system := range systems {
system.Set("status", "up")
err = hub.SaveNoValidate(system)
assert.NoError(t, err)
}
time.Sleep(time.Second)
assert.EqualValues(t, 0, hub.GetPendingAlertsCount(), "should have 0 alerts in the pendingAlerts map")
for _, system := range systems {
system.Set("status", "down")
err = hub.SaveNoValidate(system)
assert.NoError(t, err)
}
// after 30 seconds, should have 4 alerts in the pendingAlerts map, no triggered alerts
time.Sleep(time.Second * 30)
assert.EqualValues(t, 4, hub.GetPendingAlertsCount(), "should have 4 alerts in the pendingAlerts map")
triggeredCount, err := hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
assert.NoError(t, err)
assert.EqualValues(t, 0, triggeredCount, "should have 0 alert triggered")
assert.EqualValues(t, 0, hub.TestMailer.TotalSend(), "should have 0 messages sent")
// after 1:30 (90 seconds total), should have 1 triggered alert and 3 pending alerts
time.Sleep(time.Second * 60)
assert.EqualValues(t, 3, hub.GetPendingAlertsCount(), "should have 3 alerts in the pendingAlerts map")
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
assert.NoError(t, err)
assert.EqualValues(t, 1, triggeredCount, "should have 1 alert triggered")
assert.EqualValues(t, 1, hub.TestMailer.TotalSend(), "should have 1 messages sent")
// after 2:30 (150 seconds total), should have 2 triggered alerts and 2 pending alerts
time.Sleep(time.Second * 60)
assert.EqualValues(t, 2, hub.GetPendingAlertsCount(), "should have 2 alerts in the pendingAlerts map")
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
assert.NoError(t, err)
assert.EqualValues(t, 2, triggeredCount, "should have 2 alert triggered")
assert.EqualValues(t, 2, hub.TestMailer.TotalSend(), "should have 2 messages sent")
// now we will bring the remaining systems back up
for _, system := range systems {
system.Set("status", "up")
err = hub.SaveNoValidate(system)
assert.NoError(t, err)
}
time.Sleep(time.Second)
// should have 0 alerts in the pendingAlerts map and 0 alerts triggered
assert.EqualValues(t, 0, hub.GetPendingAlertsCount(), "should have 0 alerts in the pendingAlerts map")
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
assert.NoError(t, err)
assert.Zero(t, triggeredCount, "should have 0 alert triggered")
// 4 messages sent, 2 down alerts and 2 up alerts for first 2 systems
assert.EqualValues(t, 4, hub.TestMailer.TotalSend(), "should have 4 messages sent")
})
}
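
The multi-minute sleeps in the test above finish instantly because testing/synctest runs the body on a virtual clock; synctest.Wait then blocks until every goroutine in the bubble is idle. A tiny self-contained illustration of that fake-clock behavior, unrelated to the hub types:

```go
package main_test

import (
	"testing"
	"testing/synctest"
	"time"
)

func TestFakeClock(t *testing.T) {
	synctest.Test(t, func(t *testing.T) {
		start := time.Now()
		time.Sleep(90 * time.Second) // returns immediately; only the virtual clock advances
		if time.Since(start) != 90*time.Second {
			t.Fatalf("expected exactly 90s of virtual time, got %v", time.Since(start))
		}
	})
}
```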
func TestStatusAlertRecoveryBeforeDeadline(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
// Ensure user settings have an email
userSettings, _ := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
hub.Save(userSettings)
// Initial email count
initialEmailCount := hub.TestMailer.TotalSend()
systemCollection, _ := hub.FindCollectionByNameOrId("systems")
system := core.NewRecord(systemCollection)
system.Set("name", "test-system")
system.Set("status", "up")
system.Set("host", "127.0.0.1")
system.Set("users", []string{user.Id})
hub.Save(system)
alertCollection, _ := hub.FindCollectionByNameOrId("alerts")
alert := core.NewRecord(alertCollection)
alert.Set("user", user.Id)
alert.Set("system", system.Id)
alert.Set("name", "Status")
alert.Set("triggered", false)
alert.Set("min", 1)
hub.Save(alert)
am := hub.AlertManager
// 1. System goes down
am.HandleStatusAlerts("down", system)
assert.Equal(t, 1, am.GetPendingAlertsCount(), "Alert should be scheduled")
// 2. System goes up BEFORE delay expires
// Triggering HandleStatusAlerts("up") SHOULD NOT send an alert.
am.HandleStatusAlerts("up", system)
assert.Equal(t, 0, am.GetPendingAlertsCount(), "Alert should be canceled if system recovers before delay expires")
// Verify that NO email was sent.
assert.Equal(t, initialEmailCount, hub.TestMailer.TotalSend(), "Recovery notification should not be sent if system never went down")
}
func TestStatusAlertNormalRecovery(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
// Ensure user settings have an email
userSettings, _ := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
hub.Save(userSettings)
systemCollection, _ := hub.FindCollectionByNameOrId("systems")
system := core.NewRecord(systemCollection)
system.Set("name", "test-system")
system.Set("status", "up")
system.Set("host", "127.0.0.1")
system.Set("users", []string{user.Id})
hub.Save(system)
alertCollection, _ := hub.FindCollectionByNameOrId("alerts")
alert := core.NewRecord(alertCollection)
alert.Set("user", user.Id)
alert.Set("system", system.Id)
alert.Set("name", "Status")
alert.Set("triggered", true) // System was confirmed DOWN
hub.Save(alert)
am := hub.AlertManager
initialEmailCount := hub.TestMailer.TotalSend()
// System goes up
am.HandleStatusAlerts("up", system)
// Verify that an email WAS sent (normal recovery).
assert.Equal(t, initialEmailCount+1, hub.TestMailer.TotalSend(), "Recovery notification should be sent if system was triggered as down")
}
func TestHandleStatusAlertsDoesNotSendRecoveryWhileDownIsOnlyPending(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
require.NoError(t, err)
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
require.NoError(t, hub.Save(userSettings))
systemCollection, err := hub.FindCollectionByNameOrId("systems")
require.NoError(t, err)
system := core.NewRecord(systemCollection)
system.Set("name", "test-system")
system.Set("status", "up")
system.Set("host", "127.0.0.1")
system.Set("users", []string{user.Id})
require.NoError(t, hub.Save(system))
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
require.NoError(t, err)
alert := core.NewRecord(alertCollection)
alert.Set("user", user.Id)
alert.Set("system", system.Id)
alert.Set("name", "Status")
alert.Set("triggered", false)
alert.Set("min", 1)
require.NoError(t, hub.Save(alert))
initialEmailCount := hub.TestMailer.TotalSend()
am := alerts.NewTestAlertManagerWithoutWorker(hub)
require.NoError(t, am.HandleStatusAlerts("down", system))
assert.Equal(t, 1, am.GetPendingAlertsCount(), "down transition should register a pending alert immediately")
require.NoError(t, am.HandleStatusAlerts("up", system))
assert.Zero(t, am.GetPendingAlertsCount(), "recovery should cancel the pending down alert")
assert.Equal(t, initialEmailCount, hub.TestMailer.TotalSend(), "recovery notification should not be sent before a down alert triggers")
alertRecord, err := hub.FindRecordById("alerts", alert.Id)
require.NoError(t, err)
assert.False(t, alertRecord.GetBool("triggered"), "alert should remain untriggered when downtime never matured")
}
func TestStatusAlertTimerCancellationPreventsBoundaryDelivery(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
require.NoError(t, err)
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
require.NoError(t, hub.Save(userSettings))
systemCollection, err := hub.FindCollectionByNameOrId("systems")
require.NoError(t, err)
system := core.NewRecord(systemCollection)
system.Set("name", "test-system")
system.Set("status", "up")
system.Set("host", "127.0.0.1")
system.Set("users", []string{user.Id})
require.NoError(t, hub.Save(system))
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
require.NoError(t, err)
alert := core.NewRecord(alertCollection)
alert.Set("user", user.Id)
alert.Set("system", system.Id)
alert.Set("name", "Status")
alert.Set("triggered", false)
alert.Set("min", 1)
require.NoError(t, hub.Save(alert))
initialEmailCount := hub.TestMailer.TotalSend()
am := alerts.NewTestAlertManagerWithoutWorker(hub)
require.NoError(t, am.HandleStatusAlerts("down", system))
assert.Equal(t, 1, am.GetPendingAlertsCount(), "down transition should register a pending alert immediately")
require.True(t, am.ResetPendingAlertTimer(alert.Id, 25*time.Millisecond), "test should shorten the pending alert timer")
time.Sleep(10 * time.Millisecond)
require.NoError(t, am.HandleStatusAlerts("up", system))
assert.Zero(t, am.GetPendingAlertsCount(), "recovery should remove the pending alert before the timer callback runs")
time.Sleep(40 * time.Millisecond)
assert.Equal(t, initialEmailCount, hub.TestMailer.TotalSend(), "timer callback should not deliver after recovery cancels the pending alert")
alertRecord, err := hub.FindRecordById("alerts", alert.Id)
require.NoError(t, err)
assert.False(t, alertRecord.GetBool("triggered"), "alert should remain untriggered when cancellation wins the timer race")
time.Sleep(time.Minute)
synctest.Wait()
})
}
func TestStatusAlertDownFiresAfterDelayExpires(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
require.NoError(t, err)
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
require.NoError(t, hub.Save(userSettings))
systemCollection, err := hub.FindCollectionByNameOrId("systems")
require.NoError(t, err)
system := core.NewRecord(systemCollection)
system.Set("name", "test-system")
system.Set("status", "up")
system.Set("host", "127.0.0.1")
system.Set("users", []string{user.Id})
require.NoError(t, hub.Save(system))
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
require.NoError(t, err)
alert := core.NewRecord(alertCollection)
alert.Set("user", user.Id)
alert.Set("system", system.Id)
alert.Set("name", "Status")
alert.Set("triggered", false)
alert.Set("min", 1)
require.NoError(t, hub.Save(alert))
initialEmailCount := hub.TestMailer.TotalSend()
am := alerts.NewTestAlertManagerWithoutWorker(hub)
require.NoError(t, am.HandleStatusAlerts("down", system))
assert.Equal(t, 1, am.GetPendingAlertsCount(), "alert should be pending after system goes down")
// Expire the pending alert and process it
am.ForceExpirePendingAlerts()
processed, err := am.ProcessPendingAlerts()
require.NoError(t, err)
assert.Len(t, processed, 1, "one alert should have been processed")
assert.Equal(t, 0, am.GetPendingAlertsCount(), "pending alert should be consumed after processing")
// Verify down email was sent
assert.Equal(t, initialEmailCount+1, hub.TestMailer.TotalSend(), "down notification should be sent after delay expires")
// Verify triggered flag is set in the DB
alertRecord, err := hub.FindRecordById("alerts", alert.Id)
require.NoError(t, err)
assert.True(t, alertRecord.GetBool("triggered"), "alert should be marked triggered after downtime matures")
}
func TestStatusAlertDuplicateDownCallIsIdempotent(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
require.NoError(t, err)
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
require.NoError(t, hub.Save(userSettings))
systemCollection, err := hub.FindCollectionByNameOrId("systems")
require.NoError(t, err)
system := core.NewRecord(systemCollection)
system.Set("name", "test-system")
system.Set("status", "up")
system.Set("host", "127.0.0.1")
system.Set("users", []string{user.Id})
require.NoError(t, hub.Save(system))
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
require.NoError(t, err)
alert := core.NewRecord(alertCollection)
alert.Set("user", user.Id)
alert.Set("system", system.Id)
alert.Set("name", "Status")
alert.Set("triggered", false)
alert.Set("min", 5)
require.NoError(t, hub.Save(alert))
am := alerts.NewTestAlertManagerWithoutWorker(hub)
require.NoError(t, am.HandleStatusAlerts("down", system))
require.NoError(t, am.HandleStatusAlerts("down", system))
require.NoError(t, am.HandleStatusAlerts("down", system))
assert.Equal(t, 1, am.GetPendingAlertsCount(), "repeated down calls should not schedule duplicate pending alerts")
}
func TestStatusAlertNoAlertRecord(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
systemCollection, err := hub.FindCollectionByNameOrId("systems")
require.NoError(t, err)
system := core.NewRecord(systemCollection)
system.Set("name", "test-system")
system.Set("status", "up")
system.Set("host", "127.0.0.1")
system.Set("users", []string{user.Id})
require.NoError(t, hub.Save(system))
// No Status alert record created for this system
initialEmailCount := hub.TestMailer.TotalSend()
am := alerts.NewTestAlertManagerWithoutWorker(hub)
require.NoError(t, am.HandleStatusAlerts("down", system))
assert.Equal(t, 0, am.GetPendingAlertsCount(), "no pending alert when no alert record exists")
require.NoError(t, am.HandleStatusAlerts("up", system))
assert.Equal(t, initialEmailCount, hub.TestMailer.TotalSend(), "no email when no alert record exists")
}
func TestRestorePendingStatusAlertsRequeuesDownSystemsAfterRestart(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
require.NoError(t, err)
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
require.NoError(t, hub.Save(userSettings))
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "down")
require.NoError(t, err)
system := systems[0]
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
require.NoError(t, err)
alert := core.NewRecord(alertCollection)
alert.Set("user", user.Id)
alert.Set("system", system.Id)
alert.Set("name", "Status")
alert.Set("triggered", false)
alert.Set("min", 1)
require.NoError(t, hub.Save(alert))
initialEmailCount := hub.TestMailer.TotalSend()
am := alerts.NewTestAlertManagerWithoutWorker(hub)
require.NoError(t, am.RestorePendingStatusAlerts())
assert.Equal(t, 1, am.GetPendingAlertsCount(), "startup restore should requeue a pending down alert for a system still marked down")
am.ForceExpirePendingAlerts()
processed, err := am.ProcessPendingAlerts()
require.NoError(t, err)
assert.Len(t, processed, 1, "restored pending alert should be processable after the delay expires")
assert.Equal(t, initialEmailCount+1, hub.TestMailer.TotalSend(), "restored pending alert should send the down notification")
alertRecord, err := hub.FindRecordById("alerts", alert.Id)
require.NoError(t, err)
assert.True(t, alertRecord.GetBool("triggered"), "restored pending alert should mark the alert as triggered once delivered")
}
func TestRestorePendingStatusAlertsSkipsNonDownOrAlreadyTriggeredAlerts(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
systemsDown, err := beszelTests.CreateSystems(hub, 2, user.Id, "down")
require.NoError(t, err)
systemDownPending := systemsDown[0]
systemDownTriggered := systemsDown[1]
systemUp, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
"name": "up-system",
"users": []string{user.Id},
"host": "127.0.0.2",
"status": "up",
})
require.NoError(t, err)
_, err = beszelTests.CreateRecord(hub, "alerts", map[string]any{
"name": "Status",
"system": systemDownPending.Id,
"user": user.Id,
"min": 1,
"triggered": false,
})
require.NoError(t, err)
_, err = beszelTests.CreateRecord(hub, "alerts", map[string]any{
"name": "Status",
"system": systemUp.Id,
"user": user.Id,
"min": 1,
"triggered": false,
})
require.NoError(t, err)
_, err = beszelTests.CreateRecord(hub, "alerts", map[string]any{
"name": "Status",
"system": systemDownTriggered.Id,
"user": user.Id,
"min": 1,
"triggered": true,
})
require.NoError(t, err)
am := alerts.NewTestAlertManagerWithoutWorker(hub)
require.NoError(t, am.RestorePendingStatusAlerts())
assert.Equal(t, 1, am.GetPendingAlertsCount(), "only untriggered alerts for currently down systems should be restored")
}
func TestRestorePendingStatusAlertsIsIdempotent(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "down")
require.NoError(t, err)
system := systems[0]
_, err = beszelTests.CreateRecord(hub, "alerts", map[string]any{
"name": "Status",
"system": system.Id,
"user": user.Id,
"min": 1,
"triggered": false,
})
require.NoError(t, err)
am := alerts.NewTestAlertManagerWithoutWorker(hub)
require.NoError(t, am.RestorePendingStatusAlerts())
require.NoError(t, am.RestorePendingStatusAlerts())
assert.Equal(t, 1, am.GetPendingAlertsCount(), "restoring twice should not create duplicate pending alerts")
am.ForceExpirePendingAlerts()
processed, err := am.ProcessPendingAlerts()
require.NoError(t, err)
assert.Len(t, processed, 1, "restored alert should still be processable exactly once")
assert.Zero(t, am.GetPendingAlertsCount(), "processing the restored alert should empty the pending map")
}
func TestResolveStatusAlertsFixesStaleTriggered(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
// CreateSystems uses SaveNoValidate after initial save to bypass the
// onRecordCreate hook that forces status = "pending".
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
require.NoError(t, err)
system := systems[0]
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
require.NoError(t, err)
alert := core.NewRecord(alertCollection)
alert.Set("user", user.Id)
alert.Set("system", system.Id)
alert.Set("name", "Status")
alert.Set("triggered", true) // Stale: system is up but alert still says triggered
require.NoError(t, hub.Save(alert))
// resolveStatusAlerts should clear the stale triggered flag
require.NoError(t, alerts.ResolveStatusAlerts(hub))
alertRecord, err := hub.FindRecordById("alerts", alert.Id)
require.NoError(t, err)
assert.False(t, alertRecord.GetBool("triggered"), "stale triggered flag should be cleared when system is up")
}
func TestResolveStatusAlerts(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
// Create a systemUp
systemUp, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
"name": "test-system",
"users": []string{user.Id},
"host": "127.0.0.1",
"status": "up",
})
assert.NoError(t, err)
systemDown, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
"name": "test-system-2",
"users": []string{user.Id},
"host": "127.0.0.2",
"status": "up",
})
assert.NoError(t, err)
// Create a status alertUp for the system
alertUp, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
"name": "Status",
"system": systemUp.Id,
"user": user.Id,
"min": 1,
})
assert.NoError(t, err)
alertDown, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
"name": "Status",
"system": systemDown.Id,
"user": user.Id,
"min": 1,
})
assert.NoError(t, err)
// Verify alert is not triggered initially
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered initially")
// Set the system to 'up' (this should not trigger the alert)
systemUp.Set("status", "up")
err = hub.SaveNoValidate(systemUp)
assert.NoError(t, err)
systemDown.Set("status", "down")
err = hub.SaveNoValidate(systemDown)
assert.NoError(t, err)
// Wait a moment for any processing
time.Sleep(10 * time.Millisecond)
// Verify alertUp is still not triggered after setting system to up
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
assert.NoError(t, err)
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered when system is up")
// Manually set both alerts triggered to true
alertUp.Set("triggered", true)
err = hub.SaveNoValidate(alertUp)
assert.NoError(t, err)
alertDown.Set("triggered", true)
err = hub.SaveNoValidate(alertDown)
assert.NoError(t, err)
// Verify we have exactly one alert with triggered true
triggeredCount, err := hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
assert.NoError(t, err)
assert.EqualValues(t, 2, triggeredCount, "Should have exactly two alerts with triggered true")
// Verify the specific alertUp is triggered
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
assert.NoError(t, err)
assert.True(t, alertUp.GetBool("triggered"), "Alert should be triggered")
// Verify we have two unresolved alert history records
alertHistoryCount, err := hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
assert.NoError(t, err)
assert.EqualValues(t, 2, alertHistoryCount, "Should have exactly two unresolved alert history records")
err = alerts.ResolveStatusAlerts(hub)
assert.NoError(t, err)
// Verify alertUp is not triggered after resolving
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
assert.NoError(t, err)
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered after resolving")
// Verify alertDown is still triggered
alertDown, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertDown.Id})
assert.NoError(t, err)
assert.True(t, alertDown.GetBool("triggered"), "Alert should still be triggered after resolving")
// Verify we have one unresolved alert history record
alertHistoryCount, err = hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
assert.NoError(t, err)
assert.EqualValues(t, 1, alertHistoryCount, "Should have exactly one unresolved alert history record")
}

View File

@@ -11,7 +11,6 @@ import (
"github.com/pocketbase/dbx"
"github.com/pocketbase/pocketbase/core"
"github.com/pocketbase/pocketbase/tools/types"
"github.com/spf13/cast"
)
func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *system.CombinedData) error {
@@ -92,7 +91,7 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
}
}
min := max(1, cast.ToUint8(alertRecord.Get("min")))
min := max(1, uint8(alertRecord.GetInt("min")))
alert := SystemAlertData{
systemRecord: systemRecord,
@@ -192,22 +191,24 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
case "Memory":
alert.val += stats.Mem
case "Bandwidth":
alert.val += stats.NetSent + stats.NetRecv
alert.val += float64(stats.Bandwidth[0]+stats.Bandwidth[1]) / (1024 * 1024)
case "Disk":
if alert.mapSums == nil {
alert.mapSums = make(map[string]float32, len(data.Stats.ExtraFs)+1)
alert.mapSums = make(map[string]float32, len(stats.ExtraFs)+1)
}
// add root disk
if _, ok := alert.mapSums["root"]; !ok {
alert.mapSums["root"] = 0.0
}
alert.mapSums["root"] += float32(stats.Disk)
// add extra disks
for key, fs := range data.Stats.ExtraFs {
if _, ok := alert.mapSums[key]; !ok {
alert.mapSums[key] = 0.0
// add extra disks from historical record
for key, fs := range stats.ExtraFs {
if fs.DiskTotal > 0 {
if _, ok := alert.mapSums[key]; !ok {
alert.mapSums[key] = 0.0
}
alert.mapSums[key] += float32(fs.DiskUsed / fs.DiskTotal * 100)
}
alert.mapSums[key] += float32(fs.DiskUsed / fs.DiskTotal * 100)
}
case "Temperature":
if alert.mapSums == nil {

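
A quick worked example of the two conversions in the hunk above, using illustrative numbers; the struct here is a local stand-in for the hub's SystemAlertFsStats:

```go
package main

import "fmt"

// Local mirror of SystemAlertFsStats, for illustration only.
type fsStats struct {
	DiskTotal float64
	DiskUsed  float64
}

func main() {
	// Bandwidth alert: the [sent, recv] byte counters are summed and scaled to MB.
	sent, recv := uint64(3_145_728), uint64(1_048_576)
	fmt.Println(float64(sent+recv) / (1024 * 1024)) // 4 (MB added to alert.val for this record)

	// Disk alert for an extra filesystem: the percentage is recomputed per historical record.
	fs := fsStats{DiskTotal: 1000, DiskUsed: 920}
	fmt.Println(fs.DiskUsed / fs.DiskTotal * 100) // 92 (% added to mapSums for that mount)
}
```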
View File

@@ -12,7 +12,6 @@ import (
"testing/synctest"
"time"
"github.com/henrygd/beszel/internal/alerts"
beszelTests "github.com/henrygd/beszel/internal/tests"
"github.com/pocketbase/dbx"
@@ -369,87 +368,6 @@ func TestUserAlertsApi(t *testing.T) {
}
}
func TestStatusAlerts(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
systems, err := beszelTests.CreateSystems(hub, 4, user.Id, "paused")
assert.NoError(t, err)
var alerts []*core.Record
for i, system := range systems {
alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
"name": "Status",
"system": system.Id,
"user": user.Id,
"min": i + 1,
})
assert.NoError(t, err)
alerts = append(alerts, alert)
}
time.Sleep(10 * time.Millisecond)
for _, alert := range alerts {
assert.False(t, alert.GetBool("triggered"), "Alert should not be triggered immediately")
}
if hub.TestMailer.TotalSend() != 0 {
assert.Zero(t, hub.TestMailer.TotalSend(), "Expected 0 messages, got %d", hub.TestMailer.TotalSend())
}
for _, system := range systems {
assert.EqualValues(t, "paused", system.GetString("status"), "System should be paused")
}
for _, system := range systems {
system.Set("status", "up")
err = hub.SaveNoValidate(system)
assert.NoError(t, err)
}
time.Sleep(time.Second)
assert.EqualValues(t, 0, hub.GetPendingAlertsCount(), "should have 0 alerts in the pendingAlerts map")
for _, system := range systems {
system.Set("status", "down")
err = hub.SaveNoValidate(system)
assert.NoError(t, err)
}
// after 30 seconds, should have 4 alerts in the pendingAlerts map, no triggered alerts
time.Sleep(time.Second * 30)
assert.EqualValues(t, 4, hub.GetPendingAlertsCount(), "should have 4 alerts in the pendingAlerts map")
triggeredCount, err := hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
assert.NoError(t, err)
assert.EqualValues(t, 0, triggeredCount, "should have 0 alert triggered")
assert.EqualValues(t, 0, hub.TestMailer.TotalSend(), "should have 0 messages sent")
// after 1:30 seconds, should have 1 triggered alert and 3 pending alerts
time.Sleep(time.Second * 60)
assert.EqualValues(t, 3, hub.GetPendingAlertsCount(), "should have 3 alerts in the pendingAlerts map")
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
assert.NoError(t, err)
assert.EqualValues(t, 1, triggeredCount, "should have 1 alert triggered")
assert.EqualValues(t, 1, hub.TestMailer.TotalSend(), "should have 1 messages sent")
// after 2:30 seconds, should have 2 triggered alerts and 2 pending alerts
time.Sleep(time.Second * 60)
assert.EqualValues(t, 2, hub.GetPendingAlertsCount(), "should have 2 alerts in the pendingAlerts map")
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
assert.NoError(t, err)
assert.EqualValues(t, 2, triggeredCount, "should have 2 alert triggered")
assert.EqualValues(t, 2, hub.TestMailer.TotalSend(), "should have 2 messages sent")
// now we will bring the remaning systems back up
for _, system := range systems {
system.Set("status", "up")
err = hub.SaveNoValidate(system)
assert.NoError(t, err)
}
time.Sleep(time.Second)
// should have 0 alerts in the pendingAlerts map and 0 alerts triggered
assert.EqualValues(t, 0, hub.GetPendingAlertsCount(), "should have 0 alerts in the pendingAlerts map")
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
assert.NoError(t, err)
assert.Zero(t, triggeredCount, "should have 0 alert triggered")
// 4 messages sent, 2 down alerts and 2 up alerts for first 2 systems
assert.EqualValues(t, 4, hub.TestMailer.TotalSend(), "should have 4 messages sent")
})
}
func TestAlertsHistory(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
@@ -578,102 +496,3 @@ func TestAlertsHistory(t *testing.T) {
assert.EqualValues(t, 2, totalHistoryCount, "Should have 2 total alert history records")
})
}
func TestResolveStatusAlerts(t *testing.T) {
hub, user := beszelTests.GetHubWithUser(t)
defer hub.Cleanup()
// Create a systemUp
systemUp, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
"name": "test-system",
"users": []string{user.Id},
"host": "127.0.0.1",
"status": "up",
})
assert.NoError(t, err)
systemDown, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
"name": "test-system-2",
"users": []string{user.Id},
"host": "127.0.0.2",
"status": "up",
})
assert.NoError(t, err)
// Create a status alertUp for the system
alertUp, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
"name": "Status",
"system": systemUp.Id,
"user": user.Id,
"min": 1,
})
assert.NoError(t, err)
alertDown, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
"name": "Status",
"system": systemDown.Id,
"user": user.Id,
"min": 1,
})
assert.NoError(t, err)
// Verify alert is not triggered initially
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered initially")
// Set the system to 'up' (this should not trigger the alert)
systemUp.Set("status", "up")
err = hub.SaveNoValidate(systemUp)
assert.NoError(t, err)
systemDown.Set("status", "down")
err = hub.SaveNoValidate(systemDown)
assert.NoError(t, err)
// Wait a moment for any processing
time.Sleep(10 * time.Millisecond)
// Verify alertUp is still not triggered after setting system to up
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
assert.NoError(t, err)
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered when system is up")
// Manually set both alerts triggered to true
alertUp.Set("triggered", true)
err = hub.SaveNoValidate(alertUp)
assert.NoError(t, err)
alertDown.Set("triggered", true)
err = hub.SaveNoValidate(alertDown)
assert.NoError(t, err)
// Verify we have exactly one alert with triggered true
triggeredCount, err := hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
assert.NoError(t, err)
assert.EqualValues(t, 2, triggeredCount, "Should have exactly two alerts with triggered true")
// Verify the specific alertUp is triggered
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
assert.NoError(t, err)
assert.True(t, alertUp.GetBool("triggered"), "Alert should be triggered")
// Verify we have two unresolved alert history records
alertHistoryCount, err := hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
assert.NoError(t, err)
assert.EqualValues(t, 2, alertHistoryCount, "Should have exactly two unresolved alert history records")
err = alerts.ResolveStatusAlerts(hub)
assert.NoError(t, err)
// Verify alertUp is not triggered after resolving
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
assert.NoError(t, err)
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered after resolving")
// Verify alertDown is still triggered
alertDown, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertDown.Id})
assert.NoError(t, err)
assert.True(t, alertDown.GetBool("triggered"), "Alert should still be triggered after resolving")
// Verify we have one unresolved alert history record
alertHistoryCount, err = hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
assert.NoError(t, err)
assert.EqualValues(t, 1, alertHistoryCount, "Should have exactly one unresolved alert history record")
}

View File

@@ -9,6 +9,12 @@ import (
"github.com/pocketbase/pocketbase/core"
)
func NewTestAlertManagerWithoutWorker(app hubLike) *AlertManager {
return &AlertManager{
hub: app,
}
}
func (am *AlertManager) GetAlertManager() *AlertManager {
return am
}
@@ -34,12 +40,11 @@ func (am *AlertManager) ProcessPendingAlerts() ([]*core.Record, error) {
am.pendingAlerts.Range(func(key, value any) bool {
info := value.(*alertInfo)
if now.After(info.expireTime) {
// Downtime delay has passed, process alert
if err := am.sendStatusAlert("down", info.systemName, info.alertRecord); err != nil {
lastErr = err
if info.timer != nil {
info.timer.Stop()
}
am.processPendingAlert(key.(string))
processedAlerts = append(processedAlerts, info.alertRecord)
am.pendingAlerts.Delete(key)
}
return true
})
@@ -56,6 +61,27 @@ func (am *AlertManager) ForceExpirePendingAlerts() {
})
}
func (am *AlertManager) ResetPendingAlertTimer(alertID string, delay time.Duration) bool {
value, loaded := am.pendingAlerts.Load(alertID)
if !loaded {
return false
}
info := value.(*alertInfo)
if info.timer != nil {
info.timer.Stop()
}
info.expireTime = time.Now().Add(delay)
info.timer = time.AfterFunc(delay, func() {
am.processPendingAlert(alertID)
})
return true
}
func ResolveStatusAlerts(app core.App) error {
return resolveStatusAlerts(app)
}
func (am *AlertManager) RestorePendingStatusAlerts() error {
return am.restorePendingStatusAlerts()
}

View File

@@ -9,6 +9,7 @@ import (
"github.com/henrygd/beszel"
"github.com/henrygd/beszel/agent"
"github.com/henrygd/beszel/agent/health"
"github.com/henrygd/beszel/agent/utils"
"github.com/spf13/pflag"
"golang.org/x/crypto/ssh"
)
@@ -116,12 +117,12 @@ func (opts *cmdOptions) loadPublicKeys() ([]ssh.PublicKey, error) {
}
// Try environment variable
if key, ok := agent.GetEnv("KEY"); ok && key != "" {
if key, ok := utils.GetEnv("KEY"); ok && key != "" {
return agent.ParseKeys(key)
}
// Try key file
keyFile, ok := agent.GetEnv("KEY_FILE")
keyFile, ok := utils.GetEnv("KEY_FILE")
if !ok {
return nil, fmt.Errorf("no key provided: must set -key flag, KEY env var, or KEY_FILE env var. Use 'beszel-agent help' for usage")
}

View File

@@ -143,8 +143,8 @@ type AtaDeviceStatisticsPage struct {
}
type AtaDeviceStatisticsEntry struct {
Name string `json:"name"`
Value *uint64 `json:"value,omitempty"`
Name string `json:"name"`
Value *int64 `json:"value,omitempty"`
}
type AtaSmartAttribute struct {
@@ -356,8 +356,8 @@ type SmartInfoForSata struct {
SmartStatus SmartStatusInfo `json:"smart_status"`
// AtaSmartData AtaSmartData `json:"ata_smart_data"`
// AtaSctCapabilities AtaSctCapabilities `json:"ata_sct_capabilities"`
AtaSmartAttributes AtaSmartAttributes `json:"ata_smart_attributes"`
AtaDeviceStatistics AtaDeviceStatistics `json:"ata_device_statistics"`
AtaSmartAttributes AtaSmartAttributes `json:"ata_smart_attributes"`
AtaDeviceStatistics json.RawMessage `json:"ata_device_statistics"`
// PowerOnTime PowerOnTimeInfo `json:"power_on_time"`
// PowerCycleCount uint16 `json:"power_cycle_count"`
Temperature TemperatureInfo `json:"temperature"`
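Switching ata_device_statistics to json.RawMessage lets smartctl output be decoded without touching that section until a caller actually needs it, and the move to *int64 accommodates negative statistic values. A minimal sketch of the deferred-decode pattern, assuming the AtaDeviceStatistics struct is still defined for the cases where parsing is required (the helper name is illustrative):
// decodeDeviceStats is an illustrative helper: it parses the raw
// ata_device_statistics block only when a caller actually needs it.
func decodeDeviceStats(raw json.RawMessage) (*AtaDeviceStatistics, error) {
	if len(raw) == 0 {
		return nil, nil // smartctl output had no ata_device_statistics section
	}
	var stats AtaDeviceStatistics
	if err := json.Unmarshal(raw, &stats); err != nil {
		return nil, err
	}
	return &stats, nil
}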

View File

@@ -12,8 +12,9 @@ import (
type Stats struct {
Cpu float64 `json:"cpu" cbor:"0,keyasint"`
MaxCpu float64 `json:"cpum,omitempty" cbor:"1,keyasint,omitempty"`
MaxCpu float64 `json:"cpum,omitempty" cbor:"-"`
Mem float64 `json:"m" cbor:"2,keyasint"`
MaxMem float64 `json:"mm,omitempty" cbor:"-"`
MemUsed float64 `json:"mu" cbor:"3,keyasint"`
MemPct float64 `json:"mp" cbor:"4,keyasint"`
MemBuffCache float64 `json:"mb" cbor:"5,keyasint"`
@@ -23,26 +24,25 @@ type Stats struct {
DiskTotal float64 `json:"d" cbor:"9,keyasint"`
DiskUsed float64 `json:"du" cbor:"10,keyasint"`
DiskPct float64 `json:"dp" cbor:"11,keyasint"`
DiskReadPs float64 `json:"dr" cbor:"12,keyasint"`
DiskWritePs float64 `json:"dw" cbor:"13,keyasint"`
MaxDiskReadPs float64 `json:"drm,omitempty" cbor:"14,keyasint,omitempty"`
MaxDiskWritePs float64 `json:"dwm,omitempty" cbor:"15,keyasint,omitempty"`
DiskReadPs float64 `json:"dr,omitzero" cbor:"12,keyasint,omitzero"`
DiskWritePs float64 `json:"dw,omitzero" cbor:"13,keyasint,omitzero"`
MaxDiskReadPs float64 `json:"drm,omitempty" cbor:"-"`
MaxDiskWritePs float64 `json:"dwm,omitempty" cbor:"-"`
NetworkSent float64 `json:"ns,omitzero" cbor:"16,keyasint,omitzero"`
NetworkRecv float64 `json:"nr,omitzero" cbor:"17,keyasint,omitzero"`
MaxNetworkSent float64 `json:"nsm,omitempty" cbor:"18,keyasint,omitempty"`
MaxNetworkRecv float64 `json:"nrm,omitempty" cbor:"19,keyasint,omitempty"`
MaxNetworkSent float64 `json:"nsm,omitempty" cbor:"-"`
MaxNetworkRecv float64 `json:"nrm,omitempty" cbor:"-"`
Temperatures map[string]float64 `json:"t,omitempty" cbor:"20,keyasint,omitempty"`
ExtraFs map[string]*FsStats `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
GPUData map[string]GPUData `json:"g,omitempty" cbor:"22,keyasint,omitempty"`
LoadAvg1 float64 `json:"l1,omitempty" cbor:"23,keyasint,omitempty"`
LoadAvg5 float64 `json:"l5,omitempty" cbor:"24,keyasint,omitempty"`
LoadAvg15 float64 `json:"l15,omitempty" cbor:"25,keyasint,omitempty"`
Bandwidth [2]uint64 `json:"b,omitzero" cbor:"26,keyasint,omitzero"` // [sent bytes, recv bytes]
MaxBandwidth [2]uint64 `json:"bm,omitzero" cbor:"27,keyasint,omitzero"` // [sent bytes, recv bytes]
// LoadAvg1 float64 `json:"l1,omitempty" cbor:"23,keyasint,omitempty"`
// LoadAvg5 float64 `json:"l5,omitempty" cbor:"24,keyasint,omitempty"`
// LoadAvg15 float64 `json:"l15,omitempty" cbor:"25,keyasint,omitempty"`
Bandwidth [2]uint64 `json:"b,omitzero" cbor:"26,keyasint,omitzero"` // [sent bytes, recv bytes]
MaxBandwidth [2]uint64 `json:"bm,omitzero" cbor:"-"` // [sent bytes, recv bytes]
// TODO: remove other load fields in future release in favor of load avg array
LoadAvg [3]float64 `json:"la,omitempty" cbor:"28,keyasint"`
Battery [2]uint8 `json:"bat,omitzero" cbor:"29,keyasint,omitzero"` // [percent, charge state, current]
MaxMem float64 `json:"mm,omitempty" cbor:"30,keyasint,omitempty"`
Battery [2]uint8 `json:"bat,omitzero" cbor:"29,keyasint,omitzero"` // [percent, charge state, current]
NetworkInterfaces map[string][4]uint64 `json:"ni,omitempty" cbor:"31,keyasint,omitempty"` // [upload bytes, download bytes, total upload, total download]
DiskIO [2]uint64 `json:"dio,omitzero" cbor:"32,keyasint,omitzero"` // [read bytes, write bytes]
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
@@ -90,8 +90,8 @@ type FsStats struct {
TotalWrite uint64 `json:"-"`
DiskReadPs float64 `json:"r" cbor:"2,keyasint"`
DiskWritePs float64 `json:"w" cbor:"3,keyasint"`
MaxDiskReadPS float64 `json:"rm,omitempty" cbor:"4,keyasint,omitempty"`
MaxDiskWritePS float64 `json:"wm,omitempty" cbor:"5,keyasint,omitempty"`
MaxDiskReadPS float64 `json:"rm,omitempty" cbor:"-"`
MaxDiskWritePS float64 `json:"wm,omitempty" cbor:"-"`
// TODO: remove DiskReadPs and DiskWritePs in future release in favor of DiskReadBytes and DiskWriteBytes
DiskReadBytes uint64 `json:"rb" cbor:"6,keyasint,omitempty"`
DiskWriteBytes uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
@@ -129,23 +129,23 @@ type Info struct {
KernelVersion string `json:"k,omitempty" cbor:"1,keyasint,omitempty"` // deprecated - moved to Details struct
Cores int `json:"c,omitzero" cbor:"2,keyasint,omitzero"` // deprecated - moved to Details struct
// Threads is needed in Info struct to calculate load average thresholds
Threads int `json:"t,omitempty" cbor:"3,keyasint,omitempty"`
CpuModel string `json:"m,omitempty" cbor:"4,keyasint,omitempty"` // deprecated - moved to Details struct
Uptime uint64 `json:"u" cbor:"5,keyasint"`
Cpu float64 `json:"cpu" cbor:"6,keyasint"`
MemPct float64 `json:"mp" cbor:"7,keyasint"`
DiskPct float64 `json:"dp" cbor:"8,keyasint"`
Bandwidth float64 `json:"b" cbor:"9,keyasint"`
AgentVersion string `json:"v" cbor:"10,keyasint"`
Podman bool `json:"p,omitempty" cbor:"11,keyasint,omitempty"` // deprecated - moved to Details struct
GpuPct float64 `json:"g,omitempty" cbor:"12,keyasint,omitempty"`
DashboardTemp float64 `json:"dt,omitempty" cbor:"13,keyasint,omitempty"`
Os Os `json:"os,omitempty" cbor:"14,keyasint,omitempty"` // deprecated - moved to Details struct
LoadAvg1 float64 `json:"l1,omitempty" cbor:"15,keyasint,omitempty"` // deprecated - use `la` array instead
LoadAvg5 float64 `json:"l5,omitempty" cbor:"16,keyasint,omitempty"` // deprecated - use `la` array instead
LoadAvg15 float64 `json:"l15,omitempty" cbor:"17,keyasint,omitempty"` // deprecated - use `la` array instead
BandwidthBytes uint64 `json:"bb" cbor:"18,keyasint"`
Threads int `json:"t,omitempty" cbor:"3,keyasint,omitempty"`
CpuModel string `json:"m,omitempty" cbor:"4,keyasint,omitempty"` // deprecated - moved to Details struct
Uptime uint64 `json:"u" cbor:"5,keyasint"`
Cpu float64 `json:"cpu" cbor:"6,keyasint"`
MemPct float64 `json:"mp" cbor:"7,keyasint"`
DiskPct float64 `json:"dp" cbor:"8,keyasint"`
Bandwidth float64 `json:"b,omitzero" cbor:"9,keyasint"` // deprecated in favor of BandwidthBytes
AgentVersion string `json:"v" cbor:"10,keyasint"`
Podman bool `json:"p,omitempty" cbor:"11,keyasint,omitempty"` // deprecated - moved to Details struct
GpuPct float64 `json:"g,omitempty" cbor:"12,keyasint,omitempty"`
DashboardTemp float64 `json:"dt,omitempty" cbor:"13,keyasint,omitempty"`
Os Os `json:"os,omitempty" cbor:"14,keyasint,omitempty"` // deprecated - moved to Details struct
// LoadAvg1 float64 `json:"l1,omitempty" cbor:"15,keyasint,omitempty"` // deprecated - use `la` array instead
// LoadAvg5 float64 `json:"l5,omitempty" cbor:"16,keyasint,omitempty"` // deprecated - use `la` array instead
// LoadAvg15 float64 `json:"l15,omitempty" cbor:"17,keyasint,omitempty"` // deprecated - use `la` array instead
BandwidthBytes uint64 `json:"bb" cbor:"18,keyasint"`
LoadAvg [3]float64 `json:"la,omitempty" cbor:"19,keyasint"`
ConnectionType ConnectionType `json:"ct,omitempty" cbor:"20,keyasint,omitempty,omitzero"`
ExtraFsPct map[string]float64 `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
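The per-interval Max* fields now carry cbor:"-", so they remain available in JSON for the frontend while being dropped from the CBOR payload exchanged with agents. Assuming encoding goes through a struct-tag-aware CBOR library such as fxamacker/cbor (which honors the "-" and keyasint tag options), the effect looks like this standalone sketch:
package main

import (
	"encoding/json"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type sample struct {
	Cpu    float64 `json:"cpu" cbor:"0,keyasint"`
	MaxCpu float64 `json:"cpum,omitempty" cbor:"-"` // JSON only, never sent over CBOR
}

func main() {
	s := sample{Cpu: 12.5, MaxCpu: 80}
	j, _ := json.Marshal(s) // {"cpu":12.5,"cpum":80}
	c, _ := cbor.Marshal(s) // integer-keyed map {0: 12.5}; MaxCpu omitted
	fmt.Printf("json=%s cbor=%x\n", j, c)
}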

View File

@@ -917,7 +917,7 @@ func TestAgentWebSocketIntegration(t *testing.T) {
// Wait for connection result
maxWait := 2 * time.Second
time.Sleep(20 * time.Millisecond)
time.Sleep(40 * time.Millisecond)
checkInterval := 20 * time.Millisecond
timeout := time.After(maxWait)
ticker := time.Tick(checkInterval)

View File

@@ -1,35 +1,39 @@
// Package expirymap provides a thread-safe map with expiring entries.
// It supports TTL-based expiration with both lazy cleanup on access
// and periodic background cleanup.
package expirymap
import (
"reflect"
"sync"
"time"
"github.com/pocketbase/pocketbase/tools/store"
)
type val[T any] struct {
type val[T comparable] struct {
value T
expires time.Time
}
type ExpiryMap[T any] struct {
store *store.Store[string, *val[T]]
cleanupInterval time.Duration
type ExpiryMap[T comparable] struct {
store store.Store[string, val[T]]
stopChan chan struct{}
stopOnce sync.Once
}
// New creates a new expiry map with custom cleanup interval
func New[T any](cleanupInterval time.Duration) *ExpiryMap[T] {
func New[T comparable](cleanupInterval time.Duration) *ExpiryMap[T] {
m := &ExpiryMap[T]{
store: store.New(map[string]*val[T]{}),
cleanupInterval: cleanupInterval,
store: *store.New(map[string]val[T]{}),
stopChan: make(chan struct{}),
}
m.startCleaner()
go m.startCleaner(cleanupInterval)
return m
}
// Set stores a value with the given TTL
func (m *ExpiryMap[T]) Set(key string, value T, ttl time.Duration) {
m.store.Set(key, &val[T]{
m.store.Set(key, val[T]{
value: value,
expires: time.Now().Add(ttl),
})
@@ -55,7 +59,7 @@ func (m *ExpiryMap[T]) GetOk(key string) (T, bool) {
// GetByValue retrieves a value by value
func (m *ExpiryMap[T]) GetByValue(val T) (key string, value T, ok bool) {
for key, v := range m.store.GetAll() {
if reflect.DeepEqual(v.value, val) {
if v.value == val {
// check if expired
if v.expires.Before(time.Now()) {
m.store.Remove(key)
@@ -75,7 +79,7 @@ func (m *ExpiryMap[T]) Remove(key string) {
// RemovebyValue removes a value by value
func (m *ExpiryMap[T]) RemovebyValue(value T) (T, bool) {
for key, val := range m.store.GetAll() {
if reflect.DeepEqual(val.value, value) {
if val.value == value {
m.store.Remove(key)
return val.value, true
}
@@ -84,13 +88,23 @@ func (m *ExpiryMap[T]) RemovebyValue(value T) (T, bool) {
}
// startCleaner runs the background cleanup process
func (m *ExpiryMap[T]) startCleaner() {
go func() {
tick := time.Tick(m.cleanupInterval)
for range tick {
func (m *ExpiryMap[T]) startCleaner(interval time.Duration) {
tick := time.Tick(interval)
for {
select {
case <-tick:
m.cleanup()
case <-m.stopChan:
return
}
}()
}
}
// StopCleaner stops the background cleanup process
func (m *ExpiryMap[T]) StopCleaner() {
m.stopOnce.Do(func() {
close(m.stopChan)
})
}
// cleanup removes all expired entries
@@ -102,3 +116,12 @@ func (m *ExpiryMap[T]) cleanup() {
}
}
}
// UpdateExpiration updates the expiration time of a key
func (m *ExpiryMap[T]) UpdateExpiration(key string, ttl time.Duration) {
value, ok := m.store.GetOk(key)
if ok {
value.expires = time.Now().Add(ttl)
m.store.Set(key, value)
}
}
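Taken together, the ExpiryMap API now matches what the hub needs for SMART throttling: record a timestamp with a TTL, stretch that TTL when the agent reports a different interval, and stop the background cleaner at shutdown. A short usage sketch built only from the methods shown above (systemID and the interval values are placeholders):
fetchTimes := expirymap.New[int64](time.Hour) // background cleanup every hour
defer fetchTimes.StopCleaner()

// remember when SMART data was last fetched, kept alive slightly longer than the interval
fetchTimes.Set(systemID, time.Now().UnixMilli(), smartInterval+time.Minute)

// if the agent later reports a longer interval, keep the entry alive accordingly
fetchTimes.UpdateExpiration(systemID, newInterval+time.Minute)

// a zero value from GetOk means "never fetched, or the entry expired"
if last, ok := fetchTimes.GetOk(systemID); ok && time.Since(time.UnixMilli(last)) < smartInterval {
	return // still within the interval; skip the fetch
}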

View File

@@ -4,6 +4,7 @@ package expirymap
import (
"testing"
"testing/synctest"
"time"
"github.com/stretchr/testify/assert"
@@ -177,6 +178,33 @@ func TestExpiryMap_GenericTypes(t *testing.T) {
})
}
func TestExpiryMap_UpdateExpiration(t *testing.T) {
em := New[string](time.Hour)
// Set a value with short TTL
em.Set("key1", "value1", time.Millisecond*50)
// Verify it exists
assert.True(t, em.Has("key1"))
// Update expiration to a longer TTL
em.UpdateExpiration("key1", time.Hour)
// Wait for the original TTL to pass
time.Sleep(time.Millisecond * 100)
// Should still exist because expiration was updated
assert.True(t, em.Has("key1"))
value, ok := em.GetOk("key1")
assert.True(t, ok)
assert.Equal(t, "value1", value)
// Try updating non-existent key (should not panic)
assert.NotPanics(t, func() {
em.UpdateExpiration("nonexistent", time.Hour)
})
}
func TestExpiryMap_ZeroValues(t *testing.T) {
em := New[string](time.Hour)
@@ -473,3 +501,52 @@ func TestExpiryMap_ValueOperations_Integration(t *testing.T) {
assert.Equal(t, "unique", value)
assert.Equal(t, "key2", key)
}
func TestExpiryMap_Cleaner(t *testing.T) {
synctest.Test(t, func(t *testing.T) {
em := New[string](time.Second)
defer em.StopCleaner()
em.Set("test", "value", 500*time.Millisecond)
// Wait 600ms, value is expired but cleaner hasn't run yet (interval is 1s)
time.Sleep(600 * time.Millisecond)
synctest.Wait()
// Map should still hold the value in its internal store before lazy access or cleaner
assert.Equal(t, 1, len(em.store.GetAll()), "store should still have 1 item before cleaner runs")
// Wait another 500ms so cleaner (1s interval) runs
time.Sleep(500 * time.Millisecond)
synctest.Wait() // Wait for background goroutine to process the tick
assert.Equal(t, 0, len(em.store.GetAll()), "store should be empty after cleaner runs")
})
}
func TestExpiryMap_StopCleaner(t *testing.T) {
em := New[string](time.Hour)
// Initially, stopChan is open, so reading from it would block
select {
case <-em.stopChan:
t.Fatal("stopChan should be open initially")
default:
// success
}
em.StopCleaner()
// After StopCleaner, stopChan is closed, reading returns immediately
select {
case <-em.stopChan:
// success
default:
t.Fatal("stopChan was not closed by StopCleaner")
}
// Calling StopCleaner again should NOT panic thanks to sync.Once
assert.NotPanics(t, func() {
em.StopCleaner()
})
}
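The cleaner test runs inside Go's testing/synctest bubble (stable since Go 1.25; this changeset moves the module to 1.26.1), where the clock is virtual: time.Sleep returns as soon as every goroutine in the bubble is durably blocked, and synctest.Wait pauses until background goroutines settle. A standalone illustration of the fake clock:
package example_test

import (
	"testing"
	"testing/synctest"
	"time"
)

func TestFakeClock(t *testing.T) {
	synctest.Test(t, func(t *testing.T) {
		start := time.Now()
		time.Sleep(5 * time.Second) // returns immediately; the bubble's clock jumps forward
		if time.Since(start) != 5*time.Second {
			t.Fatalf("expected exactly 5s of fake time, got %v", time.Since(start))
		}
	})
}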

View File

@@ -48,7 +48,6 @@ type System struct {
detailsFetched atomic.Bool // True if static system details have been fetched and saved
smartFetching atomic.Bool // True if SMART devices are currently being fetched
smartInterval time.Duration // Interval for periodic SMART data updates
lastSmartFetch atomic.Int64 // Unix milliseconds of last SMART data fetch
}
func (sm *SystemManager) NewSystem(systemId string) *System {
@@ -134,19 +133,34 @@ func (sys *System) update() error {
return err
}
// ensure deprecated fields from older agents are migrated to current fields
migrateDeprecatedFields(data, !sys.detailsFetched.Load())
// create system records
_, err = sys.createRecords(data)
// if details were included and fetched successfully, mark details as fetched and update smart interval if set by agent
if err == nil && data.Details != nil {
sys.detailsFetched.Store(true)
// update smart interval if it's set on the agent side
if data.Details.SmartInterval > 0 {
sys.smartInterval = data.Details.SmartInterval
// extend the lastFetch entry's expiration to cover the new smart interval so a
// changed interval can't expire it early and trigger a premature re-fetch
sys.manager.smartFetchMap.UpdateExpiration(sys.Id, sys.smartInterval+time.Minute)
}
}
// Fetch and save SMART devices when system first comes online or at intervals
if backgroundSmartFetchEnabled() {
if backgroundSmartFetchEnabled() && sys.detailsFetched.Load() {
if sys.smartInterval <= 0 {
sys.smartInterval = time.Hour
}
lastFetch := sys.lastSmartFetch.Load()
if time.Since(time.UnixMilli(lastFetch)) >= sys.smartInterval && sys.smartFetching.CompareAndSwap(false, true) {
lastFetch, _ := sys.manager.smartFetchMap.GetOk(sys.Id)
if time.Since(time.UnixMilli(lastFetch-1e4)) >= sys.smartInterval && sys.smartFetching.CompareAndSwap(false, true) {
go func() {
defer sys.smartFetching.Store(false)
sys.lastSmartFetch.Store(time.Now().UnixMilli())
sys.manager.smartFetchMap.Set(sys.Id, time.Now().UnixMilli(), sys.smartInterval+time.Minute)
_ = sys.FetchAndSaveSmartDevices()
}()
}
@@ -221,11 +235,6 @@ func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error
if err := createSystemDetailsRecord(txApp, data.Details, sys.Id); err != nil {
return err
}
sys.detailsFetched.Store(true)
// update smart interval if it's set on the agent side
if data.Details.SmartInterval > 0 {
sys.smartInterval = data.Details.SmartInterval
}
}
// update system record (do this last because it triggers alerts and we need above records to be inserted first)
@@ -703,3 +712,50 @@ func getJitter() <-chan time.Time {
msDelay := (interval * minPercent / 100) + rand.Intn(interval*jitterRange/100)
return time.After(time.Duration(msDelay) * time.Millisecond)
}
// migrateDeprecatedFields moves values from deprecated fields to their new locations if the new
// fields are not already populated. Deprecated fields and references may be removed no earlier
// than 30 days and one minor version release after the release that includes the migration.
//
// This is run when processing incoming system data from agents, which may be on older versions.
func migrateDeprecatedFields(cd *system.CombinedData, createDetails bool) {
// migration added 0.19.0
if cd.Stats.Bandwidth[0] == 0 && cd.Stats.Bandwidth[1] == 0 {
cd.Stats.Bandwidth[0] = uint64(cd.Stats.NetworkSent * 1024 * 1024)
cd.Stats.Bandwidth[1] = uint64(cd.Stats.NetworkRecv * 1024 * 1024)
cd.Stats.NetworkSent, cd.Stats.NetworkRecv = 0, 0
}
// migration added 0.19.0
if cd.Info.BandwidthBytes == 0 {
cd.Info.BandwidthBytes = uint64(cd.Info.Bandwidth * 1024 * 1024)
cd.Info.Bandwidth = 0
}
// migration added 0.19.0
if cd.Stats.DiskIO[0] == 0 && cd.Stats.DiskIO[1] == 0 {
cd.Stats.DiskIO[0] = uint64(cd.Stats.DiskReadPs * 1024 * 1024)
cd.Stats.DiskIO[1] = uint64(cd.Stats.DiskWritePs * 1024 * 1024)
cd.Stats.DiskReadPs, cd.Stats.DiskWritePs = 0, 0
}
// migration added 0.19.0 - Move deprecated Info fields to Details struct
if cd.Details == nil && cd.Info.Hostname != "" {
if createDetails {
cd.Details = &system.Details{
Hostname: cd.Info.Hostname,
Kernel: cd.Info.KernelVersion,
Cores: cd.Info.Cores,
Threads: cd.Info.Threads,
CpuModel: cd.Info.CpuModel,
Podman: cd.Info.Podman,
Os: cd.Info.Os,
MemoryTotal: uint64(cd.Stats.Mem * 1024 * 1024 * 1024),
}
}
// zero the deprecated fields to prevent saving them in systems.info DB json payload
cd.Info.Hostname = ""
cd.Info.KernelVersion = ""
cd.Info.Cores = 0
cd.Info.CpuModel = ""
cd.Info.Podman = false
cd.Info.Os = 0
}
}
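Because the last-fetch timestamp now lives in the manager-level ExpiryMap rather than on the System, an agent reconnect no longer resets it. Condensed from the old and new lines interleaved in the hunk above, the throttle reads as:
lastFetch, _ := sys.manager.smartFetchMap.GetOk(sys.Id) // zero if never fetched or expired
// the 1e4 ms (10s) buffer keeps a fetch from being skipped when the next
// check lands slightly before a full interval has elapsed
if time.Since(time.UnixMilli(lastFetch-1e4)) >= sys.smartInterval &&
	sys.smartFetching.CompareAndSwap(false, true) {
	go func() {
		defer sys.smartFetching.Store(false)
		sys.manager.smartFetchMap.Set(sys.Id, time.Now().UnixMilli(), sys.smartInterval+time.Minute)
		_ = sys.FetchAndSaveSmartDevices()
	}()
}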

View File

@@ -8,6 +8,7 @@ import (
"github.com/henrygd/beszel/internal/hub/ws"
"github.com/henrygd/beszel/internal/entities/system"
"github.com/henrygd/beszel/internal/hub/expirymap"
"github.com/henrygd/beszel/internal/common"
@@ -40,9 +41,10 @@ var errSystemExists = errors.New("system exists")
// SystemManager manages a collection of monitored systems and their connections.
// It handles system lifecycle, status updates, and maintains both SSH and WebSocket connections.
type SystemManager struct {
hub hubLike // Hub interface for database and alert operations
systems *store.Store[string, *System] // Thread-safe store of active systems
sshConfig *ssh.ClientConfig // SSH client configuration for system connections
hub hubLike // Hub interface for database and alert operations
systems *store.Store[string, *System] // Thread-safe store of active systems
sshConfig *ssh.ClientConfig // SSH client configuration for system connections
smartFetchMap *expirymap.ExpiryMap[int64] // Stores last SMART fetch time per system ID
}
// hubLike defines the interface requirements for the hub dependency.
@@ -58,8 +60,9 @@ type hubLike interface {
// The hub must implement the hubLike interface to provide database and alert functionality.
func NewSystemManager(hub hubLike) *SystemManager {
return &SystemManager{
systems: store.New(map[string]*System{}),
hub: hub,
systems: store.New(map[string]*System{}),
hub: hub,
smartFetchMap: expirymap.New[int64](time.Hour),
}
}

View File

@@ -0,0 +1,159 @@
//go:build testing
package systems
import (
"testing"
"github.com/henrygd/beszel/internal/entities/system"
)
func TestCombinedData_MigrateDeprecatedFields(t *testing.T) {
t.Run("Migrate NetworkSent and NetworkRecv to Bandwidth", func(t *testing.T) {
cd := &system.CombinedData{
Stats: system.Stats{
NetworkSent: 1.5, // 1.5 MB
NetworkRecv: 2.5, // 2.5 MB
},
}
migrateDeprecatedFields(cd, true)
expectedSent := uint64(1.5 * 1024 * 1024)
expectedRecv := uint64(2.5 * 1024 * 1024)
if cd.Stats.Bandwidth[0] != expectedSent {
t.Errorf("expected Bandwidth[0] %d, got %d", expectedSent, cd.Stats.Bandwidth[0])
}
if cd.Stats.Bandwidth[1] != expectedRecv {
t.Errorf("expected Bandwidth[1] %d, got %d", expectedRecv, cd.Stats.Bandwidth[1])
}
if cd.Stats.NetworkSent != 0 || cd.Stats.NetworkRecv != 0 {
t.Errorf("expected NetworkSent and NetworkRecv to be reset, got %f, %f", cd.Stats.NetworkSent, cd.Stats.NetworkRecv)
}
})
t.Run("Migrate Info.Bandwidth to Info.BandwidthBytes", func(t *testing.T) {
cd := &system.CombinedData{
Info: system.Info{
Bandwidth: 10.0, // 10 MB
},
}
migrateDeprecatedFields(cd, true)
expected := uint64(10 * 1024 * 1024)
if cd.Info.BandwidthBytes != expected {
t.Errorf("expected BandwidthBytes %d, got %d", expected, cd.Info.BandwidthBytes)
}
if cd.Info.Bandwidth != 0 {
t.Errorf("expected Info.Bandwidth to be reset, got %f", cd.Info.Bandwidth)
}
})
t.Run("Migrate DiskReadPs and DiskWritePs to DiskIO", func(t *testing.T) {
cd := &system.CombinedData{
Stats: system.Stats{
DiskReadPs: 3.0, // 3 MB
DiskWritePs: 4.0, // 4 MB
},
}
migrateDeprecatedFields(cd, true)
expectedRead := uint64(3 * 1024 * 1024)
expectedWrite := uint64(4 * 1024 * 1024)
if cd.Stats.DiskIO[0] != expectedRead {
t.Errorf("expected DiskIO[0] %d, got %d", expectedRead, cd.Stats.DiskIO[0])
}
if cd.Stats.DiskIO[1] != expectedWrite {
t.Errorf("expected DiskIO[1] %d, got %d", expectedWrite, cd.Stats.DiskIO[1])
}
if cd.Stats.DiskReadPs != 0 || cd.Stats.DiskWritePs != 0 {
t.Errorf("expected DiskReadPs and DiskWritePs to be reset, got %f, %f", cd.Stats.DiskReadPs, cd.Stats.DiskWritePs)
}
})
t.Run("Migrate Info fields to Details struct", func(t *testing.T) {
cd := &system.CombinedData{
Stats: system.Stats{
Mem: 16.0, // 16 GB
},
Info: system.Info{
Hostname: "test-host",
KernelVersion: "6.8.0",
Cores: 8,
Threads: 16,
CpuModel: "Intel i7",
Podman: true,
Os: system.Linux,
},
}
migrateDeprecatedFields(cd, true)
if cd.Details == nil {
t.Fatal("expected Details struct to be created")
}
if cd.Details.Hostname != "test-host" {
t.Errorf("expected Hostname 'test-host', got '%s'", cd.Details.Hostname)
}
if cd.Details.Kernel != "6.8.0" {
t.Errorf("expected Kernel '6.8.0', got '%s'", cd.Details.Kernel)
}
if cd.Details.Cores != 8 {
t.Errorf("expected Cores 8, got %d", cd.Details.Cores)
}
if cd.Details.Threads != 16 {
t.Errorf("expected Threads 16, got %d", cd.Details.Threads)
}
if cd.Details.CpuModel != "Intel i7" {
t.Errorf("expected CpuModel 'Intel i7', got '%s'", cd.Details.CpuModel)
}
if cd.Details.Podman != true {
t.Errorf("expected Podman true, got %v", cd.Details.Podman)
}
if cd.Details.Os != system.Linux {
t.Errorf("expected Os Linux, got %d", cd.Details.Os)
}
expectedMem := uint64(16 * 1024 * 1024 * 1024)
if cd.Details.MemoryTotal != expectedMem {
t.Errorf("expected MemoryTotal %d, got %d", expectedMem, cd.Details.MemoryTotal)
}
if cd.Info.Hostname != "" || cd.Info.KernelVersion != "" || cd.Info.Cores != 0 || cd.Info.CpuModel != "" || cd.Info.Podman != false || cd.Info.Os != 0 {
t.Errorf("expected Info fields to be reset, got %+v", cd.Info)
}
})
t.Run("Do not migrate if Details already exists", func(t *testing.T) {
cd := &system.CombinedData{
Details: &system.Details{Hostname: "existing-host"},
Info: system.Info{
Hostname: "deprecated-host",
},
}
migrateDeprecatedFields(cd, true)
if cd.Details.Hostname != "existing-host" {
t.Errorf("expected Hostname 'existing-host', got '%s'", cd.Details.Hostname)
}
if cd.Info.Hostname != "deprecated-host" {
t.Errorf("expected Info.Hostname to remain 'deprecated-host', got '%s'", cd.Info.Hostname)
}
})
t.Run("Do not create details if migrateDetails is false", func(t *testing.T) {
cd := &system.CombinedData{
Info: system.Info{
Hostname: "deprecated-host",
},
}
migrateDeprecatedFields(cd, false)
if cd.Details != nil {
t.Fatal("expected Details struct to not be created")
}
if cd.Info.Hostname != "" {
t.Errorf("expected Info.Hostname to be reset, got '%s'", cd.Info.Hostname)
}
})
}

View File

@@ -113,4 +113,5 @@ func (sm *SystemManager) RemoveAllSystems() {
for _, system := range sm.systems.GetAll() {
sm.RemoveSystem(system.Id)
}
sm.smartFetchMap.StopCleaner()
}

View File

@@ -16,19 +16,16 @@ import { useYAxisWidth } from "./hooks"
export default memo(function LoadAverageChart({ chartData }: { chartData: ChartData }) {
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
const keys: { legacy: keyof SystemStats; color: string; label: string }[] = [
const keys: { color: string; label: string }[] = [
{
legacy: "l1",
color: "hsl(271, 81%, 60%)", // Purple
label: t({ message: `1 min`, comment: "Load average" }),
},
{
legacy: "l5",
color: "hsl(217, 91%, 60%)", // Blue
label: t({ message: `5 min`, comment: "Load average" }),
},
{
legacy: "l15",
color: "hsl(25, 95%, 53%)", // Orange
label: t({ message: `15 min`, comment: "Load average" }),
},
@@ -66,27 +63,18 @@ export default memo(function LoadAverageChart({ chartData }: { chartData: ChartD
/>
}
/>
{keys.map(({ legacy, color, label }, i) => {
const dataKey = (value: { stats: SystemStats }) => {
const { minor, patch } = chartData.agentVersion
if (minor <= 12 && patch < 1) {
return value.stats?.[legacy]
}
return value.stats?.la?.[i] ?? value.stats?.[legacy]
}
return (
<Line
key={label}
dataKey={dataKey}
name={label}
type="monotoneX"
dot={false}
strokeWidth={1.5}
stroke={color}
isAnimationActive={false}
/>
)
})}
{keys.map(({ color, label }, i) => (
<Line
key={label}
dataKey={(value: { stats: SystemStats }) => value.stats?.la?.[i]}
name={label}
type="monotoneX"
dot={false}
strokeWidth={1.5}
stroke={color}
isAnimationActive={false}
/>
))}
<ChartLegend content={<ChartLegendContent />} />
</LineChart>
</ChartContainer>

View File

@@ -654,7 +654,7 @@ export default memo(function SystemDetail({ id }: { id: string }) {
)}
{/* Load Average chart */}
{chartData.agentVersion?.minor >= 12 && (
{chartData.agentVersion?.minor > 12 && (
<ChartCard
empty={dataEmpty}
grid={grid}

View File

@@ -621,8 +621,8 @@ function DiskSheet({
const deviceName = disk?.name || unknown
const model = disk?.model || unknown
const capacity = disk?.capacity ? formatCapacity(disk.capacity) : unknown
const serialNumber = disk?.serial || unknown
const firmwareVersion = disk?.firmware || unknown
const serialNumber = disk?.serial
const firmwareVersion = disk?.firmware
const status = disk?.state || unknown
return (
@@ -636,24 +636,32 @@ function DiskSheet({
{model}
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
{capacity}
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
<Tooltip>
<TooltipTrigger asChild>
<span>{serialNumber}</span>
</TooltipTrigger>
<TooltipContent>
<Trans>Serial Number</Trans>
</TooltipContent>
</Tooltip>
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
<Tooltip>
<TooltipTrigger asChild>
<span>{firmwareVersion}</span>
</TooltipTrigger>
<TooltipContent>
<Trans>Firmware</Trans>
</TooltipContent>
</Tooltip>
{serialNumber && (
<>
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
<Tooltip>
<TooltipTrigger asChild>
<span>{serialNumber}</span>
</TooltipTrigger>
<TooltipContent>
<Trans>Serial Number</Trans>
</TooltipContent>
</Tooltip>
</>
)}
{firmwareVersion && (
<>
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
<Tooltip>
<TooltipTrigger asChild>
<span>{firmwareVersion}</span>
</TooltipTrigger>
<TooltipContent>
<Trans>Firmware</Trans>
</TooltipContent>
</Tooltip>
</>
)}
</SheetDescription>
</SheetHeader>
<div className="flex-1 overflow-hidden p-4 flex flex-col gap-4">

View File

@@ -198,32 +198,19 @@ export function SystemsTableColumns(viewMode: "table" | "grid"): ColumnDef<Syste
},
{
id: "loadAverage",
accessorFn: ({ info }) => {
const sum = info.la?.reduce((acc, curr) => acc + curr, 0)
// TODO: remove this in future release in favor of la array
if (!sum) {
return (info.l1 ?? 0) + (info.l5 ?? 0) + (info.l15 ?? 0) || undefined
}
return sum || undefined
},
accessorFn: ({ info }) => info.la?.reduce((acc, curr) => acc + curr, 0),
name: () => t({ message: "Load Avg", comment: "Short label for load average" }),
size: 0,
Icon: HourglassIcon,
header: sortableHeader,
cell(info: CellContext<SystemRecord, unknown>) {
const { info: sysInfo, status } = info.row.original
const { major, minor } = parseSemVer(sysInfo.v)
const { colorWarn = 65, colorCrit = 90 } = useStore($userSettings, { keys: ["colorWarn", "colorCrit"] })
// agent version
const { minor, patch } = parseSemVer(sysInfo.v)
let loadAverages = sysInfo.la
// use legacy load averages if agent version is less than 12.1.0
if (!loadAverages || (minor === 12 && patch < 1)) {
loadAverages = [sysInfo.l1 ?? 0, sysInfo.l5 ?? 0, sysInfo.l15 ?? 0]
}
const loadAverages = sysInfo.la || []
const max = Math.max(...loadAverages)
if (max === 0 && (status === SystemStatus.Paused || minor < 12)) {
if (max === 0 && (status === SystemStatus.Paused || (major < 1 && minor < 13))) {
return null
}
@@ -248,19 +235,20 @@ export function SystemsTableColumns(viewMode: "table" | "grid"): ColumnDef<Syste
},
},
{
accessorFn: ({ info }) => info.bb || (info.b || 0) * 1024 * 1024 || undefined,
accessorFn: ({ info, status }) => (status !== SystemStatus.Up ? undefined : info.bb),
id: "net",
name: () => t`Net`,
size: 0,
Icon: EthernetIcon,
header: sortableHeader,
sortUndefined: "last",
cell(info) {
const sys = info.row.original
const userSettings = useStore($userSettings, { keys: ["unitNet"] })
if (sys.status === SystemStatus.Paused) {
const val = info.getValue() as number | undefined
if (val === undefined) {
return null
}
const { value, unit } = formatBytes((info.getValue() || 0) as number, true, userSettings.unitNet, false)
const userSettings = useStore($userSettings, { keys: ["unitNet"] })
const { value, unit } = formatBytes(val, true, userSettings.unitNet, false)
return (
<span className="tabular-nums whitespace-nowrap">
{decimalString(value, value >= 100 ? 1 : 2)} {unit}

View File

@@ -45,12 +45,6 @@ export interface SystemInfo {
c: number
/** cpu model */
m: string
/** load average 1 minute */
l1?: number
/** load average 5 minutes */
l5?: number
/** load average 15 minutes */
l15?: number
/** load average */
la?: [number, number, number]
/** operating system */
@@ -94,13 +88,6 @@ export interface SystemStats {
cpub?: number[]
/** per-core cpu usage [CPU0..] (0-100 integers) */
cpus?: number[]
// TODO: remove these in future release in favor of la
/** load average 1 minute */
l1?: number
/** load average 5 minutes */
l5?: number
/** load average 15 minutes */
l15?: number
/** load average */
la?: [number, number, number]
/** total memory (gb) */

View File

@@ -98,7 +98,7 @@ func ClearCollection(t testing.TB, app core.App, collectionName string) error {
}
func (h *TestHub) Cleanup() {
h.GetAlertManager().StopWorker()
h.GetAlertManager().Stop()
h.GetSystemManager().RemoveAllSystems()
h.TestApp.Cleanup()
}