mirror of
https://github.com/henrygd/beszel.git
synced 2026-04-18 02:41:50 +02:00
Compare commits
140 Commits
283fa9d5c2
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a71617e058 | ||
|
|
e5507fa106 | ||
|
|
a024c3cfd0 | ||
|
|
07466804e7 | ||
|
|
981c788d6f | ||
|
|
f5576759de | ||
|
|
be0b708064 | ||
|
|
ab3a3de46c | ||
|
|
1556e53926 | ||
|
|
e3ade3aeb8 | ||
|
|
b013f06956 | ||
|
|
3793b27958 | ||
|
|
5b02158228 | ||
|
|
0ae8c42ae0 | ||
|
|
ea80f3c5a2 | ||
|
|
c3dffff5e4 | ||
|
|
06fdd0e7a8 | ||
|
|
6e3fd90834 | ||
|
|
5ab82183fa | ||
|
|
a68e02ca84 | ||
|
|
0f2e16c63c | ||
|
|
c4009f2b43 | ||
|
|
ef0c1420d1 | ||
|
|
eb9a8e1ef9 | ||
|
|
6b5e6ffa9a | ||
|
|
d656036d3b | ||
|
|
80b73c7faf | ||
|
|
afe9eb7a70 | ||
|
|
7f565a3086 | ||
|
|
77862d4cb1 | ||
|
|
e158a9001b | ||
|
|
f670e868e4 | ||
|
|
0fff699bf6 | ||
|
|
ba10da1b9f | ||
|
|
7f4f14b505 | ||
|
|
2fda4ff264 | ||
|
|
20b0b40ec8 | ||
|
|
d548a012b4 | ||
|
|
ce5d1217dd | ||
|
|
cef09d7cb1 | ||
|
|
f6440acb43 | ||
|
|
5463a38f0f | ||
|
|
80135fdad3 | ||
|
|
5db4eb4346 | ||
|
|
f6c5e2928a | ||
|
|
6a207c33fa | ||
|
|
9f19afccde | ||
|
|
f25f2469e3 | ||
|
|
5bd43ed461 | ||
|
|
afdc3f7779 | ||
|
|
a227c77526 | ||
|
|
8202d746af | ||
|
|
9840b99327 | ||
|
|
f7b5a505e8 | ||
|
|
3cb32ac046 | ||
|
|
e610d9bfc8 | ||
|
|
b53fdbe0ef | ||
|
|
c7261b56f1 | ||
|
|
3f4c3d51b6 | ||
|
|
ad21cab457 | ||
|
|
f04684b30a | ||
|
|
4d4e4fba9b | ||
|
|
62587919f4 | ||
|
|
35528332fd | ||
|
|
e3e453140e | ||
|
|
7a64da9f65 | ||
|
|
8e71c8ad97 | ||
|
|
97f3b8c61f | ||
|
|
0b0b5d16d7 | ||
|
|
b2fd50211e | ||
|
|
c159eaacd1 | ||
|
|
441bdd2ec5 | ||
|
|
ff36138229 | ||
|
|
be70840609 | ||
|
|
565162ef5f | ||
|
|
adbfe7cfb7 | ||
|
|
1ff7762c80 | ||
|
|
0ab8a606e0 | ||
|
|
e4e0affbc1 | ||
|
|
c3a0e645ee | ||
|
|
c6c3950fb0 | ||
|
|
48ddc96a0d | ||
|
|
704cb86de8 | ||
|
|
2854ce882f | ||
|
|
ed50367f70 | ||
|
|
4ebe869591 | ||
|
|
c9bbbe91f2 | ||
|
|
5bfe4f6970 | ||
|
|
380d2b1091 | ||
|
|
a7f99e7a8c | ||
|
|
bd94a9d142 | ||
|
|
8e2316f845 | ||
|
|
0d3dfcb207 | ||
|
|
b386ce5190 | ||
|
|
e527534016 | ||
|
|
ec7ad632a9 | ||
|
|
963fce5a33 | ||
|
|
d38c0da06d | ||
|
|
cae6ac4626 | ||
|
|
6b1ff264f2 | ||
|
|
35d0e792ad | ||
|
|
654cd06b19 | ||
|
|
5e1b028130 | ||
|
|
638e7dc12a | ||
|
|
73c262455d | ||
|
|
0c4d2edd45 | ||
|
|
8f23fff1c9 | ||
|
|
02c1a0c13d | ||
|
|
69fdcb36ab | ||
|
|
b91eb6de40 | ||
|
|
ec69f6c6e0 | ||
|
|
a86cb91e07 | ||
|
|
004841717a | ||
|
|
096296ba7b | ||
|
|
b012df5669 | ||
|
|
12545b4b6d | ||
|
|
9e2296452b | ||
|
|
ac79860d4a | ||
|
|
e13a99fdac | ||
|
|
4cfb2a86ad | ||
|
|
191f25f6e0 | ||
|
|
aa8b3711d7 | ||
|
|
1fb0b25988 | ||
|
|
04600d83cc | ||
|
|
5d8906c9b2 | ||
|
|
daac287b9d | ||
|
|
d526ea61a9 | ||
|
|
79616e1662 | ||
|
|
01e8bdf040 | ||
|
|
1e3a44e05d | ||
|
|
311095cfdd | ||
|
|
4869c834bb | ||
|
|
e1c1e97f0a | ||
|
|
f6b2824ccc | ||
|
|
f17ffc21b8 | ||
|
|
f792f9b102 | ||
|
|
1def7d8d3a | ||
|
|
ef92b254bf | ||
|
|
10d853c004 | ||
|
|
cdfd116da0 |
6
.github/workflows/vulncheck.yml
vendored
6
.github/workflows/vulncheck.yml
vendored
@@ -19,11 +19,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version: 1.25.x
|
go-version: 1.26.x
|
||||||
# cached: false
|
# cached: false
|
||||||
- name: Get official govulncheck
|
- name: Get official govulncheck
|
||||||
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
|||||||
1
Makefile
1
Makefile
@@ -51,7 +51,6 @@ clean:
|
|||||||
lint:
|
lint:
|
||||||
golangci-lint run
|
golangci-lint run
|
||||||
|
|
||||||
test: export GOEXPERIMENT=synctest
|
|
||||||
test:
|
test:
|
||||||
go test -tags=testing ./...
|
go test -tags=testing ./...
|
||||||
|
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ package agent
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@@ -14,11 +13,14 @@ import (
|
|||||||
"github.com/gliderlabs/ssh"
|
"github.com/gliderlabs/ssh"
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
"github.com/henrygd/beszel/agent/deltatracker"
|
"github.com/henrygd/beszel/agent/deltatracker"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
gossh "golang.org/x/crypto/ssh"
|
gossh "golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const defaultDataCacheTimeMs uint16 = 60_000
|
||||||
|
|
||||||
type Agent struct {
|
type Agent struct {
|
||||||
sync.Mutex // Used to lock agent while collecting data
|
sync.Mutex // Used to lock agent while collecting data
|
||||||
debug bool // true if LOG_LEVEL is set to debug
|
debug bool // true if LOG_LEVEL is set to debug
|
||||||
@@ -36,6 +38,7 @@ type Agent struct {
|
|||||||
sensorConfig *SensorConfig // Sensors config
|
sensorConfig *SensorConfig // Sensors config
|
||||||
systemInfo system.Info // Host system info (dynamic)
|
systemInfo system.Info // Host system info (dynamic)
|
||||||
systemDetails system.Details // Host system details (static, once-per-connection)
|
systemDetails system.Details // Host system details (static, once-per-connection)
|
||||||
|
detailsDirty bool // Whether system details have changed and need to be resent
|
||||||
gpuManager *GPUManager // Manages GPU data
|
gpuManager *GPUManager // Manages GPU data
|
||||||
cache *systemDataCache // Cache for system stats based on cache time
|
cache *systemDataCache // Cache for system stats based on cache time
|
||||||
connectionManager *ConnectionManager // Channel to signal connection events
|
connectionManager *ConnectionManager // Channel to signal connection events
|
||||||
@@ -68,11 +71,11 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
slog.Info("Data directory", "path", agent.dataDir)
|
slog.Info("Data directory", "path", agent.dataDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
agent.memCalc, _ = GetEnv("MEM_CALC")
|
agent.memCalc, _ = utils.GetEnv("MEM_CALC")
|
||||||
agent.sensorConfig = agent.newSensorConfig()
|
agent.sensorConfig = agent.newSensorConfig()
|
||||||
|
|
||||||
// Parse disk usage cache duration (e.g., "15m", "1h") to avoid waking sleeping disks
|
// Parse disk usage cache duration (e.g., "15m", "1h") to avoid waking sleeping disks
|
||||||
if diskUsageCache, exists := GetEnv("DISK_USAGE_CACHE"); exists {
|
if diskUsageCache, exists := utils.GetEnv("DISK_USAGE_CACHE"); exists {
|
||||||
if duration, err := time.ParseDuration(diskUsageCache); err == nil {
|
if duration, err := time.ParseDuration(diskUsageCache); err == nil {
|
||||||
agent.diskUsageCacheDuration = duration
|
agent.diskUsageCacheDuration = duration
|
||||||
slog.Info("DISK_USAGE_CACHE", "duration", duration)
|
slog.Info("DISK_USAGE_CACHE", "duration", duration)
|
||||||
@@ -82,7 +85,7 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Set up slog with a log level determined by the LOG_LEVEL env var
|
// Set up slog with a log level determined by the LOG_LEVEL env var
|
||||||
if logLevelStr, exists := GetEnv("LOG_LEVEL"); exists {
|
if logLevelStr, exists := utils.GetEnv("LOG_LEVEL"); exists {
|
||||||
switch strings.ToLower(logLevelStr) {
|
switch strings.ToLower(logLevelStr) {
|
||||||
case "debug":
|
case "debug":
|
||||||
agent.debug = true
|
agent.debug = true
|
||||||
@@ -97,13 +100,13 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
slog.Debug(beszel.Version)
|
slog.Debug(beszel.Version)
|
||||||
|
|
||||||
// initialize docker manager
|
// initialize docker manager
|
||||||
agent.dockerManager = newDockerManager()
|
agent.dockerManager = newDockerManager(agent)
|
||||||
|
|
||||||
// initialize system info
|
// initialize system info
|
||||||
agent.refreshSystemDetails()
|
agent.refreshSystemDetails()
|
||||||
|
|
||||||
// SMART_INTERVAL env var to update smart data at this interval
|
// SMART_INTERVAL env var to update smart data at this interval
|
||||||
if smartIntervalEnv, exists := GetEnv("SMART_INTERVAL"); exists {
|
if smartIntervalEnv, exists := utils.GetEnv("SMART_INTERVAL"); exists {
|
||||||
if duration, err := time.ParseDuration(smartIntervalEnv); err == nil && duration > 0 {
|
if duration, err := time.ParseDuration(smartIntervalEnv); err == nil && duration > 0 {
|
||||||
agent.systemDetails.SmartInterval = duration
|
agent.systemDetails.SmartInterval = duration
|
||||||
slog.Info("SMART_INTERVAL", "duration", duration)
|
slog.Info("SMART_INTERVAL", "duration", duration)
|
||||||
@@ -142,21 +145,12 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
|
|
||||||
// if debugging, print stats
|
// if debugging, print stats
|
||||||
if agent.debug {
|
if agent.debug {
|
||||||
slog.Debug("Stats", "data", agent.gatherStats(common.DataRequestOptions{CacheTimeMs: 60_000, IncludeDetails: true}))
|
slog.Debug("Stats", "data", agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs, IncludeDetails: true}))
|
||||||
}
|
}
|
||||||
|
|
||||||
return agent, nil
|
return agent, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetEnv retrieves an environment variable with a "BESZEL_AGENT_" prefix, or falls back to the unprefixed key.
|
|
||||||
func GetEnv(key string) (value string, exists bool) {
|
|
||||||
if value, exists = os.LookupEnv("BESZEL_AGENT_" + key); exists {
|
|
||||||
return value, exists
|
|
||||||
}
|
|
||||||
// Fallback to the old unprefixed key
|
|
||||||
return os.LookupEnv(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedData {
|
func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedData {
|
||||||
a.Lock()
|
a.Lock()
|
||||||
defer a.Unlock()
|
defer a.Unlock()
|
||||||
@@ -173,11 +167,6 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
|
|||||||
Info: a.systemInfo,
|
Info: a.systemInfo,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Include static system details only when requested
|
|
||||||
if options.IncludeDetails {
|
|
||||||
data.Details = &a.systemDetails
|
|
||||||
}
|
|
||||||
|
|
||||||
// slog.Info("System data", "data", data, "cacheTimeMs", cacheTimeMs)
|
// slog.Info("System data", "data", data, "cacheTimeMs", cacheTimeMs)
|
||||||
|
|
||||||
if a.dockerManager != nil {
|
if a.dockerManager != nil {
|
||||||
@@ -190,7 +179,7 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
|
|||||||
}
|
}
|
||||||
|
|
||||||
// skip updating systemd services if cache time is not the default 60sec interval
|
// skip updating systemd services if cache time is not the default 60sec interval
|
||||||
if a.systemdManager != nil && cacheTimeMs == 60_000 {
|
if a.systemdManager != nil && cacheTimeMs == defaultDataCacheTimeMs {
|
||||||
totalCount := uint16(a.systemdManager.getServiceStatsCount())
|
totalCount := uint16(a.systemdManager.getServiceStatsCount())
|
||||||
if totalCount > 0 {
|
if totalCount > 0 {
|
||||||
numFailed := a.systemdManager.getFailedServiceCount()
|
numFailed := a.systemdManager.getFailedServiceCount()
|
||||||
@@ -213,7 +202,7 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
|
|||||||
data.Stats.ExtraFs[key] = stats
|
data.Stats.ExtraFs[key] = stats
|
||||||
// Add percentages to Info struct for dashboard
|
// Add percentages to Info struct for dashboard
|
||||||
if stats.DiskTotal > 0 {
|
if stats.DiskTotal > 0 {
|
||||||
pct := twoDecimals((stats.DiskUsed / stats.DiskTotal) * 100)
|
pct := utils.TwoDecimals((stats.DiskUsed / stats.DiskTotal) * 100)
|
||||||
data.Info.ExtraFsPct[key] = pct
|
data.Info.ExtraFsPct[key] = pct
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -221,7 +210,8 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
|
|||||||
slog.Debug("Extra FS", "data", data.Stats.ExtraFs)
|
slog.Debug("Extra FS", "data", data.Stats.ExtraFs)
|
||||||
|
|
||||||
a.cache.Set(data, cacheTimeMs)
|
a.cache.Set(data, cacheTimeMs)
|
||||||
return data
|
|
||||||
|
return a.attachSystemDetails(data, cacheTimeMs, options.IncludeDetails)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start initializes and starts the agent with optional WebSocket connection
|
// Start initializes and starts the agent with optional WebSocket connection
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
|
|||||||
@@ -1,84 +1,11 @@
|
|||||||
//go:build !freebsd
|
// Package battery provides functions to check if the system has a battery and return the charge state and percentage.
|
||||||
|
|
||||||
// Package battery provides functions to check if the system has a battery and to get the battery stats.
|
|
||||||
package battery
|
package battery
|
||||||
|
|
||||||
import (
|
const (
|
||||||
"errors"
|
stateUnknown uint8 = iota
|
||||||
"log/slog"
|
stateEmpty
|
||||||
"math"
|
stateFull
|
||||||
|
stateCharging
|
||||||
"github.com/distatus/battery"
|
stateDischarging
|
||||||
|
stateIdle
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
systemHasBattery = false
|
|
||||||
haveCheckedBattery = false
|
|
||||||
)
|
|
||||||
|
|
||||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
|
||||||
func HasReadableBattery() bool {
|
|
||||||
if haveCheckedBattery {
|
|
||||||
return systemHasBattery
|
|
||||||
}
|
|
||||||
haveCheckedBattery = true
|
|
||||||
batteries, err := battery.GetAll()
|
|
||||||
for _, bat := range batteries {
|
|
||||||
if bat != nil && (bat.Full > 0 || bat.Design > 0) {
|
|
||||||
systemHasBattery = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !systemHasBattery {
|
|
||||||
slog.Debug("No battery found", "err", err)
|
|
||||||
}
|
|
||||||
return systemHasBattery
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBatteryStats returns the current battery percent and charge state
|
|
||||||
// percent = (current charge of all batteries) / (sum of designed/full capacity of all batteries)
|
|
||||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
|
||||||
if !HasReadableBattery() {
|
|
||||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
|
||||||
}
|
|
||||||
batteries, err := battery.GetAll()
|
|
||||||
// we'll handle errors later by skipping batteries with errors, rather
|
|
||||||
// than skipping everything because of the presence of some errors.
|
|
||||||
if len(batteries) == 0 {
|
|
||||||
return batteryPercent, batteryState, errors.New("no batteries")
|
|
||||||
}
|
|
||||||
|
|
||||||
totalCapacity := float64(0)
|
|
||||||
totalCharge := float64(0)
|
|
||||||
errs, partialErrs := err.(battery.Errors)
|
|
||||||
|
|
||||||
batteryState = math.MaxUint8
|
|
||||||
|
|
||||||
for i, bat := range batteries {
|
|
||||||
if partialErrs && errs[i] != nil {
|
|
||||||
// if there were some errors, like missing data, skip it
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if bat == nil || bat.Full == 0 {
|
|
||||||
// skip batteries with no capacity. Charge is unlikely to ever be zero, but
|
|
||||||
// we can't guarantee that, so don't skip based on charge.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
totalCapacity += bat.Full
|
|
||||||
totalCharge += min(bat.Current, bat.Full)
|
|
||||||
if bat.State.Raw >= 0 {
|
|
||||||
batteryState = uint8(bat.State.Raw)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
|
||||||
// for macs there's sometimes a ghost battery with 0 capacity
|
|
||||||
// https://github.com/distatus/battery/issues/34
|
|
||||||
// Instead of skipping over those batteries, we'll check for total 0 capacity
|
|
||||||
// and return an error. This also prevents a divide by zero.
|
|
||||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
|
||||||
}
|
|
||||||
|
|
||||||
batteryPercent = uint8(totalCharge / totalCapacity * 100)
|
|
||||||
return batteryPercent, batteryState, nil
|
|
||||||
}
|
|
||||||
|
|||||||
96
agent/battery/battery_darwin.go
Normal file
96
agent/battery/battery_darwin.go
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
//go:build darwin
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
"os/exec"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"howett.net/plist"
|
||||||
|
)
|
||||||
|
|
||||||
|
type macBattery struct {
|
||||||
|
CurrentCapacity int `plist:"CurrentCapacity"`
|
||||||
|
MaxCapacity int `plist:"MaxCapacity"`
|
||||||
|
FullyCharged bool `plist:"FullyCharged"`
|
||||||
|
IsCharging bool `plist:"IsCharging"`
|
||||||
|
ExternalConnected bool `plist:"ExternalConnected"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func readMacBatteries() ([]macBattery, error) {
|
||||||
|
out, err := exec.Command("ioreg", "-n", "AppleSmartBattery", "-r", "-a").Output()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(out) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
var batteries []macBattery
|
||||||
|
if _, err := plist.Unmarshal(out, &batteries); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return batteries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||||
|
var HasReadableBattery = sync.OnceValue(func() bool {
|
||||||
|
systemHasBattery := false
|
||||||
|
batteries, err := readMacBatteries()
|
||||||
|
slog.Debug("Batteries", "batteries", batteries, "err", err)
|
||||||
|
for _, bat := range batteries {
|
||||||
|
if bat.MaxCapacity > 0 {
|
||||||
|
systemHasBattery = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return systemHasBattery
|
||||||
|
})
|
||||||
|
|
||||||
|
// GetBatteryStats returns the current battery percent and charge state.
|
||||||
|
// Uses CurrentCapacity/MaxCapacity to match the value macOS displays.
|
||||||
|
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||||
|
if !HasReadableBattery() {
|
||||||
|
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
batteries, err := readMacBatteries()
|
||||||
|
if len(batteries) == 0 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no batteries")
|
||||||
|
}
|
||||||
|
|
||||||
|
totalCapacity := 0
|
||||||
|
totalCharge := 0
|
||||||
|
batteryState = math.MaxUint8
|
||||||
|
|
||||||
|
for _, bat := range batteries {
|
||||||
|
if bat.MaxCapacity == 0 {
|
||||||
|
// skip ghost batteries with 0 capacity
|
||||||
|
// https://github.com/distatus/battery/issues/34
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalCapacity += bat.MaxCapacity
|
||||||
|
totalCharge += min(bat.CurrentCapacity, bat.MaxCapacity)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case !bat.ExternalConnected:
|
||||||
|
batteryState = stateDischarging
|
||||||
|
case bat.IsCharging:
|
||||||
|
batteryState = stateCharging
|
||||||
|
case bat.CurrentCapacity == 0:
|
||||||
|
batteryState = stateEmpty
|
||||||
|
case !bat.FullyCharged:
|
||||||
|
batteryState = stateIdle
|
||||||
|
default:
|
||||||
|
batteryState = stateFull
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryPercent = uint8(float64(totalCharge) / float64(totalCapacity) * 100)
|
||||||
|
return batteryPercent, batteryState, nil
|
||||||
|
}
|
||||||
120
agent/battery/battery_linux.go
Normal file
120
agent/battery/battery_linux.go
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// getBatteryPaths returns the paths of all batteries in /sys/class/power_supply
|
||||||
|
var getBatteryPaths func() ([]string, error)
|
||||||
|
|
||||||
|
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||||
|
var HasReadableBattery func() bool
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
resetBatteryState("/sys/class/power_supply")
|
||||||
|
}
|
||||||
|
|
||||||
|
// resetBatteryState resets the sync.Once functions to a fresh state.
|
||||||
|
// Tests call this after swapping sysfsPowerSupply so the new path is picked up.
|
||||||
|
func resetBatteryState(sysfsPowerSupplyPath string) {
|
||||||
|
getBatteryPaths = sync.OnceValues(func() ([]string, error) {
|
||||||
|
entries, err := os.ReadDir(sysfsPowerSupplyPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var paths []string
|
||||||
|
for _, e := range entries {
|
||||||
|
path := filepath.Join(sysfsPowerSupplyPath, e.Name())
|
||||||
|
if utils.ReadStringFile(filepath.Join(path, "type")) == "Battery" {
|
||||||
|
paths = append(paths, path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return paths, nil
|
||||||
|
})
|
||||||
|
HasReadableBattery = sync.OnceValue(func() bool {
|
||||||
|
systemHasBattery := false
|
||||||
|
paths, err := getBatteryPaths()
|
||||||
|
for _, path := range paths {
|
||||||
|
if _, ok := utils.ReadStringFileOK(filepath.Join(path, "capacity")); ok {
|
||||||
|
systemHasBattery = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !systemHasBattery {
|
||||||
|
slog.Debug("No battery found", "err", err)
|
||||||
|
}
|
||||||
|
return systemHasBattery
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseSysfsState(status string) uint8 {
|
||||||
|
switch status {
|
||||||
|
case "Empty":
|
||||||
|
return stateEmpty
|
||||||
|
case "Full":
|
||||||
|
return stateFull
|
||||||
|
case "Charging":
|
||||||
|
return stateCharging
|
||||||
|
case "Discharging":
|
||||||
|
return stateDischarging
|
||||||
|
case "Not charging":
|
||||||
|
return stateIdle
|
||||||
|
default:
|
||||||
|
return stateUnknown
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBatteryStats returns the current battery percent and charge state.
|
||||||
|
// Reads /sys/class/power_supply/*/capacity directly so the kernel-reported
|
||||||
|
// value is used, which is always 0-100 and matches what the OS displays.
|
||||||
|
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||||
|
if !HasReadableBattery() {
|
||||||
|
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
paths, err := getBatteryPaths()
|
||||||
|
if err != nil {
|
||||||
|
return batteryPercent, batteryState, err
|
||||||
|
}
|
||||||
|
if len(paths) == 0 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no batteries")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryState = math.MaxUint8
|
||||||
|
totalPercent := 0
|
||||||
|
count := 0
|
||||||
|
|
||||||
|
for _, path := range paths {
|
||||||
|
capStr, ok := utils.ReadStringFileOK(filepath.Join(path, "capacity"))
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
cap, parseErr := strconv.Atoi(capStr)
|
||||||
|
if parseErr != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalPercent += cap
|
||||||
|
count++
|
||||||
|
|
||||||
|
state := parseSysfsState(utils.ReadStringFile(filepath.Join(path, "status")))
|
||||||
|
if state != stateUnknown {
|
||||||
|
batteryState = state
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if count == 0 || batteryState == math.MaxUint8 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryPercent = uint8(totalPercent / count)
|
||||||
|
return batteryPercent, batteryState, nil
|
||||||
|
}
|
||||||
201
agent/battery/battery_linux_test.go
Normal file
201
agent/battery/battery_linux_test.go
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
//go:build testing && linux
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// setupFakeSysfs creates a temporary sysfs-like tree under t.TempDir(),
|
||||||
|
// swaps sysfsPowerSupply, resets the sync.Once caches, and restores
|
||||||
|
// everything on cleanup. Returns a helper to create battery directories.
|
||||||
|
func setupFakeSysfs(t *testing.T) (tmpDir string, addBattery func(name, capacity, status string)) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
tmp := t.TempDir()
|
||||||
|
resetBatteryState(tmp)
|
||||||
|
|
||||||
|
write := func(path, content string) {
|
||||||
|
t.Helper()
|
||||||
|
dir := filepath.Dir(path)
|
||||||
|
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(path, []byte(content), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
addBattery = func(name, capacity, status string) {
|
||||||
|
t.Helper()
|
||||||
|
batDir := filepath.Join(tmp, name)
|
||||||
|
write(filepath.Join(batDir, "type"), "Battery")
|
||||||
|
write(filepath.Join(batDir, "capacity"), capacity)
|
||||||
|
write(filepath.Join(batDir, "status"), status)
|
||||||
|
}
|
||||||
|
|
||||||
|
return tmp, addBattery
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseSysfsState(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
input string
|
||||||
|
want uint8
|
||||||
|
}{
|
||||||
|
{"Empty", stateEmpty},
|
||||||
|
{"Full", stateFull},
|
||||||
|
{"Charging", stateCharging},
|
||||||
|
{"Discharging", stateDischarging},
|
||||||
|
{"Not charging", stateIdle},
|
||||||
|
{"", stateUnknown},
|
||||||
|
{"SomethingElse", stateUnknown},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
assert.Equal(t, tt.want, parseSysfsState(tt.input), "parseSysfsState(%q)", tt.input)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_SingleBattery(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "72", "Discharging")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(72), pct)
|
||||||
|
assert.Equal(t, stateDischarging, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_MultipleBatteries(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "80", "Charging")
|
||||||
|
addBattery("BAT1", "40", "Charging")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
// average of 80 and 40 = 60
|
||||||
|
assert.EqualValues(t, 60, pct)
|
||||||
|
assert.Equal(t, stateCharging, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_FullBattery(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "100", "Full")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(100), pct)
|
||||||
|
assert.Equal(t, stateFull, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_EmptyBattery(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "0", "Empty")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(0), pct)
|
||||||
|
assert.Equal(t, stateEmpty, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_NotCharging(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "80", "Not charging")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(80), pct)
|
||||||
|
assert.Equal(t, stateIdle, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_NoBatteries(t *testing.T) {
|
||||||
|
setupFakeSysfs(t) // empty directory, no batteries
|
||||||
|
|
||||||
|
_, _, err := GetBatteryStats()
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_NonBatterySupplyIgnored(t *testing.T) {
|
||||||
|
tmp, addBattery := setupFakeSysfs(t)
|
||||||
|
|
||||||
|
// Add a real battery
|
||||||
|
addBattery("BAT0", "55", "Charging")
|
||||||
|
|
||||||
|
// Add an AC adapter (type != Battery) - should be ignored
|
||||||
|
acDir := filepath.Join(tmp, "AC0")
|
||||||
|
if err := os.MkdirAll(acDir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(acDir, "type"), []byte("Mains"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(55), pct)
|
||||||
|
assert.Equal(t, stateCharging, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_InvalidCapacitySkipped(t *testing.T) {
|
||||||
|
tmp, addBattery := setupFakeSysfs(t)
|
||||||
|
|
||||||
|
// One battery with valid capacity
|
||||||
|
addBattery("BAT0", "90", "Discharging")
|
||||||
|
|
||||||
|
// Another with invalid capacity text
|
||||||
|
badDir := filepath.Join(tmp, "BAT1")
|
||||||
|
if err := os.MkdirAll(badDir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(badDir, "type"), []byte("Battery"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(badDir, "capacity"), []byte("not-a-number"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(badDir, "status"), []byte("Discharging"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pct, _, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
// Only BAT0 counted
|
||||||
|
assert.Equal(t, uint8(90), pct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_UnknownStatusOnly(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "50", "SomethingWeird")
|
||||||
|
|
||||||
|
_, _, err := GetBatteryStats()
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasReadableBattery_True(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "50", "Charging")
|
||||||
|
|
||||||
|
assert.True(t, HasReadableBattery())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasReadableBattery_False(t *testing.T) {
|
||||||
|
setupFakeSysfs(t) // no batteries
|
||||||
|
|
||||||
|
assert.False(t, HasReadableBattery())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasReadableBattery_NoCapacityFile(t *testing.T) {
|
||||||
|
tmp, _ := setupFakeSysfs(t)
|
||||||
|
|
||||||
|
// Battery dir with type file but no capacity file
|
||||||
|
batDir := filepath.Join(tmp, "BAT0")
|
||||||
|
err := os.MkdirAll(batDir, 0o755)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
err = os.WriteFile(filepath.Join(batDir, "type"), []byte("Battery"), 0o644)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.False(t, HasReadableBattery())
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build freebsd
|
//go:build !darwin && !linux && !windows
|
||||||
|
|
||||||
package battery
|
package battery
|
||||||
|
|
||||||
298
agent/battery/battery_windows.go
Normal file
298
agent/battery/battery_windows.go
Normal file
@@ -0,0 +1,298 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
|
// Most of the Windows battery code is based on
|
||||||
|
// distatus/battery by Karol 'Kenji Takahashi' Woźniak
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
type batteryQueryInformation struct {
|
||||||
|
BatteryTag uint32
|
||||||
|
InformationLevel int32
|
||||||
|
AtRate int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type batteryInformation struct {
|
||||||
|
Capabilities uint32
|
||||||
|
Technology uint8
|
||||||
|
Reserved [3]uint8
|
||||||
|
Chemistry [4]uint8
|
||||||
|
DesignedCapacity uint32
|
||||||
|
FullChargedCapacity uint32
|
||||||
|
DefaultAlert1 uint32
|
||||||
|
DefaultAlert2 uint32
|
||||||
|
CriticalBias uint32
|
||||||
|
CycleCount uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type batteryWaitStatus struct {
|
||||||
|
BatteryTag uint32
|
||||||
|
Timeout uint32
|
||||||
|
PowerState uint32
|
||||||
|
LowCapacity uint32
|
||||||
|
HighCapacity uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type batteryStatus struct {
|
||||||
|
PowerState uint32
|
||||||
|
Capacity uint32
|
||||||
|
Voltage uint32
|
||||||
|
Rate int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type winGUID struct {
|
||||||
|
Data1 uint32
|
||||||
|
Data2 uint16
|
||||||
|
Data3 uint16
|
||||||
|
Data4 [8]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type spDeviceInterfaceData struct {
|
||||||
|
cbSize uint32
|
||||||
|
InterfaceClassGuid winGUID
|
||||||
|
Flags uint32
|
||||||
|
Reserved uint
|
||||||
|
}
|
||||||
|
|
||||||
|
var guidDeviceBattery = winGUID{
|
||||||
|
0x72631e54,
|
||||||
|
0x78A4,
|
||||||
|
0x11d0,
|
||||||
|
[8]byte{0xbc, 0xf7, 0x00, 0xaa, 0x00, 0xb7, 0xb3, 0x2a},
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
setupapi = &windows.LazyDLL{Name: "setupapi.dll", System: true}
|
||||||
|
setupDiGetClassDevsW = setupapi.NewProc("SetupDiGetClassDevsW")
|
||||||
|
setupDiEnumDeviceInterfaces = setupapi.NewProc("SetupDiEnumDeviceInterfaces")
|
||||||
|
setupDiGetDeviceInterfaceDetailW = setupapi.NewProc("SetupDiGetDeviceInterfaceDetailW")
|
||||||
|
setupDiDestroyDeviceInfoList = setupapi.NewProc("SetupDiDestroyDeviceInfoList")
|
||||||
|
)
|
||||||
|
|
||||||
|
// winBatteryGet reads one battery by index. Returns (fullCapacity, currentCapacity, state, error).
|
||||||
|
// Returns error == errNotFound when there are no more batteries.
|
||||||
|
var errNotFound = errors.New("no more batteries")
|
||||||
|
|
||||||
|
func setupDiSetup(proc *windows.LazyProc, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, error) {
|
||||||
|
_ = nargs
|
||||||
|
r1, _, errno := syscall.SyscallN(proc.Addr(), a1, a2, a3, a4, a5, a6)
|
||||||
|
if windows.Handle(r1) == windows.InvalidHandle {
|
||||||
|
if errno != 0 {
|
||||||
|
return 0, error(errno)
|
||||||
|
}
|
||||||
|
return 0, syscall.EINVAL
|
||||||
|
}
|
||||||
|
return r1, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupDiCall(proc *windows.LazyProc, nargs, a1, a2, a3, a4, a5, a6 uintptr) syscall.Errno {
|
||||||
|
_ = nargs
|
||||||
|
r1, _, errno := syscall.SyscallN(proc.Addr(), a1, a2, a3, a4, a5, a6)
|
||||||
|
if r1 == 0 {
|
||||||
|
if errno != 0 {
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return syscall.EINVAL
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func readWinBatteryState(powerState uint32) uint8 {
|
||||||
|
switch {
|
||||||
|
case powerState&0x00000004 != 0:
|
||||||
|
return stateCharging
|
||||||
|
case powerState&0x00000008 != 0:
|
||||||
|
return stateEmpty
|
||||||
|
case powerState&0x00000002 != 0:
|
||||||
|
return stateDischarging
|
||||||
|
case powerState&0x00000001 != 0:
|
||||||
|
return stateFull
|
||||||
|
default:
|
||||||
|
return stateUnknown
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func winBatteryGet(idx int) (full, current uint32, state uint8, err error) {
|
||||||
|
hdev, err := setupDiSetup(
|
||||||
|
setupDiGetClassDevsW,
|
||||||
|
4,
|
||||||
|
uintptr(unsafe.Pointer(&guidDeviceBattery)),
|
||||||
|
0, 0,
|
||||||
|
2|16, // DIGCF_PRESENT|DIGCF_DEVICEINTERFACE
|
||||||
|
0, 0,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, stateUnknown, err
|
||||||
|
}
|
||||||
|
defer syscall.SyscallN(setupDiDestroyDeviceInfoList.Addr(), hdev)
|
||||||
|
|
||||||
|
var did spDeviceInterfaceData
|
||||||
|
did.cbSize = uint32(unsafe.Sizeof(did))
|
||||||
|
errno := setupDiCall(
|
||||||
|
setupDiEnumDeviceInterfaces,
|
||||||
|
5,
|
||||||
|
hdev, 0,
|
||||||
|
uintptr(unsafe.Pointer(&guidDeviceBattery)),
|
||||||
|
uintptr(idx),
|
||||||
|
uintptr(unsafe.Pointer(&did)),
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if errno == 259 { // ERROR_NO_MORE_ITEMS
|
||||||
|
return 0, 0, stateUnknown, errNotFound
|
||||||
|
}
|
||||||
|
if errno != 0 {
|
||||||
|
return 0, 0, stateUnknown, errno
|
||||||
|
}
|
||||||
|
|
||||||
|
var cbRequired uint32
|
||||||
|
errno = setupDiCall(
|
||||||
|
setupDiGetDeviceInterfaceDetailW,
|
||||||
|
6,
|
||||||
|
hdev,
|
||||||
|
uintptr(unsafe.Pointer(&did)),
|
||||||
|
0, 0,
|
||||||
|
uintptr(unsafe.Pointer(&cbRequired)),
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if errno != 0 && errno != 122 { // ERROR_INSUFFICIENT_BUFFER
|
||||||
|
return 0, 0, stateUnknown, errno
|
||||||
|
}
|
||||||
|
didd := make([]uint16, cbRequired/2)
|
||||||
|
cbSize := (*uint32)(unsafe.Pointer(&didd[0]))
|
||||||
|
if unsafe.Sizeof(uint(0)) == 8 {
|
||||||
|
*cbSize = 8
|
||||||
|
} else {
|
||||||
|
*cbSize = 6
|
||||||
|
}
|
||||||
|
errno = setupDiCall(
|
||||||
|
setupDiGetDeviceInterfaceDetailW,
|
||||||
|
6,
|
||||||
|
hdev,
|
||||||
|
uintptr(unsafe.Pointer(&did)),
|
||||||
|
uintptr(unsafe.Pointer(&didd[0])),
|
||||||
|
uintptr(cbRequired),
|
||||||
|
uintptr(unsafe.Pointer(&cbRequired)),
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if errno != 0 {
|
||||||
|
return 0, 0, stateUnknown, errno
|
||||||
|
}
|
||||||
|
devicePath := &didd[2:][0]
|
||||||
|
|
||||||
|
handle, err := windows.CreateFile(
|
||||||
|
devicePath,
|
||||||
|
windows.GENERIC_READ|windows.GENERIC_WRITE,
|
||||||
|
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE,
|
||||||
|
nil,
|
||||||
|
windows.OPEN_EXISTING,
|
||||||
|
windows.FILE_ATTRIBUTE_NORMAL,
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, stateUnknown, err
|
||||||
|
}
|
||||||
|
defer windows.CloseHandle(handle)
|
||||||
|
|
||||||
|
var dwOut uint32
|
||||||
|
var dwWait uint32
|
||||||
|
var bqi batteryQueryInformation
|
||||||
|
err = windows.DeviceIoControl(
|
||||||
|
handle,
|
||||||
|
2703424, // IOCTL_BATTERY_QUERY_TAG
|
||||||
|
(*byte)(unsafe.Pointer(&dwWait)),
|
||||||
|
uint32(unsafe.Sizeof(dwWait)),
|
||||||
|
(*byte)(unsafe.Pointer(&bqi.BatteryTag)),
|
||||||
|
uint32(unsafe.Sizeof(bqi.BatteryTag)),
|
||||||
|
&dwOut, nil,
|
||||||
|
)
|
||||||
|
if err != nil || bqi.BatteryTag == 0 {
|
||||||
|
return 0, 0, stateUnknown, errors.New("battery tag not returned")
|
||||||
|
}
|
||||||
|
|
||||||
|
var bi batteryInformation
|
||||||
|
if err = windows.DeviceIoControl(
|
||||||
|
handle,
|
||||||
|
2703428, // IOCTL_BATTERY_QUERY_INFORMATION
|
||||||
|
(*byte)(unsafe.Pointer(&bqi)),
|
||||||
|
uint32(unsafe.Sizeof(bqi)),
|
||||||
|
(*byte)(unsafe.Pointer(&bi)),
|
||||||
|
uint32(unsafe.Sizeof(bi)),
|
||||||
|
&dwOut, nil,
|
||||||
|
); err != nil {
|
||||||
|
return 0, 0, stateUnknown, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bws := batteryWaitStatus{BatteryTag: bqi.BatteryTag}
|
||||||
|
var bs batteryStatus
|
||||||
|
if err = windows.DeviceIoControl(
|
||||||
|
handle,
|
||||||
|
2703436, // IOCTL_BATTERY_QUERY_STATUS
|
||||||
|
(*byte)(unsafe.Pointer(&bws)),
|
||||||
|
uint32(unsafe.Sizeof(bws)),
|
||||||
|
(*byte)(unsafe.Pointer(&bs)),
|
||||||
|
uint32(unsafe.Sizeof(bs)),
|
||||||
|
&dwOut, nil,
|
||||||
|
); err != nil {
|
||||||
|
return 0, 0, stateUnknown, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if bs.Capacity == 0xffffffff { // BATTERY_UNKNOWN_CAPACITY
|
||||||
|
return 0, 0, stateUnknown, errors.New("battery capacity unknown")
|
||||||
|
}
|
||||||
|
|
||||||
|
return bi.FullChargedCapacity, bs.Capacity, readWinBatteryState(bs.PowerState), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||||
|
var HasReadableBattery = sync.OnceValue(func() bool {
|
||||||
|
systemHasBattery := false
|
||||||
|
full, _, _, err := winBatteryGet(0)
|
||||||
|
if err == nil && full > 0 {
|
||||||
|
systemHasBattery = true
|
||||||
|
}
|
||||||
|
if !systemHasBattery {
|
||||||
|
slog.Debug("No battery found", "err", err)
|
||||||
|
}
|
||||||
|
return systemHasBattery
|
||||||
|
})
|
||||||
|
|
||||||
|
// GetBatteryStats returns the current battery percent and charge state.
|
||||||
|
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||||
|
if !HasReadableBattery() {
|
||||||
|
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
|
||||||
|
totalFull := uint32(0)
|
||||||
|
totalCurrent := uint32(0)
|
||||||
|
batteryState = math.MaxUint8
|
||||||
|
|
||||||
|
for i := 0; ; i++ {
|
||||||
|
full, current, state, bErr := winBatteryGet(i)
|
||||||
|
if errors.Is(bErr, errNotFound) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if bErr != nil || full == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalFull += full
|
||||||
|
totalCurrent += min(current, full)
|
||||||
|
batteryState = state
|
||||||
|
}
|
||||||
|
|
||||||
|
if totalFull == 0 || batteryState == math.MaxUint8 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryPercent = uint8(float64(totalCurrent) / float64(totalFull) * 100)
|
||||||
|
return batteryPercent, batteryState, nil
|
||||||
|
}
|
||||||
@@ -14,11 +14,13 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
|
||||||
"github.com/fxamacker/cbor/v2"
|
"github.com/fxamacker/cbor/v2"
|
||||||
"github.com/lxzan/gws"
|
"github.com/lxzan/gws"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
|
"golang.org/x/net/proxy"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -43,7 +45,7 @@ type WebSocketClient struct {
|
|||||||
// newWebSocketClient creates a new WebSocket client for the given agent.
|
// newWebSocketClient creates a new WebSocket client for the given agent.
|
||||||
// It reads configuration from environment variables and validates the hub URL.
|
// It reads configuration from environment variables and validates the hub URL.
|
||||||
func newWebSocketClient(agent *Agent) (client *WebSocketClient, err error) {
|
func newWebSocketClient(agent *Agent) (client *WebSocketClient, err error) {
|
||||||
hubURLStr, exists := GetEnv("HUB_URL")
|
hubURLStr, exists := utils.GetEnv("HUB_URL")
|
||||||
if !exists {
|
if !exists {
|
||||||
return nil, errors.New("HUB_URL environment variable not set")
|
return nil, errors.New("HUB_URL environment variable not set")
|
||||||
}
|
}
|
||||||
@@ -72,12 +74,12 @@ func newWebSocketClient(agent *Agent) (client *WebSocketClient, err error) {
|
|||||||
// If neither is set, it returns an error.
|
// If neither is set, it returns an error.
|
||||||
func getToken() (string, error) {
|
func getToken() (string, error) {
|
||||||
// get token from env var
|
// get token from env var
|
||||||
token, _ := GetEnv("TOKEN")
|
token, _ := utils.GetEnv("TOKEN")
|
||||||
if token != "" {
|
if token != "" {
|
||||||
return token, nil
|
return token, nil
|
||||||
}
|
}
|
||||||
// get token from file
|
// get token from file
|
||||||
tokenFile, _ := GetEnv("TOKEN_FILE")
|
tokenFile, _ := utils.GetEnv("TOKEN_FILE")
|
||||||
if tokenFile == "" {
|
if tokenFile == "" {
|
||||||
return "", errors.New("must set TOKEN or TOKEN_FILE")
|
return "", errors.New("must set TOKEN or TOKEN_FILE")
|
||||||
}
|
}
|
||||||
@@ -103,6 +105,11 @@ func (client *WebSocketClient) getOptions() *gws.ClientOption {
|
|||||||
}
|
}
|
||||||
client.hubURL.Path = path.Join(client.hubURL.Path, "api/beszel/agent-connect")
|
client.hubURL.Path = path.Join(client.hubURL.Path, "api/beszel/agent-connect")
|
||||||
|
|
||||||
|
// make sure BESZEL_AGENT_ALL_PROXY works (GWS only checks ALL_PROXY)
|
||||||
|
if val := os.Getenv("BESZEL_AGENT_ALL_PROXY"); val != "" {
|
||||||
|
os.Setenv("ALL_PROXY", val)
|
||||||
|
}
|
||||||
|
|
||||||
client.options = &gws.ClientOption{
|
client.options = &gws.ClientOption{
|
||||||
Addr: client.hubURL.String(),
|
Addr: client.hubURL.String(),
|
||||||
TlsConfig: &tls.Config{InsecureSkipVerify: true},
|
TlsConfig: &tls.Config{InsecureSkipVerify: true},
|
||||||
@@ -111,6 +118,9 @@ func (client *WebSocketClient) getOptions() *gws.ClientOption {
|
|||||||
"X-Token": []string{client.token},
|
"X-Token": []string{client.token},
|
||||||
"X-Beszel": []string{beszel.Version},
|
"X-Beszel": []string{beszel.Version},
|
||||||
},
|
},
|
||||||
|
NewDialer: func() (gws.Dialer, error) {
|
||||||
|
return proxy.FromEnvironment(), nil
|
||||||
|
},
|
||||||
}
|
}
|
||||||
return client.options
|
return client.options
|
||||||
}
|
}
|
||||||
@@ -197,7 +207,7 @@ func (client *WebSocketClient) handleAuthChallenge(msg *common.HubRequest[cbor.R
|
|||||||
}
|
}
|
||||||
|
|
||||||
if authRequest.NeedSysInfo {
|
if authRequest.NeedSysInfo {
|
||||||
response.Name, _ = GetEnv("SYSTEM_NAME")
|
response.Name, _ = utils.GetEnv("SYSTEM_NAME")
|
||||||
response.Hostname = client.agent.systemDetails.Hostname
|
response.Hostname = client.agent.systemDetails.Hostname
|
||||||
serverAddr := client.agent.connectionManager.serverOptions.Addr
|
serverAddr := client.agent.connectionManager.serverOptions.Addr
|
||||||
_, response.Port, _ = net.SplitHostPort(serverAddr)
|
_, response.Port, _ = net.SplitHostPort(serverAddr)
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -71,19 +70,11 @@ func TestNewWebSocketClient(t *testing.T) {
|
|||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
// Set up environment
|
// Set up environment
|
||||||
if tc.hubURL != "" {
|
if tc.hubURL != "" {
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", tc.hubURL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", tc.hubURL)
|
||||||
} else {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
}
|
}
|
||||||
if tc.token != "" {
|
if tc.token != "" {
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", tc.token)
|
t.Setenv("BESZEL_AGENT_TOKEN", tc.token)
|
||||||
} else {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}
|
}
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
|
|
||||||
@@ -139,12 +130,8 @@ func TestWebSocketClient_GetOptions(t *testing.T) {
|
|||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
// Set up environment
|
// Set up environment
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", tc.inputURL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", tc.inputURL)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -186,12 +173,8 @@ func TestWebSocketClient_VerifySignature(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Set up environment
|
// Set up environment
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -259,12 +242,8 @@ func TestWebSocketClient_HandleHubRequest(t *testing.T) {
|
|||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
|
|
||||||
// Set up environment
|
// Set up environment
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -351,13 +330,8 @@ func TestGetUserAgent(t *testing.T) {
|
|||||||
func TestWebSocketClient_Close(t *testing.T) {
|
func TestWebSocketClient_Close(t *testing.T) {
|
||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
|
|
||||||
// Set up environment
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -372,13 +346,8 @@ func TestWebSocketClient_Close(t *testing.T) {
|
|||||||
func TestWebSocketClient_ConnectRateLimit(t *testing.T) {
|
func TestWebSocketClient_ConnectRateLimit(t *testing.T) {
|
||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
|
|
||||||
// Set up environment
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -394,20 +363,10 @@ func TestWebSocketClient_ConnectRateLimit(t *testing.T) {
|
|||||||
|
|
||||||
// TestGetToken tests the getToken function with various scenarios
|
// TestGetToken tests the getToken function with various scenarios
|
||||||
func TestGetToken(t *testing.T) {
|
func TestGetToken(t *testing.T) {
|
||||||
unsetEnvVars := func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
os.Unsetenv("TOKEN")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN_FILE")
|
|
||||||
os.Unsetenv("TOKEN_FILE")
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("token from TOKEN environment variable", func(t *testing.T) {
|
t.Run("token from TOKEN environment variable", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Set TOKEN env var
|
// Set TOKEN env var
|
||||||
expectedToken := "test-token-from-env"
|
expectedToken := "test-token-from-env"
|
||||||
os.Setenv("TOKEN", expectedToken)
|
t.Setenv("TOKEN", expectedToken)
|
||||||
defer os.Unsetenv("TOKEN")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -415,12 +374,9 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("token from BESZEL_AGENT_TOKEN environment variable", func(t *testing.T) {
|
t.Run("token from BESZEL_AGENT_TOKEN environment variable", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Set BESZEL_AGENT_TOKEN env var (should take precedence)
|
// Set BESZEL_AGENT_TOKEN env var (should take precedence)
|
||||||
expectedToken := "test-token-from-beszel-env"
|
expectedToken := "test-token-from-beszel-env"
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", expectedToken)
|
t.Setenv("BESZEL_AGENT_TOKEN", expectedToken)
|
||||||
defer os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -428,8 +384,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("token from TOKEN_FILE", func(t *testing.T) {
|
t.Run("token from TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create a temporary token file
|
// Create a temporary token file
|
||||||
expectedToken := "test-token-from-file"
|
expectedToken := "test-token-from-file"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -441,8 +395,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
// Set TOKEN_FILE env var
|
// Set TOKEN_FILE env var
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -450,8 +403,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("token from BESZEL_AGENT_TOKEN_FILE", func(t *testing.T) {
|
t.Run("token from BESZEL_AGENT_TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create a temporary token file
|
// Create a temporary token file
|
||||||
expectedToken := "test-token-from-beszel-file"
|
expectedToken := "test-token-from-beszel-file"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -463,8 +414,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
// Set BESZEL_AGENT_TOKEN_FILE env var (should take precedence)
|
// Set BESZEL_AGENT_TOKEN_FILE env var (should take precedence)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN_FILE", tokenFile.Name())
|
t.Setenv("BESZEL_AGENT_TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("BESZEL_AGENT_TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -472,8 +422,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("TOKEN takes precedence over TOKEN_FILE", func(t *testing.T) {
|
t.Run("TOKEN takes precedence over TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create a temporary token file
|
// Create a temporary token file
|
||||||
fileToken := "token-from-file"
|
fileToken := "token-from-file"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -486,12 +434,8 @@ func TestGetToken(t *testing.T) {
|
|||||||
|
|
||||||
// Set both TOKEN and TOKEN_FILE
|
// Set both TOKEN and TOKEN_FILE
|
||||||
envToken := "token-from-env"
|
envToken := "token-from-env"
|
||||||
os.Setenv("TOKEN", envToken)
|
t.Setenv("TOKEN", envToken)
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("TOKEN")
|
|
||||||
os.Unsetenv("TOKEN_FILE")
|
|
||||||
}()
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -499,7 +443,10 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("error when neither TOKEN nor TOKEN_FILE is set", func(t *testing.T) {
|
t.Run("error when neither TOKEN nor TOKEN_FILE is set", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
t.Setenv("BESZEL_AGENT_TOKEN", "")
|
||||||
|
t.Setenv("TOKEN", "")
|
||||||
|
t.Setenv("BESZEL_AGENT_TOKEN_FILE", "")
|
||||||
|
t.Setenv("TOKEN_FILE", "")
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
@@ -508,11 +455,8 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("error when TOKEN_FILE points to non-existent file", func(t *testing.T) {
|
t.Run("error when TOKEN_FILE points to non-existent file", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Set TOKEN_FILE to a non-existent file
|
// Set TOKEN_FILE to a non-existent file
|
||||||
os.Setenv("TOKEN_FILE", "/non/existent/file.txt")
|
t.Setenv("TOKEN_FILE", "/non/existent/file.txt")
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
@@ -521,8 +465,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("handles empty token file", func(t *testing.T) {
|
t.Run("handles empty token file", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create an empty token file
|
// Create an empty token file
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -530,8 +472,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
// Set TOKEN_FILE env var
|
// Set TOKEN_FILE env var
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -539,8 +480,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("strips whitespace from TOKEN_FILE", func(t *testing.T) {
|
t.Run("strips whitespace from TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
tokenWithWhitespace := " test-token-with-whitespace \n\t"
|
tokenWithWhitespace := " test-token-with-whitespace \n\t"
|
||||||
expectedToken := "test-token-with-whitespace"
|
expectedToken := "test-token-with-whitespace"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -551,8 +490,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|||||||
@@ -1,14 +1,18 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/agent/health"
|
"github.com/henrygd/beszel/agent/health"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -91,8 +95,8 @@ func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
|||||||
c.eventChan = make(chan ConnectionEvent, 1)
|
c.eventChan = make(chan ConnectionEvent, 1)
|
||||||
|
|
||||||
// signal handling for shutdown
|
// signal handling for shutdown
|
||||||
sigChan := make(chan os.Signal, 1)
|
sigCtx, stopSignals := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||||
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
|
defer stopSignals()
|
||||||
|
|
||||||
c.startWsTicker()
|
c.startWsTicker()
|
||||||
c.connect()
|
c.connect()
|
||||||
@@ -109,13 +113,36 @@ func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
|||||||
_ = c.startWebSocketConnection()
|
_ = c.startWebSocketConnection()
|
||||||
case <-healthTicker:
|
case <-healthTicker:
|
||||||
_ = health.Update()
|
_ = health.Update()
|
||||||
case <-sigChan:
|
case <-sigCtx.Done():
|
||||||
slog.Info("Shutting down")
|
slog.Info("Shutting down", "cause", context.Cause(sigCtx))
|
||||||
|
return c.stop()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// stop does not stop the connection manager itself, just any active connections. The manager will attempt to reconnect after stopping, so this should only be called immediately before shutting down the entire agent.
|
||||||
|
//
|
||||||
|
// If we need or want to expose a graceful Stop method in the future, do something like this to actually stop the manager:
|
||||||
|
//
|
||||||
|
// func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
||||||
|
// ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
// c.cancel = cancel
|
||||||
|
//
|
||||||
|
// for {
|
||||||
|
// select {
|
||||||
|
// case <-ctx.Done():
|
||||||
|
// return c.stop()
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// func (c *ConnectionManager) Stop() {
|
||||||
|
// c.cancel()
|
||||||
|
// }
|
||||||
|
func (c *ConnectionManager) stop() error {
|
||||||
_ = c.agent.StopServer()
|
_ = c.agent.StopServer()
|
||||||
c.closeWebSocket()
|
c.closeWebSocket()
|
||||||
return health.CleanUp()
|
return health.CleanUp()
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleEvent processes connection events and updates the connection state accordingly.
|
// handleEvent processes connection events and updates the connection state accordingly.
|
||||||
@@ -185,10 +212,17 @@ func (c *ConnectionManager) connect() {
|
|||||||
|
|
||||||
// Try WebSocket first, if it fails, start SSH server
|
// Try WebSocket first, if it fails, start SSH server
|
||||||
err := c.startWebSocketConnection()
|
err := c.startWebSocketConnection()
|
||||||
if err != nil && c.State == Disconnected {
|
if err != nil {
|
||||||
|
if shouldExitOnErr(err) {
|
||||||
|
time.Sleep(2 * time.Second) // prevent tight restart loop
|
||||||
|
_ = c.stop()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if c.State == Disconnected {
|
||||||
c.startSSHServer()
|
c.startSSHServer()
|
||||||
c.startWsTicker()
|
c.startWsTicker()
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// startWebSocketConnection attempts to establish a WebSocket connection to the hub.
|
// startWebSocketConnection attempts to establish a WebSocket connection to the hub.
|
||||||
@@ -224,3 +258,14 @@ func (c *ConnectionManager) closeWebSocket() {
|
|||||||
c.wsClient.Close()
|
c.wsClient.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// shouldExitOnErr checks if the error is a DNS resolution failure and if the
|
||||||
|
// EXIT_ON_DNS_ERROR env var is set. https://github.com/henrygd/beszel/issues/1924.
|
||||||
|
func shouldExitOnErr(err error) bool {
|
||||||
|
if val, _ := utils.GetEnv("EXIT_ON_DNS_ERROR"); val == "true" {
|
||||||
|
if opErr, ok := errors.AsType[*net.OpError](err); ok {
|
||||||
|
return strings.Contains(opErr.Err.Error(), "lookup")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,14 +1,13 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -184,10 +183,6 @@ func TestConnectionManager_TickerManagement(t *testing.T) {
|
|||||||
|
|
||||||
// TestConnectionManager_WebSocketConnectionFlow tests WebSocket connection logic
|
// TestConnectionManager_WebSocketConnectionFlow tests WebSocket connection logic
|
||||||
func TestConnectionManager_WebSocketConnectionFlow(t *testing.T) {
|
func TestConnectionManager_WebSocketConnectionFlow(t *testing.T) {
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("Skipping WebSocket connection test in short mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
cm := agent.connectionManager
|
cm := agent.connectionManager
|
||||||
|
|
||||||
@@ -197,19 +192,18 @@ func TestConnectionManager_WebSocketConnectionFlow(t *testing.T) {
|
|||||||
assert.Equal(t, Disconnected, cm.State, "State should remain Disconnected after failed connection")
|
assert.Equal(t, Disconnected, cm.State, "State should remain Disconnected after failed connection")
|
||||||
|
|
||||||
// Test with invalid URL
|
// Test with invalid URL
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "invalid-url")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "1,33%")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Test with missing token
|
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
|
|
||||||
_, err2 := newWebSocketClient(agent)
|
_, err2 := newWebSocketClient(agent)
|
||||||
assert.Error(t, err2, "WebSocket client creation should fail without token")
|
assert.Error(t, err2, "WebSocket client creation should fail with invalid URL")
|
||||||
|
|
||||||
|
// Test with missing token
|
||||||
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
|
t.Setenv("BESZEL_AGENT_TOKEN", "")
|
||||||
|
|
||||||
|
_, err3 := newWebSocketClient(agent)
|
||||||
|
assert.Error(t, err3, "WebSocket client creation should fail without token")
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestConnectionManager_ReconnectionLogic tests reconnection prevention logic
|
// TestConnectionManager_ReconnectionLogic tests reconnection prevention logic
|
||||||
@@ -235,12 +229,8 @@ func TestConnectionManager_ConnectWithRateLimit(t *testing.T) {
|
|||||||
cm := agent.connectionManager
|
cm := agent.connectionManager
|
||||||
|
|
||||||
// Set up environment for WebSocket client creation
|
// Set up environment for WebSocket client creation
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Create WebSocket client
|
// Create WebSocket client
|
||||||
wsClient, err := newWebSocketClient(agent)
|
wsClient, err := newWebSocketClient(agent)
|
||||||
@@ -286,12 +276,8 @@ func TestConnectionManager_CloseWebSocket(t *testing.T) {
|
|||||||
}, "Should not panic when closing nil WebSocket client")
|
}, "Should not panic when closing nil WebSocket client")
|
||||||
|
|
||||||
// Set up environment and create WebSocket client
|
// Set up environment and create WebSocket client
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
wsClient, err := newWebSocketClient(agent)
|
wsClient, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -313,3 +299,65 @@ func TestConnectionManager_ConnectFlow(t *testing.T) {
|
|||||||
cm.connect()
|
cm.connect()
|
||||||
}, "Connect should not panic without WebSocket client")
|
}, "Connect should not panic without WebSocket client")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestShouldExitOnErr(t *testing.T) {
|
||||||
|
createDialErr := func(msg string) error {
|
||||||
|
return &net.OpError{
|
||||||
|
Op: "dial",
|
||||||
|
Net: "tcp",
|
||||||
|
Err: errors.New(msg),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
err error
|
||||||
|
envValue string
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "no env var",
|
||||||
|
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||||
|
envValue: "",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "env var false",
|
||||||
|
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||||
|
envValue: "false",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "env var true, matching error",
|
||||||
|
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||||
|
envValue: "true",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "env var true, matching error with extra context",
|
||||||
|
err: createDialErr("lookup beszel.server.lan on [::1]:53: read udp [::1]:44557->[::1]:53: read: connection refused"),
|
||||||
|
envValue: "true",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "env var true, non-matching error",
|
||||||
|
err: errors.New("connection refused"),
|
||||||
|
envValue: "true",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "env var true, dial but not lookup",
|
||||||
|
err: createDialErr("connection timeout"),
|
||||||
|
envValue: "true",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Setenv("EXIT_ON_DNS_ERROR", tt.envValue)
|
||||||
|
result := shouldExitOnErr(tt.err)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -14,10 +14,10 @@ var lastPerCoreCpuTimes = make(map[uint16][]cpu.TimesStat)
|
|||||||
// init initializes the CPU monitoring by storing the initial CPU times
|
// init initializes the CPU monitoring by storing the initial CPU times
|
||||||
// for the default 60-second cache interval.
|
// for the default 60-second cache interval.
|
||||||
func init() {
|
func init() {
|
||||||
if times, err := cpu.Times(false); err == nil {
|
if times, err := cpu.Times(false); err == nil && len(times) > 0 {
|
||||||
lastCpuTimes[60000] = times[0]
|
lastCpuTimes[60000] = times[0]
|
||||||
}
|
}
|
||||||
if perCoreTimes, err := cpu.Times(true); err == nil {
|
if perCoreTimes, err := cpu.Times(true); err == nil && len(perCoreTimes) > 0 {
|
||||||
lastPerCoreCpuTimes[60000] = perCoreTimes
|
lastPerCoreCpuTimes[60000] = perCoreTimes
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -89,10 +89,7 @@ func getPerCoreCpuUsage(cacheTimeMs uint16) (system.Uint8Slice, error) {
|
|||||||
lastTimes := lastPerCoreCpuTimes[cacheTimeMs]
|
lastTimes := lastPerCoreCpuTimes[cacheTimeMs]
|
||||||
|
|
||||||
// Limit to the number of cores available in both samples
|
// Limit to the number of cores available in both samples
|
||||||
length := len(perCoreTimes)
|
length := min(len(lastTimes), len(perCoreTimes))
|
||||||
if len(lastTimes) < length {
|
|
||||||
length = len(lastTimes)
|
|
||||||
}
|
|
||||||
|
|
||||||
usage := make([]uint8, length)
|
usage := make([]uint8, length)
|
||||||
for i := 0; i < length; i++ {
|
for i := 0; i < length; i++ {
|
||||||
|
|||||||
@@ -6,6 +6,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// GetDataDir returns the path to the data directory for the agent and an error
|
// GetDataDir returns the path to the data directory for the agent and an error
|
||||||
@@ -16,7 +18,7 @@ func GetDataDir(dataDirs ...string) (string, error) {
|
|||||||
return testDataDirs(dataDirs)
|
return testDataDirs(dataDirs)
|
||||||
}
|
}
|
||||||
|
|
||||||
dataDir, _ := GetEnv("DATA_DIR")
|
dataDir, _ := utils.GetEnv("DATA_DIR")
|
||||||
if dataDir != "" {
|
if dataDir != "" {
|
||||||
dataDirs = append(dataDirs, dataDir)
|
dataDirs = append(dataDirs, dataDir)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -40,17 +39,7 @@ func TestGetDataDir(t *testing.T) {
|
|||||||
t.Run("DATA_DIR environment variable", func(t *testing.T) {
|
t.Run("DATA_DIR environment variable", func(t *testing.T) {
|
||||||
tempDir := t.TempDir()
|
tempDir := t.TempDir()
|
||||||
|
|
||||||
// Set environment variable
|
t.Setenv("BESZEL_AGENT_DATA_DIR", tempDir)
|
||||||
oldValue := os.Getenv("DATA_DIR")
|
|
||||||
defer func() {
|
|
||||||
if oldValue == "" {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_DATA_DIR")
|
|
||||||
} else {
|
|
||||||
os.Setenv("BESZEL_AGENT_DATA_DIR", oldValue)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
os.Setenv("BESZEL_AGENT_DATA_DIR", tempDir)
|
|
||||||
|
|
||||||
result, err := GetDataDir()
|
result, err := GetDataDir()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -66,17 +55,6 @@ func TestGetDataDir(t *testing.T) {
|
|||||||
|
|
||||||
// Test fallback behavior (empty dataDir, no env var)
|
// Test fallback behavior (empty dataDir, no env var)
|
||||||
t.Run("fallback to default directories", func(t *testing.T) {
|
t.Run("fallback to default directories", func(t *testing.T) {
|
||||||
// Clear DATA_DIR environment variable
|
|
||||||
oldValue := os.Getenv("DATA_DIR")
|
|
||||||
defer func() {
|
|
||||||
if oldValue == "" {
|
|
||||||
os.Unsetenv("DATA_DIR")
|
|
||||||
} else {
|
|
||||||
os.Setenv("DATA_DIR", oldValue)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
os.Unsetenv("DATA_DIR")
|
|
||||||
|
|
||||||
// This will try platform-specific defaults, which may or may not work
|
// This will try platform-specific defaults, which may or may not work
|
||||||
// We're mainly testing that it doesn't panic and returns some result
|
// We're mainly testing that it doesn't panic and returns some result
|
||||||
result, err := GetDataDir()
|
result, err := GetDataDir()
|
||||||
|
|||||||
653
agent/disk.go
653
agent/disk.go
@@ -1,6 +1,7 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -8,11 +9,59 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/shirou/gopsutil/v4/disk"
|
"github.com/shirou/gopsutil/v4/disk"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// fsRegistrationContext holds the shared lookup state needed to resolve a
|
||||||
|
// filesystem into the tracked fsStats key and metadata.
|
||||||
|
type fsRegistrationContext struct {
|
||||||
|
filesystem string // value of optional FILESYSTEM env var
|
||||||
|
isWindows bool
|
||||||
|
efPath string // path to extra filesystems (default "/extra-filesystems")
|
||||||
|
diskIoCounters map[string]disk.IOCountersStat
|
||||||
|
}
|
||||||
|
|
||||||
|
// diskDiscovery groups the transient state for a single initializeDiskInfo run so
|
||||||
|
// helper methods can share the same partitions, mount paths, and lookup functions
|
||||||
|
type diskDiscovery struct {
|
||||||
|
agent *Agent
|
||||||
|
rootMountPoint string
|
||||||
|
partitions []disk.PartitionStat
|
||||||
|
usageFn func(string) (*disk.UsageStat, error)
|
||||||
|
ctx fsRegistrationContext
|
||||||
|
}
|
||||||
|
|
||||||
|
// prevDisk stores previous per-device disk counters for a given cache interval
|
||||||
|
type prevDisk struct {
|
||||||
|
readBytes uint64
|
||||||
|
writeBytes uint64
|
||||||
|
readTime uint64 // cumulative ms spent on reads (from ReadTime)
|
||||||
|
writeTime uint64 // cumulative ms spent on writes (from WriteTime)
|
||||||
|
ioTime uint64 // cumulative ms spent doing I/O (from IoTime)
|
||||||
|
weightedIO uint64 // cumulative weighted ms (queue-depth × ms, from WeightedIO)
|
||||||
|
readCount uint64 // cumulative read operation count
|
||||||
|
writeCount uint64 // cumulative write operation count
|
||||||
|
at time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// prevDiskFromCounter creates a prevDisk snapshot from a disk.IOCountersStat at time t.
|
||||||
|
func prevDiskFromCounter(d disk.IOCountersStat, t time.Time) prevDisk {
|
||||||
|
return prevDisk{
|
||||||
|
readBytes: d.ReadBytes,
|
||||||
|
writeBytes: d.WriteBytes,
|
||||||
|
readTime: d.ReadTime,
|
||||||
|
writeTime: d.WriteTime,
|
||||||
|
ioTime: d.IoTime,
|
||||||
|
weightedIO: d.WeightedIO,
|
||||||
|
readCount: d.ReadCount,
|
||||||
|
writeCount: d.WriteCount,
|
||||||
|
at: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
||||||
// Returns the device/filesystem part and the custom name part
|
// Returns the device/filesystem part and the custom name part
|
||||||
func parseFilesystemEntry(entry string) (device, customName string) {
|
func parseFilesystemEntry(entry string) (device, customName string) {
|
||||||
@@ -26,23 +75,236 @@ func parseFilesystemEntry(entry string) (device, customName string) {
|
|||||||
return device, customName
|
return device, customName
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// extraFilesystemPartitionInfo derives the I/O device and optional display name
|
||||||
|
// for a mounted /extra-filesystems partition. Prefer the partition device reported
|
||||||
|
// by the system and only use the folder name for custom naming metadata.
|
||||||
|
func extraFilesystemPartitionInfo(p disk.PartitionStat) (device, customName string) {
|
||||||
|
device = strings.TrimSpace(p.Device)
|
||||||
|
folderDevice, customName := parseFilesystemEntry(filepath.Base(p.Mountpoint))
|
||||||
|
if device == "" {
|
||||||
|
device = folderDevice
|
||||||
|
}
|
||||||
|
return device, customName
|
||||||
|
}
|
||||||
|
|
||||||
func isDockerSpecialMountpoint(mountpoint string) bool {
|
func isDockerSpecialMountpoint(mountpoint string) bool {
|
||||||
switch mountpoint {
|
switch mountpoint {
|
||||||
case "/etc/hosts", "/etc/resolv.conf", "/etc/hostname":
|
case "/etc/hosts", "/etc/resolv.conf", "/etc/hostname":
|
||||||
return true
|
return true
|
||||||
default:
|
}
|
||||||
return false
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// registerFilesystemStats resolves the tracked key and stats payload for a
|
||||||
|
// filesystem before it is inserted into fsStats.
|
||||||
|
func registerFilesystemStats(existing map[string]*system.FsStats, device, mountpoint string, root bool, customName string, ctx fsRegistrationContext) (string, *system.FsStats, bool) {
|
||||||
|
key := device
|
||||||
|
if !ctx.isWindows {
|
||||||
|
key = filepath.Base(device)
|
||||||
|
}
|
||||||
|
|
||||||
|
if root {
|
||||||
|
// Try to map root device to a diskIoCounters entry. First checks for an
|
||||||
|
// exact key match, then uses findIoDevice for normalized / prefix-based
|
||||||
|
// matching (e.g. nda0p2 -> nda0), and finally falls back to FILESYSTEM.
|
||||||
|
if _, ioMatch := ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
if matchedKey, match := findIoDevice(key, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
} else if ctx.filesystem != "" {
|
||||||
|
if matchedKey, match := findIoDevice(ctx.filesystem, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ioMatch = ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
slog.Warn("Root I/O unmapped; set FILESYSTEM", "device", device, "mountpoint", mountpoint)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Check if non-root has diskstats and prefer the folder device for
|
||||||
|
// /extra-filesystems mounts when the discovered partition device is a
|
||||||
|
// mapper path (e.g. luks UUID) that obscures the underlying block device.
|
||||||
|
if _, ioMatch := ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
if strings.HasPrefix(mountpoint, ctx.efPath) {
|
||||||
|
folderDevice, _ := parseFilesystemEntry(filepath.Base(mountpoint))
|
||||||
|
if folderDevice != "" {
|
||||||
|
if matchedKey, match := findIoDevice(folderDevice, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ioMatch = ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
if matchedKey, match := findIoDevice(key, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, exists := existing[key]; exists {
|
||||||
|
return "", nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
fsStats := &system.FsStats{Root: root, Mountpoint: mountpoint}
|
||||||
|
if customName != "" {
|
||||||
|
fsStats.Name = customName
|
||||||
|
}
|
||||||
|
return key, fsStats, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// addFsStat inserts a discovered filesystem if it resolves to a new tracking
|
||||||
|
// key. The key selection itself lives in buildFsStatRegistration so that logic
|
||||||
|
// can stay directly unit-tested.
|
||||||
|
func (d *diskDiscovery) addFsStat(device, mountpoint string, root bool, customName string) {
|
||||||
|
key, fsStats, ok := registerFilesystemStats(d.agent.fsStats, device, mountpoint, root, customName, d.ctx)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.agent.fsStats[key] = fsStats
|
||||||
|
name := key
|
||||||
|
if customName != "" {
|
||||||
|
name = customName
|
||||||
|
}
|
||||||
|
slog.Info("Detected disk", "name", name, "device", device, "mount", mountpoint, "io", key, "root", root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// addConfiguredRootFs resolves FILESYSTEM against partitions first, then falls
|
||||||
|
// back to direct diskstats matching for setups like ZFS where partitions do not
|
||||||
|
// expose the physical device name.
|
||||||
|
func (d *diskDiscovery) addConfiguredRootFs() bool {
|
||||||
|
if d.ctx.filesystem == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range d.partitions {
|
||||||
|
if filesystemMatchesPartitionSetting(d.ctx.filesystem, p) {
|
||||||
|
d.addFsStat(p.Device, p.Mountpoint, true, "")
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FILESYSTEM may name a physical disk absent from partitions (e.g. ZFS lists
|
||||||
|
// dataset paths like zroot/ROOT/default, not block devices).
|
||||||
|
if ioKey, match := findIoDevice(d.ctx.filesystem, d.ctx.diskIoCounters); match {
|
||||||
|
d.agent.fsStats[ioKey] = &system.FsStats{Root: true, Mountpoint: d.rootMountPoint}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Warn("Partition details not found", "filesystem", d.ctx.filesystem)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isRootFallbackPartition(p disk.PartitionStat, rootMountPoint string) bool {
|
||||||
|
return p.Mountpoint == rootMountPoint ||
|
||||||
|
(isDockerSpecialMountpoint(p.Mountpoint) && strings.HasPrefix(p.Device, "/dev"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// addPartitionRootFs handles the non-configured root fallback path when a
|
||||||
|
// partition looks like the active root mount but still needs translating to an
|
||||||
|
// I/O device key.
|
||||||
|
func (d *diskDiscovery) addPartitionRootFs(device, mountpoint string) bool {
|
||||||
|
fs, match := findIoDevice(filepath.Base(device), d.ctx.diskIoCounters)
|
||||||
|
if !match {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// The resolved I/O device is already known here, so use it directly to avoid
|
||||||
|
// a second fallback search inside buildFsStatRegistration.
|
||||||
|
d.addFsStat(fs, mountpoint, true, "")
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// addLastResortRootFs is only used when neither FILESYSTEM nor partition-based
|
||||||
|
// heuristics can identify root, so it picks the busiest I/O device as a final
|
||||||
|
// fallback and preserves the root mountpoint for usage collection.
|
||||||
|
func (d *diskDiscovery) addLastResortRootFs() {
|
||||||
|
rootKey := mostActiveIoDevice(d.ctx.diskIoCounters)
|
||||||
|
if rootKey != "" {
|
||||||
|
slog.Warn("Using most active device for root I/O; set FILESYSTEM to override", "device", rootKey)
|
||||||
|
} else {
|
||||||
|
rootKey = filepath.Base(d.rootMountPoint)
|
||||||
|
if _, exists := d.agent.fsStats[rootKey]; exists {
|
||||||
|
rootKey = "root"
|
||||||
|
}
|
||||||
|
slog.Warn("Root I/O device not detected; set FILESYSTEM to override")
|
||||||
|
}
|
||||||
|
d.agent.fsStats[rootKey] = &system.FsStats{Root: true, Mountpoint: d.rootMountPoint}
|
||||||
|
}
|
||||||
|
|
||||||
|
// findPartitionByFilesystemSetting matches an EXTRA_FILESYSTEMS entry against a
|
||||||
|
// discovered partition either by mountpoint or by device suffix.
|
||||||
|
func findPartitionByFilesystemSetting(filesystem string, partitions []disk.PartitionStat) (disk.PartitionStat, bool) {
|
||||||
|
for _, p := range partitions {
|
||||||
|
if strings.HasSuffix(p.Device, filesystem) || p.Mountpoint == filesystem {
|
||||||
|
return p, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return disk.PartitionStat{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// addConfiguredExtraFsEntry resolves one EXTRA_FILESYSTEMS entry, preferring a
|
||||||
|
// discovered partition and falling back to any path that disk.Usage accepts.
|
||||||
|
func (d *diskDiscovery) addConfiguredExtraFsEntry(filesystem, customName string) {
|
||||||
|
if p, found := findPartitionByFilesystemSetting(filesystem, d.partitions); found {
|
||||||
|
d.addFsStat(p.Device, p.Mountpoint, false, customName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := d.usageFn(filesystem); err == nil {
|
||||||
|
d.addFsStat(filepath.Base(filesystem), filesystem, false, customName)
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
slog.Error("Invalid filesystem", "name", filesystem, "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addConfiguredExtraFilesystems parses and registers the comma-separated
|
||||||
|
// EXTRA_FILESYSTEMS env var entries.
|
||||||
|
func (d *diskDiscovery) addConfiguredExtraFilesystems(extraFilesystems string) {
|
||||||
|
for fsEntry := range strings.SplitSeq(extraFilesystems, ",") {
|
||||||
|
filesystem, customName := parseFilesystemEntry(fsEntry)
|
||||||
|
d.addConfiguredExtraFsEntry(filesystem, customName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addPartitionExtraFs registers partitions mounted under /extra-filesystems so
|
||||||
|
// their display names can come from the folder name while their I/O keys still
|
||||||
|
// prefer the underlying partition device. Only direct children are matched to
|
||||||
|
// avoid registering nested virtual mounts (e.g. /proc, /sys) that are returned by
|
||||||
|
// disk.Partitions(true) when the host root is bind-mounted in /extra-filesystems.
|
||||||
|
func (d *diskDiscovery) addPartitionExtraFs(p disk.PartitionStat) {
|
||||||
|
if filepath.Dir(p.Mountpoint) != d.ctx.efPath {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
device, customName := extraFilesystemPartitionInfo(p)
|
||||||
|
d.addFsStat(device, p.Mountpoint, false, customName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// addExtraFilesystemFolders handles bare directories under /extra-filesystems
|
||||||
|
// that may not appear in partition discovery, while skipping mountpoints that
|
||||||
|
// were already registered from higher-fidelity sources.
|
||||||
|
func (d *diskDiscovery) addExtraFilesystemFolders(folderNames []string) {
|
||||||
|
existingMountpoints := make(map[string]bool, len(d.agent.fsStats))
|
||||||
|
for _, stats := range d.agent.fsStats {
|
||||||
|
existingMountpoints[stats.Mountpoint] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, folderName := range folderNames {
|
||||||
|
mountpoint := filepath.Join(d.ctx.efPath, folderName)
|
||||||
|
slog.Debug("/extra-filesystems", "mountpoint", mountpoint)
|
||||||
|
if existingMountpoints[mountpoint] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
device, customName := parseFilesystemEntry(folderName)
|
||||||
|
d.addFsStat(device, mountpoint, false, customName)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sets up the filesystems to monitor for disk usage and I/O.
|
// Sets up the filesystems to monitor for disk usage and I/O.
|
||||||
func (a *Agent) initializeDiskInfo() {
|
func (a *Agent) initializeDiskInfo() {
|
||||||
filesystem, _ := GetEnv("FILESYSTEM")
|
filesystem, _ := utils.GetEnv("FILESYSTEM")
|
||||||
efPath := "/extra-filesystems"
|
|
||||||
hasRoot := false
|
hasRoot := false
|
||||||
isWindows := runtime.GOOS == "windows"
|
isWindows := runtime.GOOS == "windows"
|
||||||
|
|
||||||
partitions, err := disk.Partitions(false)
|
partitions, err := disk.PartitionsWithContext(context.Background(), true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error("Error getting disk partitions", "err", err)
|
slog.Error("Error getting disk partitions", "err", err)
|
||||||
}
|
}
|
||||||
@@ -55,164 +317,223 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ioContext := context.WithValue(a.sensorsContext,
|
|
||||||
// common.EnvKey, common.EnvMap{common.HostProcEnvKey: "/tmp/testproc"},
|
|
||||||
// )
|
|
||||||
// diskIoCounters, err := disk.IOCountersWithContext(ioContext)
|
|
||||||
|
|
||||||
diskIoCounters, err := disk.IOCounters()
|
diskIoCounters, err := disk.IOCounters()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error("Error getting diskstats", "err", err)
|
slog.Error("Error getting diskstats", "err", err)
|
||||||
}
|
}
|
||||||
slog.Debug("Disk I/O", "diskstats", diskIoCounters)
|
slog.Debug("Disk I/O", "diskstats", diskIoCounters)
|
||||||
|
ctx := fsRegistrationContext{
|
||||||
// Helper function to add a filesystem to fsStats if it doesn't exist
|
filesystem: filesystem,
|
||||||
addFsStat := func(device, mountpoint string, root bool, customName ...string) {
|
isWindows: isWindows,
|
||||||
var key string
|
diskIoCounters: diskIoCounters,
|
||||||
if isWindows {
|
efPath: "/extra-filesystems",
|
||||||
key = device
|
|
||||||
} else {
|
|
||||||
key = filepath.Base(device)
|
|
||||||
}
|
|
||||||
var ioMatch bool
|
|
||||||
if _, exists := a.fsStats[key]; !exists {
|
|
||||||
if root {
|
|
||||||
slog.Info("Detected root device", "name", key)
|
|
||||||
// Check if root device is in /proc/diskstats. Do not guess a
|
|
||||||
// fallback device for root: that can misattribute root I/O to a
|
|
||||||
// different disk while usage remains tied to root mountpoint.
|
|
||||||
if _, ioMatch = diskIoCounters[key]; !ioMatch {
|
|
||||||
if matchedKey, match := findIoDevice(filesystem, diskIoCounters); match {
|
|
||||||
key = matchedKey
|
|
||||||
ioMatch = true
|
|
||||||
} else {
|
|
||||||
slog.Warn("Root I/O unmapped; set FILESYSTEM", "device", device, "mountpoint", mountpoint)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Check if non-root has diskstats and fall back to folder name if not
|
|
||||||
// Scenario: device is encrypted and named luks-2bcb02be-999d-4417-8d18-5c61e660fb6e - not in /proc/diskstats.
|
|
||||||
// However, the device can be specified by mounting folder from luks device at /extra-filesystems/sda1
|
|
||||||
if _, ioMatch = diskIoCounters[key]; !ioMatch {
|
|
||||||
efBase := filepath.Base(mountpoint)
|
|
||||||
if _, ioMatch = diskIoCounters[efBase]; ioMatch {
|
|
||||||
key = efBase
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fsStats := &system.FsStats{Root: root, Mountpoint: mountpoint}
|
|
||||||
if len(customName) > 0 && customName[0] != "" {
|
|
||||||
fsStats.Name = customName[0]
|
|
||||||
}
|
|
||||||
a.fsStats[key] = fsStats
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the appropriate root mount point for this system
|
// Get the appropriate root mount point for this system
|
||||||
rootMountPoint := a.getRootMountPoint()
|
discovery := diskDiscovery{
|
||||||
|
agent: a,
|
||||||
|
rootMountPoint: a.getRootMountPoint(),
|
||||||
|
partitions: partitions,
|
||||||
|
usageFn: disk.Usage,
|
||||||
|
ctx: ctx,
|
||||||
|
}
|
||||||
|
|
||||||
// Use FILESYSTEM env var to find root filesystem
|
hasRoot = discovery.addConfiguredRootFs()
|
||||||
if filesystem != "" {
|
|
||||||
for _, p := range partitions {
|
|
||||||
if strings.HasSuffix(p.Device, filesystem) || p.Mountpoint == filesystem {
|
|
||||||
addFsStat(p.Device, p.Mountpoint, true)
|
|
||||||
hasRoot = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !hasRoot {
|
|
||||||
slog.Warn("Partition details not found", "filesystem", filesystem)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add EXTRA_FILESYSTEMS env var values to fsStats
|
// Add EXTRA_FILESYSTEMS env var values to fsStats
|
||||||
if extraFilesystems, exists := GetEnv("EXTRA_FILESYSTEMS"); exists {
|
if extraFilesystems, exists := utils.GetEnv("EXTRA_FILESYSTEMS"); exists {
|
||||||
for _, fsEntry := range strings.Split(extraFilesystems, ",") {
|
discovery.addConfiguredExtraFilesystems(extraFilesystems)
|
||||||
// Parse custom name from format: device__customname
|
|
||||||
fs, customName := parseFilesystemEntry(fsEntry)
|
|
||||||
|
|
||||||
found := false
|
|
||||||
for _, p := range partitions {
|
|
||||||
if strings.HasSuffix(p.Device, fs) || p.Mountpoint == fs {
|
|
||||||
addFsStat(p.Device, p.Mountpoint, false, customName)
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// if not in partitions, test if we can get disk usage
|
|
||||||
if !found {
|
|
||||||
if _, err := disk.Usage(fs); err == nil {
|
|
||||||
addFsStat(filepath.Base(fs), fs, false, customName)
|
|
||||||
} else {
|
|
||||||
slog.Error("Invalid filesystem", "name", fs, "err", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process partitions for various mount points
|
// Process partitions for various mount points
|
||||||
for _, p := range partitions {
|
for _, p := range partitions {
|
||||||
// fmt.Println(p.Device, p.Mountpoint)
|
if !hasRoot && isRootFallbackPartition(p, discovery.rootMountPoint) {
|
||||||
// Binary root fallback or docker root fallback
|
hasRoot = discovery.addPartitionRootFs(p.Device, p.Mountpoint)
|
||||||
if !hasRoot && (p.Mountpoint == rootMountPoint || (isDockerSpecialMountpoint(p.Mountpoint) && strings.HasPrefix(p.Device, "/dev"))) {
|
|
||||||
fs, match := findIoDevice(filepath.Base(p.Device), diskIoCounters)
|
|
||||||
if match {
|
|
||||||
addFsStat(fs, p.Mountpoint, true)
|
|
||||||
hasRoot = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if device is in /extra-filesystems
|
|
||||||
if strings.HasPrefix(p.Mountpoint, efPath) {
|
|
||||||
device, customName := parseFilesystemEntry(p.Mountpoint)
|
|
||||||
addFsStat(device, p.Mountpoint, false, customName)
|
|
||||||
}
|
}
|
||||||
|
discovery.addPartitionExtraFs(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check all folders in /extra-filesystems and add them if not already present
|
// Check all folders in /extra-filesystems and add them if not already present
|
||||||
if folders, err := os.ReadDir(efPath); err == nil {
|
if folders, err := os.ReadDir(discovery.ctx.efPath); err == nil {
|
||||||
existingMountpoints := make(map[string]bool)
|
folderNames := make([]string, 0, len(folders))
|
||||||
for _, stats := range a.fsStats {
|
|
||||||
existingMountpoints[stats.Mountpoint] = true
|
|
||||||
}
|
|
||||||
for _, folder := range folders {
|
for _, folder := range folders {
|
||||||
if folder.IsDir() {
|
if folder.IsDir() {
|
||||||
mountpoint := filepath.Join(efPath, folder.Name())
|
folderNames = append(folderNames, folder.Name())
|
||||||
slog.Debug("/extra-filesystems", "mountpoint", mountpoint)
|
|
||||||
if !existingMountpoints[mountpoint] {
|
|
||||||
device, customName := parseFilesystemEntry(folder.Name())
|
|
||||||
addFsStat(device, mountpoint, false, customName)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
discovery.addExtraFilesystemFolders(folderNames)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If no root filesystem set, use fallback
|
// If no root filesystem set, try the most active I/O device as a last
|
||||||
|
// resort (e.g. ZFS where dataset names are unrelated to disk names).
|
||||||
if !hasRoot {
|
if !hasRoot {
|
||||||
rootKey := filepath.Base(rootMountPoint)
|
discovery.addLastResortRootFs()
|
||||||
if _, exists := a.fsStats[rootKey]; exists {
|
|
||||||
rootKey = "root"
|
|
||||||
}
|
|
||||||
slog.Warn("Root device not detected; root I/O disabled", "mountpoint", rootMountPoint)
|
|
||||||
a.fsStats[rootKey] = &system.FsStats{Root: true, Mountpoint: rootMountPoint}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
a.pruneDuplicateRootExtraFilesystems()
|
||||||
a.initializeDiskIoStats(diskIoCounters)
|
a.initializeDiskIoStats(diskIoCounters)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns matching device from /proc/diskstats.
|
// Removes extra filesystems that mirror root usage (https://github.com/henrygd/beszel/issues/1428).
|
||||||
// bool is true if a match was found.
|
func (a *Agent) pruneDuplicateRootExtraFilesystems() {
|
||||||
|
var rootMountpoint string
|
||||||
|
for _, stats := range a.fsStats {
|
||||||
|
if stats != nil && stats.Root {
|
||||||
|
rootMountpoint = stats.Mountpoint
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if rootMountpoint == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rootUsage, err := disk.Usage(rootMountpoint)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for name, stats := range a.fsStats {
|
||||||
|
if stats == nil || stats.Root {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
extraUsage, err := disk.Usage(stats.Mountpoint)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if hasSameDiskUsage(rootUsage, extraUsage) {
|
||||||
|
slog.Info("Ignoring duplicate FS", "name", name, "mount", stats.Mountpoint)
|
||||||
|
delete(a.fsStats, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasSameDiskUsage compares root/extra usage with a small byte tolerance.
|
||||||
|
func hasSameDiskUsage(a, b *disk.UsageStat) bool {
|
||||||
|
if a == nil || b == nil || a.Total == 0 || b.Total == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Allow minor drift between sequential disk usage calls.
|
||||||
|
const toleranceBytes uint64 = 16 * 1024 * 1024
|
||||||
|
return withinUsageTolerance(a.Total, b.Total, toleranceBytes) &&
|
||||||
|
withinUsageTolerance(a.Used, b.Used, toleranceBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// withinUsageTolerance reports whether two byte values differ by at most tolerance.
|
||||||
|
func withinUsageTolerance(a, b, tolerance uint64) bool {
|
||||||
|
if a >= b {
|
||||||
|
return a-b <= tolerance
|
||||||
|
}
|
||||||
|
return b-a <= tolerance
|
||||||
|
}
|
||||||
|
|
||||||
|
type ioMatchCandidate struct {
|
||||||
|
name string
|
||||||
|
bytes uint64
|
||||||
|
ops uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// findIoDevice prefers exact device/label matches, then falls back to a
|
||||||
|
// prefix-related candidate with the highest recent activity.
|
||||||
func findIoDevice(filesystem string, diskIoCounters map[string]disk.IOCountersStat) (string, bool) {
|
func findIoDevice(filesystem string, diskIoCounters map[string]disk.IOCountersStat) (string, bool) {
|
||||||
|
filesystem = normalizeDeviceName(filesystem)
|
||||||
|
if filesystem == "" {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
candidates := []ioMatchCandidate{}
|
||||||
|
|
||||||
for _, d := range diskIoCounters {
|
for _, d := range diskIoCounters {
|
||||||
if d.Name == filesystem || (d.Label != "" && d.Label == filesystem) {
|
if normalizeDeviceName(d.Name) == filesystem || (d.Label != "" && normalizeDeviceName(d.Label) == filesystem) {
|
||||||
return d.Name, true
|
return d.Name, true
|
||||||
}
|
}
|
||||||
|
if prefixRelated(normalizeDeviceName(d.Name), filesystem) ||
|
||||||
|
(d.Label != "" && prefixRelated(normalizeDeviceName(d.Label), filesystem)) {
|
||||||
|
candidates = append(candidates, ioMatchCandidate{
|
||||||
|
name: d.Name,
|
||||||
|
bytes: d.ReadBytes + d.WriteBytes,
|
||||||
|
ops: d.ReadCount + d.WriteCount,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(candidates) == 0 {
|
||||||
return "", false
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
best := candidates[0]
|
||||||
|
for _, c := range candidates[1:] {
|
||||||
|
if c.bytes > best.bytes ||
|
||||||
|
(c.bytes == best.bytes && c.ops > best.ops) ||
|
||||||
|
(c.bytes == best.bytes && c.ops == best.ops && c.name < best.name) {
|
||||||
|
best = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("Using disk I/O fallback", "requested", filesystem, "selected", best.name)
|
||||||
|
return best.name, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// mostActiveIoDevice returns the device with the highest I/O activity,
|
||||||
|
// or "" if diskIoCounters is empty.
|
||||||
|
func mostActiveIoDevice(diskIoCounters map[string]disk.IOCountersStat) string {
|
||||||
|
var best ioMatchCandidate
|
||||||
|
for _, d := range diskIoCounters {
|
||||||
|
c := ioMatchCandidate{
|
||||||
|
name: d.Name,
|
||||||
|
bytes: d.ReadBytes + d.WriteBytes,
|
||||||
|
ops: d.ReadCount + d.WriteCount,
|
||||||
|
}
|
||||||
|
if best.name == "" || c.bytes > best.bytes ||
|
||||||
|
(c.bytes == best.bytes && c.ops > best.ops) ||
|
||||||
|
(c.bytes == best.bytes && c.ops == best.ops && c.name < best.name) {
|
||||||
|
best = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return best.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// prefixRelated reports whether either identifier is a prefix of the other.
|
||||||
|
func prefixRelated(a, b string) bool {
|
||||||
|
if a == "" || b == "" || a == b {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return strings.HasPrefix(a, b) || strings.HasPrefix(b, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
// filesystemMatchesPartitionSetting checks whether a FILESYSTEM env var value
|
||||||
|
// matches a partition by mountpoint, exact device name, or prefix relationship
|
||||||
|
// (e.g. FILESYSTEM=ada0 matches partition /dev/ada0p2).
|
||||||
|
func filesystemMatchesPartitionSetting(filesystem string, p disk.PartitionStat) bool {
|
||||||
|
filesystem = strings.TrimSpace(filesystem)
|
||||||
|
if filesystem == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if p.Mountpoint == filesystem {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
fsName := normalizeDeviceName(filesystem)
|
||||||
|
partName := normalizeDeviceName(p.Device)
|
||||||
|
if fsName == "" || partName == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if fsName == partName {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return prefixRelated(partName, fsName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeDeviceName canonicalizes device strings for comparisons.
|
||||||
|
func normalizeDeviceName(value string) string {
|
||||||
|
name := filepath.Base(strings.TrimSpace(value))
|
||||||
|
if name == "." {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return name
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sets start values for disk I/O stats.
|
// Sets start values for disk I/O stats.
|
||||||
func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersStat) {
|
func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersStat) {
|
||||||
|
a.fsNames = a.fsNames[:0]
|
||||||
|
now := time.Now()
|
||||||
for device, stats := range a.fsStats {
|
for device, stats := range a.fsStats {
|
||||||
// skip if not in diskIoCounters
|
// skip if not in diskIoCounters
|
||||||
d, exists := diskIoCounters[device]
|
d, exists := diskIoCounters[device]
|
||||||
@@ -221,7 +542,7 @@ func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersS
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// populate initial values
|
// populate initial values
|
||||||
stats.Time = time.Now()
|
stats.Time = now
|
||||||
stats.TotalRead = d.ReadBytes
|
stats.TotalRead = d.ReadBytes
|
||||||
stats.TotalWrite = d.WriteBytes
|
stats.TotalWrite = d.WriteBytes
|
||||||
// add to list of valid io device names
|
// add to list of valid io device names
|
||||||
@@ -245,12 +566,12 @@ func (a *Agent) updateDiskUsage(systemStats *system.Stats) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if d, err := disk.Usage(stats.Mountpoint); err == nil {
|
if d, err := disk.Usage(stats.Mountpoint); err == nil {
|
||||||
stats.DiskTotal = bytesToGigabytes(d.Total)
|
stats.DiskTotal = utils.BytesToGigabytes(d.Total)
|
||||||
stats.DiskUsed = bytesToGigabytes(d.Used)
|
stats.DiskUsed = utils.BytesToGigabytes(d.Used)
|
||||||
if stats.Root {
|
if stats.Root {
|
||||||
systemStats.DiskTotal = bytesToGigabytes(d.Total)
|
systemStats.DiskTotal = utils.BytesToGigabytes(d.Total)
|
||||||
systemStats.DiskUsed = bytesToGigabytes(d.Used)
|
systemStats.DiskUsed = utils.BytesToGigabytes(d.Used)
|
||||||
systemStats.DiskPct = twoDecimals(d.UsedPercent)
|
systemStats.DiskPct = utils.TwoDecimals(d.UsedPercent)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// reset stats if error (likely unmounted)
|
// reset stats if error (likely unmounted)
|
||||||
@@ -288,36 +609,72 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
|||||||
prev, hasPrev := a.diskPrev[cacheTimeMs][name]
|
prev, hasPrev := a.diskPrev[cacheTimeMs][name]
|
||||||
if !hasPrev {
|
if !hasPrev {
|
||||||
// Seed from agent-level fsStats if present, else seed from current
|
// Seed from agent-level fsStats if present, else seed from current
|
||||||
prev = prevDisk{readBytes: stats.TotalRead, writeBytes: stats.TotalWrite, at: stats.Time}
|
prev = prevDisk{
|
||||||
|
readBytes: stats.TotalRead,
|
||||||
|
writeBytes: stats.TotalWrite,
|
||||||
|
readTime: d.ReadTime,
|
||||||
|
writeTime: d.WriteTime,
|
||||||
|
ioTime: d.IoTime,
|
||||||
|
weightedIO: d.WeightedIO,
|
||||||
|
readCount: d.ReadCount,
|
||||||
|
writeCount: d.WriteCount,
|
||||||
|
at: stats.Time,
|
||||||
|
}
|
||||||
if prev.at.IsZero() {
|
if prev.at.IsZero() {
|
||||||
prev = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
prev = prevDiskFromCounter(d, now)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
msElapsed := uint64(now.Sub(prev.at).Milliseconds())
|
msElapsed := uint64(now.Sub(prev.at).Milliseconds())
|
||||||
|
|
||||||
|
// Update per-interval snapshot
|
||||||
|
a.diskPrev[cacheTimeMs][name] = prevDiskFromCounter(d, now)
|
||||||
|
|
||||||
|
// Avoid division by zero or clock issues
|
||||||
if msElapsed < 100 {
|
if msElapsed < 100 {
|
||||||
// Avoid division by zero or clock issues; update snapshot and continue
|
|
||||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
diskIORead := (d.ReadBytes - prev.readBytes) * 1000 / msElapsed
|
diskIORead := (d.ReadBytes - prev.readBytes) * 1000 / msElapsed
|
||||||
diskIOWrite := (d.WriteBytes - prev.writeBytes) * 1000 / msElapsed
|
diskIOWrite := (d.WriteBytes - prev.writeBytes) * 1000 / msElapsed
|
||||||
readMbPerSecond := bytesToMegabytes(float64(diskIORead))
|
readMbPerSecond := utils.BytesToMegabytes(float64(diskIORead))
|
||||||
writeMbPerSecond := bytesToMegabytes(float64(diskIOWrite))
|
writeMbPerSecond := utils.BytesToMegabytes(float64(diskIOWrite))
|
||||||
|
|
||||||
// validate values
|
// validate values
|
||||||
if readMbPerSecond > 50_000 || writeMbPerSecond > 50_000 {
|
if readMbPerSecond > 50_000 || writeMbPerSecond > 50_000 {
|
||||||
slog.Warn("Invalid disk I/O. Resetting.", "name", d.Name, "read", readMbPerSecond, "write", writeMbPerSecond)
|
slog.Warn("Invalid disk I/O. Resetting.", "name", d.Name, "read", readMbPerSecond, "write", writeMbPerSecond)
|
||||||
// Reset interval snapshot and seed from current
|
|
||||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
|
||||||
// also refresh agent baseline to avoid future negatives
|
// also refresh agent baseline to avoid future negatives
|
||||||
a.initializeDiskIoStats(ioCounters)
|
a.initializeDiskIoStats(ioCounters)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update per-interval snapshot
|
// These properties are calculated differently on different platforms,
|
||||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
// but generally represent cumulative time spent doing reads/writes on the device.
|
||||||
|
// This can surpass 100% if there are multiple concurrent I/O operations.
|
||||||
|
// Linux kernel docs:
|
||||||
|
// This is the total number of milliseconds spent by all reads (as
|
||||||
|
// measured from __make_request() to end_that_request_last()).
|
||||||
|
// https://www.kernel.org/doc/Documentation/iostats.txt (fields 4, 8)
|
||||||
|
diskReadTime := utils.TwoDecimals(float64(d.ReadTime-prev.readTime) / float64(msElapsed) * 100)
|
||||||
|
diskWriteTime := utils.TwoDecimals(float64(d.WriteTime-prev.writeTime) / float64(msElapsed) * 100)
|
||||||
|
|
||||||
|
// I/O utilization %: fraction of wall time the device had any I/O in progress (0-100).
|
||||||
|
diskIoUtilPct := utils.TwoDecimals(float64(d.IoTime-prev.ioTime) / float64(msElapsed) * 100)
|
||||||
|
|
||||||
|
// Weighted I/O: queue-depth weighted I/O time, normalized to interval (can exceed 100%).
|
||||||
|
// Linux kernel field 11: incremented by iops_in_progress × ms_since_last_update.
|
||||||
|
// Used to display queue depth. Multipled by 100 to increase accuracy of digit truncation (divided by 100 in UI).
|
||||||
|
diskWeightedIO := utils.TwoDecimals(float64(d.WeightedIO-prev.weightedIO) / float64(msElapsed) * 100)
|
||||||
|
|
||||||
|
// r_await / w_await: average time per read/write operation in milliseconds.
|
||||||
|
// Equivalent to r_await and w_await in iostat.
|
||||||
|
var rAwait, wAwait float64
|
||||||
|
if deltaReadCount := d.ReadCount - prev.readCount; deltaReadCount > 0 {
|
||||||
|
rAwait = utils.TwoDecimals(float64(d.ReadTime-prev.readTime) / float64(deltaReadCount))
|
||||||
|
}
|
||||||
|
if deltaWriteCount := d.WriteCount - prev.writeCount; deltaWriteCount > 0 {
|
||||||
|
wAwait = utils.TwoDecimals(float64(d.WriteTime-prev.writeTime) / float64(deltaWriteCount))
|
||||||
|
}
|
||||||
|
|
||||||
// Update global fsStats baseline for cross-interval correctness
|
// Update global fsStats baseline for cross-interval correctness
|
||||||
stats.Time = now
|
stats.Time = now
|
||||||
@@ -327,20 +684,40 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
|||||||
stats.DiskWritePs = writeMbPerSecond
|
stats.DiskWritePs = writeMbPerSecond
|
||||||
stats.DiskReadBytes = diskIORead
|
stats.DiskReadBytes = diskIORead
|
||||||
stats.DiskWriteBytes = diskIOWrite
|
stats.DiskWriteBytes = diskIOWrite
|
||||||
|
stats.DiskIoStats[0] = diskReadTime
|
||||||
|
stats.DiskIoStats[1] = diskWriteTime
|
||||||
|
stats.DiskIoStats[2] = diskIoUtilPct
|
||||||
|
stats.DiskIoStats[3] = rAwait
|
||||||
|
stats.DiskIoStats[4] = wAwait
|
||||||
|
stats.DiskIoStats[5] = diskWeightedIO
|
||||||
|
|
||||||
if stats.Root {
|
if stats.Root {
|
||||||
systemStats.DiskReadPs = stats.DiskReadPs
|
systemStats.DiskReadPs = stats.DiskReadPs
|
||||||
systemStats.DiskWritePs = stats.DiskWritePs
|
systemStats.DiskWritePs = stats.DiskWritePs
|
||||||
systemStats.DiskIO[0] = diskIORead
|
systemStats.DiskIO[0] = diskIORead
|
||||||
systemStats.DiskIO[1] = diskIOWrite
|
systemStats.DiskIO[1] = diskIOWrite
|
||||||
|
systemStats.DiskIoStats[0] = diskReadTime
|
||||||
|
systemStats.DiskIoStats[1] = diskWriteTime
|
||||||
|
systemStats.DiskIoStats[2] = diskIoUtilPct
|
||||||
|
systemStats.DiskIoStats[3] = rAwait
|
||||||
|
systemStats.DiskIoStats[4] = wAwait
|
||||||
|
systemStats.DiskIoStats[5] = diskWeightedIO
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// getRootMountPoint returns the appropriate root mount point for the system
|
// getRootMountPoint returns the appropriate root mount point for the system.
|
||||||
|
// On Windows it returns the system drive (e.g. "C:").
|
||||||
// For immutable systems like Fedora Silverblue, it returns /sysroot instead of /
|
// For immutable systems like Fedora Silverblue, it returns /sysroot instead of /
|
||||||
func (a *Agent) getRootMountPoint() string {
|
func (a *Agent) getRootMountPoint() string {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
if sd := os.Getenv("SystemDrive"); sd != "" {
|
||||||
|
return sd
|
||||||
|
}
|
||||||
|
return "C:"
|
||||||
|
}
|
||||||
|
|
||||||
// 1. Check if /etc/os-release contains indicators of an immutable system
|
// 1. Check if /etc/os-release contains indicators of an immutable system
|
||||||
if osReleaseContent, err := os.ReadFile("/etc/os-release"); err == nil {
|
if osReleaseContent, err := os.ReadFile("/etc/os-release"); err == nil {
|
||||||
content := string(osReleaseContent)
|
content := string(osReleaseContent)
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -94,6 +93,524 @@ func TestParseFilesystemEntry(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestExtraFilesystemPartitionInfo(t *testing.T) {
|
||||||
|
t.Run("uses partition device for label-only mountpoint", func(t *testing.T) {
|
||||||
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
|
Device: "/dev/sdc",
|
||||||
|
Mountpoint: "/extra-filesystems/Share",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, "/dev/sdc", device)
|
||||||
|
assert.Equal(t, "", customName)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses custom name from mountpoint suffix", func(t *testing.T) {
|
||||||
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
|
Device: "/dev/sdc",
|
||||||
|
Mountpoint: "/extra-filesystems/sdc__Share",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, "/dev/sdc", device)
|
||||||
|
assert.Equal(t, "Share", customName)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to folder device when partition device is unavailable", func(t *testing.T) {
|
||||||
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
|
Mountpoint: "/extra-filesystems/sdc__Share",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, "sdc", device)
|
||||||
|
assert.Equal(t, "Share", customName)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("supports custom name without folder device prefix", func(t *testing.T) {
|
||||||
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
|
Device: "/dev/sdc",
|
||||||
|
Mountpoint: "/extra-filesystems/__Share",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, "/dev/sdc", device)
|
||||||
|
assert.Equal(t, "Share", customName)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuildFsStatRegistration(t *testing.T) {
|
||||||
|
t.Run("uses basename for non-windows exact io match", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/sda1",
|
||||||
|
"/mnt/data",
|
||||||
|
false,
|
||||||
|
"archive",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda1": {Name: "sda1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "sda1", key)
|
||||||
|
assert.Equal(t, "/mnt/data", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "archive", stats.Name)
|
||||||
|
assert.False(t, stats.Root)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("maps root partition to io device by prefix", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/ada0p2",
|
||||||
|
"/",
|
||||||
|
true,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"ada0": {Name: "ada0", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "ada0", key)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses filesystem setting as root fallback", func(t *testing.T) {
|
||||||
|
key, _, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"overlay",
|
||||||
|
"/",
|
||||||
|
true,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
filesystem: "nvme0n1p2",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nvme0n1": {Name: "nvme0n1", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "nvme0n1", key)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("prefers parsed extra-filesystems device over mapper device", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/mapper/luks-2bcb02be-999d-4417-8d18-5c61e660fb6e",
|
||||||
|
"/extra-filesystems/nvme0n1p2__Archive",
|
||||||
|
false,
|
||||||
|
"Archive",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"dm-1": {Name: "dm-1", Label: "luks-2bcb02be-999d-4417-8d18-5c61e660fb6e"},
|
||||||
|
"nvme0n1p2": {Name: "nvme0n1p2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "nvme0n1p2", key)
|
||||||
|
assert.Equal(t, "Archive", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to mapper io device when folder device cannot be resolved", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/mapper/luks-2bcb02be-999d-4417-8d18-5c61e660fb6e",
|
||||||
|
"/extra-filesystems/Archive",
|
||||||
|
false,
|
||||||
|
"Archive",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"dm-1": {Name: "dm-1", Label: "luks-2bcb02be-999d-4417-8d18-5c61e660fb6e"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "dm-1", key)
|
||||||
|
assert.Equal(t, "Archive", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses full device name on windows", func(t *testing.T) {
|
||||||
|
key, _, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
`C:`,
|
||||||
|
`C:\\`,
|
||||||
|
false,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: true,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
`C:`: {Name: `C:`},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, `C:`, key)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("skips existing key", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{"sda1": {Mountpoint: "/existing"}},
|
||||||
|
"/dev/sda1",
|
||||||
|
"/mnt/data",
|
||||||
|
false,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda1": {Name: "sda1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Empty(t, key)
|
||||||
|
assert.Nil(t, stats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddConfiguredRootFs(t *testing.T) {
|
||||||
|
t.Run("adds root from matching partition", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
rootMountPoint: "/",
|
||||||
|
partitions: []disk.PartitionStat{{Device: "/dev/ada0p2", Mountpoint: "/"}},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
filesystem: "/dev/ada0p2",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"ada0": {Name: "ada0", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addConfiguredRootFs()
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
stats, exists := agent.fsStats["ada0"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("adds root from io device when partition is missing", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
rootMountPoint: "/sysroot",
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
filesystem: "zroot",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nda0": {Name: "nda0", Label: "zroot", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addConfiguredRootFs()
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
stats, exists := agent.fsStats["nda0"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/sysroot", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns false when filesystem cannot be resolved", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
rootMountPoint: "/",
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
filesystem: "missing-disk",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addConfiguredRootFs()
|
||||||
|
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddPartitionRootFs(t *testing.T) {
|
||||||
|
t.Run("adds root from fallback partition candidate", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nvme0n1": {Name: "nvme0n1", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addPartitionRootFs("/dev/nvme0n1p2", "/")
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
stats, exists := agent.fsStats["nvme0n1"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns false when no io device matches", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{agent: agent, ctx: fsRegistrationContext{diskIoCounters: map[string]disk.IOCountersStat{}}}
|
||||||
|
|
||||||
|
ok := discovery.addPartitionRootFs("/dev/mapper/root", "/")
|
||||||
|
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddLastResortRootFs(t *testing.T) {
|
||||||
|
t.Run("uses most active io device when available", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{agent: agent, rootMountPoint: "/", ctx: fsRegistrationContext{diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda", ReadBytes: 5000, WriteBytes: 5000},
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
}}}
|
||||||
|
|
||||||
|
discovery.addLastResortRootFs()
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["sda"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to root key when mountpoint basename collides", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: map[string]*system.FsStats{
|
||||||
|
"sysroot": {Mountpoint: "/extra-filesystems/sysroot"},
|
||||||
|
}}
|
||||||
|
discovery := diskDiscovery{agent: agent, rootMountPoint: "/sysroot", ctx: fsRegistrationContext{diskIoCounters: map[string]disk.IOCountersStat{}}}
|
||||||
|
|
||||||
|
discovery.addLastResortRootFs()
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["root"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/sysroot", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddConfiguredExtraFsEntry(t *testing.T) {
|
||||||
|
t.Run("uses matching partition when present", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
partitions: []disk.PartitionStat{{Device: "/dev/sdb1", Mountpoint: "/mnt/backup"}},
|
||||||
|
usageFn: func(string) (*disk.UsageStat, error) {
|
||||||
|
t.Fatal("usage fallback should not be called when partition matches")
|
||||||
|
return nil, nil
|
||||||
|
},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sdb1": {Name: "sdb1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFsEntry("sdb1", "backup")
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["sdb1"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/mnt/backup", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "backup", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to usage-validated path", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
usageFn: func(path string) (*disk.UsageStat, error) {
|
||||||
|
assert.Equal(t, "/srv/archive", path)
|
||||||
|
return &disk.UsageStat{}, nil
|
||||||
|
},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"archive": {Name: "archive"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFsEntry("/srv/archive", "archive")
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["archive"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/srv/archive", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "archive", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ignores invalid filesystem entry", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
usageFn: func(string) (*disk.UsageStat, error) {
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFsEntry("/missing/archive", "")
|
||||||
|
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddConfiguredExtraFilesystems(t *testing.T) {
|
||||||
|
t.Run("parses and registers multiple configured filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
partitions: []disk.PartitionStat{{Device: "/dev/sda1", Mountpoint: "/mnt/fast"}},
|
||||||
|
usageFn: func(path string) (*disk.UsageStat, error) {
|
||||||
|
if path == "/srv/archive" {
|
||||||
|
return &disk.UsageStat{}, nil
|
||||||
|
}
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda1": {Name: "sda1"},
|
||||||
|
"archive": {Name: "archive"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFilesystems("sda1__fast,/srv/archive__cold")
|
||||||
|
|
||||||
|
assert.Contains(t, agent.fsStats, "sda1")
|
||||||
|
assert.Equal(t, "fast", agent.fsStats["sda1"].Name)
|
||||||
|
assert.Contains(t, agent.fsStats, "archive")
|
||||||
|
assert.Equal(t, "cold", agent.fsStats["archive"].Name)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddExtraFilesystemFolders(t *testing.T) {
|
||||||
|
t.Run("adds missing folders and skips existing mountpoints", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: map[string]*system.FsStats{
|
||||||
|
"existing": {Mountpoint: "/extra-filesystems/existing"},
|
||||||
|
}}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
efPath: "/extra-filesystems",
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"newdisk": {Name: "newdisk"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addExtraFilesystemFolders([]string{"existing", "newdisk__Archive"})
|
||||||
|
|
||||||
|
assert.Len(t, agent.fsStats, 2)
|
||||||
|
stats, exists := agent.fsStats["newdisk"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/extra-filesystems/newdisk__Archive", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "Archive", stats.Name)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddPartitionExtraFs(t *testing.T) {
|
||||||
|
makeDiscovery := func(agent *Agent) diskDiscovery {
|
||||||
|
return diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
efPath: "/extra-filesystems",
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nvme0n1p1": {Name: "nvme0n1p1"},
|
||||||
|
"nvme1n1": {Name: "nvme1n1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("registers direct child of extra-filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
d.addPartitionExtraFs(disk.PartitionStat{
|
||||||
|
Device: "/dev/nvme0n1p1",
|
||||||
|
Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root",
|
||||||
|
})
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["nvme0n1p1"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/extra-filesystems/nvme0n1p1__caddy1-root", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "caddy1-root", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("skips nested mount under extra-filesystem bind mount", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
// These simulate the virtual mounts that appear when host / is bind-mounted
|
||||||
|
// with disk.Partitions(all=true) — e.g. /proc, /sys, /dev visible under the mount.
|
||||||
|
for _, nested := range []string{
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/proc",
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/sys",
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/dev",
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/run",
|
||||||
|
} {
|
||||||
|
d.addPartitionExtraFs(disk.PartitionStat{Device: "tmpfs", Mountpoint: nested})
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("registers both direct children, skips their nested mounts", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
partitions := []disk.PartitionStat{
|
||||||
|
{Device: "/dev/nvme0n1p1", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root"},
|
||||||
|
{Device: "/dev/nvme1n1", Mountpoint: "/extra-filesystems/nvme1n1__caddy1-docker"},
|
||||||
|
{Device: "proc", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/proc"},
|
||||||
|
{Device: "sysfs", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/sys"},
|
||||||
|
{Device: "overlay", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/var/lib/docker"},
|
||||||
|
}
|
||||||
|
for _, p := range partitions {
|
||||||
|
d.addPartitionExtraFs(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Len(t, agent.fsStats, 2)
|
||||||
|
assert.Equal(t, "caddy1-root", agent.fsStats["nvme0n1p1"].Name)
|
||||||
|
assert.Equal(t, "caddy1-docker", agent.fsStats["nvme1n1"].Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("skips partition not under extra-filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
d.addPartitionExtraFs(disk.PartitionStat{
|
||||||
|
Device: "/dev/nvme0n1p1",
|
||||||
|
Mountpoint: "/",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestFindIoDevice(t *testing.T) {
|
func TestFindIoDevice(t *testing.T) {
|
||||||
t.Run("matches by device name", func(t *testing.T) {
|
t.Run("matches by device name", func(t *testing.T) {
|
||||||
ioCounters := map[string]disk.IOCountersStat{
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
@@ -117,7 +634,7 @@ func TestFindIoDevice(t *testing.T) {
|
|||||||
assert.Equal(t, "sda", device)
|
assert.Equal(t, "sda", device)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("returns no fallback when not found", func(t *testing.T) {
|
t.Run("returns no match when not found", func(t *testing.T) {
|
||||||
ioCounters := map[string]disk.IOCountersStat{
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
"sda": {Name: "sda"},
|
"sda": {Name: "sda"},
|
||||||
"sdb": {Name: "sdb"},
|
"sdb": {Name: "sdb"},
|
||||||
@@ -127,6 +644,106 @@ func TestFindIoDevice(t *testing.T) {
|
|||||||
assert.False(t, ok)
|
assert.False(t, ok)
|
||||||
assert.Equal(t, "", device)
|
assert.Equal(t, "", device)
|
||||||
})
|
})
|
||||||
|
|
||||||
|
t.Run("uses uncertain unique prefix fallback", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"nvme0n1": {Name: "nvme0n1"},
|
||||||
|
"sda": {Name: "sda"},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("nvme0n1p2", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "nvme0n1", device)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses dominant activity when prefix matches are ambiguous", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda", ReadBytes: 5000, WriteBytes: 5000, ReadCount: 100, WriteCount: 100},
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 1000, WriteBytes: 1000, ReadCount: 50, WriteCount: 50},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("sd", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "sda", device)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses highest activity when ambiguous without dominance", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda", ReadBytes: 3000, WriteBytes: 3000, ReadCount: 50, WriteCount: 50},
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 2500, WriteBytes: 2500, ReadCount: 40, WriteCount: 40},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("sd", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "sda", device)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("matches /dev/-prefixed partition to parent disk", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"nda0": {Name: "nda0", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("/dev/nda0p2", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "nda0", device)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses deterministic name tie-breaker", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 2000, WriteBytes: 2000, ReadCount: 10, WriteCount: 10},
|
||||||
|
"sda": {Name: "sda", ReadBytes: 2000, WriteBytes: 2000, ReadCount: 10, WriteCount: 10},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("sd", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "sda", device)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFilesystemMatchesPartitionSetting(t *testing.T) {
|
||||||
|
p := disk.PartitionStat{Device: "/dev/ada0p2", Mountpoint: "/"}
|
||||||
|
|
||||||
|
t.Run("matches mountpoint setting", func(t *testing.T) {
|
||||||
|
assert.True(t, filesystemMatchesPartitionSetting("/", p))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("matches exact partition setting", func(t *testing.T) {
|
||||||
|
assert.True(t, filesystemMatchesPartitionSetting("ada0p2", p))
|
||||||
|
assert.True(t, filesystemMatchesPartitionSetting("/dev/ada0p2", p))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("matches prefix-style parent setting", func(t *testing.T) {
|
||||||
|
assert.True(t, filesystemMatchesPartitionSetting("ada0", p))
|
||||||
|
assert.True(t, filesystemMatchesPartitionSetting("/dev/ada0", p))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("does not match unrelated device", func(t *testing.T) {
|
||||||
|
assert.False(t, filesystemMatchesPartitionSetting("sda", p))
|
||||||
|
assert.False(t, filesystemMatchesPartitionSetting("nvme0n1", p))
|
||||||
|
assert.False(t, filesystemMatchesPartitionSetting("", p))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMostActiveIoDevice(t *testing.T) {
|
||||||
|
t.Run("returns most active device", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"nda0": {Name: "nda0", ReadBytes: 5000, WriteBytes: 5000, ReadCount: 100, WriteCount: 100},
|
||||||
|
"nda1": {Name: "nda1", ReadBytes: 1000, WriteBytes: 1000, ReadCount: 50, WriteCount: 50},
|
||||||
|
}
|
||||||
|
assert.Equal(t, "nda0", mostActiveIoDevice(ioCounters))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses deterministic tie-breaker", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 1000, WriteBytes: 1000, ReadCount: 10, WriteCount: 10},
|
||||||
|
"sda": {Name: "sda", ReadBytes: 1000, WriteBytes: 1000, ReadCount: 10, WriteCount: 10},
|
||||||
|
}
|
||||||
|
assert.Equal(t, "sda", mostActiveIoDevice(ioCounters))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns empty for empty map", func(t *testing.T) {
|
||||||
|
assert.Equal(t, "", mostActiveIoDevice(map[string]disk.IOCountersStat{}))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestIsDockerSpecialMountpoint(t *testing.T) {
|
func TestIsDockerSpecialMountpoint(t *testing.T) {
|
||||||
@@ -151,18 +768,8 @@ func TestIsDockerSpecialMountpoint(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
||||||
// Set up environment variables
|
|
||||||
oldEnv := os.Getenv("EXTRA_FILESYSTEMS")
|
|
||||||
defer func() {
|
|
||||||
if oldEnv != "" {
|
|
||||||
os.Setenv("EXTRA_FILESYSTEMS", oldEnv)
|
|
||||||
} else {
|
|
||||||
os.Unsetenv("EXTRA_FILESYSTEMS")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Test with custom names
|
// Test with custom names
|
||||||
os.Setenv("EXTRA_FILESYSTEMS", "sda1__my-storage,/dev/sdb1__backup-drive,nvme0n1p2")
|
t.Setenv("EXTRA_FILESYSTEMS", "sda1__my-storage,/dev/sdb1__backup-drive,nvme0n1p2")
|
||||||
|
|
||||||
// Mock disk partitions (we'll just test the parsing logic)
|
// Mock disk partitions (we'll just test the parsing logic)
|
||||||
// Since the actual disk operations are system-dependent, we'll focus on the parsing
|
// Since the actual disk operations are system-dependent, we'll focus on the parsing
|
||||||
@@ -190,7 +797,7 @@ func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
|||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run("env_"+tc.envValue, func(t *testing.T) {
|
t.Run("env_"+tc.envValue, func(t *testing.T) {
|
||||||
os.Setenv("EXTRA_FILESYSTEMS", tc.envValue)
|
t.Setenv("EXTRA_FILESYSTEMS", tc.envValue)
|
||||||
|
|
||||||
// Create mock partitions that would match our test cases
|
// Create mock partitions that would match our test cases
|
||||||
partitions := []disk.PartitionStat{}
|
partitions := []disk.PartitionStat{}
|
||||||
@@ -211,7 +818,7 @@ func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
|||||||
// Test the parsing logic by calling the relevant part
|
// Test the parsing logic by calling the relevant part
|
||||||
// We'll create a simplified version to test just the parsing
|
// We'll create a simplified version to test just the parsing
|
||||||
extraFilesystems := tc.envValue
|
extraFilesystems := tc.envValue
|
||||||
for _, fsEntry := range strings.Split(extraFilesystems, ",") {
|
for fsEntry := range strings.SplitSeq(extraFilesystems, ",") {
|
||||||
// Parse the entry
|
// Parse the entry
|
||||||
fsEntry = strings.TrimSpace(fsEntry)
|
fsEntry = strings.TrimSpace(fsEntry)
|
||||||
var fs, customName string
|
var fs, customName string
|
||||||
@@ -373,3 +980,67 @@ func TestDiskUsageCaching(t *testing.T) {
|
|||||||
"lastDiskUsageUpdate should be refreshed when cache expires")
|
"lastDiskUsageUpdate should be refreshed when cache expires")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestHasSameDiskUsage(t *testing.T) {
|
||||||
|
const toleranceBytes uint64 = 16 * 1024 * 1024
|
||||||
|
|
||||||
|
t.Run("returns true when totals and usage are equal", func(t *testing.T) {
|
||||||
|
a := &disk.UsageStat{Total: 100 * 1024 * 1024 * 1024, Used: 42 * 1024 * 1024 * 1024}
|
||||||
|
b := &disk.UsageStat{Total: 100 * 1024 * 1024 * 1024, Used: 42 * 1024 * 1024 * 1024}
|
||||||
|
assert.True(t, hasSameDiskUsage(a, b))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns true within tolerance", func(t *testing.T) {
|
||||||
|
a := &disk.UsageStat{Total: 100 * 1024 * 1024 * 1024, Used: 42 * 1024 * 1024 * 1024}
|
||||||
|
b := &disk.UsageStat{
|
||||||
|
Total: a.Total + toleranceBytes - 1,
|
||||||
|
Used: a.Used - toleranceBytes + 1,
|
||||||
|
}
|
||||||
|
assert.True(t, hasSameDiskUsage(a, b))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns false when total exceeds tolerance", func(t *testing.T) {
|
||||||
|
a := &disk.UsageStat{Total: 100 * 1024 * 1024 * 1024, Used: 42 * 1024 * 1024 * 1024}
|
||||||
|
b := &disk.UsageStat{
|
||||||
|
Total: a.Total + toleranceBytes + 1,
|
||||||
|
Used: a.Used,
|
||||||
|
}
|
||||||
|
assert.False(t, hasSameDiskUsage(a, b))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns false for nil or zero total", func(t *testing.T) {
|
||||||
|
assert.False(t, hasSameDiskUsage(nil, &disk.UsageStat{Total: 1, Used: 1}))
|
||||||
|
assert.False(t, hasSameDiskUsage(&disk.UsageStat{Total: 1, Used: 1}, nil))
|
||||||
|
assert.False(t, hasSameDiskUsage(&disk.UsageStat{Total: 0, Used: 0}, &disk.UsageStat{Total: 1, Used: 1}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInitializeDiskIoStatsResetsTrackedDevices(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
fsStats: map[string]*system.FsStats{
|
||||||
|
"sda": {},
|
||||||
|
"sdb": {},
|
||||||
|
},
|
||||||
|
fsNames: []string{"stale", "sda"},
|
||||||
|
}
|
||||||
|
|
||||||
|
agent.initializeDiskIoStats(map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda", ReadBytes: 10, WriteBytes: 20},
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 30, WriteBytes: 40},
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.ElementsMatch(t, []string{"sda", "sdb"}, agent.fsNames)
|
||||||
|
assert.Len(t, agent.fsNames, 2)
|
||||||
|
assert.Equal(t, uint64(10), agent.fsStats["sda"].TotalRead)
|
||||||
|
assert.Equal(t, uint64(20), agent.fsStats["sda"].TotalWrite)
|
||||||
|
assert.False(t, agent.fsStats["sda"].Time.IsZero())
|
||||||
|
assert.False(t, agent.fsStats["sdb"].Time.IsZero())
|
||||||
|
|
||||||
|
agent.initializeDiskIoStats(map[string]disk.IOCountersStat{
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 50, WriteBytes: 60},
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, []string{"sdb"}, agent.fsNames)
|
||||||
|
assert.Equal(t, uint64(50), agent.fsStats["sdb"].TotalRead)
|
||||||
|
assert.Equal(t, uint64(60), agent.fsStats["sdb"].TotalWrite)
|
||||||
|
}
|
||||||
|
|||||||
342
agent/docker.go
342
agent/docker.go
@@ -1,6 +1,7 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
@@ -15,12 +16,16 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/agent/deltatracker"
|
"github.com/henrygd/beszel/agent/deltatracker"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/container"
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/blang/semver"
|
"github.com/blang/semver"
|
||||||
)
|
)
|
||||||
@@ -28,6 +33,7 @@ import (
|
|||||||
// ansiEscapePattern matches ANSI escape sequences (colors, cursor movement, etc.)
|
// ansiEscapePattern matches ANSI escape sequences (colors, cursor movement, etc.)
|
||||||
// This includes CSI sequences like \x1b[...m and simple escapes like \x1b[K
|
// This includes CSI sequences like \x1b[...m and simple escapes like \x1b[K
|
||||||
var ansiEscapePattern = regexp.MustCompile(`\x1b\[[0-9;]*[a-zA-Z]|\x1b\][^\x07]*\x07|\x1b[@-Z\\-_]`)
|
var ansiEscapePattern = regexp.MustCompile(`\x1b\[[0-9;]*[a-zA-Z]|\x1b\][^\x07]*\x07|\x1b[@-Z\\-_]`)
|
||||||
|
var dockerContainerIDPattern = regexp.MustCompile(`^[a-fA-F0-9]{12,64}$`)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// Docker API timeout in milliseconds
|
// Docker API timeout in milliseconds
|
||||||
@@ -47,6 +53,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type dockerManager struct {
|
type dockerManager struct {
|
||||||
|
agent *Agent // Used to propagate system detail changes back to the agent
|
||||||
client *http.Client // Client to query Docker API
|
client *http.Client // Client to query Docker API
|
||||||
wg sync.WaitGroup // WaitGroup to wait for all goroutines to finish
|
wg sync.WaitGroup // WaitGroup to wait for all goroutines to finish
|
||||||
sem chan struct{} // Semaphore to limit concurrent container requests
|
sem chan struct{} // Semaphore to limit concurrent container requests
|
||||||
@@ -55,6 +62,7 @@ type dockerManager struct {
|
|||||||
containerStatsMap map[string]*container.Stats // Keeps track of container stats
|
containerStatsMap map[string]*container.Stats // Keeps track of container stats
|
||||||
validIds map[string]struct{} // Map of valid container ids, used to prune invalid containers from containerStatsMap
|
validIds map[string]struct{} // Map of valid container ids, used to prune invalid containers from containerStatsMap
|
||||||
goodDockerVersion bool // Whether docker version is at least 25.0.0 (one-shot works correctly)
|
goodDockerVersion bool // Whether docker version is at least 25.0.0 (one-shot works correctly)
|
||||||
|
dockerVersionChecked bool // Whether a version probe has completed successfully
|
||||||
isWindows bool // Whether the Docker Engine API is running on Windows
|
isWindows bool // Whether the Docker Engine API is running on Windows
|
||||||
buf *bytes.Buffer // Buffer to store and read response bodies
|
buf *bytes.Buffer // Buffer to store and read response bodies
|
||||||
decoder *json.Decoder // Reusable JSON decoder that reads from buf
|
decoder *json.Decoder // Reusable JSON decoder that reads from buf
|
||||||
@@ -72,6 +80,7 @@ type dockerManager struct {
|
|||||||
// cacheTimeMs -> DeltaTracker for network bytes sent/received
|
// cacheTimeMs -> DeltaTracker for network bytes sent/received
|
||||||
networkSentTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
networkSentTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||||
networkRecvTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
networkRecvTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||||
|
lastNetworkReadTime map[uint16]map[string]time.Time // cacheTimeMs -> containerId -> last network read time
|
||||||
}
|
}
|
||||||
|
|
||||||
// userAgentRoundTripper is a custom http.RoundTripper that adds a User-Agent header to all requests
|
// userAgentRoundTripper is a custom http.RoundTripper that adds a User-Agent header to all requests
|
||||||
@@ -80,6 +89,14 @@ type userAgentRoundTripper struct {
|
|||||||
userAgent string
|
userAgent string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// dockerVersionResponse contains the /version fields used for engine checks.
|
||||||
|
type dockerVersionResponse struct {
|
||||||
|
Version string `json:"Version"`
|
||||||
|
Components []struct {
|
||||||
|
Name string `json:"Name"`
|
||||||
|
} `json:"Components"`
|
||||||
|
}
|
||||||
|
|
||||||
// RoundTrip implements the http.RoundTripper interface
|
// RoundTrip implements the http.RoundTripper interface
|
||||||
func (u *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
func (u *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
req.Header.Set("User-Agent", u.userAgent)
|
req.Header.Set("User-Agent", u.userAgent)
|
||||||
@@ -127,7 +144,14 @@ func (dm *dockerManager) getDockerStats(cacheTimeMs uint16) ([]*container.Stats,
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
dm.isWindows = strings.Contains(resp.Header.Get("Server"), "windows")
|
// Detect Podman and Windows from Server header
|
||||||
|
serverHeader := resp.Header.Get("Server")
|
||||||
|
if !dm.usingPodman && detectPodmanFromHeader(serverHeader) {
|
||||||
|
dm.setIsPodman()
|
||||||
|
}
|
||||||
|
dm.isWindows = strings.Contains(serverHeader, "windows")
|
||||||
|
|
||||||
|
dm.ensureDockerVersionChecked()
|
||||||
|
|
||||||
containersLength := len(dm.apiContainerList)
|
containersLength := len(dm.apiContainerList)
|
||||||
|
|
||||||
@@ -279,7 +303,7 @@ func (dm *dockerManager) cycleNetworkDeltasForCacheTime(cacheTimeMs uint16) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// calculateNetworkStats calculates network sent/receive deltas using DeltaTracker
|
// calculateNetworkStats calculates network sent/receive deltas using DeltaTracker
|
||||||
func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats *container.ApiStats, stats *container.Stats, initialized bool, name string, cacheTimeMs uint16) (uint64, uint64) {
|
func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats *container.ApiStats, name string, cacheTimeMs uint16) (uint64, uint64) {
|
||||||
var total_sent, total_recv uint64
|
var total_sent, total_recv uint64
|
||||||
for _, v := range apiStats.Networks {
|
for _, v := range apiStats.Networks {
|
||||||
total_sent += v.TxBytes
|
total_sent += v.TxBytes
|
||||||
@@ -298,10 +322,11 @@ func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats
|
|||||||
sent_delta_raw := sentTracker.Delta(ctr.IdShort)
|
sent_delta_raw := sentTracker.Delta(ctr.IdShort)
|
||||||
recv_delta_raw := recvTracker.Delta(ctr.IdShort)
|
recv_delta_raw := recvTracker.Delta(ctr.IdShort)
|
||||||
|
|
||||||
// Calculate bytes per second independently for Tx and Rx if we have previous data
|
// Calculate bytes per second using per-cache-time read time to avoid
|
||||||
|
// interference between different cache intervals (e.g. 1000ms vs 60000ms)
|
||||||
var sent_delta, recv_delta uint64
|
var sent_delta, recv_delta uint64
|
||||||
if initialized {
|
if prevReadTime, ok := dm.lastNetworkReadTime[cacheTimeMs][ctr.IdShort]; ok {
|
||||||
millisecondsElapsed := uint64(time.Since(stats.PrevReadTime).Milliseconds())
|
millisecondsElapsed := uint64(time.Since(prevReadTime).Milliseconds())
|
||||||
if millisecondsElapsed > 0 {
|
if millisecondsElapsed > 0 {
|
||||||
if sent_delta_raw > 0 {
|
if sent_delta_raw > 0 {
|
||||||
sent_delta = sent_delta_raw * 1000 / millisecondsElapsed
|
sent_delta = sent_delta_raw * 1000 / millisecondsElapsed
|
||||||
@@ -333,15 +358,48 @@ func validateCpuPercentage(cpuPct float64, containerName string) error {
|
|||||||
|
|
||||||
// updateContainerStatsValues updates the final stats values
|
// updateContainerStatsValues updates the final stats values
|
||||||
func updateContainerStatsValues(stats *container.Stats, cpuPct float64, usedMemory uint64, sent_delta, recv_delta uint64, readTime time.Time) {
|
func updateContainerStatsValues(stats *container.Stats, cpuPct float64, usedMemory uint64, sent_delta, recv_delta uint64, readTime time.Time) {
|
||||||
stats.Cpu = twoDecimals(cpuPct)
|
stats.Cpu = utils.TwoDecimals(cpuPct)
|
||||||
stats.Mem = bytesToMegabytes(float64(usedMemory))
|
stats.Mem = utils.BytesToMegabytes(float64(usedMemory))
|
||||||
stats.Bandwidth = [2]uint64{sent_delta, recv_delta}
|
stats.Bandwidth = [2]uint64{sent_delta, recv_delta}
|
||||||
// TODO(0.19+): stop populating NetworkSent/NetworkRecv (deprecated in 0.18.3)
|
// TODO(0.19+): stop populating NetworkSent/NetworkRecv (deprecated in 0.18.3)
|
||||||
stats.NetworkSent = bytesToMegabytes(float64(sent_delta))
|
stats.NetworkSent = utils.BytesToMegabytes(float64(sent_delta))
|
||||||
stats.NetworkRecv = bytesToMegabytes(float64(recv_delta))
|
stats.NetworkRecv = utils.BytesToMegabytes(float64(recv_delta))
|
||||||
stats.PrevReadTime = readTime
|
stats.PrevReadTime = readTime
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// convertContainerPortsToString formats the ports of a container into a sorted, deduplicated string.
|
||||||
|
// ctr.Ports is nilled out after processing so the slice is not accidentally reused.
|
||||||
|
func convertContainerPortsToString(ctr *container.ApiInfo) string {
|
||||||
|
if len(ctr.Ports) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
sort.Slice(ctr.Ports, func(i, j int) bool {
|
||||||
|
return ctr.Ports[i].PublicPort < ctr.Ports[j].PublicPort
|
||||||
|
})
|
||||||
|
var builder strings.Builder
|
||||||
|
seenPorts := make(map[uint16]struct{})
|
||||||
|
for _, p := range ctr.Ports {
|
||||||
|
_, ok := seenPorts[p.PublicPort]
|
||||||
|
if p.PublicPort == 0 || ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seenPorts[p.PublicPort] = struct{}{}
|
||||||
|
if builder.Len() > 0 {
|
||||||
|
builder.WriteString(", ")
|
||||||
|
}
|
||||||
|
switch p.IP {
|
||||||
|
case "0.0.0.0", "::":
|
||||||
|
default:
|
||||||
|
builder.WriteString(p.IP)
|
||||||
|
builder.WriteByte(':')
|
||||||
|
}
|
||||||
|
builder.WriteString(strconv.Itoa(int(p.PublicPort)))
|
||||||
|
}
|
||||||
|
// clear ports slice so it doesn't get reused and blend into next response
|
||||||
|
ctr.Ports = nil
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
func parseDockerStatus(status string) (string, container.DockerHealth) {
|
func parseDockerStatus(status string) (string, container.DockerHealth) {
|
||||||
trimmed := strings.TrimSpace(status)
|
trimmed := strings.TrimSpace(status)
|
||||||
if trimmed == "" {
|
if trimmed == "" {
|
||||||
@@ -361,22 +419,60 @@ func parseDockerStatus(status string) (string, container.DockerHealth) {
|
|||||||
statusText = trimmed
|
statusText = trimmed
|
||||||
}
|
}
|
||||||
|
|
||||||
healthText := strings.ToLower(strings.TrimSpace(strings.TrimSuffix(trimmed[openIdx+1:], ")")))
|
healthText := strings.TrimSpace(strings.TrimSuffix(trimmed[openIdx+1:], ")"))
|
||||||
// Some Docker statuses include a "health:" prefix inside the parentheses.
|
// Some Docker statuses include a "health:" prefix inside the parentheses.
|
||||||
// Strip it so it maps correctly to the known health states.
|
// Strip it so it maps correctly to the known health states.
|
||||||
if colonIdx := strings.IndexRune(healthText, ':'); colonIdx != -1 {
|
if colonIdx := strings.IndexRune(healthText, ':'); colonIdx != -1 {
|
||||||
prefix := strings.TrimSpace(healthText[:colonIdx])
|
prefix := strings.ToLower(strings.TrimSpace(healthText[:colonIdx]))
|
||||||
if prefix == "health" || prefix == "health status" {
|
if prefix == "health" || prefix == "health status" {
|
||||||
healthText = strings.TrimSpace(healthText[colonIdx+1:])
|
healthText = strings.TrimSpace(healthText[colonIdx+1:])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if health, ok := container.DockerHealthStrings[healthText]; ok {
|
if health, ok := parseDockerHealthStatus(healthText); ok {
|
||||||
return statusText, health
|
return statusText, health
|
||||||
}
|
}
|
||||||
|
|
||||||
return trimmed, container.DockerHealthNone
|
return trimmed, container.DockerHealthNone
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// parseDockerHealthStatus maps Docker health status strings to container.DockerHealth values
|
||||||
|
func parseDockerHealthStatus(status string) (container.DockerHealth, bool) {
|
||||||
|
health, ok := container.DockerHealthStrings[strings.ToLower(strings.TrimSpace(status))]
|
||||||
|
return health, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// getPodmanContainerHealth fetches container health status from the container inspect endpoint.
|
||||||
|
// Used for Podman which doesn't provide health status in the /containers/json endpoint as of March 2026.
|
||||||
|
// https://github.com/containers/podman/issues/27786
|
||||||
|
func (dm *dockerManager) getPodmanContainerHealth(containerID string) (container.DockerHealth, error) {
|
||||||
|
resp, err := dm.client.Get(fmt.Sprintf("http://localhost/containers/%s/json", url.PathEscape(containerID)))
|
||||||
|
if err != nil {
|
||||||
|
return container.DockerHealthNone, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return container.DockerHealthNone, fmt.Errorf("container inspect request failed: %s", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var inspectInfo struct {
|
||||||
|
State struct {
|
||||||
|
Health struct {
|
||||||
|
Status string
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&inspectInfo); err != nil {
|
||||||
|
return container.DockerHealthNone, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if health, ok := parseDockerHealthStatus(inspectInfo.State.Health.Status); ok {
|
||||||
|
return health, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return container.DockerHealthNone, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Updates stats for individual container with cache-time-aware delta tracking
|
// Updates stats for individual container with cache-time-aware delta tracking
|
||||||
func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeMs uint16) error {
|
func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeMs uint16) error {
|
||||||
name := ctr.Names[0][1:]
|
name := ctr.Names[0][1:]
|
||||||
@@ -386,6 +482,21 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeM
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
statusText, health := parseDockerStatus(ctr.Status)
|
||||||
|
|
||||||
|
// Docker exposes Health.Status on /containers/json in API 1.52+.
|
||||||
|
// Podman currently requires falling back to the inspect endpoint as of March 2026.
|
||||||
|
// https://github.com/containers/podman/issues/27786
|
||||||
|
if ctr.Health.Status != "" {
|
||||||
|
if h, ok := parseDockerHealthStatus(ctr.Health.Status); ok {
|
||||||
|
health = h
|
||||||
|
}
|
||||||
|
} else if dm.usingPodman {
|
||||||
|
if podmanHealth, err := dm.getPodmanContainerHealth(ctr.IdShort); err == nil {
|
||||||
|
health = podmanHealth
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
dm.containerStatsMutex.Lock()
|
dm.containerStatsMutex.Lock()
|
||||||
defer dm.containerStatsMutex.Unlock()
|
defer dm.containerStatsMutex.Unlock()
|
||||||
|
|
||||||
@@ -397,11 +508,13 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeM
|
|||||||
}
|
}
|
||||||
|
|
||||||
stats.Id = ctr.IdShort
|
stats.Id = ctr.IdShort
|
||||||
|
|
||||||
statusText, health := parseDockerStatus(ctr.Status)
|
|
||||||
stats.Status = statusText
|
stats.Status = statusText
|
||||||
stats.Health = health
|
stats.Health = health
|
||||||
|
|
||||||
|
if len(ctr.Ports) > 0 {
|
||||||
|
stats.Ports = convertContainerPortsToString(ctr)
|
||||||
|
}
|
||||||
|
|
||||||
// reset current stats
|
// reset current stats
|
||||||
stats.Cpu = 0
|
stats.Cpu = 0
|
||||||
stats.Mem = 0
|
stats.Mem = 0
|
||||||
@@ -448,7 +561,13 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeM
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Calculate network stats using DeltaTracker
|
// Calculate network stats using DeltaTracker
|
||||||
sent_delta, recv_delta := dm.calculateNetworkStats(ctr, res, stats, initialized, name, cacheTimeMs)
|
sent_delta, recv_delta := dm.calculateNetworkStats(ctr, res, name, cacheTimeMs)
|
||||||
|
|
||||||
|
// Store per-cache-time network read time for next rate calculation
|
||||||
|
if dm.lastNetworkReadTime[cacheTimeMs] == nil {
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs] = make(map[string]time.Time)
|
||||||
|
}
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs][ctr.IdShort] = time.Now()
|
||||||
|
|
||||||
// Store current network values for legacy compatibility
|
// Store current network values for legacy compatibility
|
||||||
var total_sent, total_recv uint64
|
var total_sent, total_recv uint64
|
||||||
@@ -480,11 +599,14 @@ func (dm *dockerManager) deleteContainerStatsSync(id string) {
|
|||||||
for ct := range dm.lastCpuReadTime {
|
for ct := range dm.lastCpuReadTime {
|
||||||
delete(dm.lastCpuReadTime[ct], id)
|
delete(dm.lastCpuReadTime[ct], id)
|
||||||
}
|
}
|
||||||
|
for ct := range dm.lastNetworkReadTime {
|
||||||
|
delete(dm.lastNetworkReadTime[ct], id)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates a new http client for Docker or Podman API
|
// Creates a new http client for Docker or Podman API
|
||||||
func newDockerManager() *dockerManager {
|
func newDockerManager(agent *Agent) *dockerManager {
|
||||||
dockerHost, exists := GetEnv("DOCKER_HOST")
|
dockerHost, exists := utils.GetEnv("DOCKER_HOST")
|
||||||
if exists {
|
if exists {
|
||||||
// return nil if set to empty string
|
// return nil if set to empty string
|
||||||
if dockerHost == "" {
|
if dockerHost == "" {
|
||||||
@@ -520,7 +642,7 @@ func newDockerManager() *dockerManager {
|
|||||||
|
|
||||||
// configurable timeout
|
// configurable timeout
|
||||||
timeout := time.Millisecond * time.Duration(dockerTimeoutMs)
|
timeout := time.Millisecond * time.Duration(dockerTimeoutMs)
|
||||||
if t, set := GetEnv("DOCKER_TIMEOUT"); set {
|
if t, set := utils.GetEnv("DOCKER_TIMEOUT"); set {
|
||||||
timeout, err = time.ParseDuration(t)
|
timeout, err = time.ParseDuration(t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error(err.Error())
|
slog.Error(err.Error())
|
||||||
@@ -537,7 +659,7 @@ func newDockerManager() *dockerManager {
|
|||||||
|
|
||||||
// Read container exclusion patterns from environment variable
|
// Read container exclusion patterns from environment variable
|
||||||
var excludeContainers []string
|
var excludeContainers []string
|
||||||
if excludeStr, set := GetEnv("EXCLUDE_CONTAINERS"); set && excludeStr != "" {
|
if excludeStr, set := utils.GetEnv("EXCLUDE_CONTAINERS"); set && excludeStr != "" {
|
||||||
parts := strings.SplitSeq(excludeStr, ",")
|
parts := strings.SplitSeq(excludeStr, ",")
|
||||||
for part := range parts {
|
for part := range parts {
|
||||||
trimmed := strings.TrimSpace(part)
|
trimmed := strings.TrimSpace(part)
|
||||||
@@ -549,6 +671,7 @@ func newDockerManager() *dockerManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
manager := &dockerManager{
|
manager := &dockerManager{
|
||||||
|
agent: agent,
|
||||||
client: &http.Client{
|
client: &http.Client{
|
||||||
Timeout: timeout,
|
Timeout: timeout,
|
||||||
Transport: userAgentTransport,
|
Transport: userAgentTransport,
|
||||||
@@ -565,50 +688,55 @@ func newDockerManager() *dockerManager {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
// If using podman, return client
|
// Best-effort startup probe. If the engine is not ready yet, getDockerStats will
|
||||||
if strings.Contains(dockerHost, "podman") {
|
// retry after the first successful /containers/json request.
|
||||||
manager.usingPodman = true
|
_, _ = manager.checkDockerVersion()
|
||||||
manager.goodDockerVersion = true
|
|
||||||
return manager
|
|
||||||
}
|
|
||||||
|
|
||||||
// this can take up to 5 seconds with retry, so run in goroutine
|
|
||||||
go manager.checkDockerVersion()
|
|
||||||
|
|
||||||
// give version check a chance to complete before returning
|
|
||||||
time.Sleep(50 * time.Millisecond)
|
|
||||||
|
|
||||||
return manager
|
return manager
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkDockerVersion checks Docker version and sets goodDockerVersion if at least 25.0.0.
|
// checkDockerVersion checks Docker version and sets goodDockerVersion if at least 25.0.0.
|
||||||
// Versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch.
|
// Versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch.
|
||||||
func (dm *dockerManager) checkDockerVersion() {
|
func (dm *dockerManager) checkDockerVersion() (bool, error) {
|
||||||
var err error
|
resp, err := dm.client.Get("http://localhost/version")
|
||||||
var resp *http.Response
|
|
||||||
var versionInfo struct {
|
|
||||||
Version string `json:"Version"`
|
|
||||||
}
|
|
||||||
const versionMaxTries = 2
|
|
||||||
for i := 1; i <= versionMaxTries; i++ {
|
|
||||||
resp, err = dm.client.Get("http://localhost/version")
|
|
||||||
if err == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if resp != nil {
|
|
||||||
resp.Body.Close()
|
|
||||||
}
|
|
||||||
if i < versionMaxTries {
|
|
||||||
slog.Debug("Failed to get Docker version; retrying", "attempt", i, "error", err)
|
|
||||||
time.Sleep(5 * time.Second)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
status := resp.Status
|
||||||
|
resp.Body.Close()
|
||||||
|
return false, fmt.Errorf("docker version request failed: %s", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var versionInfo dockerVersionResponse
|
||||||
|
serverHeader := resp.Header.Get("Server")
|
||||||
|
if err := dm.decode(resp, &versionInfo); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
dm.applyDockerVersionInfo(serverHeader, &versionInfo)
|
||||||
|
dm.dockerVersionChecked = true
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensureDockerVersionChecked retries the version probe after a successful
|
||||||
|
// container list request.
|
||||||
|
func (dm *dockerManager) ensureDockerVersionChecked() {
|
||||||
|
if dm.dockerVersionChecked {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := dm.decode(resp, &versionInfo); err != nil {
|
if _, err := dm.checkDockerVersion(); err != nil {
|
||||||
|
slog.Debug("Failed to get Docker version", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// applyDockerVersionInfo updates version-dependent behavior from engine metadata.
|
||||||
|
func (dm *dockerManager) applyDockerVersionInfo(serverHeader string, versionInfo *dockerVersionResponse) {
|
||||||
|
if detectPodmanEngine(serverHeader, versionInfo) {
|
||||||
|
dm.setIsPodman()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// if version > 24, one-shot works correctly and we can limit concurrent operations
|
// if version > 24, one-shot works correctly and we can limit concurrent operations
|
||||||
@@ -647,9 +775,34 @@ func getDockerHost() string {
|
|||||||
return scheme + socks[0]
|
return scheme + socks[0]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func validateContainerID(containerID string) error {
|
||||||
|
if !dockerContainerIDPattern.MatchString(containerID) {
|
||||||
|
return fmt.Errorf("invalid container id")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildDockerContainerEndpoint(containerID, action string, query url.Values) (string, error) {
|
||||||
|
if err := validateContainerID(containerID); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
u := &url.URL{
|
||||||
|
Scheme: "http",
|
||||||
|
Host: "localhost",
|
||||||
|
Path: fmt.Sprintf("/containers/%s/%s", url.PathEscape(containerID), action),
|
||||||
|
}
|
||||||
|
if len(query) > 0 {
|
||||||
|
u.RawQuery = query.Encode()
|
||||||
|
}
|
||||||
|
return u.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
// getContainerInfo fetches the inspection data for a container
|
// getContainerInfo fetches the inspection data for a container
|
||||||
func (dm *dockerManager) getContainerInfo(ctx context.Context, containerID string) ([]byte, error) {
|
func (dm *dockerManager) getContainerInfo(ctx context.Context, containerID string) ([]byte, error) {
|
||||||
endpoint := fmt.Sprintf("http://localhost/containers/%s/json", containerID)
|
endpoint, err := buildDockerContainerEndpoint(containerID, "json", nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -680,7 +833,15 @@ func (dm *dockerManager) getContainerInfo(ctx context.Context, containerID strin
|
|||||||
|
|
||||||
// getLogs fetches the logs for a container
|
// getLogs fetches the logs for a container
|
||||||
func (dm *dockerManager) getLogs(ctx context.Context, containerID string) (string, error) {
|
func (dm *dockerManager) getLogs(ctx context.Context, containerID string) (string, error) {
|
||||||
endpoint := fmt.Sprintf("http://localhost/containers/%s/logs?stdout=1&stderr=1&tail=%d", containerID, dockerLogsTail)
|
query := url.Values{
|
||||||
|
"stdout": []string{"1"},
|
||||||
|
"stderr": []string{"1"},
|
||||||
|
"tail": []string{fmt.Sprintf("%d", dockerLogsTail)},
|
||||||
|
}
|
||||||
|
endpoint, err := buildDockerContainerEndpoint(containerID, "logs", query)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
@@ -698,8 +859,17 @@ func (dm *dockerManager) getLogs(ctx context.Context, containerID string) (strin
|
|||||||
}
|
}
|
||||||
|
|
||||||
var builder strings.Builder
|
var builder strings.Builder
|
||||||
multiplexed := resp.Header.Get("Content-Type") == "application/vnd.docker.multiplexed-stream"
|
contentType := resp.Header.Get("Content-Type")
|
||||||
if err := decodeDockerLogStream(resp.Body, &builder, multiplexed); err != nil {
|
multiplexed := strings.HasSuffix(contentType, "multiplexed-stream")
|
||||||
|
logReader := io.Reader(resp.Body)
|
||||||
|
if !multiplexed {
|
||||||
|
// Podman may return multiplexed logs without Content-Type. Sniff the first frame header
|
||||||
|
// with a small buffered reader only when the header check fails.
|
||||||
|
bufferedReader := bufio.NewReaderSize(resp.Body, 8)
|
||||||
|
multiplexed = detectDockerMultiplexedStream(bufferedReader)
|
||||||
|
logReader = bufferedReader
|
||||||
|
}
|
||||||
|
if err := decodeDockerLogStream(logReader, &builder, multiplexed); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -711,6 +881,23 @@ func (dm *dockerManager) getLogs(ctx context.Context, containerID string) (strin
|
|||||||
return logs, nil
|
return logs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func detectDockerMultiplexedStream(reader *bufio.Reader) bool {
|
||||||
|
const headerSize = 8
|
||||||
|
header, err := reader.Peek(headerSize)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if header[0] != 0x01 && header[0] != 0x02 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Docker's stream framing header reserves bytes 1-3 as zero.
|
||||||
|
if header[1] != 0 || header[2] != 0 || header[3] != 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
frameLen := binary.BigEndian.Uint32(header[4:])
|
||||||
|
return frameLen <= maxLogFrameSize
|
||||||
|
}
|
||||||
|
|
||||||
func decodeDockerLogStream(reader io.Reader, builder *strings.Builder, multiplexed bool) error {
|
func decodeDockerLogStream(reader io.Reader, builder *strings.Builder, multiplexed bool) error {
|
||||||
if !multiplexed {
|
if !multiplexed {
|
||||||
_, err := io.Copy(builder, io.LimitReader(reader, maxTotalLogSize))
|
_, err := io.Copy(builder, io.LimitReader(reader, maxTotalLogSize))
|
||||||
@@ -775,3 +962,46 @@ func (dm *dockerManager) GetHostInfo() (info container.HostInfo, err error) {
|
|||||||
func (dm *dockerManager) IsPodman() bool {
|
func (dm *dockerManager) IsPodman() bool {
|
||||||
return dm.usingPodman
|
return dm.usingPodman
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// setIsPodman sets the manager to Podman mode and updates system details accordingly.
|
||||||
|
func (dm *dockerManager) setIsPodman() {
|
||||||
|
if dm.usingPodman {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
dm.usingPodman = true
|
||||||
|
dm.goodDockerVersion = true
|
||||||
|
dm.dockerVersionChecked = true
|
||||||
|
// keep system details updated - this may be detected late if server isn't ready when
|
||||||
|
// agent starts, so make sure we notify the hub if this happens later.
|
||||||
|
if dm.agent != nil {
|
||||||
|
dm.agent.updateSystemDetails(func(details *system.Details) {
|
||||||
|
details.Podman = true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectPodmanFromHeader identifies Podman from the Docker API server header.
|
||||||
|
func detectPodmanFromHeader(server string) bool {
|
||||||
|
return strings.HasPrefix(server, "Libpod")
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectPodmanFromVersion identifies Podman from the version payload.
|
||||||
|
func detectPodmanFromVersion(versionInfo *dockerVersionResponse) bool {
|
||||||
|
if versionInfo == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, component := range versionInfo.Components {
|
||||||
|
if strings.HasPrefix(component.Name, "Podman") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectPodmanEngine checks both header and version metadata for Podman.
|
||||||
|
func detectPodmanEngine(serverHeader string, versionInfo *dockerVersionResponse) bool {
|
||||||
|
if detectPodmanFromHeader(serverHeader) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return detectPodmanFromVersion(versionInfo)
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,17 +1,24 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/agent/deltatracker"
|
"github.com/henrygd/beszel/agent/deltatracker"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/container"
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
@@ -19,6 +26,43 @@ import (
|
|||||||
|
|
||||||
var defaultCacheTimeMs = uint16(60_000)
|
var defaultCacheTimeMs = uint16(60_000)
|
||||||
|
|
||||||
|
type recordingRoundTripper struct {
|
||||||
|
statusCode int
|
||||||
|
body string
|
||||||
|
contentType string
|
||||||
|
called bool
|
||||||
|
lastPath string
|
||||||
|
lastQuery map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
type roundTripFunc func(*http.Request) (*http.Response, error)
|
||||||
|
|
||||||
|
func (fn roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
|
return fn(req)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (rt *recordingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
|
rt.called = true
|
||||||
|
rt.lastPath = req.URL.EscapedPath()
|
||||||
|
rt.lastQuery = map[string]string{}
|
||||||
|
for key, values := range req.URL.Query() {
|
||||||
|
if len(values) > 0 {
|
||||||
|
rt.lastQuery[key] = values[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
resp := &http.Response{
|
||||||
|
StatusCode: rt.statusCode,
|
||||||
|
Status: "200 OK",
|
||||||
|
Header: make(http.Header),
|
||||||
|
Body: io.NopCloser(strings.NewReader(rt.body)),
|
||||||
|
Request: req,
|
||||||
|
}
|
||||||
|
if rt.contentType != "" {
|
||||||
|
resp.Header.Set("Content-Type", rt.contentType)
|
||||||
|
}
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
// cycleCpuDeltas cycles the CPU tracking data for a specific cache time interval
|
// cycleCpuDeltas cycles the CPU tracking data for a specific cache time interval
|
||||||
func (dm *dockerManager) cycleCpuDeltas(cacheTimeMs uint16) {
|
func (dm *dockerManager) cycleCpuDeltas(cacheTimeMs uint16) {
|
||||||
// Clear the CPU tracking maps for this cache time interval
|
// Clear the CPU tracking maps for this cache time interval
|
||||||
@@ -110,6 +154,94 @@ func TestCalculateMemoryUsage(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestBuildDockerContainerEndpoint(t *testing.T) {
|
||||||
|
t.Run("valid container ID builds escaped endpoint", func(t *testing.T) {
|
||||||
|
endpoint, err := buildDockerContainerEndpoint("0123456789ab", "json", nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, "http://localhost/containers/0123456789ab/json", endpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("invalid container ID is rejected", func(t *testing.T) {
|
||||||
|
_, err := buildDockerContainerEndpoint("../../version", "json", nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "invalid container id")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestContainerDetailsRequestsValidateContainerID(t *testing.T) {
|
||||||
|
rt := &recordingRoundTripper{
|
||||||
|
statusCode: 200,
|
||||||
|
body: `{"Config":{"Env":["SECRET=1"]}}`,
|
||||||
|
}
|
||||||
|
dm := &dockerManager{
|
||||||
|
client: &http.Client{Transport: rt},
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err := dm.getContainerInfo(context.Background(), "../version")
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "invalid container id")
|
||||||
|
assert.False(t, rt.called, "request should be rejected before dispatching to Docker API")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestContainerDetailsRequestsUseExpectedDockerPaths(t *testing.T) {
|
||||||
|
t.Run("container info uses container json endpoint", func(t *testing.T) {
|
||||||
|
rt := &recordingRoundTripper{
|
||||||
|
statusCode: 200,
|
||||||
|
body: `{"Config":{"Env":["SECRET=1"]},"Name":"demo"}`,
|
||||||
|
}
|
||||||
|
dm := &dockerManager{
|
||||||
|
client: &http.Client{Transport: rt},
|
||||||
|
}
|
||||||
|
|
||||||
|
body, err := dm.getContainerInfo(context.Background(), "0123456789ab")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, rt.called)
|
||||||
|
assert.Equal(t, "/containers/0123456789ab/json", rt.lastPath)
|
||||||
|
assert.NotContains(t, string(body), "SECRET=1", "sensitive env vars should be removed")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("container logs uses expected endpoint and query params", func(t *testing.T) {
|
||||||
|
rt := &recordingRoundTripper{
|
||||||
|
statusCode: 200,
|
||||||
|
body: "line1\nline2\n",
|
||||||
|
}
|
||||||
|
dm := &dockerManager{
|
||||||
|
client: &http.Client{Transport: rt},
|
||||||
|
}
|
||||||
|
|
||||||
|
logs, err := dm.getLogs(context.Background(), "abcdef123456")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, rt.called)
|
||||||
|
assert.Equal(t, "/containers/abcdef123456/logs", rt.lastPath)
|
||||||
|
assert.Equal(t, "1", rt.lastQuery["stdout"])
|
||||||
|
assert.Equal(t, "1", rt.lastQuery["stderr"])
|
||||||
|
assert.Equal(t, "200", rt.lastQuery["tail"])
|
||||||
|
assert.Equal(t, "line1\nline2\n", logs)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetPodmanContainerHealth(t *testing.T) {
|
||||||
|
called := false
|
||||||
|
dm := &dockerManager{
|
||||||
|
client: &http.Client{Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
|
||||||
|
called = true
|
||||||
|
assert.Equal(t, "/containers/0123456789ab/json", req.URL.EscapedPath())
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Status: "200 OK",
|
||||||
|
Header: make(http.Header),
|
||||||
|
Body: io.NopCloser(strings.NewReader(`{"State":{"Health":{"Status":"healthy"}}}`)),
|
||||||
|
Request: req,
|
||||||
|
}, nil
|
||||||
|
})},
|
||||||
|
}
|
||||||
|
|
||||||
|
health, err := dm.getPodmanContainerHealth("0123456789ab")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, called)
|
||||||
|
assert.Equal(t, container.DockerHealthHealthy, health)
|
||||||
|
}
|
||||||
|
|
||||||
func TestValidateCpuPercentage(t *testing.T) {
|
func TestValidateCpuPercentage(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -195,48 +327,6 @@ func TestUpdateContainerStatsValues(t *testing.T) {
|
|||||||
assert.Equal(t, testTime, stats.PrevReadTime)
|
assert.Equal(t, testTime, stats.PrevReadTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTwoDecimals(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
input float64
|
|
||||||
expected float64
|
|
||||||
}{
|
|
||||||
{"round down", 1.234, 1.23},
|
|
||||||
{"round half up", 1.235, 1.24}, // math.Round rounds half up
|
|
||||||
{"no rounding needed", 1.23, 1.23},
|
|
||||||
{"negative number", -1.235, -1.24}, // math.Round rounds half up (more negative)
|
|
||||||
{"zero", 0.0, 0.0},
|
|
||||||
{"large number", 123.456, 123.46}, // rounds 5 up
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
result := twoDecimals(tt.input)
|
|
||||||
assert.Equal(t, tt.expected, result)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBytesToMegabytes(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
input float64
|
|
||||||
expected float64
|
|
||||||
}{
|
|
||||||
{"1 MB", 1048576, 1.0},
|
|
||||||
{"512 KB", 524288, 0.5},
|
|
||||||
{"zero", 0, 0},
|
|
||||||
{"large value", 1073741824, 1024}, // 1 GB = 1024 MB
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
result := bytesToMegabytes(tt.input)
|
|
||||||
assert.Equal(t, tt.expected, result)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestInitializeCpuTracking(t *testing.T) {
|
func TestInitializeCpuTracking(t *testing.T) {
|
||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
lastCpuContainer: make(map[uint16]map[string]uint64),
|
lastCpuContainer: make(map[uint16]map[string]uint64),
|
||||||
@@ -318,6 +408,7 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
cacheTimeMs := uint16(30000)
|
cacheTimeMs := uint16(30000)
|
||||||
@@ -333,6 +424,11 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
dm.networkSentTrackers[cacheTimeMs] = sentTracker
|
dm.networkSentTrackers[cacheTimeMs] = sentTracker
|
||||||
dm.networkRecvTrackers[cacheTimeMs] = recvTracker
|
dm.networkRecvTrackers[cacheTimeMs] = recvTracker
|
||||||
|
|
||||||
|
// Set per-cache-time network read time (1 second ago)
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs] = map[string]time.Time{
|
||||||
|
"container1": time.Now().Add(-time.Second),
|
||||||
|
}
|
||||||
|
|
||||||
ctr := &container.ApiInfo{
|
ctr := &container.ApiInfo{
|
||||||
IdShort: "container1",
|
IdShort: "container1",
|
||||||
}
|
}
|
||||||
@@ -343,12 +439,8 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
stats := &container.Stats{
|
|
||||||
PrevReadTime: time.Now().Add(-time.Second), // 1 second ago
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test with initialized container
|
// Test with initialized container
|
||||||
sent, recv := dm.calculateNetworkStats(ctr, apiStats, stats, true, "test-container", cacheTimeMs)
|
sent, recv := dm.calculateNetworkStats(ctr, apiStats, "test-container", cacheTimeMs)
|
||||||
|
|
||||||
// Should return calculated byte rates per second
|
// Should return calculated byte rates per second
|
||||||
assert.GreaterOrEqual(t, sent, uint64(0))
|
assert.GreaterOrEqual(t, sent, uint64(0))
|
||||||
@@ -356,12 +448,76 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
|
|
||||||
// Cycle and test one-direction change (Tx only) is reflected independently
|
// Cycle and test one-direction change (Tx only) is reflected independently
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs]["container1"] = time.Now().Add(-time.Second)
|
||||||
apiStats.Networks["eth0"] = container.NetworkStats{TxBytes: 2500, RxBytes: 1800} // +500 Tx only
|
apiStats.Networks["eth0"] = container.NetworkStats{TxBytes: 2500, RxBytes: 1800} // +500 Tx only
|
||||||
sent, recv = dm.calculateNetworkStats(ctr, apiStats, stats, true, "test-container", cacheTimeMs)
|
sent, recv = dm.calculateNetworkStats(ctr, apiStats, "test-container", cacheTimeMs)
|
||||||
assert.Greater(t, sent, uint64(0))
|
assert.Greater(t, sent, uint64(0))
|
||||||
assert.Equal(t, uint64(0), recv)
|
assert.Equal(t, uint64(0), recv)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestNetworkStatsCacheTimeIsolation verifies that frequent collections at one cache time
|
||||||
|
// (e.g. 1000ms) don't cause inflated rates at another cache time (e.g. 60000ms).
|
||||||
|
// This was a bug where PrevReadTime was shared, so the 60000ms tracker would see a
|
||||||
|
// large byte delta divided by a tiny elapsed time (set by the 1000ms path).
|
||||||
|
func TestNetworkStatsCacheTimeIsolation(t *testing.T) {
|
||||||
|
dm := &dockerManager{
|
||||||
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
}
|
||||||
|
|
||||||
|
ctr := &container.ApiInfo{IdShort: "container1"}
|
||||||
|
fastCache := uint16(1000)
|
||||||
|
slowCache := uint16(60000)
|
||||||
|
|
||||||
|
// Baseline for both cache times at T=0 with 100 bytes total
|
||||||
|
baseline := &container.ApiStats{
|
||||||
|
Networks: map[string]container.NetworkStats{
|
||||||
|
"eth0": {TxBytes: 100, RxBytes: 100},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
dm.calculateNetworkStats(ctr, baseline, "test", fastCache)
|
||||||
|
dm.calculateNetworkStats(ctr, baseline, "test", slowCache)
|
||||||
|
|
||||||
|
// Record read times and cycle both
|
||||||
|
now := time.Now()
|
||||||
|
dm.lastNetworkReadTime[fastCache] = map[string]time.Time{"container1": now}
|
||||||
|
dm.lastNetworkReadTime[slowCache] = map[string]time.Time{"container1": now}
|
||||||
|
dm.cycleNetworkDeltasForCacheTime(fastCache)
|
||||||
|
dm.cycleNetworkDeltasForCacheTime(slowCache)
|
||||||
|
|
||||||
|
// Simulate many fast (1000ms) collections over ~5 seconds, each adding 10 bytes
|
||||||
|
totalBytes := uint64(100)
|
||||||
|
for i := 0; i < 5; i++ {
|
||||||
|
totalBytes += 10
|
||||||
|
stats := &container.ApiStats{
|
||||||
|
Networks: map[string]container.NetworkStats{
|
||||||
|
"eth0": {TxBytes: totalBytes, RxBytes: totalBytes},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// Set fast cache read time to 1 second ago
|
||||||
|
dm.lastNetworkReadTime[fastCache]["container1"] = time.Now().Add(-time.Second)
|
||||||
|
sent, _ := dm.calculateNetworkStats(ctr, stats, "test", fastCache)
|
||||||
|
// Fast cache should see ~10 bytes/sec per interval
|
||||||
|
assert.LessOrEqual(t, sent, uint64(100), "fast cache rate should be reasonable")
|
||||||
|
dm.cycleNetworkDeltasForCacheTime(fastCache)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now do slow cache collection — total delta is 50 bytes over ~5 seconds
|
||||||
|
// Set slow cache read time to 5 seconds ago (the actual elapsed time)
|
||||||
|
dm.lastNetworkReadTime[slowCache]["container1"] = time.Now().Add(-5 * time.Second)
|
||||||
|
finalStats := &container.ApiStats{
|
||||||
|
Networks: map[string]container.NetworkStats{
|
||||||
|
"eth0": {TxBytes: totalBytes, RxBytes: totalBytes},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
sent, _ := dm.calculateNetworkStats(ctr, finalStats, "test", slowCache)
|
||||||
|
|
||||||
|
// Slow cache rate should be ~10 bytes/sec (50 bytes / 5 seconds), NOT 100x inflated
|
||||||
|
assert.LessOrEqual(t, sent, uint64(100), "slow cache rate should NOT be inflated by fast cache collections")
|
||||||
|
assert.GreaterOrEqual(t, sent, uint64(1), "slow cache should still report some traffic")
|
||||||
|
}
|
||||||
|
|
||||||
func TestDockerManagerCreation(t *testing.T) {
|
func TestDockerManagerCreation(t *testing.T) {
|
||||||
// Test that dockerManager can be created without panicking
|
// Test that dockerManager can be created without panicking
|
||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
@@ -370,6 +526,7 @@ func TestDockerManagerCreation(t *testing.T) {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.NotNil(t, dm)
|
assert.NotNil(t, dm)
|
||||||
@@ -377,6 +534,274 @@ func TestDockerManagerCreation(t *testing.T) {
|
|||||||
assert.NotNil(t, dm.lastCpuSystem)
|
assert.NotNil(t, dm.lastCpuSystem)
|
||||||
assert.NotNil(t, dm.networkSentTrackers)
|
assert.NotNil(t, dm.networkSentTrackers)
|
||||||
assert.NotNil(t, dm.networkRecvTrackers)
|
assert.NotNil(t, dm.networkRecvTrackers)
|
||||||
|
assert.NotNil(t, dm.lastNetworkReadTime)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCheckDockerVersion(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
statusCode int
|
||||||
|
body string
|
||||||
|
server string
|
||||||
|
expectSuccess bool
|
||||||
|
expectedGood bool
|
||||||
|
expectedPodman bool
|
||||||
|
expectError bool
|
||||||
|
expectedRequest string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "good docker version",
|
||||||
|
statusCode: http.StatusOK,
|
||||||
|
body: `{"Version":"25.0.1"}`,
|
||||||
|
expectSuccess: true,
|
||||||
|
expectedGood: true,
|
||||||
|
expectedPodman: false,
|
||||||
|
expectedRequest: "/version",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "old docker version",
|
||||||
|
statusCode: http.StatusOK,
|
||||||
|
body: `{"Version":"24.0.7"}`,
|
||||||
|
expectSuccess: true,
|
||||||
|
expectedGood: false,
|
||||||
|
expectedPodman: false,
|
||||||
|
expectedRequest: "/version",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "podman from server header",
|
||||||
|
statusCode: http.StatusOK,
|
||||||
|
body: `{"Version":"5.5.0"}`,
|
||||||
|
server: "Libpod/5.5.0",
|
||||||
|
expectSuccess: true,
|
||||||
|
expectedGood: true,
|
||||||
|
expectedPodman: true,
|
||||||
|
expectedRequest: "/version",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "non-200 response",
|
||||||
|
statusCode: http.StatusServiceUnavailable,
|
||||||
|
body: `"not ready"`,
|
||||||
|
expectSuccess: false,
|
||||||
|
expectedGood: false,
|
||||||
|
expectedPodman: false,
|
||||||
|
expectError: true,
|
||||||
|
expectedRequest: "/version",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
requestCount := 0
|
||||||
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
requestCount++
|
||||||
|
assert.Equal(t, tt.expectedRequest, r.URL.EscapedPath())
|
||||||
|
if tt.server != "" {
|
||||||
|
w.Header().Set("Server", tt.server)
|
||||||
|
}
|
||||||
|
w.WriteHeader(tt.statusCode)
|
||||||
|
fmt.Fprint(w, tt.body)
|
||||||
|
}))
|
||||||
|
defer server.Close()
|
||||||
|
|
||||||
|
dm := &dockerManager{
|
||||||
|
client: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
DialContext: func(_ context.Context, network, _ string) (net.Conn, error) {
|
||||||
|
return net.Dial(network, server.Listener.Addr().String())
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
success, err := dm.checkDockerVersion()
|
||||||
|
|
||||||
|
assert.Equal(t, tt.expectSuccess, success)
|
||||||
|
assert.Equal(t, tt.expectSuccess, dm.dockerVersionChecked)
|
||||||
|
assert.Equal(t, tt.expectedGood, dm.goodDockerVersion)
|
||||||
|
assert.Equal(t, tt.expectedPodman, dm.usingPodman)
|
||||||
|
assert.Equal(t, 1, requestCount)
|
||||||
|
if tt.expectError {
|
||||||
|
require.Error(t, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("request error", func(t *testing.T) {
|
||||||
|
requestCount := 0
|
||||||
|
dm := &dockerManager{
|
||||||
|
client: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
|
||||||
|
requestCount++
|
||||||
|
return nil, errors.New("connection refused")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
success, err := dm.checkDockerVersion()
|
||||||
|
|
||||||
|
assert.False(t, success)
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.False(t, dm.dockerVersionChecked)
|
||||||
|
assert.False(t, dm.goodDockerVersion)
|
||||||
|
assert.False(t, dm.usingPodman)
|
||||||
|
assert.Equal(t, 1, requestCount)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// newDockerManagerForVersionTest creates a dockerManager wired to a test server.
|
||||||
|
func newDockerManagerForVersionTest(server *httptest.Server) *dockerManager {
|
||||||
|
return &dockerManager{
|
||||||
|
client: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
DialContext: func(_ context.Context, network, _ string) (net.Conn, error) {
|
||||||
|
return net.Dial(network, server.Listener.Addr().String())
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
|
lastCpuContainer: make(map[uint16]map[string]uint64),
|
||||||
|
lastCpuSystem: make(map[uint16]map[string]uint64),
|
||||||
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetDockerStatsChecksDockerVersionAfterContainerList(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
containerServer string
|
||||||
|
versionServer string
|
||||||
|
versionBody string
|
||||||
|
expectedGood bool
|
||||||
|
expectedPodman bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "200 with good version on first try",
|
||||||
|
versionBody: `{"Version":"25.0.1"}`,
|
||||||
|
expectedGood: true,
|
||||||
|
expectedPodman: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "200 with old version on first try",
|
||||||
|
versionBody: `{"Version":"24.0.7"}`,
|
||||||
|
expectedGood: false,
|
||||||
|
expectedPodman: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "podman detected from server header",
|
||||||
|
containerServer: "Libpod/5.5.0",
|
||||||
|
expectedGood: true,
|
||||||
|
expectedPodman: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
requestCounts := map[string]int{}
|
||||||
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
requestCounts[r.URL.EscapedPath()]++
|
||||||
|
switch r.URL.EscapedPath() {
|
||||||
|
case "/containers/json":
|
||||||
|
if tt.containerServer != "" {
|
||||||
|
w.Header().Set("Server", tt.containerServer)
|
||||||
|
}
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
fmt.Fprint(w, `[]`)
|
||||||
|
case "/version":
|
||||||
|
if tt.versionServer != "" {
|
||||||
|
w.Header().Set("Server", tt.versionServer)
|
||||||
|
}
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
fmt.Fprint(w, tt.versionBody)
|
||||||
|
default:
|
||||||
|
t.Fatalf("unexpected path: %s", r.URL.EscapedPath())
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
defer server.Close()
|
||||||
|
|
||||||
|
dm := newDockerManagerForVersionTest(server)
|
||||||
|
|
||||||
|
stats, err := dm.getDockerStats(defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Empty(t, stats)
|
||||||
|
assert.True(t, dm.dockerVersionChecked)
|
||||||
|
assert.Equal(t, tt.expectedGood, dm.goodDockerVersion)
|
||||||
|
assert.Equal(t, tt.expectedPodman, dm.usingPodman)
|
||||||
|
assert.Equal(t, 1, requestCounts["/containers/json"])
|
||||||
|
if tt.expectedPodman {
|
||||||
|
assert.Equal(t, 0, requestCounts["/version"])
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, 1, requestCounts["/version"])
|
||||||
|
}
|
||||||
|
|
||||||
|
stats, err = dm.getDockerStats(defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Empty(t, stats)
|
||||||
|
assert.Equal(t, tt.expectedGood, dm.goodDockerVersion)
|
||||||
|
assert.Equal(t, tt.expectedPodman, dm.usingPodman)
|
||||||
|
assert.Equal(t, 2, requestCounts["/containers/json"])
|
||||||
|
if tt.expectedPodman {
|
||||||
|
assert.Equal(t, 0, requestCounts["/version"])
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, 1, requestCounts["/version"])
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetDockerStatsRetriesVersionCheckUntilSuccess(t *testing.T) {
|
||||||
|
requestCounts := map[string]int{}
|
||||||
|
versionStatuses := []int{http.StatusServiceUnavailable, http.StatusOK}
|
||||||
|
versionBodies := []string{`"not ready"`, `{"Version":"25.1.0"}`}
|
||||||
|
|
||||||
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
requestCounts[r.URL.EscapedPath()]++
|
||||||
|
switch r.URL.EscapedPath() {
|
||||||
|
case "/containers/json":
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
fmt.Fprint(w, `[]`)
|
||||||
|
case "/version":
|
||||||
|
idx := requestCounts["/version"] - 1
|
||||||
|
if idx >= len(versionStatuses) {
|
||||||
|
idx = len(versionStatuses) - 1
|
||||||
|
}
|
||||||
|
w.WriteHeader(versionStatuses[idx])
|
||||||
|
fmt.Fprint(w, versionBodies[idx])
|
||||||
|
default:
|
||||||
|
t.Fatalf("unexpected path: %s", r.URL.EscapedPath())
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
defer server.Close()
|
||||||
|
|
||||||
|
dm := newDockerManagerForVersionTest(server)
|
||||||
|
|
||||||
|
stats, err := dm.getDockerStats(defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Empty(t, stats)
|
||||||
|
assert.False(t, dm.dockerVersionChecked)
|
||||||
|
assert.False(t, dm.goodDockerVersion)
|
||||||
|
assert.Equal(t, 1, requestCounts["/version"])
|
||||||
|
|
||||||
|
stats, err = dm.getDockerStats(defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Empty(t, stats)
|
||||||
|
assert.True(t, dm.dockerVersionChecked)
|
||||||
|
assert.True(t, dm.goodDockerVersion)
|
||||||
|
assert.Equal(t, 2, requestCounts["/containers/json"])
|
||||||
|
assert.Equal(t, 2, requestCounts["/version"])
|
||||||
|
|
||||||
|
stats, err = dm.getDockerStats(defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Empty(t, stats)
|
||||||
|
assert.Equal(t, 3, requestCounts["/containers/json"])
|
||||||
|
assert.Equal(t, 2, requestCounts["/version"])
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCycleCpuDeltas(t *testing.T) {
|
func TestCycleCpuDeltas(t *testing.T) {
|
||||||
@@ -450,6 +875,7 @@ func TestDockerStatsWithMockData(t *testing.T) {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
containerStatsMap: make(map[string]*container.Stats),
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -595,23 +1021,22 @@ func TestNetworkStatsCalculationWithRealData(t *testing.T) {
|
|||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
ctr := &container.ApiInfo{IdShort: "test-container"}
|
ctr := &container.ApiInfo{IdShort: "test-container"}
|
||||||
cacheTimeMs := uint16(30000) // Test with 30 second cache
|
cacheTimeMs := uint16(30000) // Test with 30 second cache
|
||||||
|
|
||||||
// Use exact timing for deterministic results
|
// First call sets baseline (no previous read time, so rates should be 0)
|
||||||
exactly1000msAgo := time.Now().Add(-1000 * time.Millisecond)
|
sent1, recv1 := dm.calculateNetworkStats(ctr, apiStats1, "test", cacheTimeMs)
|
||||||
stats := &container.Stats{
|
|
||||||
PrevReadTime: exactly1000msAgo,
|
|
||||||
}
|
|
||||||
|
|
||||||
// First call sets baseline
|
|
||||||
sent1, recv1 := dm.calculateNetworkStats(ctr, apiStats1, stats, true, "test", cacheTimeMs)
|
|
||||||
assert.Equal(t, uint64(0), sent1)
|
assert.Equal(t, uint64(0), sent1)
|
||||||
assert.Equal(t, uint64(0), recv1)
|
assert.Equal(t, uint64(0), recv1)
|
||||||
|
|
||||||
// Cycle to establish baseline for this cache time
|
// Record read time and cycle to establish baseline for this cache time
|
||||||
|
exactly1000msAgo := time.Now().Add(-1000 * time.Millisecond)
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs] = map[string]time.Time{
|
||||||
|
"test-container": exactly1000msAgo,
|
||||||
|
}
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
|
|
||||||
// Calculate expected results precisely
|
// Calculate expected results precisely
|
||||||
@@ -622,7 +1047,7 @@ func TestNetworkStatsCalculationWithRealData(t *testing.T) {
|
|||||||
expectedRecvRate := deltaRecv * 1000 / expectedElapsedMs // Should be exactly 1000000
|
expectedRecvRate := deltaRecv * 1000 / expectedElapsedMs // Should be exactly 1000000
|
||||||
|
|
||||||
// Second call with changed data
|
// Second call with changed data
|
||||||
sent2, recv2 := dm.calculateNetworkStats(ctr, apiStats2, stats, true, "test", cacheTimeMs)
|
sent2, recv2 := dm.calculateNetworkStats(ctr, apiStats2, "test", cacheTimeMs)
|
||||||
|
|
||||||
// Should be exactly the expected rates (no tolerance needed)
|
// Should be exactly the expected rates (no tolerance needed)
|
||||||
assert.Equal(t, expectedSentRate, sent2)
|
assert.Equal(t, expectedSentRate, sent2)
|
||||||
@@ -630,12 +1055,13 @@ func TestNetworkStatsCalculationWithRealData(t *testing.T) {
|
|||||||
|
|
||||||
// Bad speed cap: set absurd delta over 1ms and expect 0 due to cap
|
// Bad speed cap: set absurd delta over 1ms and expect 0 due to cap
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
stats.PrevReadTime = time.Now().Add(-1 * time.Millisecond)
|
dm.lastNetworkReadTime[cacheTimeMs]["test-container"] = time.Now().Add(-1 * time.Millisecond)
|
||||||
apiStats1.Networks["eth0"] = container.NetworkStats{TxBytes: 0, RxBytes: 0}
|
apiStats1.Networks["eth0"] = container.NetworkStats{TxBytes: 0, RxBytes: 0}
|
||||||
apiStats2.Networks["eth0"] = container.NetworkStats{TxBytes: 10 * 1024 * 1024 * 1024, RxBytes: 0} // 10GB delta
|
apiStats2.Networks["eth0"] = container.NetworkStats{TxBytes: 10 * 1024 * 1024 * 1024, RxBytes: 0} // 10GB delta
|
||||||
_, _ = dm.calculateNetworkStats(ctr, apiStats1, stats, true, "test", cacheTimeMs) // baseline
|
_, _ = dm.calculateNetworkStats(ctr, apiStats1, "test", cacheTimeMs) // baseline
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
sent3, recv3 := dm.calculateNetworkStats(ctr, apiStats2, stats, true, "test", cacheTimeMs)
|
dm.lastNetworkReadTime[cacheTimeMs]["test-container"] = time.Now().Add(-1 * time.Millisecond)
|
||||||
|
sent3, recv3 := dm.calculateNetworkStats(ctr, apiStats2, "test", cacheTimeMs)
|
||||||
assert.Equal(t, uint64(0), sent3)
|
assert.Equal(t, uint64(0), sent3)
|
||||||
assert.Equal(t, uint64(0), recv3)
|
assert.Equal(t, uint64(0), recv3)
|
||||||
}
|
}
|
||||||
@@ -656,6 +1082,7 @@ func TestContainerStatsEndToEndWithRealData(t *testing.T) {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
containerStatsMap: make(map[string]*container.Stats),
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -691,14 +1118,50 @@ func TestContainerStatsEndToEndWithRealData(t *testing.T) {
|
|||||||
updateContainerStatsValues(testStats, cpuPct, usedMemory, 1000000, 500000, testTime)
|
updateContainerStatsValues(testStats, cpuPct, usedMemory, 1000000, 500000, testTime)
|
||||||
|
|
||||||
assert.Equal(t, cpuPct, testStats.Cpu)
|
assert.Equal(t, cpuPct, testStats.Cpu)
|
||||||
assert.Equal(t, bytesToMegabytes(float64(usedMemory)), testStats.Mem)
|
assert.Equal(t, utils.BytesToMegabytes(float64(usedMemory)), testStats.Mem)
|
||||||
assert.Equal(t, [2]uint64{1000000, 500000}, testStats.Bandwidth)
|
assert.Equal(t, [2]uint64{1000000, 500000}, testStats.Bandwidth)
|
||||||
// Deprecated fields still populated for backward compatibility with older hubs
|
// Deprecated fields still populated for backward compatibility with older hubs
|
||||||
assert.Equal(t, bytesToMegabytes(1000000), testStats.NetworkSent)
|
assert.Equal(t, utils.BytesToMegabytes(1000000), testStats.NetworkSent)
|
||||||
assert.Equal(t, bytesToMegabytes(500000), testStats.NetworkRecv)
|
assert.Equal(t, utils.BytesToMegabytes(500000), testStats.NetworkRecv)
|
||||||
assert.Equal(t, testTime, testStats.PrevReadTime)
|
assert.Equal(t, testTime, testStats.PrevReadTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGetLogsDetectsMultiplexedWithoutContentType(t *testing.T) {
|
||||||
|
// Docker multiplexed frame: [stream][0,0,0][len(4 bytes BE)][payload]
|
||||||
|
frame := []byte{
|
||||||
|
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
|
||||||
|
'H', 'e', 'l', 'l', 'o',
|
||||||
|
}
|
||||||
|
rt := &recordingRoundTripper{
|
||||||
|
statusCode: 200,
|
||||||
|
body: string(frame),
|
||||||
|
// Intentionally omit content type to simulate Podman behavior.
|
||||||
|
}
|
||||||
|
dm := &dockerManager{
|
||||||
|
client: &http.Client{Transport: rt},
|
||||||
|
}
|
||||||
|
|
||||||
|
logs, err := dm.getLogs(context.Background(), "abcdef123456")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, "Hello", logs)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetLogsDoesNotMisclassifyRawStreamAsMultiplexed(t *testing.T) {
|
||||||
|
// Starts with 0x01, but doesn't match Docker frame signature (reserved bytes aren't all zero).
|
||||||
|
raw := []byte{0x01, 0x02, 0x03, 0x04, 'r', 'a', 'w'}
|
||||||
|
rt := &recordingRoundTripper{
|
||||||
|
statusCode: 200,
|
||||||
|
body: string(raw),
|
||||||
|
}
|
||||||
|
dm := &dockerManager{
|
||||||
|
client: &http.Client{Transport: rt},
|
||||||
|
}
|
||||||
|
|
||||||
|
logs, err := dm.getLogs(context.Background(), "abcdef123456")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, raw, []byte(logs))
|
||||||
|
}
|
||||||
|
|
||||||
func TestEdgeCasesWithRealData(t *testing.T) {
|
func TestEdgeCasesWithRealData(t *testing.T) {
|
||||||
// Test with minimal container stats
|
// Test with minimal container stats
|
||||||
minimalStats := &container.ApiStats{
|
minimalStats := &container.ApiStats{
|
||||||
@@ -741,6 +1204,7 @@ func TestDockerStatsWorkflow(t *testing.T) {
|
|||||||
lastCpuSystem: make(map[uint16]map[string]uint64),
|
lastCpuSystem: make(map[uint16]map[string]uint64),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
containerStatsMap: make(map[string]*container.Stats),
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -920,6 +1384,18 @@ func TestParseDockerStatus(t *testing.T) {
|
|||||||
expectedStatus: "",
|
expectedStatus: "",
|
||||||
expectedHealth: container.DockerHealthNone,
|
expectedHealth: container.DockerHealthNone,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "status health with health: prefix",
|
||||||
|
input: "Up 5 minutes (health: starting)",
|
||||||
|
expectedStatus: "Up 5 minutes",
|
||||||
|
expectedHealth: container.DockerHealthStarting,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "status health with health status: prefix",
|
||||||
|
input: "Up 10 minutes (health status: unhealthy)",
|
||||||
|
expectedStatus: "Up 10 minutes",
|
||||||
|
expectedHealth: container.DockerHealthUnhealthy,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
@@ -931,6 +1407,85 @@ func TestParseDockerStatus(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParseDockerHealthStatus(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
input string
|
||||||
|
expectedHealth container.DockerHealth
|
||||||
|
expectedOk bool
|
||||||
|
}{
|
||||||
|
{"healthy", container.DockerHealthHealthy, true},
|
||||||
|
{"unhealthy", container.DockerHealthUnhealthy, true},
|
||||||
|
{"starting", container.DockerHealthStarting, true},
|
||||||
|
{"none", container.DockerHealthNone, true},
|
||||||
|
{" Healthy ", container.DockerHealthHealthy, true},
|
||||||
|
{"unknown", container.DockerHealthNone, false},
|
||||||
|
{"", container.DockerHealthNone, false},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.input, func(t *testing.T) {
|
||||||
|
health, ok := parseDockerHealthStatus(tt.input)
|
||||||
|
assert.Equal(t, tt.expectedHealth, health)
|
||||||
|
assert.Equal(t, tt.expectedOk, ok)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateContainerStatsUsesPodmanInspectHealthFallback(t *testing.T) {
|
||||||
|
var requestedPaths []string
|
||||||
|
dm := &dockerManager{
|
||||||
|
client: &http.Client{Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
|
||||||
|
requestedPaths = append(requestedPaths, req.URL.EscapedPath())
|
||||||
|
switch req.URL.EscapedPath() {
|
||||||
|
case "/containers/0123456789ab/stats":
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Status: "200 OK",
|
||||||
|
Header: make(http.Header),
|
||||||
|
Body: io.NopCloser(strings.NewReader(`{
|
||||||
|
"read":"2026-03-15T21:26:59Z",
|
||||||
|
"cpu_stats":{"cpu_usage":{"total_usage":1000},"system_cpu_usage":2000},
|
||||||
|
"memory_stats":{"usage":1048576,"stats":{"inactive_file":262144}},
|
||||||
|
"networks":{"eth0":{"rx_bytes":0,"tx_bytes":0}}
|
||||||
|
}`)),
|
||||||
|
Request: req,
|
||||||
|
}, nil
|
||||||
|
case "/containers/0123456789ab/json":
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Status: "200 OK",
|
||||||
|
Header: make(http.Header),
|
||||||
|
Body: io.NopCloser(strings.NewReader(`{"State":{"Health":{"Status":"healthy"}}}`)),
|
||||||
|
Request: req,
|
||||||
|
}, nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unexpected path: %s", req.URL.EscapedPath())
|
||||||
|
}
|
||||||
|
})},
|
||||||
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
|
apiStats: &container.ApiStats{},
|
||||||
|
usingPodman: true,
|
||||||
|
lastCpuContainer: make(map[uint16]map[string]uint64),
|
||||||
|
lastCpuSystem: make(map[uint16]map[string]uint64),
|
||||||
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
}
|
||||||
|
|
||||||
|
ctr := &container.ApiInfo{
|
||||||
|
IdShort: "0123456789ab",
|
||||||
|
Names: []string{"/beszel"},
|
||||||
|
Status: "Up 2 minutes",
|
||||||
|
Image: "beszel:latest",
|
||||||
|
}
|
||||||
|
|
||||||
|
err := dm.updateContainerStats(ctr, defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, []string{"/containers/0123456789ab/stats", "/containers/0123456789ab/json"}, requestedPaths)
|
||||||
|
assert.Equal(t, container.DockerHealthHealthy, dm.containerStatsMap[ctr.IdShort].Health)
|
||||||
|
assert.Equal(t, "Up 2 minutes", dm.containerStatsMap[ctr.IdShort].Status)
|
||||||
|
}
|
||||||
|
|
||||||
func TestConstantsAndUtilityFunctions(t *testing.T) {
|
func TestConstantsAndUtilityFunctions(t *testing.T) {
|
||||||
// Test constants are properly defined
|
// Test constants are properly defined
|
||||||
assert.Equal(t, uint16(60000), defaultCacheTimeMs)
|
assert.Equal(t, uint16(60000), defaultCacheTimeMs)
|
||||||
@@ -940,13 +1495,13 @@ func TestConstantsAndUtilityFunctions(t *testing.T) {
|
|||||||
assert.Equal(t, 5*1024*1024, maxTotalLogSize) // 5MB
|
assert.Equal(t, 5*1024*1024, maxTotalLogSize) // 5MB
|
||||||
|
|
||||||
// Test utility functions
|
// Test utility functions
|
||||||
assert.Equal(t, 1.5, twoDecimals(1.499))
|
assert.Equal(t, 1.5, utils.TwoDecimals(1.499))
|
||||||
assert.Equal(t, 1.5, twoDecimals(1.5))
|
assert.Equal(t, 1.5, utils.TwoDecimals(1.5))
|
||||||
assert.Equal(t, 1.5, twoDecimals(1.501))
|
assert.Equal(t, 1.5, utils.TwoDecimals(1.501))
|
||||||
|
|
||||||
assert.Equal(t, 1.0, bytesToMegabytes(1048576)) // 1 MB
|
assert.Equal(t, 1.0, utils.BytesToMegabytes(1048576)) // 1 MB
|
||||||
assert.Equal(t, 0.5, bytesToMegabytes(524288)) // 512 KB
|
assert.Equal(t, 0.5, utils.BytesToMegabytes(524288)) // 512 KB
|
||||||
assert.Equal(t, 0.0, bytesToMegabytes(0))
|
assert.Equal(t, 0.0, utils.BytesToMegabytes(0))
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDecodeDockerLogStream(t *testing.T) {
|
func TestDecodeDockerLogStream(t *testing.T) {
|
||||||
@@ -1246,3 +1801,99 @@ func TestAnsiEscapePattern(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestConvertContainerPortsToString(t *testing.T) {
|
||||||
|
type port = struct {
|
||||||
|
PublicPort uint16
|
||||||
|
IP string
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
ports []port
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "empty ports",
|
||||||
|
ports: nil,
|
||||||
|
expected: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "single port",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
},
|
||||||
|
expected: "80",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "single port with non-default IP",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 80, IP: "1.2.3.4"},
|
||||||
|
},
|
||||||
|
expected: "1.2.3.4:80",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ipv6 default ip",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 80, IP: "::"},
|
||||||
|
},
|
||||||
|
expected: "80",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "zero PublicPort is skipped",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 0, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
},
|
||||||
|
expected: "80",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ports sorted ascending by PublicPort",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 443, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 8080, IP: "0.0.0.0"},
|
||||||
|
},
|
||||||
|
expected: "80, 443, 8080",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "duplicates are deduplicated",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 443, IP: "0.0.0.0"},
|
||||||
|
},
|
||||||
|
expected: "80, 443",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "multiple ports with different IPs",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 443, IP: "1.2.3.4"},
|
||||||
|
},
|
||||||
|
expected: "80, 1.2.3.4:443",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ports slice is nilled after call",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 8080, IP: "0.0.0.0"},
|
||||||
|
},
|
||||||
|
expected: "8080",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
ctr := &container.ApiInfo{}
|
||||||
|
for _, p := range tt.ports {
|
||||||
|
ctr.Ports = append(ctr.Ports, struct {
|
||||||
|
PublicPort uint16
|
||||||
|
IP string
|
||||||
|
}{PublicPort: p.PublicPort, IP: p.IP})
|
||||||
|
}
|
||||||
|
result := convertContainerPortsToString(ctr)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
// Ports slice must be cleared to prevent bleed-over into the next response
|
||||||
|
assert.Nil(t, ctr.Ports, "ctr.Ports should be nil after formatContainerPorts")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/smart"
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -141,9 +142,9 @@ func readEmmcHealth(blockName string) (emmcHealth, bool) {
|
|||||||
out.lifeA = lifeA
|
out.lifeA = lifeA
|
||||||
out.lifeB = lifeB
|
out.lifeB = lifeB
|
||||||
|
|
||||||
out.model = readStringFile(filepath.Join(deviceDir, "name"))
|
out.model = utils.ReadStringFile(filepath.Join(deviceDir, "name"))
|
||||||
out.serial = readStringFile(filepath.Join(deviceDir, "serial"))
|
out.serial = utils.ReadStringFile(filepath.Join(deviceDir, "serial"))
|
||||||
out.revision = readStringFile(filepath.Join(deviceDir, "prv"))
|
out.revision = utils.ReadStringFile(filepath.Join(deviceDir, "prv"))
|
||||||
|
|
||||||
if capBytes, ok := readBlockCapacityBytes(blockName); ok {
|
if capBytes, ok := readBlockCapacityBytes(blockName); ok {
|
||||||
out.capacity = capBytes
|
out.capacity = capBytes
|
||||||
@@ -153,7 +154,7 @@ func readEmmcHealth(blockName string) (emmcHealth, bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func readLifeTime(deviceDir string) (uint8, uint8, bool) {
|
func readLifeTime(deviceDir string) (uint8, uint8, bool) {
|
||||||
if content, ok := readStringFileOK(filepath.Join(deviceDir, "life_time")); ok {
|
if content, ok := utils.ReadStringFileOK(filepath.Join(deviceDir, "life_time")); ok {
|
||||||
a, b, ok := parseHexBytePair(content)
|
a, b, ok := parseHexBytePair(content)
|
||||||
return a, b, ok
|
return a, b, ok
|
||||||
}
|
}
|
||||||
@@ -170,7 +171,7 @@ func readBlockCapacityBytes(blockName string) (uint64, bool) {
|
|||||||
sizePath := filepath.Join(emmcSysfsRoot, "class", "block", blockName, "size")
|
sizePath := filepath.Join(emmcSysfsRoot, "class", "block", blockName, "size")
|
||||||
lbsPath := filepath.Join(emmcSysfsRoot, "class", "block", blockName, "queue", "logical_block_size")
|
lbsPath := filepath.Join(emmcSysfsRoot, "class", "block", blockName, "queue", "logical_block_size")
|
||||||
|
|
||||||
sizeStr, ok := readStringFileOK(sizePath)
|
sizeStr, ok := utils.ReadStringFileOK(sizePath)
|
||||||
if !ok {
|
if !ok {
|
||||||
return 0, false
|
return 0, false
|
||||||
}
|
}
|
||||||
@@ -179,7 +180,7 @@ func readBlockCapacityBytes(blockName string) (uint64, bool) {
|
|||||||
return 0, false
|
return 0, false
|
||||||
}
|
}
|
||||||
|
|
||||||
lbsStr, ok := readStringFileOK(lbsPath)
|
lbsStr, ok := utils.ReadStringFileOK(lbsPath)
|
||||||
logicalBlockSize := uint64(512)
|
logicalBlockSize := uint64(512)
|
||||||
if ok {
|
if ok {
|
||||||
if parsed, err := strconv.ParseUint(lbsStr, 10, 64); err == nil && parsed > 0 {
|
if parsed, err := strconv.ParseUint(lbsStr, 10, 64); err == nil && parsed > 0 {
|
||||||
@@ -191,7 +192,7 @@ func readBlockCapacityBytes(blockName string) (uint64, bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func readHexByteFile(path string) (uint8, bool) {
|
func readHexByteFile(path string) (uint8, bool) {
|
||||||
content, ok := readStringFileOK(path)
|
content, ok := utils.ReadStringFileOK(path)
|
||||||
if !ok {
|
if !ok {
|
||||||
return 0, false
|
return 0, false
|
||||||
}
|
}
|
||||||
@@ -199,19 +200,6 @@ func readHexByteFile(path string) (uint8, bool) {
|
|||||||
return b, ok
|
return b, ok
|
||||||
}
|
}
|
||||||
|
|
||||||
func readStringFile(path string) string {
|
|
||||||
content, _ := readStringFileOK(path)
|
|
||||||
return content
|
|
||||||
}
|
|
||||||
|
|
||||||
func readStringFileOK(path string) (string, bool) {
|
|
||||||
b, err := os.ReadFile(path)
|
|
||||||
if err != nil {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
return strings.TrimSpace(string(b)), true
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasEmmcHealthFiles(deviceDir string) bool {
|
func hasEmmcHealthFiles(deviceDir string) bool {
|
||||||
entries, err := os.ReadDir(deviceDir)
|
entries, err := os.ReadDir(deviceDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
|
|||||||
91
agent/gpu.go
91
agent/gpu.go
@@ -9,11 +9,13 @@ import (
|
|||||||
"maps"
|
"maps"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -23,6 +25,8 @@ const (
|
|||||||
rocmSmiCmd string = "rocm-smi"
|
rocmSmiCmd string = "rocm-smi"
|
||||||
tegraStatsCmd string = "tegrastats"
|
tegraStatsCmd string = "tegrastats"
|
||||||
nvtopCmd string = "nvtop"
|
nvtopCmd string = "nvtop"
|
||||||
|
powermetricsCmd string = "powermetrics"
|
||||||
|
macmonCmd string = "macmon"
|
||||||
noGPUFoundMsg string = "no GPU found - see https://beszel.dev/guide/gpu"
|
noGPUFoundMsg string = "no GPU found - see https://beszel.dev/guide/gpu"
|
||||||
|
|
||||||
// Command retry and timeout constants
|
// Command retry and timeout constants
|
||||||
@@ -88,9 +92,12 @@ const (
|
|||||||
collectorSourceIntelGpuTop collectorSource = collectorSource(intelGpuStatsCmd)
|
collectorSourceIntelGpuTop collectorSource = collectorSource(intelGpuStatsCmd)
|
||||||
collectorSourceAmdSysfs collectorSource = "amd_sysfs"
|
collectorSourceAmdSysfs collectorSource = "amd_sysfs"
|
||||||
collectorSourceRocmSMI collectorSource = collectorSource(rocmSmiCmd)
|
collectorSourceRocmSMI collectorSource = collectorSource(rocmSmiCmd)
|
||||||
|
collectorSourceMacmon collectorSource = collectorSource(macmonCmd)
|
||||||
|
collectorSourcePowermetrics collectorSource = collectorSource(powermetricsCmd)
|
||||||
collectorGroupNvidia string = "nvidia"
|
collectorGroupNvidia string = "nvidia"
|
||||||
collectorGroupIntel string = "intel"
|
collectorGroupIntel string = "intel"
|
||||||
collectorGroupAmd string = "amd"
|
collectorGroupAmd string = "amd"
|
||||||
|
collectorGroupApple string = "apple"
|
||||||
)
|
)
|
||||||
|
|
||||||
func isValidCollectorSource(source collectorSource) bool {
|
func isValidCollectorSource(source collectorSource) bool {
|
||||||
@@ -100,7 +107,9 @@ func isValidCollectorSource(source collectorSource) bool {
|
|||||||
collectorSourceNvidiaSMI,
|
collectorSourceNvidiaSMI,
|
||||||
collectorSourceIntelGpuTop,
|
collectorSourceIntelGpuTop,
|
||||||
collectorSourceAmdSysfs,
|
collectorSourceAmdSysfs,
|
||||||
collectorSourceRocmSMI:
|
collectorSourceRocmSMI,
|
||||||
|
collectorSourceMacmon,
|
||||||
|
collectorSourcePowermetrics:
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
@@ -114,6 +123,8 @@ type gpuCapabilities struct {
|
|||||||
hasTegrastats bool
|
hasTegrastats bool
|
||||||
hasIntelGpuTop bool
|
hasIntelGpuTop bool
|
||||||
hasNvtop bool
|
hasNvtop bool
|
||||||
|
hasMacmon bool
|
||||||
|
hasPowermetrics bool
|
||||||
}
|
}
|
||||||
|
|
||||||
type collectorDefinition struct {
|
type collectorDefinition struct {
|
||||||
@@ -281,8 +292,8 @@ func (gm *GPUManager) parseAmdData(output []byte) bool {
|
|||||||
}
|
}
|
||||||
gpu := gm.GpuDataMap[id]
|
gpu := gm.GpuDataMap[id]
|
||||||
gpu.Temperature, _ = strconv.ParseFloat(v.Temperature, 64)
|
gpu.Temperature, _ = strconv.ParseFloat(v.Temperature, 64)
|
||||||
gpu.MemoryUsed = bytesToMegabytes(memoryUsage)
|
gpu.MemoryUsed = utils.BytesToMegabytes(memoryUsage)
|
||||||
gpu.MemoryTotal = bytesToMegabytes(totalMemory)
|
gpu.MemoryTotal = utils.BytesToMegabytes(totalMemory)
|
||||||
gpu.Usage += usage
|
gpu.Usage += usage
|
||||||
gpu.Power += power
|
gpu.Power += power
|
||||||
gpu.Count++
|
gpu.Count++
|
||||||
@@ -356,16 +367,16 @@ func (gm *GPUManager) calculateGPUAverage(id string, gpu *system.GPUData, cacheK
|
|||||||
gpuAvg := *gpu
|
gpuAvg := *gpu
|
||||||
deltaUsage, deltaPower, deltaPowerPkg := gm.calculateDeltas(gpu, lastSnapshot)
|
deltaUsage, deltaPower, deltaPowerPkg := gm.calculateDeltas(gpu, lastSnapshot)
|
||||||
|
|
||||||
gpuAvg.Power = twoDecimals(deltaPower / float64(deltaCount))
|
gpuAvg.Power = utils.TwoDecimals(deltaPower / float64(deltaCount))
|
||||||
|
|
||||||
if gpu.Engines != nil {
|
if gpu.Engines != nil {
|
||||||
// make fresh map for averaged engine metrics to avoid mutating
|
// make fresh map for averaged engine metrics to avoid mutating
|
||||||
// the accumulator map stored in gm.GpuDataMap
|
// the accumulator map stored in gm.GpuDataMap
|
||||||
gpuAvg.Engines = make(map[string]float64, len(gpu.Engines))
|
gpuAvg.Engines = make(map[string]float64, len(gpu.Engines))
|
||||||
gpuAvg.Usage = gm.calculateIntelGPUUsage(&gpuAvg, gpu, lastSnapshot, deltaCount)
|
gpuAvg.Usage = gm.calculateIntelGPUUsage(&gpuAvg, gpu, lastSnapshot, deltaCount)
|
||||||
gpuAvg.PowerPkg = twoDecimals(deltaPowerPkg / float64(deltaCount))
|
gpuAvg.PowerPkg = utils.TwoDecimals(deltaPowerPkg / float64(deltaCount))
|
||||||
} else {
|
} else {
|
||||||
gpuAvg.Usage = twoDecimals(deltaUsage / float64(deltaCount))
|
gpuAvg.Usage = utils.TwoDecimals(deltaUsage / float64(deltaCount))
|
||||||
}
|
}
|
||||||
|
|
||||||
gm.lastAvgData[id] = gpuAvg
|
gm.lastAvgData[id] = gpuAvg
|
||||||
@@ -400,17 +411,17 @@ func (gm *GPUManager) calculateIntelGPUUsage(gpuAvg, gpu *system.GPUData, lastSn
|
|||||||
} else {
|
} else {
|
||||||
deltaEngine = engine
|
deltaEngine = engine
|
||||||
}
|
}
|
||||||
gpuAvg.Engines[name] = twoDecimals(deltaEngine / float64(deltaCount))
|
gpuAvg.Engines[name] = utils.TwoDecimals(deltaEngine / float64(deltaCount))
|
||||||
maxEngineUsage = max(maxEngineUsage, deltaEngine/float64(deltaCount))
|
maxEngineUsage = max(maxEngineUsage, deltaEngine/float64(deltaCount))
|
||||||
}
|
}
|
||||||
return twoDecimals(maxEngineUsage)
|
return utils.TwoDecimals(maxEngineUsage)
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateInstantaneousValues updates values that should reflect current state, not averages
|
// updateInstantaneousValues updates values that should reflect current state, not averages
|
||||||
func (gm *GPUManager) updateInstantaneousValues(gpuAvg *system.GPUData, gpu *system.GPUData) {
|
func (gm *GPUManager) updateInstantaneousValues(gpuAvg *system.GPUData, gpu *system.GPUData) {
|
||||||
gpuAvg.Temperature = twoDecimals(gpu.Temperature)
|
gpuAvg.Temperature = utils.TwoDecimals(gpu.Temperature)
|
||||||
gpuAvg.MemoryUsed = twoDecimals(gpu.MemoryUsed)
|
gpuAvg.MemoryUsed = utils.TwoDecimals(gpu.MemoryUsed)
|
||||||
gpuAvg.MemoryTotal = twoDecimals(gpu.MemoryTotal)
|
gpuAvg.MemoryTotal = utils.TwoDecimals(gpu.MemoryTotal)
|
||||||
}
|
}
|
||||||
|
|
||||||
// storeSnapshot saves the current GPU state for this cache key
|
// storeSnapshot saves the current GPU state for this cache key
|
||||||
@@ -449,11 +460,19 @@ func (gm *GPUManager) discoverGpuCapabilities() gpuCapabilities {
|
|||||||
if _, err := exec.LookPath(nvtopCmd); err == nil {
|
if _, err := exec.LookPath(nvtopCmd); err == nil {
|
||||||
caps.hasNvtop = true
|
caps.hasNvtop = true
|
||||||
}
|
}
|
||||||
|
if runtime.GOOS == "darwin" {
|
||||||
|
if _, err := utils.LookPathHomebrew(macmonCmd); err == nil {
|
||||||
|
caps.hasMacmon = true
|
||||||
|
}
|
||||||
|
if _, err := exec.LookPath(powermetricsCmd); err == nil {
|
||||||
|
caps.hasPowermetrics = true
|
||||||
|
}
|
||||||
|
}
|
||||||
return caps
|
return caps
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasAnyGpuCollector(caps gpuCapabilities) bool {
|
func hasAnyGpuCollector(caps gpuCapabilities) bool {
|
||||||
return caps.hasNvidiaSmi || caps.hasRocmSmi || caps.hasAmdSysfs || caps.hasTegrastats || caps.hasIntelGpuTop || caps.hasNvtop
|
return caps.hasNvidiaSmi || caps.hasRocmSmi || caps.hasAmdSysfs || caps.hasTegrastats || caps.hasIntelGpuTop || caps.hasNvtop || caps.hasMacmon || caps.hasPowermetrics
|
||||||
}
|
}
|
||||||
|
|
||||||
func (gm *GPUManager) startIntelCollector() {
|
func (gm *GPUManager) startIntelCollector() {
|
||||||
@@ -523,7 +542,7 @@ func (gm *GPUManager) collectorDefinitions(caps gpuCapabilities) map[collectorSo
|
|||||||
return map[collectorSource]collectorDefinition{
|
return map[collectorSource]collectorDefinition{
|
||||||
collectorSourceNVML: {
|
collectorSourceNVML: {
|
||||||
group: collectorGroupNvidia,
|
group: collectorGroupNvidia,
|
||||||
available: caps.hasNvidiaSmi,
|
available: true,
|
||||||
start: func(_ func()) bool {
|
start: func(_ func()) bool {
|
||||||
return gm.startNvmlCollector()
|
return gm.startNvmlCollector()
|
||||||
},
|
},
|
||||||
@@ -567,6 +586,22 @@ func (gm *GPUManager) collectorDefinitions(caps gpuCapabilities) map[collectorSo
|
|||||||
return true
|
return true
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
collectorSourceMacmon: {
|
||||||
|
group: collectorGroupApple,
|
||||||
|
available: caps.hasMacmon,
|
||||||
|
start: func(_ func()) bool {
|
||||||
|
gm.startMacmonCollector()
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
},
|
||||||
|
collectorSourcePowermetrics: {
|
||||||
|
group: collectorGroupApple,
|
||||||
|
available: caps.hasPowermetrics,
|
||||||
|
start: func(_ func()) bool {
|
||||||
|
gm.startPowermetricsCollector()
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -653,7 +688,7 @@ func (gm *GPUManager) resolveLegacyCollectorPriority(caps gpuCapabilities) []col
|
|||||||
priorities := make([]collectorSource, 0, 4)
|
priorities := make([]collectorSource, 0, 4)
|
||||||
|
|
||||||
if caps.hasNvidiaSmi && !caps.hasTegrastats {
|
if caps.hasNvidiaSmi && !caps.hasTegrastats {
|
||||||
if nvml, _ := GetEnv("NVML"); nvml == "true" {
|
if nvml, _ := utils.GetEnv("NVML"); nvml == "true" {
|
||||||
priorities = append(priorities, collectorSourceNVML, collectorSourceNvidiaSMI)
|
priorities = append(priorities, collectorSourceNVML, collectorSourceNvidiaSMI)
|
||||||
} else {
|
} else {
|
||||||
priorities = append(priorities, collectorSourceNvidiaSMI)
|
priorities = append(priorities, collectorSourceNvidiaSMI)
|
||||||
@@ -661,7 +696,7 @@ func (gm *GPUManager) resolveLegacyCollectorPriority(caps gpuCapabilities) []col
|
|||||||
}
|
}
|
||||||
|
|
||||||
if caps.hasRocmSmi {
|
if caps.hasRocmSmi {
|
||||||
if val, _ := GetEnv("AMD_SYSFS"); val == "true" {
|
if val, _ := utils.GetEnv("AMD_SYSFS"); val == "true" {
|
||||||
priorities = append(priorities, collectorSourceAmdSysfs)
|
priorities = append(priorities, collectorSourceAmdSysfs)
|
||||||
} else {
|
} else {
|
||||||
priorities = append(priorities, collectorSourceRocmSMI)
|
priorities = append(priorities, collectorSourceRocmSMI)
|
||||||
@@ -674,7 +709,18 @@ func (gm *GPUManager) resolveLegacyCollectorPriority(caps gpuCapabilities) []col
|
|||||||
priorities = append(priorities, collectorSourceIntelGpuTop)
|
priorities = append(priorities, collectorSourceIntelGpuTop)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Keep nvtop as a legacy last resort only when no vendor collector exists.
|
// Apple collectors are currently opt-in only for testing.
|
||||||
|
// Enable them with GPU_COLLECTOR=macmon or GPU_COLLECTOR=powermetrics.
|
||||||
|
// TODO: uncomment below when Apple collectors are confirmed to be working.
|
||||||
|
//
|
||||||
|
// Prefer macmon on macOS (no sudo). Fall back to powermetrics if present.
|
||||||
|
// if caps.hasMacmon {
|
||||||
|
// priorities = append(priorities, collectorSourceMacmon)
|
||||||
|
// } else if caps.hasPowermetrics {
|
||||||
|
// priorities = append(priorities, collectorSourcePowermetrics)
|
||||||
|
// }
|
||||||
|
|
||||||
|
// Keep nvtop as a last resort only when no vendor collector exists.
|
||||||
if len(priorities) == 0 && caps.hasNvtop {
|
if len(priorities) == 0 && caps.hasNvtop {
|
||||||
priorities = append(priorities, collectorSourceNVTop)
|
priorities = append(priorities, collectorSourceNVTop)
|
||||||
}
|
}
|
||||||
@@ -683,14 +729,11 @@ func (gm *GPUManager) resolveLegacyCollectorPriority(caps gpuCapabilities) []col
|
|||||||
|
|
||||||
// NewGPUManager creates and initializes a new GPUManager
|
// NewGPUManager creates and initializes a new GPUManager
|
||||||
func NewGPUManager() (*GPUManager, error) {
|
func NewGPUManager() (*GPUManager, error) {
|
||||||
if skipGPU, _ := GetEnv("SKIP_GPU"); skipGPU == "true" {
|
if skipGPU, _ := utils.GetEnv("SKIP_GPU"); skipGPU == "true" {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
var gm GPUManager
|
var gm GPUManager
|
||||||
caps := gm.discoverGpuCapabilities()
|
caps := gm.discoverGpuCapabilities()
|
||||||
if !hasAnyGpuCollector(caps) {
|
|
||||||
return nil, fmt.Errorf(noGPUFoundMsg)
|
|
||||||
}
|
|
||||||
gm.GpuDataMap = make(map[string]*system.GPUData)
|
gm.GpuDataMap = make(map[string]*system.GPUData)
|
||||||
|
|
||||||
// Jetson devices should always use tegrastats (ignore GPU_COLLECTOR).
|
// Jetson devices should always use tegrastats (ignore GPU_COLLECTOR).
|
||||||
@@ -699,8 +742,8 @@ func NewGPUManager() (*GPUManager, error) {
|
|||||||
return &gm, nil
|
return &gm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// if GPU_COLLECTOR is set, start user-defined collectors.
|
// Respect explicit collector selection before capability auto-detection.
|
||||||
if collectorConfig, ok := GetEnv("GPU_COLLECTOR"); ok && strings.TrimSpace(collectorConfig) != "" {
|
if collectorConfig, ok := utils.GetEnv("GPU_COLLECTOR"); ok && strings.TrimSpace(collectorConfig) != "" {
|
||||||
priorities := parseCollectorPriority(collectorConfig)
|
priorities := parseCollectorPriority(collectorConfig)
|
||||||
if gm.startCollectorsByPriority(priorities, caps) == 0 {
|
if gm.startCollectorsByPriority(priorities, caps) == 0 {
|
||||||
return nil, fmt.Errorf("no configured GPU collectors are available")
|
return nil, fmt.Errorf("no configured GPU collectors are available")
|
||||||
@@ -708,6 +751,10 @@ func NewGPUManager() (*GPUManager, error) {
|
|||||||
return &gm, nil
|
return &gm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !hasAnyGpuCollector(caps) {
|
||||||
|
return nil, fmt.Errorf(noGPUFoundMsg)
|
||||||
|
}
|
||||||
|
|
||||||
// auto-detect and start collectors when GPU_COLLECTOR is unset.
|
// auto-detect and start collectors when GPU_COLLECTOR is unset.
|
||||||
if gm.startCollectorsByPriority(gm.resolveLegacyCollectorPriority(caps), caps) == 0 {
|
if gm.startCollectorsByPriority(gm.resolveLegacyCollectorPriority(caps), caps) == 0 {
|
||||||
return nil, fmt.Errorf(noGPUFoundMsg)
|
return nil, fmt.Errorf(noGPUFoundMsg)
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -32,8 +33,8 @@ func (gm *GPUManager) hasAmdSysfs() bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
for _, vendorPath := range cards {
|
for _, vendorPath := range cards {
|
||||||
vendor, err := os.ReadFile(vendorPath)
|
vendor, err := utils.ReadStringFileLimited(vendorPath, 64)
|
||||||
if err == nil && strings.TrimSpace(string(vendor)) == "0x1002" {
|
if err == nil && vendor == "0x1002" {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -87,12 +88,11 @@ func (gm *GPUManager) collectAmdStats() error {
|
|||||||
|
|
||||||
// isAmdGpu checks whether a DRM card path belongs to AMD vendor ID 0x1002.
|
// isAmdGpu checks whether a DRM card path belongs to AMD vendor ID 0x1002.
|
||||||
func isAmdGpu(cardPath string) bool {
|
func isAmdGpu(cardPath string) bool {
|
||||||
vendorPath := filepath.Join(cardPath, "device/vendor")
|
vendor, err := utils.ReadStringFileLimited(filepath.Join(cardPath, "device/vendor"), 64)
|
||||||
vendor, err := os.ReadFile(vendorPath)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return strings.TrimSpace(string(vendor)) == "0x1002"
|
return vendor == "0x1002"
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateAmdGpuData reads GPU metrics from sysfs and updates the GPU data map.
|
// updateAmdGpuData reads GPU metrics from sysfs and updates the GPU data map.
|
||||||
@@ -103,10 +103,8 @@ func (gm *GPUManager) updateAmdGpuData(cardPath string) bool {
|
|||||||
|
|
||||||
// Read all sysfs values first (no lock needed - these can be slow)
|
// Read all sysfs values first (no lock needed - these can be slow)
|
||||||
usage, usageErr := readSysfsFloat(filepath.Join(devicePath, "gpu_busy_percent"))
|
usage, usageErr := readSysfsFloat(filepath.Join(devicePath, "gpu_busy_percent"))
|
||||||
vramUsed, memUsedErr := readSysfsFloat(filepath.Join(devicePath, "mem_info_vram_used"))
|
memUsed, memUsedErr := readSysfsFloat(filepath.Join(devicePath, "mem_info_vram_used"))
|
||||||
vramTotal, _ := readSysfsFloat(filepath.Join(devicePath, "mem_info_vram_total"))
|
memTotal, _ := readSysfsFloat(filepath.Join(devicePath, "mem_info_vram_total"))
|
||||||
memUsed := vramUsed
|
|
||||||
memTotal := vramTotal
|
|
||||||
// if gtt is present, add it to the memory used and total (https://github.com/henrygd/beszel/issues/1569#issuecomment-3837640484)
|
// if gtt is present, add it to the memory used and total (https://github.com/henrygd/beszel/issues/1569#issuecomment-3837640484)
|
||||||
if gttUsed, err := readSysfsFloat(filepath.Join(devicePath, "mem_info_gtt_used")); err == nil && gttUsed > 0 {
|
if gttUsed, err := readSysfsFloat(filepath.Join(devicePath, "mem_info_gtt_used")); err == nil && gttUsed > 0 {
|
||||||
if gttTotal, err := readSysfsFloat(filepath.Join(devicePath, "mem_info_gtt_total")); err == nil {
|
if gttTotal, err := readSysfsFloat(filepath.Join(devicePath, "mem_info_gtt_total")); err == nil {
|
||||||
@@ -146,8 +144,8 @@ func (gm *GPUManager) updateAmdGpuData(cardPath string) bool {
|
|||||||
if usageErr == nil {
|
if usageErr == nil {
|
||||||
gpu.Usage += usage
|
gpu.Usage += usage
|
||||||
}
|
}
|
||||||
gpu.MemoryUsed = bytesToMegabytes(memUsed)
|
gpu.MemoryUsed = utils.BytesToMegabytes(memUsed)
|
||||||
gpu.MemoryTotal = bytesToMegabytes(memTotal)
|
gpu.MemoryTotal = utils.BytesToMegabytes(memTotal)
|
||||||
gpu.Temperature = temp
|
gpu.Temperature = temp
|
||||||
gpu.Power += power
|
gpu.Power += power
|
||||||
gpu.Count++
|
gpu.Count++
|
||||||
@@ -156,11 +154,12 @@ func (gm *GPUManager) updateAmdGpuData(cardPath string) bool {
|
|||||||
|
|
||||||
// readSysfsFloat reads and parses a numeric value from a sysfs file.
|
// readSysfsFloat reads and parses a numeric value from a sysfs file.
|
||||||
func readSysfsFloat(path string) (float64, error) {
|
func readSysfsFloat(path string) (float64, error) {
|
||||||
val, err := os.ReadFile(path)
|
val, err := utils.ReadStringFileLimited(path, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
slog.Debug("Failed to read sysfs value", "path", path, "error", err)
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
return strconv.ParseFloat(strings.TrimSpace(string(val)), 64)
|
return strconv.ParseFloat(val, 64)
|
||||||
}
|
}
|
||||||
|
|
||||||
// normalizeHexID normalizes hex IDs by trimming spaces, lowercasing, and dropping 0x.
|
// normalizeHexID normalizes hex IDs by trimming spaces, lowercasing, and dropping 0x.
|
||||||
@@ -243,7 +242,10 @@ func getCachedAmdgpuName(deviceID, revisionID string) (name string, found bool,
|
|||||||
|
|
||||||
// normalizeAmdgpuName trims standard suffixes from AMDGPU product names.
|
// normalizeAmdgpuName trims standard suffixes from AMDGPU product names.
|
||||||
func normalizeAmdgpuName(name string) string {
|
func normalizeAmdgpuName(name string) string {
|
||||||
return strings.TrimSuffix(strings.TrimSpace(name), " Graphics")
|
for _, suffix := range []string{" Graphics", " Series"} {
|
||||||
|
name = strings.TrimSuffix(name, suffix)
|
||||||
|
}
|
||||||
|
return name
|
||||||
}
|
}
|
||||||
|
|
||||||
// cacheAmdgpuName stores a resolved AMDGPU name in the lookup cache.
|
// cacheAmdgpuName stores a resolved AMDGPU name in the lookup cache.
|
||||||
@@ -272,16 +274,16 @@ func cacheMissingAmdgpuName(deviceID, revisionID string) {
|
|||||||
// Falls back to showing the raw device ID if not found in the lookup table.
|
// Falls back to showing the raw device ID if not found in the lookup table.
|
||||||
func getAmdGpuName(devicePath string) string {
|
func getAmdGpuName(devicePath string) string {
|
||||||
// Try product_name first (works for some enterprise GPUs)
|
// Try product_name first (works for some enterprise GPUs)
|
||||||
if prod, err := os.ReadFile(filepath.Join(devicePath, "product_name")); err == nil {
|
if prod, err := utils.ReadStringFileLimited(filepath.Join(devicePath, "product_name"), 128); err == nil {
|
||||||
return strings.TrimSpace(string(prod))
|
return prod
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read PCI device ID and look it up
|
// Read PCI device ID and look it up
|
||||||
if deviceID, err := os.ReadFile(filepath.Join(devicePath, "device")); err == nil {
|
if deviceID, err := utils.ReadStringFileLimited(filepath.Join(devicePath, "device"), 64); err == nil {
|
||||||
id := normalizeHexID(string(deviceID))
|
id := normalizeHexID(deviceID)
|
||||||
revision := ""
|
revision := ""
|
||||||
if revBytes, revErr := os.ReadFile(filepath.Join(devicePath, "revision")); revErr == nil {
|
if rev, revErr := utils.ReadStringFileLimited(filepath.Join(devicePath, "revision"), 64); revErr == nil {
|
||||||
revision = normalizeHexID(string(revBytes))
|
revision = normalizeHexID(rev)
|
||||||
}
|
}
|
||||||
|
|
||||||
if name, found, done := getCachedAmdgpuName(id, revision); found {
|
if name, found, done := getCachedAmdgpuName(id, revision); found {
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
@@ -128,14 +129,14 @@ func TestUpdateAmdGpuDataWithFakeSysfs(t *testing.T) {
|
|||||||
{
|
{
|
||||||
name: "sums vram and gtt when gtt is present",
|
name: "sums vram and gtt when gtt is present",
|
||||||
writeGTT: true,
|
writeGTT: true,
|
||||||
wantMemoryUsed: bytesToMegabytes(1073741824 + 536870912),
|
wantMemoryUsed: utils.BytesToMegabytes(1073741824 + 536870912),
|
||||||
wantMemoryTotal: bytesToMegabytes(2147483648 + 4294967296),
|
wantMemoryTotal: utils.BytesToMegabytes(2147483648 + 4294967296),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "falls back to vram when gtt is missing",
|
name: "falls back to vram when gtt is missing",
|
||||||
writeGTT: false,
|
writeGTT: false,
|
||||||
wantMemoryUsed: bytesToMegabytes(1073741824),
|
wantMemoryUsed: utils.BytesToMegabytes(1073741824),
|
||||||
wantMemoryTotal: bytesToMegabytes(2147483648),
|
wantMemoryTotal: utils.BytesToMegabytes(2147483648),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
257
agent/gpu_darwin.go
Normal file
257
agent/gpu_darwin.go
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
//go:build darwin
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"log/slog"
|
||||||
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// powermetricsSampleIntervalMs is the sampling interval passed to powermetrics (-i).
|
||||||
|
powermetricsSampleIntervalMs = 500
|
||||||
|
// powermetricsPollInterval is how often we run powermetrics to collect a new sample.
|
||||||
|
powermetricsPollInterval = 2 * time.Second
|
||||||
|
// macmonIntervalMs is the sampling interval passed to macmon pipe (-i), in milliseconds.
|
||||||
|
macmonIntervalMs = 2500
|
||||||
|
)
|
||||||
|
|
||||||
|
const appleGPUID = "0"
|
||||||
|
|
||||||
|
// startPowermetricsCollector runs powermetrics --samplers gpu_power in a loop and updates
|
||||||
|
// GPU usage and power. Requires root (sudo) on macOS. A single logical GPU is reported as id "0".
|
||||||
|
func (gm *GPUManager) startPowermetricsCollector() {
|
||||||
|
// Ensure single GPU entry for Apple GPU
|
||||||
|
if _, ok := gm.GpuDataMap[appleGPUID]; !ok {
|
||||||
|
gm.GpuDataMap[appleGPUID] = &system.GPUData{Name: "Apple GPU"}
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
failures := 0
|
||||||
|
for {
|
||||||
|
if err := gm.collectPowermetrics(); err != nil {
|
||||||
|
failures++
|
||||||
|
if failures > maxFailureRetries {
|
||||||
|
slog.Warn("powermetrics GPU collector failed repeatedly, stopping", "err", err)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
slog.Warn("Error collecting macOS GPU data via powermetrics (may require sudo)", "err", err)
|
||||||
|
time.Sleep(retryWaitTime)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
failures = 0
|
||||||
|
time.Sleep(powermetricsPollInterval)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// collectPowermetrics runs powermetrics once and parses GPU usage and power from its output.
|
||||||
|
func (gm *GPUManager) collectPowermetrics() error {
|
||||||
|
interval := strconv.Itoa(powermetricsSampleIntervalMs)
|
||||||
|
cmd := exec.Command(powermetricsCmd, "--samplers", "gpu_power", "-i", interval, "-n", "1")
|
||||||
|
cmd.Stderr = nil
|
||||||
|
out, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !gm.parsePowermetricsData(out) {
|
||||||
|
return errNoValidData
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parsePowermetricsData parses powermetrics gpu_power output and updates GpuDataMap["0"].
|
||||||
|
// Example output:
|
||||||
|
//
|
||||||
|
// **** GPU usage ****
|
||||||
|
// GPU HW active frequency: 444 MHz
|
||||||
|
// GPU HW active residency: 0.97% (444 MHz: .97% ...
|
||||||
|
// GPU idle residency: 99.03%
|
||||||
|
// GPU Power: 4 mW
|
||||||
|
func (gm *GPUManager) parsePowermetricsData(output []byte) bool {
|
||||||
|
var idleResidency, powerMW float64
|
||||||
|
var gotIdle, gotPower bool
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(bytes.NewReader(output))
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := strings.TrimSpace(scanner.Text())
|
||||||
|
if strings.HasPrefix(line, "GPU idle residency:") {
|
||||||
|
// "GPU idle residency: 99.03%"
|
||||||
|
fields := strings.Fields(strings.TrimPrefix(line, "GPU idle residency:"))
|
||||||
|
if len(fields) >= 1 {
|
||||||
|
pct := strings.TrimSuffix(fields[0], "%")
|
||||||
|
if v, err := strconv.ParseFloat(pct, 64); err == nil {
|
||||||
|
idleResidency = v
|
||||||
|
gotIdle = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if strings.HasPrefix(line, "GPU Power:") {
|
||||||
|
// "GPU Power: 4 mW"
|
||||||
|
fields := strings.Fields(strings.TrimPrefix(line, "GPU Power:"))
|
||||||
|
if len(fields) >= 1 {
|
||||||
|
if v, err := strconv.ParseFloat(fields[0], 64); err == nil {
|
||||||
|
powerMW = v
|
||||||
|
gotPower = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !gotIdle && !gotPower {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
gm.Lock()
|
||||||
|
defer gm.Unlock()
|
||||||
|
|
||||||
|
if _, ok := gm.GpuDataMap[appleGPUID]; !ok {
|
||||||
|
gm.GpuDataMap[appleGPUID] = &system.GPUData{Name: "Apple GPU"}
|
||||||
|
}
|
||||||
|
gpu := gm.GpuDataMap[appleGPUID]
|
||||||
|
|
||||||
|
if gotIdle {
|
||||||
|
// Usage = 100 - idle residency (e.g. 100 - 99.03 = 0.97%)
|
||||||
|
gpu.Usage += 100 - idleResidency
|
||||||
|
}
|
||||||
|
if gotPower {
|
||||||
|
// mW -> W
|
||||||
|
gpu.Power += powerMW / milliwattsInAWatt
|
||||||
|
}
|
||||||
|
gpu.Count++
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// startMacmonCollector runs `macmon pipe` in a loop and parses one JSON object per line.
// This collector does not require sudo. A single logical GPU is reported as id "0".
func (gm *GPUManager) startMacmonCollector() {
	// Pre-register the Apple GPU entry so the device exists in the map even
	// before the first sample arrives.
	if _, ok := gm.GpuDataMap[appleGPUID]; !ok {
		gm.GpuDataMap[appleGPUID] = &system.GPUData{Name: "Apple GPU"}
	}

	go func() {
		failures := 0
		for {
			if err := gm.collectMacmonPipe(); err != nil {
				failures++
				// Give up permanently after too many consecutive failures.
				if failures > maxFailureRetries {
					slog.Warn("macmon GPU collector failed repeatedly, stopping", "err", err)
					break
				}
				slog.Warn("Error collecting macOS GPU data via macmon", "err", err)
				time.Sleep(retryWaitTime)
				continue
			}
			// A clean run resets the consecutive-failure counter.
			failures = 0
			// `macmon pipe` is long-running; if it returns, wait a bit before restarting.
			time.Sleep(retryWaitTime)
		}
	}()
}
|
||||||
|
|
||||||
|
// macmonTemp holds the "temp" section of a macmon sample.
type macmonTemp struct {
	// GPUTempAvg is the average GPU temperature reported by macmon
	// (presumably degrees Celsius — TODO confirm against macmon docs).
	GPUTempAvg float64 `json:"gpu_temp_avg"`
}
|
||||||
|
|
||||||
|
// macmonSample is one JSON object emitted per line by `macmon pipe`.
// Only the fields consumed by the GPU collector are decoded.
type macmonSample struct {
	GPUPower    float64    `json:"gpu_power"`     // watts (macmon reports fractional values)
	GPURAMPower float64    `json:"gpu_ram_power"` // watts
	GPUUsage    []float64  `json:"gpu_usage"`     // [freq_mhz, usage] where usage is typically 0..1
	Temp        macmonTemp `json:"temp"`
}
|
||||||
|
|
||||||
|
// collectMacmonPipe launches `macmon pipe` and consumes its stdout until the
// process exits, feeding each non-empty line to parseMacmonLine.
// It returns an error when macmon cannot be located or started, when reading
// stdout fails, when the process exits non-zero, or when no valid sample was
// parsed (errNoValidData).
func (gm *GPUManager) collectMacmonPipe() (err error) {
	macmonPath, err := utils.LookPathHomebrew(macmonCmd)
	if err != nil {
		return err
	}
	cmd := exec.Command(macmonPath, "pipe", "-i", strconv.Itoa(macmonIntervalMs))
	// Avoid blocking if macmon writes to stderr.
	cmd.Stderr = io.Discard
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}

	// Ensure we always reap the child to avoid zombies on any return path and
	// propagate a non-zero exit code if no other error was set.
	defer func() {
		_ = stdout.Close()
		// ProcessState is only set after Wait; kill if the child may still run.
		if cmd.ProcessState == nil || !cmd.ProcessState.Exited() {
			_ = cmd.Process.Kill()
		}
		// Named return: surface Wait's error only when nothing else failed.
		if waitErr := cmd.Wait(); err == nil && waitErr != nil {
			err = waitErr
		}
	}()

	scanner := bufio.NewScanner(stdout)
	var hadSample bool
	for scanner.Scan() {
		line := bytes.TrimSpace(scanner.Bytes())
		if len(line) == 0 {
			continue
		}
		if gm.parseMacmonLine(line) {
			hadSample = true
		}
	}
	if scanErr := scanner.Err(); scanErr != nil {
		return scanErr
	}
	if !hadSample {
		return errNoValidData
	}
	return nil
}
|
||||||
|
|
||||||
|
// parseMacmonLine parses a single macmon JSON line and updates Apple GPU metrics.
|
||||||
|
func (gm *GPUManager) parseMacmonLine(line []byte) bool {
|
||||||
|
var sample macmonSample
|
||||||
|
if err := json.Unmarshal(line, &sample); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
usage := 0.0
|
||||||
|
if len(sample.GPUUsage) >= 2 {
|
||||||
|
usage = sample.GPUUsage[1]
|
||||||
|
// Heuristic: macmon typically reports 0..1; convert to percentage.
|
||||||
|
if usage <= 1.0 {
|
||||||
|
usage *= 100
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Consider the line valid if it contains at least one GPU metric.
|
||||||
|
if usage == 0 && sample.GPUPower == 0 && sample.Temp.GPUTempAvg == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
gm.Lock()
|
||||||
|
defer gm.Unlock()
|
||||||
|
|
||||||
|
gpu, ok := gm.GpuDataMap[appleGPUID]
|
||||||
|
if !ok {
|
||||||
|
gpu = &system.GPUData{Name: "Apple GPU"}
|
||||||
|
gm.GpuDataMap[appleGPUID] = gpu
|
||||||
|
}
|
||||||
|
gpu.Temperature = sample.Temp.GPUTempAvg
|
||||||
|
gpu.Usage += usage
|
||||||
|
// macmon reports power in watts; include VRAM power if present.
|
||||||
|
gpu.Power += sample.GPUPower + sample.GPURAMPower
|
||||||
|
gpu.Count++
|
||||||
|
return true
|
||||||
|
}
|
||||||
81
agent/gpu_darwin_test.go
Normal file
81
agent/gpu_darwin_test.go
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
//go:build darwin
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestParsePowermetricsData feeds a full powermetrics "GPU usage" section and
// verifies usage (100 - idle residency) and power (mW -> W) for GPU id "0".
func TestParsePowermetricsData(t *testing.T) {
	input := `
Machine model: Mac14,10
OS version: 25D125

*** Sampled system activity (Sat Feb 14 00:42:06 2026 -0500) (503.05ms elapsed) ***

**** GPU usage ****

GPU HW active frequency: 444 MHz
GPU HW active residency: 0.97% (444 MHz: .97% 612 MHz: 0% 808 MHz: 0% 968 MHz: 0% 1110 MHz: 0% 1236 MHz: 0% 1338 MHz: 0% 1398 MHz: 0%)
GPU SW requested state: (P1 : 100% P2 : 0% P3 : 0% P4 : 0% P5 : 0% P6 : 0% P7 : 0% P8 : 0%)
GPU idle residency: 99.03%
GPU Power: 4 mW
`
	gm := &GPUManager{
		GpuDataMap: make(map[string]*system.GPUData),
	}
	valid := gm.parsePowermetricsData([]byte(input))
	require.True(t, valid)

	g0, ok := gm.GpuDataMap["0"]
	require.True(t, ok)
	assert.Equal(t, "Apple GPU", g0.Name)
	// Usage = 100 - 99.03 = 0.97
	assert.InDelta(t, 0.97, g0.Usage, 0.01)
	// 4 mW -> 0.004 W
	assert.InDelta(t, 0.004, g0.Power, 0.0001)
	assert.Equal(t, 1.0, g0.Count)
}
|
||||||
|
|
||||||
|
// TestParsePowermetricsDataPartial verifies that output containing only the
// power line still counts as a valid sample and records power for GPU "0".
func TestParsePowermetricsDataPartial(t *testing.T) {
	// Only power line (e.g. older macOS or different sampler output)
	input := `
**** GPU usage ****
GPU Power: 120 mW
`
	gm := &GPUManager{
		GpuDataMap: make(map[string]*system.GPUData),
	}
	valid := gm.parsePowermetricsData([]byte(input))
	require.True(t, valid)

	g0, ok := gm.GpuDataMap["0"]
	require.True(t, ok)
	assert.Equal(t, "Apple GPU", g0.Name)
	// 120 mW -> 0.12 W
	assert.InDelta(t, 0.12, g0.Power, 0.001)
	assert.Equal(t, 1.0, g0.Count)
}
|
||||||
|
|
||||||
|
// TestParseMacmonLine decodes a captured macmon JSON line and verifies the
// 0..1 usage fraction is converted to percent and gpu_ram_power is added to
// gpu_power.
func TestParseMacmonLine(t *testing.T) {
	input := `{"all_power":0.6468324661254883,"ane_power":0.0,"cpu_power":0.6359732151031494,"ecpu_usage":[2061,0.1726151406764984],"gpu_power":0.010859241709113121,"gpu_ram_power":0.000965250947047025,"gpu_usage":[503,0.013633215799927711],"memory":{"ram_total":17179869184,"ram_usage":12322914304,"swap_total":0,"swap_usage":0},"pcpu_usage":[1248,0.11792058497667313],"ram_power":0.14885640144348145,"sys_power":10.4955415725708,"temp":{"cpu_temp_avg":23.041261672973633,"gpu_temp_avg":29.44516944885254},"timestamp":"2026-02-17T19:34:27.942556+00:00"}`

	gm := &GPUManager{
		GpuDataMap: make(map[string]*system.GPUData),
	}
	valid := gm.parseMacmonLine([]byte(input))
	require.True(t, valid)

	g0, ok := gm.GpuDataMap["0"]
	require.True(t, ok)
	assert.Equal(t, "Apple GPU", g0.Name)
	// macmon reports usage fraction 0..1; expect percent conversion.
	assert.InDelta(t, 1.3633, g0.Usage, 0.05)
	// power includes gpu_power + gpu_ram_power
	assert.InDelta(t, 0.011824, g0.Power, 0.0005)
	assert.InDelta(t, 29.445, g0.Temperature, 0.01)
	assert.Equal(t, 1.0, g0.Count)
}
|
||||||
9
agent/gpu_darwin_unsupported.go
Normal file
9
agent/gpu_darwin_unsupported.go
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
//go:build !darwin
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
// startPowermetricsCollector is a no-op on non-darwin platforms; the real
// implementation is in gpu_darwin.go.
func (gm *GPUManager) startPowermetricsCollector() {}

// startMacmonCollector is a no-op on non-darwin platforms; the real
// implementation is in gpu_darwin.go.
func (gm *GPUManager) startMacmonCollector() {}
|
||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -52,7 +53,7 @@ func (gm *GPUManager) updateIntelFromStats(sample *intelGpuStats) bool {
|
|||||||
func (gm *GPUManager) collectIntelStats() (err error) {
|
func (gm *GPUManager) collectIntelStats() (err error) {
|
||||||
// Build command arguments, optionally selecting a device via -d
|
// Build command arguments, optionally selecting a device via -d
|
||||||
args := []string{"-s", intelGpuStatsInterval, "-l"}
|
args := []string{"-s", intelGpuStatsInterval, "-l"}
|
||||||
if dev, ok := GetEnv("INTEL_GPU_DEVICE"); ok && dev != "" {
|
if dev, ok := utils.GetEnv("INTEL_GPU_DEVICE"); ok && dev != "" {
|
||||||
args = append(args, "-d", dev)
|
args = append(args, "-d", dev)
|
||||||
}
|
}
|
||||||
cmd := exec.Command(intelGpuStatsCmd, args...)
|
cmd := exec.Command(intelGpuStatsCmd, args...)
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -80,10 +81,10 @@ func (gm *GPUManager) updateNvtopSnapshots(snapshots []nvtopSnapshot) bool {
|
|||||||
gpu.Temperature = parseNvtopNumber(*sample.Temp)
|
gpu.Temperature = parseNvtopNumber(*sample.Temp)
|
||||||
}
|
}
|
||||||
if sample.MemUsed != nil {
|
if sample.MemUsed != nil {
|
||||||
gpu.MemoryUsed = bytesToMegabytes(parseNvtopNumber(*sample.MemUsed))
|
gpu.MemoryUsed = utils.BytesToMegabytes(parseNvtopNumber(*sample.MemUsed))
|
||||||
}
|
}
|
||||||
if sample.MemTotal != nil {
|
if sample.MemTotal != nil {
|
||||||
gpu.MemoryTotal = bytesToMegabytes(parseNvtopNumber(*sample.MemTotal))
|
gpu.MemoryTotal = utils.BytesToMegabytes(parseNvtopNumber(*sample.MemTotal))
|
||||||
}
|
}
|
||||||
if sample.GpuUtil != nil {
|
if sample.GpuUtil != nil {
|
||||||
gpu.Usage += parseNvtopNumber(*sample.GpuUtil)
|
gpu.Usage += parseNvtopNumber(*sample.GpuUtil)
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -11,6 +10,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -266,8 +266,8 @@ func TestParseNvtopData(t *testing.T) {
|
|||||||
assert.Equal(t, 48.0, g0.Temperature)
|
assert.Equal(t, 48.0, g0.Temperature)
|
||||||
assert.Equal(t, 5.0, g0.Usage)
|
assert.Equal(t, 5.0, g0.Usage)
|
||||||
assert.Equal(t, 13.0, g0.Power)
|
assert.Equal(t, 13.0, g0.Power)
|
||||||
assert.Equal(t, bytesToMegabytes(349372416), g0.MemoryUsed)
|
assert.Equal(t, utils.BytesToMegabytes(349372416), g0.MemoryUsed)
|
||||||
assert.Equal(t, bytesToMegabytes(4294967296), g0.MemoryTotal)
|
assert.Equal(t, utils.BytesToMegabytes(4294967296), g0.MemoryTotal)
|
||||||
assert.Equal(t, 1.0, g0.Count)
|
assert.Equal(t, 1.0, g0.Count)
|
||||||
|
|
||||||
g1, ok := gm.GpuDataMap["n1"]
|
g1, ok := gm.GpuDataMap["n1"]
|
||||||
@@ -276,8 +276,8 @@ func TestParseNvtopData(t *testing.T) {
|
|||||||
assert.Equal(t, 48.0, g1.Temperature)
|
assert.Equal(t, 48.0, g1.Temperature)
|
||||||
assert.Equal(t, 12.0, g1.Usage)
|
assert.Equal(t, 12.0, g1.Usage)
|
||||||
assert.Equal(t, 9.0, g1.Power)
|
assert.Equal(t, 9.0, g1.Power)
|
||||||
assert.Equal(t, bytesToMegabytes(1213784064), g1.MemoryUsed)
|
assert.Equal(t, utils.BytesToMegabytes(1213784064), g1.MemoryUsed)
|
||||||
assert.Equal(t, bytesToMegabytes(16929173504), g1.MemoryTotal)
|
assert.Equal(t, utils.BytesToMegabytes(16929173504), g1.MemoryTotal)
|
||||||
assert.Equal(t, 1.0, g1.Count)
|
assert.Equal(t, 1.0, g1.Count)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1083,8 +1083,6 @@ func TestCalculateGPUAverage(t *testing.T) {
|
|||||||
|
|
||||||
func TestGPUCapabilitiesAndLegacyPriority(t *testing.T) {
|
func TestGPUCapabilitiesAndLegacyPriority(t *testing.T) {
|
||||||
// Save original PATH
|
// Save original PATH
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
hasAmdSysfs := (&GPUManager{}).hasAmdSysfs()
|
hasAmdSysfs := (&GPUManager{}).hasAmdSysfs()
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
@@ -1178,7 +1176,7 @@ echo "[]"`
|
|||||||
{
|
{
|
||||||
name: "no gpu tools available",
|
name: "no gpu tools available",
|
||||||
setupCommands: func(_ string) error {
|
setupCommands: func(_ string) error {
|
||||||
os.Setenv("PATH", "")
|
t.Setenv("PATH", "")
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
wantErr: true,
|
wantErr: true,
|
||||||
@@ -1188,7 +1186,7 @@ echo "[]"`
|
|||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
tempDir := t.TempDir()
|
tempDir := t.TempDir()
|
||||||
os.Setenv("PATH", tempDir)
|
t.Setenv("PATH", tempDir)
|
||||||
if err := tt.setupCommands(tempDir); err != nil {
|
if err := tt.setupCommands(tempDir); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -1234,13 +1232,9 @@ echo "[]"`
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCollectorStartHelpers(t *testing.T) {
|
func TestCollectorStartHelpers(t *testing.T) {
|
||||||
// Save original PATH
|
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
// Set up temp dir with the commands
|
// Set up temp dir with the commands
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -1370,11 +1364,8 @@ echo '[{"device_name":"NVIDIA Test GPU","temp":"52C","power_draw":"31W","gpu_uti
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewGPUManagerPriorityNvtopFallback(t *testing.T) {
|
func TestNewGPUManagerPriorityNvtopFallback(t *testing.T) {
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvtop,nvidia-smi")
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvtop,nvidia-smi")
|
||||||
|
|
||||||
nvtopPath := filepath.Join(dir, "nvtop")
|
nvtopPath := filepath.Join(dir, "nvtop")
|
||||||
@@ -1399,11 +1390,8 @@ echo "0, NVIDIA Priority GPU, 45, 512, 2048, 12, 25"`
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewGPUManagerPriorityMixedCollectors(t *testing.T) {
|
func TestNewGPUManagerPriorityMixedCollectors(t *testing.T) {
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "intel_gpu_top,rocm-smi")
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "intel_gpu_top,rocm-smi")
|
||||||
|
|
||||||
intelPath := filepath.Join(dir, "intel_gpu_top")
|
intelPath := filepath.Join(dir, "intel_gpu_top")
|
||||||
@@ -1433,11 +1421,8 @@ echo '{"card0": {"Temperature (Sensor edge) (C)": "49.0", "Current Socket Graphi
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewGPUManagerPriorityNvmlFallbackToNvidiaSmi(t *testing.T) {
|
func TestNewGPUManagerPriorityNvmlFallbackToNvidiaSmi(t *testing.T) {
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvml,nvidia-smi")
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvml,nvidia-smi")
|
||||||
|
|
||||||
nvidiaPath := filepath.Join(dir, "nvidia-smi")
|
nvidiaPath := filepath.Join(dir, "nvidia-smi")
|
||||||
@@ -1456,11 +1441,8 @@ echo "0, NVIDIA Fallback GPU, 41, 256, 1024, 8, 14"`
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewGPUManagerConfiguredCollectorsMustStart(t *testing.T) {
|
func TestNewGPUManagerConfiguredCollectorsMustStart(t *testing.T) {
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
|
|
||||||
t.Run("configured valid collector unavailable", func(t *testing.T) {
|
t.Run("configured valid collector unavailable", func(t *testing.T) {
|
||||||
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvidia-smi")
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvidia-smi")
|
||||||
@@ -1479,12 +1461,28 @@ func TestNewGPUManagerConfiguredCollectorsMustStart(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewGPUManagerJetsonIgnoresCollectorConfig(t *testing.T) {
|
func TestCollectorDefinitionsNvmlDoesNotRequireNvidiaSmi(t *testing.T) {
|
||||||
origPath := os.Getenv("PATH")
|
gm := &GPUManager{}
|
||||||
defer os.Setenv("PATH", origPath)
|
definitions := gm.collectorDefinitions(gpuCapabilities{})
|
||||||
|
require.Contains(t, definitions, collectorSourceNVML)
|
||||||
|
assert.True(t, definitions[collectorSourceNVML].available)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewGPUManagerConfiguredNvmlBypassesCapabilityGate(t *testing.T) {
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvml")
|
||||||
|
|
||||||
|
gm, err := NewGPUManager()
|
||||||
|
require.Nil(t, gm)
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "no configured GPU collectors are available")
|
||||||
|
assert.NotContains(t, err.Error(), noGPUFoundMsg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewGPUManagerJetsonIgnoresCollectorConfig(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
t.Setenv("PATH", dir)
|
||||||
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvidia-smi")
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvidia-smi")
|
||||||
|
|
||||||
tegraPath := filepath.Join(dir, "tegrastats")
|
tegraPath := filepath.Join(dir, "tegrastats")
|
||||||
@@ -1719,12 +1717,8 @@ func TestIntelUpdateFromStats(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestIntelCollectorStreaming(t *testing.T) {
|
func TestIntelCollectorStreaming(t *testing.T) {
|
||||||
// Save and override PATH
|
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
|
|
||||||
// Create a fake intel_gpu_top that prints -l format with four samples (first will be skipped) and exits
|
// Create a fake intel_gpu_top that prints -l format with four samples (first will be skipped) and exits
|
||||||
scriptPath := filepath.Join(dir, "intel_gpu_top")
|
scriptPath := filepath.Join(dir, "intel_gpu_top")
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package health
|
package health
|
||||||
|
|
||||||
@@ -37,7 +36,6 @@ func TestHealth(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
// This test uses synctest to simulate time passing.
|
// This test uses synctest to simulate time passing.
|
||||||
// NOTE: This test requires GOEXPERIMENT=synctest to run.
|
|
||||||
t.Run("check with simulated time", func(t *testing.T) {
|
t.Run("check with simulated time", func(t *testing.T) {
|
||||||
synctest.Test(t, func(t *testing.T) {
|
synctest.Test(t, func(t *testing.T) {
|
||||||
// Update the file to set the initial timestamp.
|
// Update the file to set the initial timestamp.
|
||||||
|
|||||||
@@ -8,6 +8,6 @@
|
|||||||
</PropertyGroup>
|
</PropertyGroup>
|
||||||
|
|
||||||
<ItemGroup>
|
<ItemGroup>
|
||||||
<PackageReference Include="LibreHardwareMonitorLib" Version="0.9.5" />
|
<PackageReference Include="LibreHardwareMonitorLib" Version="0.9.6" />
|
||||||
</ItemGroup>
|
</ItemGroup>
|
||||||
</Project>
|
</Project>
|
||||||
|
|||||||
233
agent/mdraid_linux.go
Normal file
233
agent/mdraid_linux.go
Normal file
@@ -0,0 +1,233 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mdraidSysfsRoot is a test hook; production value is "/sys".
|
||||||
|
var mdraidSysfsRoot = "/sys"
|
||||||
|
|
||||||
|
type mdraidHealth struct {
|
||||||
|
level string
|
||||||
|
arrayState string
|
||||||
|
degraded uint64
|
||||||
|
raidDisks uint64
|
||||||
|
syncAction string
|
||||||
|
syncCompleted string
|
||||||
|
syncSpeed string
|
||||||
|
mismatchCnt uint64
|
||||||
|
capacity uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanMdraidDevices discovers Linux md arrays exposed in sysfs.
|
||||||
|
func scanMdraidDevices() []*DeviceInfo {
|
||||||
|
blockDir := filepath.Join(mdraidSysfsRoot, "block")
|
||||||
|
entries, err := os.ReadDir(blockDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
devices := make([]*DeviceInfo, 0, 2)
|
||||||
|
for _, ent := range entries {
|
||||||
|
name := ent.Name()
|
||||||
|
if !isMdraidBlockName(name) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
mdDir := filepath.Join(blockDir, name, "md")
|
||||||
|
if !utils.FileExists(filepath.Join(mdDir, "array_state")) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
devPath := filepath.Join("/dev", name)
|
||||||
|
devices = append(devices, &DeviceInfo{
|
||||||
|
Name: devPath,
|
||||||
|
Type: "mdraid",
|
||||||
|
InfoName: devPath + " [mdraid]",
|
||||||
|
Protocol: "MD",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return devices
|
||||||
|
}
|
||||||
|
|
||||||
|
// collectMdraidHealth reads mdraid health from sysfs for the given device and
// stores a SMART-like record in SmartDataMap under "mdraid:<blockName>".
// It returns (false, nil) when deviceInfo does not describe an md array or
// its sysfs entries cannot be read; the error result is currently always nil.
func (sm *SmartManager) collectMdraidHealth(deviceInfo *DeviceInfo) (bool, error) {
	if deviceInfo == nil || deviceInfo.Name == "" {
		return false, nil
	}

	base := filepath.Base(deviceInfo.Name)
	// Accept mdN-style names, or anything the caller already typed as mdraid.
	if !isMdraidBlockName(base) && !strings.EqualFold(deviceInfo.Type, "mdraid") {
		return false, nil
	}

	health, ok := readMdraidHealth(base)
	if !ok {
		return false, nil
	}

	deviceInfo.Type = "mdraid"
	key := fmt.Sprintf("mdraid:%s", base)
	status := mdraidSmartStatus(health)

	// Surface the raw md fields as pseudo-SMART attributes; empty or zero
	// values are omitted.
	attrs := make([]*smart.SmartAttribute, 0, 10)
	if health.arrayState != "" {
		attrs = append(attrs, &smart.SmartAttribute{Name: "ArrayState", RawString: health.arrayState})
	}
	if health.level != "" {
		attrs = append(attrs, &smart.SmartAttribute{Name: "RaidLevel", RawString: health.level})
	}
	if health.raidDisks > 0 {
		attrs = append(attrs, &smart.SmartAttribute{Name: "RaidDisks", RawValue: health.raidDisks})
	}
	if health.degraded > 0 {
		attrs = append(attrs, &smart.SmartAttribute{Name: "Degraded", RawValue: health.degraded})
	}
	if health.syncAction != "" {
		attrs = append(attrs, &smart.SmartAttribute{Name: "SyncAction", RawString: health.syncAction})
	}
	if health.syncCompleted != "" {
		attrs = append(attrs, &smart.SmartAttribute{Name: "SyncCompleted", RawString: health.syncCompleted})
	}
	if health.syncSpeed != "" {
		attrs = append(attrs, &smart.SmartAttribute{Name: "SyncSpeed", RawString: health.syncSpeed})
	}
	if health.mismatchCnt > 0 {
		attrs = append(attrs, &smart.SmartAttribute{Name: "MismatchCount", RawValue: health.mismatchCnt})
	}

	sm.Lock()
	defer sm.Unlock()

	// Create the entry on first sight, then update it in place so any
	// existing pointer held by readers stays valid.
	if _, exists := sm.SmartDataMap[key]; !exists {
		sm.SmartDataMap[key] = &smart.SmartData{}
	}

	data := sm.SmartDataMap[key]
	data.ModelName = "Linux MD RAID"
	if health.level != "" {
		data.ModelName = "Linux MD RAID (" + health.level + ")"
	}
	data.Capacity = health.capacity
	data.SmartStatus = status
	data.DiskName = filepath.Join("/dev", base)
	data.DiskType = "mdraid"
	data.Attributes = attrs

	return true, nil
}
||||||
|
|
||||||
|
// readMdraidHealth reads md array health fields from sysfs for a block device
// name such as "md0". It returns ok=false when the name is not an md device
// or the mandatory array_state file cannot be read; all remaining fields are
// filled best-effort.
func readMdraidHealth(blockName string) (mdraidHealth, bool) {
	var out mdraidHealth

	if !isMdraidBlockName(blockName) {
		return out, false
	}

	mdDir := filepath.Join(mdraidSysfsRoot, "block", blockName, "md")
	// array_state is required; without it the device is not a usable md array.
	arrayState, okState := utils.ReadStringFileOK(filepath.Join(mdDir, "array_state"))
	if !okState {
		return out, false
	}

	out.arrayState = arrayState
	out.level = utils.ReadStringFile(filepath.Join(mdDir, "level"))
	out.syncAction = utils.ReadStringFile(filepath.Join(mdDir, "sync_action"))
	out.syncCompleted = utils.ReadStringFile(filepath.Join(mdDir, "sync_completed"))
	out.syncSpeed = utils.ReadStringFile(filepath.Join(mdDir, "sync_speed"))

	if val, ok := utils.ReadUintFile(filepath.Join(mdDir, "raid_disks")); ok {
		out.raidDisks = val
	}
	if val, ok := utils.ReadUintFile(filepath.Join(mdDir, "degraded")); ok {
		out.degraded = val
	}
	if val, ok := utils.ReadUintFile(filepath.Join(mdDir, "mismatch_cnt")); ok {
		out.mismatchCnt = val
	}

	if capBytes, ok := readMdraidBlockCapacityBytes(blockName, mdraidSysfsRoot); ok {
		out.capacity = capBytes
	}

	return out, true
}
|
||||||
|
|
||||||
|
// mdraidSmartStatus maps md state/sync signals to a SMART-like status.
|
||||||
|
func mdraidSmartStatus(health mdraidHealth) string {
|
||||||
|
state := strings.ToLower(strings.TrimSpace(health.arrayState))
|
||||||
|
switch state {
|
||||||
|
case "inactive", "faulty", "broken", "stopped":
|
||||||
|
return "FAILED"
|
||||||
|
}
|
||||||
|
// During rebuild/recovery, arrays are often temporarily degraded; report as
|
||||||
|
// warning instead of hard failure while synchronization is in progress.
|
||||||
|
syncAction := strings.ToLower(strings.TrimSpace(health.syncAction))
|
||||||
|
switch syncAction {
|
||||||
|
case "resync", "recover", "reshape":
|
||||||
|
return "WARNING"
|
||||||
|
}
|
||||||
|
if health.degraded > 0 {
|
||||||
|
return "FAILED"
|
||||||
|
}
|
||||||
|
switch syncAction {
|
||||||
|
case "check", "repair":
|
||||||
|
return "WARNING"
|
||||||
|
}
|
||||||
|
switch state {
|
||||||
|
case "clean", "active", "active-idle", "write-pending", "read-auto", "readonly":
|
||||||
|
return "PASSED"
|
||||||
|
}
|
||||||
|
return "UNKNOWN"
|
||||||
|
}
|
||||||
|
|
||||||
|
// isMdraidBlockName reports whether name looks like an md block device:
// the literal prefix "md" followed by one or more decimal digits
// (e.g. "md0", "md127"). Partition names such as "md0p1" are rejected.
func isMdraidBlockName(name string) bool {
	const prefix = "md"
	if len(name) <= len(prefix) || name[:len(prefix)] != prefix {
		return false
	}
	for _, c := range name[len(prefix):] {
		if c < '0' || c > '9' {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// readMdraidBlockCapacityBytes converts block size metadata into bytes.
|
||||||
|
func readMdraidBlockCapacityBytes(blockName, root string) (uint64, bool) {
|
||||||
|
sizePath := filepath.Join(root, "block", blockName, "size")
|
||||||
|
lbsPath := filepath.Join(root, "block", blockName, "queue", "logical_block_size")
|
||||||
|
|
||||||
|
sizeStr, ok := utils.ReadStringFileOK(sizePath)
|
||||||
|
if !ok {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
sectors, err := strconv.ParseUint(sizeStr, 10, 64)
|
||||||
|
if err != nil || sectors == 0 {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
logicalBlockSize := uint64(512)
|
||||||
|
if lbsStr, ok := utils.ReadStringFileOK(lbsPath); ok {
|
||||||
|
if parsed, err := strconv.ParseUint(lbsStr, 10, 64); err == nil && parsed > 0 {
|
||||||
|
logicalBlockSize = parsed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return sectors * logicalBlockSize, true
|
||||||
|
}
|
||||||
103
agent/mdraid_linux_test.go
Normal file
103
agent/mdraid_linux_test.go
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestMdraidMockSysfsScanAndCollect builds a mock sysfs tree for a healthy
// raid1 array mid-resync, then verifies that scanMdraidDevices finds it and
// collectMdraidHealth records a WARNING (resync in progress) with identity,
// capacity, and pseudo-SMART attributes populated.
func TestMdraidMockSysfsScanAndCollect(t *testing.T) {
	tmp := t.TempDir()
	// Redirect the sysfs root to the mock tree for the duration of the test.
	prev := mdraidSysfsRoot
	mdraidSysfsRoot = tmp
	t.Cleanup(func() { mdraidSysfsRoot = prev })

	mdDir := filepath.Join(tmp, "block", "md0", "md")
	queueDir := filepath.Join(tmp, "block", "md0", "queue")
	if err := os.MkdirAll(mdDir, 0o755); err != nil {
		t.Fatal(err)
	}
	if err := os.MkdirAll(queueDir, 0o755); err != nil {
		t.Fatal(err)
	}

	write := func(path, content string) {
		t.Helper()
		if err := os.WriteFile(path, []byte(content), 0o644); err != nil {
			t.Fatal(err)
		}
	}

	// Populate the md health files; sync_action=resync should yield WARNING.
	write(filepath.Join(mdDir, "array_state"), "active\n")
	write(filepath.Join(mdDir, "level"), "raid1\n")
	write(filepath.Join(mdDir, "raid_disks"), "2\n")
	write(filepath.Join(mdDir, "degraded"), "0\n")
	write(filepath.Join(mdDir, "sync_action"), "resync\n")
	write(filepath.Join(mdDir, "sync_completed"), "10%\n")
	write(filepath.Join(mdDir, "sync_speed"), "100M\n")
	write(filepath.Join(mdDir, "mismatch_cnt"), "0\n")
	write(filepath.Join(queueDir, "logical_block_size"), "512\n")
	write(filepath.Join(tmp, "block", "md0", "size"), "2048\n")

	devs := scanMdraidDevices()
	if len(devs) != 1 {
		t.Fatalf("scanMdraidDevices() = %d devices, want 1", len(devs))
	}
	if devs[0].Name != "/dev/md0" || devs[0].Type != "mdraid" {
		t.Fatalf("scanMdraidDevices()[0] = %+v, want Name=/dev/md0 Type=mdraid", devs[0])
	}

	sm := &SmartManager{SmartDataMap: map[string]*smart.SmartData{}}
	ok, err := sm.collectMdraidHealth(devs[0])
	if err != nil || !ok {
		t.Fatalf("collectMdraidHealth() = (ok=%v, err=%v), want (true,nil)", ok, err)
	}
	if len(sm.SmartDataMap) != 1 {
		t.Fatalf("SmartDataMap len=%d, want 1", len(sm.SmartDataMap))
	}
	// Grab the single stored record regardless of its map key.
	var got *smart.SmartData
	for _, v := range sm.SmartDataMap {
		got = v
		break
	}
	if got == nil {
		t.Fatalf("SmartDataMap value nil")
	}
	if got.DiskType != "mdraid" || got.DiskName != "/dev/md0" {
		t.Fatalf("disk fields = (type=%q name=%q), want (mdraid,/dev/md0)", got.DiskType, got.DiskName)
	}
	if got.SmartStatus != "WARNING" {
		t.Fatalf("SmartStatus=%q, want WARNING", got.SmartStatus)
	}
	if got.ModelName == "" || got.Capacity == 0 {
		t.Fatalf("identity fields = (model=%q cap=%d), want non-empty model and cap>0", got.ModelName, got.Capacity)
	}
	if len(got.Attributes) < 5 {
		t.Fatalf("attributes len=%d, want >= 5", len(got.Attributes))
	}
}
|
||||||
|
|
||||||
|
func TestMdraidSmartStatus(t *testing.T) {
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "inactive"}); got != "FAILED" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(inactive) = %q, want FAILED", got)
|
||||||
|
}
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "active", degraded: 1, syncAction: "recover"}); got != "WARNING" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(degraded+recover) = %q, want WARNING", got)
|
||||||
|
}
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "active", degraded: 1}); got != "FAILED" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(degraded) = %q, want FAILED", got)
|
||||||
|
}
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "active", syncAction: "recover"}); got != "WARNING" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(recover) = %q, want WARNING", got)
|
||||||
|
}
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "clean"}); got != "PASSED" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(clean) = %q, want PASSED", got)
|
||||||
|
}
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "unknown"}); got != "UNKNOWN" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(unknown) = %q, want UNKNOWN", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
11
agent/mdraid_stub.go
Normal file
11
agent/mdraid_stub.go
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
//go:build !linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
func scanMdraidDevices() []*DeviceInfo {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *SmartManager) collectMdraidHealth(deviceInfo *DeviceInfo) (bool, error) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/agent/deltatracker"
|
"github.com/henrygd/beszel/agent/deltatracker"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
psutilNet "github.com/shirou/gopsutil/v4/net"
|
psutilNet "github.com/shirou/gopsutil/v4/net"
|
||||||
)
|
)
|
||||||
@@ -94,7 +95,7 @@ func (a *Agent) initializeNetIoStats() {
|
|||||||
a.netInterfaces = make(map[string]struct{}, 0)
|
a.netInterfaces = make(map[string]struct{}, 0)
|
||||||
|
|
||||||
// parse NICS env var for whitelist / blacklist
|
// parse NICS env var for whitelist / blacklist
|
||||||
nicsEnvVal, nicsEnvExists := GetEnv("NICS")
|
nicsEnvVal, nicsEnvExists := utils.GetEnv("NICS")
|
||||||
var nicCfg *NicConfig
|
var nicCfg *NicConfig
|
||||||
if nicsEnvExists {
|
if nicsEnvExists {
|
||||||
nicCfg = newNicConfig(nicsEnvVal)
|
nicCfg = newNicConfig(nicsEnvVal)
|
||||||
@@ -103,10 +104,7 @@ func (a *Agent) initializeNetIoStats() {
|
|||||||
// get current network I/O stats and record valid interfaces
|
// get current network I/O stats and record valid interfaces
|
||||||
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
||||||
for _, v := range netIO {
|
for _, v := range netIO {
|
||||||
if nicsEnvExists && !isValidNic(v.Name, nicCfg) {
|
if skipNetworkInterface(v, nicCfg) {
|
||||||
continue
|
|
||||||
}
|
|
||||||
if a.skipNetworkInterface(v) {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
slog.Info("Detected network interface", "name", v.Name, "sent", v.BytesSent, "recv", v.BytesRecv)
|
slog.Info("Detected network interface", "name", v.Name, "sent", v.BytesSent, "recv", v.BytesRecv)
|
||||||
@@ -215,10 +213,8 @@ func (a *Agent) applyNetworkTotals(
|
|||||||
totalBytesSent, totalBytesRecv uint64,
|
totalBytesSent, totalBytesRecv uint64,
|
||||||
bytesSentPerSecond, bytesRecvPerSecond uint64,
|
bytesSentPerSecond, bytesRecvPerSecond uint64,
|
||||||
) {
|
) {
|
||||||
networkSentPs := bytesToMegabytes(float64(bytesSentPerSecond))
|
if bytesSentPerSecond > 10_000_000_000 || bytesRecvPerSecond > 10_000_000_000 {
|
||||||
networkRecvPs := bytesToMegabytes(float64(bytesRecvPerSecond))
|
slog.Warn("Invalid net stats. Resetting.", "sent", bytesSentPerSecond, "recv", bytesRecvPerSecond)
|
||||||
if networkSentPs > 10_000 || networkRecvPs > 10_000 {
|
|
||||||
slog.Warn("Invalid net stats. Resetting.", "sent", networkSentPs, "recv", networkRecvPs)
|
|
||||||
for _, v := range netIO {
|
for _, v := range netIO {
|
||||||
if _, exists := a.netInterfaces[v.Name]; !exists {
|
if _, exists := a.netInterfaces[v.Name]; !exists {
|
||||||
continue
|
continue
|
||||||
@@ -228,21 +224,29 @@ func (a *Agent) applyNetworkTotals(
|
|||||||
a.initializeNetIoStats()
|
a.initializeNetIoStats()
|
||||||
delete(a.netIoStats, cacheTimeMs)
|
delete(a.netIoStats, cacheTimeMs)
|
||||||
delete(a.netInterfaceDeltaTrackers, cacheTimeMs)
|
delete(a.netInterfaceDeltaTrackers, cacheTimeMs)
|
||||||
systemStats.NetworkSent = 0
|
|
||||||
systemStats.NetworkRecv = 0
|
|
||||||
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = 0, 0
|
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = 0, 0
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
systemStats.NetworkSent = networkSentPs
|
|
||||||
systemStats.NetworkRecv = networkRecvPs
|
|
||||||
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = bytesSentPerSecond, bytesRecvPerSecond
|
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = bytesSentPerSecond, bytesRecvPerSecond
|
||||||
nis.BytesSent = totalBytesSent
|
nis.BytesSent = totalBytesSent
|
||||||
nis.BytesRecv = totalBytesRecv
|
nis.BytesRecv = totalBytesRecv
|
||||||
a.netIoStats[cacheTimeMs] = nis
|
a.netIoStats[cacheTimeMs] = nis
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Agent) skipNetworkInterface(v psutilNet.IOCountersStat) bool {
|
// skipNetworkInterface returns true if the network interface should be ignored.
|
||||||
|
func skipNetworkInterface(v psutilNet.IOCountersStat, nicCfg *NicConfig) bool {
|
||||||
|
if nicCfg != nil {
|
||||||
|
if !isValidNic(v.Name, nicCfg) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// In whitelist mode, we honor explicit inclusion without auto-filtering.
|
||||||
|
if !nicCfg.isBlacklist {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// In blacklist mode, still apply the auto-filter below.
|
||||||
|
}
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case strings.HasPrefix(v.Name, "lo"),
|
case strings.HasPrefix(v.Name, "lo"),
|
||||||
strings.HasPrefix(v.Name, "docker"),
|
strings.HasPrefix(v.Name, "docker"),
|
||||||
|
|||||||
@@ -261,6 +261,39 @@ func TestNewNicConfig(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
func TestSkipNetworkInterface(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
nic psutilNet.IOCountersStat
|
||||||
|
nicCfg *NicConfig
|
||||||
|
expectSkip bool
|
||||||
|
}{
|
||||||
|
{"loopback lo", psutilNet.IOCountersStat{Name: "lo", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"loopback lo0", psutilNet.IOCountersStat{Name: "lo0", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"docker prefix", psutilNet.IOCountersStat{Name: "docker0", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"br- prefix", psutilNet.IOCountersStat{Name: "br-lan", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"veth prefix", psutilNet.IOCountersStat{Name: "veth0abc", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"bond prefix", psutilNet.IOCountersStat{Name: "bond0", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"cali prefix", psutilNet.IOCountersStat{Name: "cali1234", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"zero BytesRecv", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 100, BytesRecv: 0}, nil, true},
|
||||||
|
{"zero BytesSent", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 0, BytesRecv: 100}, nil, true},
|
||||||
|
{"both zero", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 0, BytesRecv: 0}, nil, true},
|
||||||
|
{"normal eth0", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 100, BytesRecv: 200}, nil, false},
|
||||||
|
{"normal wlan0", psutilNet.IOCountersStat{Name: "wlan0", BytesSent: 1, BytesRecv: 1}, nil, false},
|
||||||
|
{"whitelist overrides skip (docker)", psutilNet.IOCountersStat{Name: "docker0", BytesSent: 100, BytesRecv: 100}, newNicConfig("docker0"), false},
|
||||||
|
{"whitelist overrides skip (lo)", psutilNet.IOCountersStat{Name: "lo", BytesSent: 100, BytesRecv: 100}, newNicConfig("lo"), false},
|
||||||
|
{"whitelist exclusion", psutilNet.IOCountersStat{Name: "eth1", BytesSent: 100, BytesRecv: 100}, newNicConfig("eth0"), true},
|
||||||
|
{"blacklist skip lo", psutilNet.IOCountersStat{Name: "lo", BytesSent: 100, BytesRecv: 100}, newNicConfig("-eth0"), true},
|
||||||
|
{"blacklist explicit eth0", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 100, BytesRecv: 100}, newNicConfig("-eth0"), true},
|
||||||
|
{"blacklist allow eth1", psutilNet.IOCountersStat{Name: "eth1", BytesSent: 100, BytesRecv: 100}, newNicConfig("-eth0"), false},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
assert.Equal(t, tt.expectSkip, skipNetworkInterface(tt.nic, tt.nicCfg))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestEnsureNetworkInterfacesMap(t *testing.T) {
|
func TestEnsureNetworkInterfacesMap(t *testing.T) {
|
||||||
var a Agent
|
var a Agent
|
||||||
var stats system.Stats
|
var stats system.Stats
|
||||||
@@ -383,8 +416,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesSent uint64
|
totalBytesSent uint64
|
||||||
totalBytesRecv uint64
|
totalBytesRecv uint64
|
||||||
expectReset bool
|
expectReset bool
|
||||||
expectedNetworkSent float64
|
|
||||||
expectedNetworkRecv float64
|
|
||||||
expectedBandwidthSent uint64
|
expectedBandwidthSent uint64
|
||||||
expectedBandwidthRecv uint64
|
expectedBandwidthRecv uint64
|
||||||
}{
|
}{
|
||||||
@@ -395,8 +426,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesSent: 10000000,
|
totalBytesSent: 10000000,
|
||||||
totalBytesRecv: 20000000,
|
totalBytesRecv: 20000000,
|
||||||
expectReset: false,
|
expectReset: false,
|
||||||
expectedNetworkSent: 0.95, // ~1 MB/s rounded to 2 decimals
|
|
||||||
expectedNetworkRecv: 1.91, // ~2 MB/s rounded to 2 decimals
|
|
||||||
expectedBandwidthSent: 1000000,
|
expectedBandwidthSent: 1000000,
|
||||||
expectedBandwidthRecv: 2000000,
|
expectedBandwidthRecv: 2000000,
|
||||||
},
|
},
|
||||||
@@ -424,18 +453,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesRecv: 20000000,
|
totalBytesRecv: 20000000,
|
||||||
expectReset: true,
|
expectReset: true,
|
||||||
},
|
},
|
||||||
{
|
|
||||||
name: "Valid network stats - at threshold boundary",
|
|
||||||
bytesSentPerSecond: 10485750000, // ~9999.99 MB/s (rounds to 9999.99)
|
|
||||||
bytesRecvPerSecond: 10485750000, // ~9999.99 MB/s (rounds to 9999.99)
|
|
||||||
totalBytesSent: 10000000,
|
|
||||||
totalBytesRecv: 20000000,
|
|
||||||
expectReset: false,
|
|
||||||
expectedNetworkSent: 9999.99,
|
|
||||||
expectedNetworkRecv: 9999.99,
|
|
||||||
expectedBandwidthSent: 10485750000,
|
|
||||||
expectedBandwidthRecv: 10485750000,
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
name: "Zero values",
|
name: "Zero values",
|
||||||
bytesSentPerSecond: 0,
|
bytesSentPerSecond: 0,
|
||||||
@@ -443,8 +460,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesSent: 0,
|
totalBytesSent: 0,
|
||||||
totalBytesRecv: 0,
|
totalBytesRecv: 0,
|
||||||
expectReset: false,
|
expectReset: false,
|
||||||
expectedNetworkSent: 0.0,
|
|
||||||
expectedNetworkRecv: 0.0,
|
|
||||||
expectedBandwidthSent: 0,
|
expectedBandwidthSent: 0,
|
||||||
expectedBandwidthRecv: 0,
|
expectedBandwidthRecv: 0,
|
||||||
},
|
},
|
||||||
@@ -481,14 +496,10 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
// Should have reset network tracking state - maps cleared and stats zeroed
|
// Should have reset network tracking state - maps cleared and stats zeroed
|
||||||
assert.NotContains(t, a.netIoStats, cacheTimeMs, "cache entry should be cleared after reset")
|
assert.NotContains(t, a.netIoStats, cacheTimeMs, "cache entry should be cleared after reset")
|
||||||
assert.NotContains(t, a.netInterfaceDeltaTrackers, cacheTimeMs, "tracker should be cleared on reset")
|
assert.NotContains(t, a.netInterfaceDeltaTrackers, cacheTimeMs, "tracker should be cleared on reset")
|
||||||
assert.Zero(t, systemStats.NetworkSent)
|
|
||||||
assert.Zero(t, systemStats.NetworkRecv)
|
|
||||||
assert.Zero(t, systemStats.Bandwidth[0])
|
assert.Zero(t, systemStats.Bandwidth[0])
|
||||||
assert.Zero(t, systemStats.Bandwidth[1])
|
assert.Zero(t, systemStats.Bandwidth[1])
|
||||||
} else {
|
} else {
|
||||||
// Should have applied stats
|
// Should have applied stats
|
||||||
assert.Equal(t, tt.expectedNetworkSent, systemStats.NetworkSent)
|
|
||||||
assert.Equal(t, tt.expectedNetworkRecv, systemStats.NetworkRecv)
|
|
||||||
assert.Equal(t, tt.expectedBandwidthSent, systemStats.Bandwidth[0])
|
assert.Equal(t, tt.expectedBandwidthSent, systemStats.Bandwidth[0])
|
||||||
assert.Equal(t, tt.expectedBandwidthRecv, systemStats.Bandwidth[1])
|
assert.Equal(t, tt.expectedBandwidthRecv, systemStats.Bandwidth[1])
|
||||||
|
|
||||||
|
|||||||
@@ -2,48 +2,67 @@ package agent
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"path"
|
"path"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/shirou/gopsutil/v4/common"
|
"github.com/shirou/gopsutil/v4/common"
|
||||||
"github.com/shirou/gopsutil/v4/sensors"
|
"github.com/shirou/gopsutil/v4/sensors"
|
||||||
)
|
)
|
||||||
|
|
||||||
type SensorConfig struct {
|
var errTemperatureFetchTimeout = errors.New("temperature collection timed out")
|
||||||
context context.Context
|
|
||||||
sensors map[string]struct{}
|
|
||||||
primarySensor string
|
|
||||||
isBlacklist bool
|
|
||||||
hasWildcards bool
|
|
||||||
skipCollection bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *Agent) newSensorConfig() *SensorConfig {
|
|
||||||
primarySensor, _ := GetEnv("PRIMARY_SENSOR")
|
|
||||||
sysSensors, _ := GetEnv("SYS_SENSORS")
|
|
||||||
sensorsEnvVal, sensorsSet := GetEnv("SENSORS")
|
|
||||||
skipCollection := sensorsSet && sensorsEnvVal == ""
|
|
||||||
|
|
||||||
return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, skipCollection)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
||||||
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
||||||
|
|
||||||
|
type SensorConfig struct {
|
||||||
|
context context.Context
|
||||||
|
sensors map[string]struct{}
|
||||||
|
primarySensor string
|
||||||
|
timeout time.Duration
|
||||||
|
isBlacklist bool
|
||||||
|
hasWildcards bool
|
||||||
|
skipCollection bool
|
||||||
|
firstRun bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Agent) newSensorConfig() *SensorConfig {
|
||||||
|
primarySensor, _ := utils.GetEnv("PRIMARY_SENSOR")
|
||||||
|
sysSensors, _ := utils.GetEnv("SYS_SENSORS")
|
||||||
|
sensorsEnvVal, sensorsSet := utils.GetEnv("SENSORS")
|
||||||
|
skipCollection := sensorsSet && sensorsEnvVal == ""
|
||||||
|
sensorsTimeout, _ := utils.GetEnv("SENSORS_TIMEOUT")
|
||||||
|
|
||||||
|
return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, sensorsTimeout, skipCollection)
|
||||||
|
}
|
||||||
|
|
||||||
// newSensorConfigWithEnv creates a SensorConfig with the provided environment variables
|
// newSensorConfigWithEnv creates a SensorConfig with the provided environment variables
|
||||||
// sensorsSet indicates if the SENSORS environment variable was explicitly set (even to empty string)
|
// sensorsSet indicates if the SENSORS environment variable was explicitly set (even to empty string)
|
||||||
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal string, skipCollection bool) *SensorConfig {
|
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, sensorsTimeout string, skipCollection bool) *SensorConfig {
|
||||||
|
timeout := 2 * time.Second
|
||||||
|
if sensorsTimeout != "" {
|
||||||
|
if d, err := time.ParseDuration(sensorsTimeout); err == nil {
|
||||||
|
timeout = d
|
||||||
|
} else {
|
||||||
|
slog.Warn("Invalid SENSORS_TIMEOUT", "value", sensorsTimeout)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
config := &SensorConfig{
|
config := &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: primarySensor,
|
primarySensor: primarySensor,
|
||||||
|
timeout: timeout,
|
||||||
skipCollection: skipCollection,
|
skipCollection: skipCollection,
|
||||||
|
firstRun: true,
|
||||||
sensors: make(map[string]struct{}),
|
sensors: make(map[string]struct{}),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -85,10 +104,12 @@ func (a *Agent) updateTemperatures(systemStats *system.Stats) {
|
|||||||
// reset high temp
|
// reset high temp
|
||||||
a.systemInfo.DashboardTemp = 0
|
a.systemInfo.DashboardTemp = 0
|
||||||
|
|
||||||
temps, err := a.getTempsWithPanicRecovery(getSensorTemps)
|
temps, err := a.getTempsWithTimeout(getSensorTemps)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// retry once on panic (gopsutil/issues/1832)
|
// retry once on panic (gopsutil/issues/1832)
|
||||||
temps, err = a.getTempsWithPanicRecovery(getSensorTemps)
|
if !errors.Is(err, errTemperatureFetchTimeout) {
|
||||||
|
temps, err = a.getTempsWithTimeout(getSensorTemps)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("Error updating temperatures", "err", err)
|
slog.Warn("Error updating temperatures", "err", err)
|
||||||
if len(systemStats.Temperatures) > 0 {
|
if len(systemStats.Temperatures) > 0 {
|
||||||
@@ -135,7 +156,7 @@ func (a *Agent) updateTemperatures(systemStats *system.Stats) {
|
|||||||
case sensorName:
|
case sensorName:
|
||||||
a.systemInfo.DashboardTemp = sensor.Temperature
|
a.systemInfo.DashboardTemp = sensor.Temperature
|
||||||
}
|
}
|
||||||
systemStats.Temperatures[sensorName] = twoDecimals(sensor.Temperature)
|
systemStats.Temperatures[sensorName] = utils.TwoDecimals(sensor.Temperature)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -151,6 +172,34 @@ func (a *Agent) getTempsWithPanicRecovery(getTemps getTempsFn) (temps []sensors.
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (a *Agent) getTempsWithTimeout(getTemps getTempsFn) ([]sensors.TemperatureStat, error) {
|
||||||
|
type result struct {
|
||||||
|
temps []sensors.TemperatureStat
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use a longer timeout on the first run to allow for initialization
|
||||||
|
// (e.g. Windows LHM subprocess startup)
|
||||||
|
timeout := a.sensorConfig.timeout
|
||||||
|
if a.sensorConfig.firstRun {
|
||||||
|
a.sensorConfig.firstRun = false
|
||||||
|
timeout = 10 * time.Second
|
||||||
|
}
|
||||||
|
|
||||||
|
resultCh := make(chan result, 1)
|
||||||
|
go func() {
|
||||||
|
temps, err := a.getTempsWithPanicRecovery(getTemps)
|
||||||
|
resultCh <- result{temps: temps, err: err}
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case res := <-resultCh:
|
||||||
|
return res.temps, res.err
|
||||||
|
case <-time.After(timeout):
|
||||||
|
return nil, errTemperatureFetchTimeout
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// isValidSensor checks if a sensor is valid based on the sensor name and the sensor config
|
// isValidSensor checks if a sensor is valid based on the sensor name and the sensor config
|
||||||
func isValidSensor(sensorName string, config *SensorConfig) bool {
|
func isValidSensor(sensorName string, config *SensorConfig) bool {
|
||||||
// if no sensors configured, everything is valid
|
// if no sensors configured, everything is valid
|
||||||
|
|||||||
@@ -1,13 +1,12 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
@@ -169,6 +168,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
primarySensor string
|
primarySensor string
|
||||||
sysSensors string
|
sysSensors string
|
||||||
sensors string
|
sensors string
|
||||||
|
sensorsTimeout string
|
||||||
skipCollection bool
|
skipCollection bool
|
||||||
expectedConfig *SensorConfig
|
expectedConfig *SensorConfig
|
||||||
}{
|
}{
|
||||||
@@ -180,12 +180,37 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "",
|
primarySensor: "",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{},
|
sensors: map[string]struct{}{},
|
||||||
isBlacklist: false,
|
isBlacklist: false,
|
||||||
hasWildcards: false,
|
hasWildcards: false,
|
||||||
skipCollection: false,
|
skipCollection: false,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "Custom timeout",
|
||||||
|
primarySensor: "",
|
||||||
|
sysSensors: "",
|
||||||
|
sensors: "",
|
||||||
|
sensorsTimeout: "5s",
|
||||||
|
expectedConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 5 * time.Second,
|
||||||
|
sensors: map[string]struct{}{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Invalid timeout falls back to default",
|
||||||
|
primarySensor: "",
|
||||||
|
sysSensors: "",
|
||||||
|
sensors: "",
|
||||||
|
sensorsTimeout: "notaduration",
|
||||||
|
expectedConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 2 * time.Second,
|
||||||
|
sensors: map[string]struct{}{},
|
||||||
|
},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
name: "Explicitly set to empty string",
|
name: "Explicitly set to empty string",
|
||||||
primarySensor: "",
|
primarySensor: "",
|
||||||
@@ -195,6 +220,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "",
|
primarySensor: "",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{},
|
sensors: map[string]struct{}{},
|
||||||
isBlacklist: false,
|
isBlacklist: false,
|
||||||
hasWildcards: false,
|
hasWildcards: false,
|
||||||
@@ -209,6 +235,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{},
|
sensors: map[string]struct{}{},
|
||||||
isBlacklist: false,
|
isBlacklist: false,
|
||||||
hasWildcards: false,
|
hasWildcards: false,
|
||||||
@@ -222,6 +249,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_temp": {},
|
"cpu_temp": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -238,6 +266,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_temp": {},
|
"cpu_temp": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -254,6 +283,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_*": {},
|
"cpu_*": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -270,6 +300,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_*": {},
|
"cpu_*": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -285,6 +316,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
sensors: "cpu_temp",
|
sensors: "cpu_temp",
|
||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_temp": {},
|
"cpu_temp": {},
|
||||||
},
|
},
|
||||||
@@ -296,7 +328,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
result := agent.newSensorConfigWithEnv(tt.primarySensor, tt.sysSensors, tt.sensors, tt.skipCollection)
|
result := agent.newSensorConfigWithEnv(tt.primarySensor, tt.sysSensors, tt.sensors, tt.sensorsTimeout, tt.skipCollection)
|
||||||
|
|
||||||
// Check primary sensor
|
// Check primary sensor
|
||||||
assert.Equal(t, tt.expectedConfig.primarySensor, result.primarySensor)
|
assert.Equal(t, tt.expectedConfig.primarySensor, result.primarySensor)
|
||||||
@@ -315,6 +347,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
// Check flags
|
// Check flags
|
||||||
assert.Equal(t, tt.expectedConfig.isBlacklist, result.isBlacklist)
|
assert.Equal(t, tt.expectedConfig.isBlacklist, result.isBlacklist)
|
||||||
assert.Equal(t, tt.expectedConfig.hasWildcards, result.hasWildcards)
|
assert.Equal(t, tt.expectedConfig.hasWildcards, result.hasWildcards)
|
||||||
|
assert.Equal(t, tt.expectedConfig.timeout, result.timeout)
|
||||||
|
|
||||||
// Check context
|
// Check context
|
||||||
if tt.sysSensors != "" {
|
if tt.sysSensors != "" {
|
||||||
@@ -330,40 +363,18 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewSensorConfig(t *testing.T) {
|
func TestNewSensorConfig(t *testing.T) {
|
||||||
// Save original environment variables
|
|
||||||
originalPrimary, hasPrimary := os.LookupEnv("BESZEL_AGENT_PRIMARY_SENSOR")
|
|
||||||
originalSys, hasSys := os.LookupEnv("BESZEL_AGENT_SYS_SENSORS")
|
|
||||||
originalSensors, hasSensors := os.LookupEnv("BESZEL_AGENT_SENSORS")
|
|
||||||
|
|
||||||
// Restore environment variables after the test
|
|
||||||
defer func() {
|
|
||||||
// Clean up test environment variables
|
|
||||||
os.Unsetenv("BESZEL_AGENT_PRIMARY_SENSOR")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SYS_SENSORS")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SENSORS")
|
|
||||||
|
|
||||||
// Restore original values if they existed
|
|
||||||
if hasPrimary {
|
|
||||||
os.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", originalPrimary)
|
|
||||||
}
|
|
||||||
if hasSys {
|
|
||||||
os.Setenv("BESZEL_AGENT_SYS_SENSORS", originalSys)
|
|
||||||
}
|
|
||||||
if hasSensors {
|
|
||||||
os.Setenv("BESZEL_AGENT_SENSORS", originalSensors)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Set test environment variables
|
// Set test environment variables
|
||||||
os.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", "test_primary")
|
t.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", "test_primary")
|
||||||
os.Setenv("BESZEL_AGENT_SYS_SENSORS", "/test/path")
|
t.Setenv("BESZEL_AGENT_SYS_SENSORS", "/test/path")
|
||||||
os.Setenv("BESZEL_AGENT_SENSORS", "test_sensor1,test_*,test_sensor3")
|
t.Setenv("BESZEL_AGENT_SENSORS", "test_sensor1,test_*,test_sensor3")
|
||||||
|
t.Setenv("BESZEL_AGENT_SENSORS_TIMEOUT", "7s")
|
||||||
|
|
||||||
agent := &Agent{}
|
agent := &Agent{}
|
||||||
result := agent.newSensorConfig()
|
result := agent.newSensorConfig()
|
||||||
|
|
||||||
// Verify results
|
// Verify results
|
||||||
assert.Equal(t, "test_primary", result.primarySensor)
|
assert.Equal(t, "test_primary", result.primarySensor)
|
||||||
|
assert.Equal(t, 7*time.Second, result.timeout)
|
||||||
assert.NotNil(t, result.sensors)
|
assert.NotNil(t, result.sensors)
|
||||||
assert.Equal(t, 3, len(result.sensors))
|
assert.Equal(t, 3, len(result.sensors))
|
||||||
assert.True(t, result.hasWildcards)
|
assert.True(t, result.hasWildcards)
|
||||||
@@ -552,3 +563,59 @@ func TestGetTempsWithPanicRecovery(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGetTempsWithTimeout(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
sensorConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 10 * time.Millisecond,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("returns temperatures before timeout", func(t *testing.T) {
|
||||||
|
temps, err := agent.getTempsWithTimeout(func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
return []sensors.TemperatureStat{{SensorKey: "cpu_temp", Temperature: 42}}, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, temps, 1)
|
||||||
|
assert.Equal(t, "cpu_temp", temps[0].SensorKey)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns timeout error when collector hangs", func(t *testing.T) {
|
||||||
|
temps, err := agent.getTempsWithTimeout(func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
return []sensors.TemperatureStat{{SensorKey: "cpu_temp", Temperature: 42}}, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Nil(t, temps)
|
||||||
|
assert.ErrorIs(t, err, errTemperatureFetchTimeout)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateTemperaturesSkipsOnTimeout(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
systemInfo: system.Info{DashboardTemp: 99},
|
||||||
|
sensorConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 10 * time.Millisecond,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Cleanup(func() {
|
||||||
|
getSensorTemps = sensors.TemperaturesWithContext
|
||||||
|
})
|
||||||
|
getSensorTemps = func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
stats := &system.Stats{
|
||||||
|
Temperatures: map[string]float64{"stale": 50},
|
||||||
|
}
|
||||||
|
|
||||||
|
agent.updateTemperatures(stats)
|
||||||
|
|
||||||
|
assert.Equal(t, 0.0, agent.systemInfo.DashboardTemp)
|
||||||
|
assert.Equal(t, map[string]float64{}, stats.Temperatures)
|
||||||
|
}
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
@@ -36,7 +37,7 @@ var hubVersions map[string]semver.Version
|
|||||||
// and begins listening for connections. Returns an error if the server
|
// and begins listening for connections. Returns an error if the server
|
||||||
// is already running or if there's an issue starting the server.
|
// is already running or if there's an issue starting the server.
|
||||||
func (a *Agent) StartServer(opts ServerOptions) error {
|
func (a *Agent) StartServer(opts ServerOptions) error {
|
||||||
if disableSSH, _ := GetEnv("DISABLE_SSH"); disableSSH == "true" {
|
if disableSSH, _ := utils.GetEnv("DISABLE_SSH"); disableSSH == "true" {
|
||||||
return errors.New("SSH disabled")
|
return errors.New("SSH disabled")
|
||||||
}
|
}
|
||||||
if a.server != nil {
|
if a.server != nil {
|
||||||
@@ -192,7 +193,7 @@ func (a *Agent) handleSSHRequest(w io.Writer, req *common.HubRequest[cbor.RawMes
|
|||||||
|
|
||||||
// handleLegacyStats serves the legacy one-shot stats payload for older hubs
|
// handleLegacyStats serves the legacy one-shot stats payload for older hubs
|
||||||
func (a *Agent) handleLegacyStats(w io.Writer, hubVersion semver.Version) error {
|
func (a *Agent) handleLegacyStats(w io.Writer, hubVersion semver.Version) error {
|
||||||
stats := a.gatherStats(common.DataRequestOptions{CacheTimeMs: 60_000})
|
stats := a.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||||
return a.writeToSession(w, stats, hubVersion)
|
return a.writeToSession(w, stats, hubVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -238,11 +239,11 @@ func ParseKeys(input string) ([]gossh.PublicKey, error) {
|
|||||||
// and finally defaults to ":45876".
|
// and finally defaults to ":45876".
|
||||||
func GetAddress(addr string) string {
|
func GetAddress(addr string) string {
|
||||||
if addr == "" {
|
if addr == "" {
|
||||||
addr, _ = GetEnv("LISTEN")
|
addr, _ = utils.GetEnv("LISTEN")
|
||||||
}
|
}
|
||||||
if addr == "" {
|
if addr == "" {
|
||||||
// Legacy PORT environment variable support
|
// Legacy PORT environment variable support
|
||||||
addr, _ = GetEnv("PORT")
|
addr, _ = utils.GetEnv("PORT")
|
||||||
}
|
}
|
||||||
if addr == "" {
|
if addr == "" {
|
||||||
return ":45876"
|
return ":45876"
|
||||||
@@ -258,7 +259,7 @@ func GetAddress(addr string) string {
|
|||||||
// It checks the NETWORK environment variable first, then infers from
|
// It checks the NETWORK environment variable first, then infers from
|
||||||
// the address format: addresses starting with "/" are "unix", others are "tcp".
|
// the address format: addresses starting with "/" are "unix", others are "tcp".
|
||||||
func GetNetwork(addr string) string {
|
func GetNetwork(addr string) string {
|
||||||
if network, ok := GetEnv("NETWORK"); ok && network != "" {
|
if network, ok := utils.GetEnv("NETWORK"); ok && network != "" {
|
||||||
return network
|
return network
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(addr, "/") {
|
if strings.HasPrefix(addr, "/") {
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -184,8 +183,7 @@ func TestStartServer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestStartServerDisableSSH(t *testing.T) {
|
func TestStartServerDisableSSH(t *testing.T) {
|
||||||
os.Setenv("BESZEL_AGENT_DISABLE_SSH", "true")
|
t.Setenv("BESZEL_AGENT_DISABLE_SSH", "true")
|
||||||
defer os.Unsetenv("BESZEL_AGENT_DISABLE_SSH")
|
|
||||||
|
|
||||||
agent, err := NewAgent("")
|
agent, err := NewAgent("")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|||||||
151
agent/smart.go
151
agent/smart.go
@@ -18,6 +18,7 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/smart"
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -30,6 +31,9 @@ type SmartManager struct {
|
|||||||
lastScanTime time.Time
|
lastScanTime time.Time
|
||||||
smartctlPath string
|
smartctlPath string
|
||||||
excludedDevices map[string]struct{}
|
excludedDevices map[string]struct{}
|
||||||
|
darwinNvmeOnce sync.Once
|
||||||
|
darwinNvmeCapacity map[string]uint64 // serial → bytes cache, written once via darwinNvmeOnce
|
||||||
|
darwinNvmeProvider func() ([]byte, error) // overridable for testing
|
||||||
}
|
}
|
||||||
|
|
||||||
type scanOutput struct {
|
type scanOutput struct {
|
||||||
@@ -156,7 +160,7 @@ func (sm *SmartManager) ScanDevices(force bool) error {
|
|||||||
currentDevices := sm.devicesSnapshot()
|
currentDevices := sm.devicesSnapshot()
|
||||||
|
|
||||||
var configuredDevices []*DeviceInfo
|
var configuredDevices []*DeviceInfo
|
||||||
if configuredRaw, ok := GetEnv("SMART_DEVICES"); ok {
|
if configuredRaw, ok := utils.GetEnv("SMART_DEVICES"); ok {
|
||||||
slog.Info("SMART_DEVICES", "value", configuredRaw)
|
slog.Info("SMART_DEVICES", "value", configuredRaw)
|
||||||
config := strings.TrimSpace(configuredRaw)
|
config := strings.TrimSpace(configuredRaw)
|
||||||
if config == "" {
|
if config == "" {
|
||||||
@@ -199,6 +203,13 @@ func (sm *SmartManager) ScanDevices(force bool) error {
|
|||||||
hasValidScan = true
|
hasValidScan = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add Linux mdraid arrays by reading sysfs health fields. This does not
|
||||||
|
// require smartctl and does not scan the whole device.
|
||||||
|
if raidDevices := scanMdraidDevices(); len(raidDevices) > 0 {
|
||||||
|
scannedDevices = append(scannedDevices, raidDevices...)
|
||||||
|
hasValidScan = true
|
||||||
|
}
|
||||||
|
|
||||||
finalDevices := mergeDeviceLists(currentDevices, scannedDevices, configuredDevices)
|
finalDevices := mergeDeviceLists(currentDevices, scannedDevices, configuredDevices)
|
||||||
finalDevices = sm.filterExcludedDevices(finalDevices)
|
finalDevices = sm.filterExcludedDevices(finalDevices)
|
||||||
sm.updateSmartDevices(finalDevices)
|
sm.updateSmartDevices(finalDevices)
|
||||||
@@ -215,7 +226,7 @@ func (sm *SmartManager) ScanDevices(force bool) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SmartManager) parseConfiguredDevices(config string) ([]*DeviceInfo, error) {
|
func (sm *SmartManager) parseConfiguredDevices(config string) ([]*DeviceInfo, error) {
|
||||||
splitChar := os.Getenv("SMART_DEVICES_SEPARATOR")
|
splitChar, _ := utils.GetEnv("SMART_DEVICES_SEPARATOR")
|
||||||
if splitChar == "" {
|
if splitChar == "" {
|
||||||
splitChar = ","
|
splitChar = ","
|
||||||
}
|
}
|
||||||
@@ -253,7 +264,7 @@ func (sm *SmartManager) parseConfiguredDevices(config string) ([]*DeviceInfo, er
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SmartManager) refreshExcludedDevices() {
|
func (sm *SmartManager) refreshExcludedDevices() {
|
||||||
rawValue, _ := GetEnv("EXCLUDE_SMART")
|
rawValue, _ := utils.GetEnv("EXCLUDE_SMART")
|
||||||
sm.excludedDevices = make(map[string]struct{})
|
sm.excludedDevices = make(map[string]struct{})
|
||||||
|
|
||||||
for entry := range strings.SplitSeq(rawValue, ",") {
|
for entry := range strings.SplitSeq(rawValue, ",") {
|
||||||
@@ -450,6 +461,12 @@ func (sm *SmartManager) CollectSmart(deviceInfo *DeviceInfo) error {
|
|||||||
return errNoValidSmartData
|
return errNoValidSmartData
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// mdraid health is not exposed via SMART; Linux exposes array state in sysfs.
|
||||||
|
if deviceInfo != nil {
|
||||||
|
if ok, err := sm.collectMdraidHealth(deviceInfo); ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
// eMMC health is not exposed via SMART on Linux, but the kernel provides
|
// eMMC health is not exposed via SMART on Linux, but the kernel provides
|
||||||
// wear / EOL indicators via sysfs. Prefer that path when available.
|
// wear / EOL indicators via sysfs. Prefer that path when available.
|
||||||
if deviceInfo != nil {
|
if deviceInfo != nil {
|
||||||
@@ -476,7 +493,7 @@ func (sm *SmartManager) CollectSmart(deviceInfo *DeviceInfo) error {
|
|||||||
output, err := cmd.CombinedOutput()
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
// Check if device is in standby (exit status 2)
|
// Check if device is in standby (exit status 2)
|
||||||
if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 2 {
|
if exitErr, ok := errors.AsType[*exec.ExitError](err); ok && exitErr.ExitCode() == 2 {
|
||||||
if hasExistingData {
|
if hasExistingData {
|
||||||
// Device is in standby and we have cached data, keep using cache
|
// Device is in standby and we have cached data, keep using cache
|
||||||
return nil
|
return nil
|
||||||
@@ -857,15 +874,18 @@ func (sm *SmartManager) parseSmartForSata(output []byte) (bool, int) {
|
|||||||
smartData.FirmwareVersion = data.FirmwareVersion
|
smartData.FirmwareVersion = data.FirmwareVersion
|
||||||
smartData.Capacity = data.UserCapacity.Bytes
|
smartData.Capacity = data.UserCapacity.Bytes
|
||||||
smartData.Temperature = data.Temperature.Current
|
smartData.Temperature = data.Temperature.Current
|
||||||
if smartData.Temperature == 0 {
|
|
||||||
if temp, ok := temperatureFromAtaDeviceStatistics(data.AtaDeviceStatistics); ok {
|
|
||||||
smartData.Temperature = temp
|
|
||||||
}
|
|
||||||
}
|
|
||||||
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
||||||
smartData.DiskName = data.Device.Name
|
smartData.DiskName = data.Device.Name
|
||||||
smartData.DiskType = data.Device.Type
|
smartData.DiskType = data.Device.Type
|
||||||
|
|
||||||
|
// get values from ata_device_statistics if necessary
|
||||||
|
var ataDeviceStats smart.AtaDeviceStatistics
|
||||||
|
if smartData.Temperature == 0 {
|
||||||
|
if temp := findAtaDeviceStatisticsValue(&data, &ataDeviceStats, 5, "Current Temperature", 0, 255); temp != nil {
|
||||||
|
smartData.Temperature = uint8(*temp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// update SmartAttributes
|
// update SmartAttributes
|
||||||
smartData.Attributes = make([]*smart.SmartAttribute, 0, len(data.AtaSmartAttributes.Table))
|
smartData.Attributes = make([]*smart.SmartAttribute, 0, len(data.AtaSmartAttributes.Table))
|
||||||
for _, attr := range data.AtaSmartAttributes.Table {
|
for _, attr := range data.AtaSmartAttributes.Table {
|
||||||
@@ -900,23 +920,20 @@ func getSmartStatus(temperature uint8, passed bool) string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func temperatureFromAtaDeviceStatistics(stats smart.AtaDeviceStatistics) (uint8, bool) {
|
|
||||||
entry := findAtaDeviceStatisticsEntry(stats, 5, "Current Temperature")
|
|
||||||
if entry == nil || entry.Value == nil {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
if *entry.Value > 255 {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
return uint8(*entry.Value), true
|
|
||||||
}
|
|
||||||
|
|
||||||
// findAtaDeviceStatisticsEntry centralizes ATA devstat lookups so additional
|
// findAtaDeviceStatisticsEntry centralizes ATA devstat lookups so additional
|
||||||
// metrics can be pulled from the same structure in the future.
|
// metrics can be pulled from the same structure in the future.
|
||||||
func findAtaDeviceStatisticsEntry(stats smart.AtaDeviceStatistics, pageNumber uint8, entryName string) *smart.AtaDeviceStatisticsEntry {
|
func findAtaDeviceStatisticsValue(data *smart.SmartInfoForSata, ataDeviceStats *smart.AtaDeviceStatistics, entryNumber uint8, entryName string, minValue, maxValue int64) *int64 {
|
||||||
for pageIdx := range stats.Pages {
|
if len(ataDeviceStats.Pages) == 0 {
|
||||||
page := &stats.Pages[pageIdx]
|
if len(data.AtaDeviceStatistics) == 0 {
|
||||||
if page.Number != pageNumber {
|
return nil
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(data.AtaDeviceStatistics, ataDeviceStats); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for pageIdx := range ataDeviceStats.Pages {
|
||||||
|
page := &ataDeviceStats.Pages[pageIdx]
|
||||||
|
if page.Number != entryNumber {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for entryIdx := range page.Table {
|
for entryIdx := range page.Table {
|
||||||
@@ -924,7 +941,10 @@ func findAtaDeviceStatisticsEntry(stats smart.AtaDeviceStatistics, pageNumber ui
|
|||||||
if !strings.EqualFold(entry.Name, entryName) {
|
if !strings.EqualFold(entry.Name, entryName) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
return entry
|
if entry.Value == nil || *entry.Value < minValue || *entry.Value > maxValue {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return entry.Value
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@@ -1016,6 +1036,52 @@ func parseScsiGigabytesProcessed(value string) int64 {
|
|||||||
return parsed
|
return parsed
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// lookupDarwinNvmeCapacity returns the capacity in bytes for a given NVMe serial number on Darwin.
|
||||||
|
// It uses system_profiler SPNVMeDataType to get capacity since Apple SSDs don't report user_capacity
|
||||||
|
// via smartctl. Results are cached after the first call via sync.Once.
|
||||||
|
func (sm *SmartManager) lookupDarwinNvmeCapacity(serial string) uint64 {
|
||||||
|
sm.darwinNvmeOnce.Do(func() {
|
||||||
|
sm.darwinNvmeCapacity = make(map[string]uint64)
|
||||||
|
|
||||||
|
provider := sm.darwinNvmeProvider
|
||||||
|
if provider == nil {
|
||||||
|
provider = func() ([]byte, error) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
return exec.CommandContext(ctx, "system_profiler", "SPNVMeDataType", "-json").Output()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out, err := provider()
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("system_profiler NVMe lookup failed", "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var result struct {
|
||||||
|
SPNVMeDataType []struct {
|
||||||
|
Items []struct {
|
||||||
|
DeviceSerial string `json:"device_serial"`
|
||||||
|
SizeInBytes uint64 `json:"size_in_bytes"`
|
||||||
|
} `json:"_items"`
|
||||||
|
} `json:"SPNVMeDataType"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(out, &result); err != nil {
|
||||||
|
slog.Debug("system_profiler NVMe parse failed", "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, controller := range result.SPNVMeDataType {
|
||||||
|
for _, item := range controller.Items {
|
||||||
|
if item.DeviceSerial != "" && item.SizeInBytes > 0 {
|
||||||
|
sm.darwinNvmeCapacity[item.DeviceSerial] = item.SizeInBytes
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return sm.darwinNvmeCapacity[serial]
|
||||||
|
}
|
||||||
|
|
||||||
// parseSmartForNvme parses the output of smartctl --all -j /dev/nvmeX and updates the SmartDataMap
|
// parseSmartForNvme parses the output of smartctl --all -j /dev/nvmeX and updates the SmartDataMap
|
||||||
// Returns hasValidData and exitStatus
|
// Returns hasValidData and exitStatus
|
||||||
func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
||||||
@@ -1052,6 +1118,12 @@ func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
|||||||
smartData.SerialNumber = data.SerialNumber
|
smartData.SerialNumber = data.SerialNumber
|
||||||
smartData.FirmwareVersion = data.FirmwareVersion
|
smartData.FirmwareVersion = data.FirmwareVersion
|
||||||
smartData.Capacity = data.UserCapacity.Bytes
|
smartData.Capacity = data.UserCapacity.Bytes
|
||||||
|
if smartData.Capacity == 0 {
|
||||||
|
smartData.Capacity = data.NVMeTotalCapacity
|
||||||
|
}
|
||||||
|
if smartData.Capacity == 0 && (runtime.GOOS == "darwin" || sm.darwinNvmeProvider != nil) {
|
||||||
|
smartData.Capacity = sm.lookupDarwinNvmeCapacity(data.SerialNumber)
|
||||||
|
}
|
||||||
smartData.Temperature = data.NVMeSmartHealthInformationLog.Temperature
|
smartData.Temperature = data.NVMeSmartHealthInformationLog.Temperature
|
||||||
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
||||||
smartData.DiskName = data.Device.Name
|
smartData.DiskName = data.Device.Name
|
||||||
@@ -1087,32 +1159,21 @@ func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
|||||||
|
|
||||||
// detectSmartctl checks if smartctl is installed, returns an error if not
|
// detectSmartctl checks if smartctl is installed, returns an error if not
|
||||||
func (sm *SmartManager) detectSmartctl() (string, error) {
|
func (sm *SmartManager) detectSmartctl() (string, error) {
|
||||||
isWindows := runtime.GOOS == "windows"
|
if runtime.GOOS == "windows" {
|
||||||
|
|
||||||
// Load embedded smartctl.exe for Windows amd64 builds.
|
// Load embedded smartctl.exe for Windows amd64 builds.
|
||||||
if isWindows && runtime.GOARCH == "amd64" {
|
if runtime.GOARCH == "amd64" {
|
||||||
if path, err := ensureEmbeddedSmartctl(); err == nil {
|
if path, err := ensureEmbeddedSmartctl(); err == nil {
|
||||||
return path, nil
|
return path, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Try to find smartctl in the default installation location
|
||||||
if path, err := exec.LookPath("smartctl"); err == nil {
|
const location = "C:\\Program Files\\smartmontools\\bin\\smartctl.exe"
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
locations := []string{}
|
|
||||||
if isWindows {
|
|
||||||
locations = append(locations,
|
|
||||||
"C:\\Program Files\\smartmontools\\bin\\smartctl.exe",
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
locations = append(locations, "/opt/homebrew/bin/smartctl")
|
|
||||||
}
|
|
||||||
for _, location := range locations {
|
|
||||||
if _, err := os.Stat(location); err == nil {
|
if _, err := os.Stat(location); err == nil {
|
||||||
return location, nil
|
return location, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return "", errors.New("smartctl not found")
|
|
||||||
|
return utils.LookPathHomebrew("smartctl")
|
||||||
}
|
}
|
||||||
|
|
||||||
// isNvmeControllerPath checks if the path matches an NVMe controller pattern
|
// isNvmeControllerPath checks if the path matches an NVMe controller pattern
|
||||||
@@ -1146,10 +1207,12 @@ func NewSmartManager() (*SmartManager, error) {
|
|||||||
slog.Debug("smartctl", "path", path, "err", err)
|
slog.Debug("smartctl", "path", path, "err", err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Keep the previous fail-fast behavior unless this Linux host exposes
|
// Keep the previous fail-fast behavior unless this Linux host exposes
|
||||||
// eMMC health via sysfs, in which case smartctl is optional.
|
// eMMC or mdraid health via sysfs, in which case smartctl is optional.
|
||||||
if runtime.GOOS == "linux" && len(scanEmmcDevices()) > 0 {
|
if runtime.GOOS == "linux" {
|
||||||
|
if len(scanEmmcDevices()) > 0 || len(scanMdraidDevices()) > 0 {
|
||||||
return sm, nil
|
return sm, nil
|
||||||
}
|
}
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
sm.smartctlPath = path
|
sm.smartctlPath = path
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -122,6 +121,78 @@ func TestParseSmartForSataDeviceStatisticsTemperature(t *testing.T) {
|
|||||||
assert.Equal(t, uint8(22), deviceData.Temperature)
|
assert.Equal(t, uint8(22), deviceData.Temperature)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParseSmartForSataAtaDeviceStatistics(t *testing.T) {
|
||||||
|
// tests that ata_device_statistics values are parsed correctly
|
||||||
|
jsonPayload := []byte(`{
|
||||||
|
"smartctl": {"exit_status": 0},
|
||||||
|
"device": {"name": "/dev/sdb", "type": "sat"},
|
||||||
|
"model_name": "SanDisk SSD U110 16GB",
|
||||||
|
"serial_number": "lksjfh23lhj",
|
||||||
|
"firmware_version": "U21B001",
|
||||||
|
"user_capacity": {"bytes": 16013942784},
|
||||||
|
"smart_status": {"passed": true},
|
||||||
|
"ata_smart_attributes": {"table": []},
|
||||||
|
"ata_device_statistics": {
|
||||||
|
"pages": [
|
||||||
|
{
|
||||||
|
"number": 5,
|
||||||
|
"name": "Temperature Statistics",
|
||||||
|
"table": [
|
||||||
|
{"name": "Current Temperature", "value": 43, "flags": {"valid": true}},
|
||||||
|
{"name": "Specified Minimum Operating Temperature", "value": -20, "flags": {"valid": true}}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}`)
|
||||||
|
|
||||||
|
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||||
|
hasData, exitStatus := sm.parseSmartForSata(jsonPayload)
|
||||||
|
require.True(t, hasData)
|
||||||
|
assert.Equal(t, 0, exitStatus)
|
||||||
|
|
||||||
|
deviceData, ok := sm.SmartDataMap["lksjfh23lhj"]
|
||||||
|
require.True(t, ok, "expected smart data entry for serial lksjfh23lhj")
|
||||||
|
assert.Equal(t, uint8(43), deviceData.Temperature)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseSmartForSataNegativeDeviceStatistics(t *testing.T) {
|
||||||
|
// Tests that negative values in ata_device_statistics (e.g. min operating temp)
|
||||||
|
// do not cause the entire SAT parser to fail.
|
||||||
|
jsonPayload := []byte(`{
|
||||||
|
"smartctl": {"exit_status": 0},
|
||||||
|
"device": {"name": "/dev/sdb", "type": "sat"},
|
||||||
|
"model_name": "SanDisk SSD U110 16GB",
|
||||||
|
"serial_number": "NEGATIVE123",
|
||||||
|
"firmware_version": "U21B001",
|
||||||
|
"user_capacity": {"bytes": 16013942784},
|
||||||
|
"smart_status": {"passed": true},
|
||||||
|
"temperature": {"current": 38},
|
||||||
|
"ata_smart_attributes": {"table": []},
|
||||||
|
"ata_device_statistics": {
|
||||||
|
"pages": [
|
||||||
|
{
|
||||||
|
"number": 5,
|
||||||
|
"name": "Temperature Statistics",
|
||||||
|
"table": [
|
||||||
|
{"name": "Current Temperature", "value": 38, "flags": {"valid": true}},
|
||||||
|
{"name": "Specified Minimum Operating Temperature", "value": -20, "flags": {"valid": true}}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}`)
|
||||||
|
|
||||||
|
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||||
|
hasData, exitStatus := sm.parseSmartForSata(jsonPayload)
|
||||||
|
require.True(t, hasData)
|
||||||
|
assert.Equal(t, 0, exitStatus)
|
||||||
|
|
||||||
|
deviceData, ok := sm.SmartDataMap["NEGATIVE123"]
|
||||||
|
require.True(t, ok, "expected smart data entry for serial NEGATIVE123")
|
||||||
|
assert.Equal(t, uint8(38), deviceData.Temperature)
|
||||||
|
}
|
||||||
|
|
||||||
func TestParseSmartForSataParentheticalRawValue(t *testing.T) {
|
func TestParseSmartForSataParentheticalRawValue(t *testing.T) {
|
||||||
jsonPayload := []byte(`{
|
jsonPayload := []byte(`{
|
||||||
"smartctl": {"exit_status": 0},
|
"smartctl": {"exit_status": 0},
|
||||||
@@ -728,6 +799,182 @@ func TestIsVirtualDeviceScsi(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestFindAtaDeviceStatisticsValue(t *testing.T) {
|
||||||
|
val42 := int64(42)
|
||||||
|
val100 := int64(100)
|
||||||
|
valMinus20 := int64(-20)
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
data smart.SmartInfoForSata
|
||||||
|
ataDeviceStats smart.AtaDeviceStatistics
|
||||||
|
entryNumber uint8
|
||||||
|
entryName string
|
||||||
|
minValue int64
|
||||||
|
maxValue int64
|
||||||
|
expectedValue *int64
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "value in ataDeviceStats",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Current Temperature", Value: &val42},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: &val42,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "value unmarshaled from data",
|
||||||
|
data: smart.SmartInfoForSata{
|
||||||
|
AtaDeviceStatistics: []byte(`{"pages":[{"number":5,"table":[{"name":"Current Temperature","value":100}]}]}`),
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 255,
|
||||||
|
expectedValue: &val100,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "value out of range (too high)",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Current Temperature", Value: &val100},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 50,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "value out of range (too low)",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Min Temp", Value: &valMinus20},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Min Temp",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "no statistics available",
|
||||||
|
data: smart.SmartInfoForSata{},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 255,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "wrong page number",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 1,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Current Temperature", Value: &val42},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "wrong entry name",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Other Stat", Value: &val42},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "case insensitive name match",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "CURRENT TEMPERATURE", Value: &val42},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: &val42,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "entry value is nil",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Current Temperature", Value: nil},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
result := findAtaDeviceStatisticsValue(&tt.data, &tt.ataDeviceStats, tt.entryNumber, tt.entryName, tt.minValue, tt.maxValue)
|
||||||
|
if tt.expectedValue == nil {
|
||||||
|
assert.Nil(t, result)
|
||||||
|
} else {
|
||||||
|
require.NotNil(t, result)
|
||||||
|
assert.Equal(t, *tt.expectedValue, *result)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestRefreshExcludedDevices(t *testing.T) {
|
func TestRefreshExcludedDevices(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -788,7 +1035,7 @@ func TestRefreshExcludedDevices(t *testing.T) {
|
|||||||
t.Setenv("EXCLUDE_SMART", tt.envValue)
|
t.Setenv("EXCLUDE_SMART", tt.envValue)
|
||||||
} else {
|
} else {
|
||||||
// Ensure env var is not set for empty test
|
// Ensure env var is not set for empty test
|
||||||
os.Unsetenv("EXCLUDE_SMART")
|
t.Setenv("EXCLUDE_SMART", "")
|
||||||
}
|
}
|
||||||
|
|
||||||
sm := &SmartManager{}
|
sm := &SmartManager{}
|
||||||
@@ -952,3 +1199,81 @@ func TestIsNvmeControllerPath(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParseSmartForNvmeAppleSSD(t *testing.T) {
|
||||||
|
// Apple SSDs don't report user_capacity via smartctl; capacity should be fetched
|
||||||
|
// from system_profiler via the darwinNvmeProvider fallback.
|
||||||
|
fixturePath := filepath.Join("test-data", "smart", "apple_nvme.json")
|
||||||
|
data, err := os.ReadFile(fixturePath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
providerCalls := 0
|
||||||
|
fakeProvider := func() ([]byte, error) {
|
||||||
|
providerCalls++
|
||||||
|
return []byte(`{
|
||||||
|
"SPNVMeDataType": [{
|
||||||
|
"_items": [{
|
||||||
|
"device_serial": "0ba0147940253c15",
|
||||||
|
"size_in_bytes": 251000193024
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}`), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{
|
||||||
|
SmartDataMap: make(map[string]*smart.SmartData),
|
||||||
|
darwinNvmeProvider: fakeProvider,
|
||||||
|
}
|
||||||
|
|
||||||
|
hasData, _ := sm.parseSmartForNvme(data)
|
||||||
|
require.True(t, hasData)
|
||||||
|
|
||||||
|
deviceData, ok := sm.SmartDataMap["0ba0147940253c15"]
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, "APPLE SSD AP0256Q", deviceData.ModelName)
|
||||||
|
assert.Equal(t, uint64(251000193024), deviceData.Capacity)
|
||||||
|
assert.Equal(t, uint8(42), deviceData.Temperature)
|
||||||
|
assert.Equal(t, "PASSED", deviceData.SmartStatus)
|
||||||
|
assert.Equal(t, 1, providerCalls, "system_profiler should be called once")
|
||||||
|
|
||||||
|
// Second parse: provider should NOT be called again (cache hit)
|
||||||
|
_, _ = sm.parseSmartForNvme(data)
|
||||||
|
assert.Equal(t, 1, providerCalls, "system_profiler should not be called again after caching")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLookupDarwinNvmeCapacityMultipleDisks(t *testing.T) {
|
||||||
|
fakeProvider := func() ([]byte, error) {
|
||||||
|
return []byte(`{
|
||||||
|
"SPNVMeDataType": [
|
||||||
|
{
|
||||||
|
"_items": [
|
||||||
|
{"device_serial": "serial-disk0", "size_in_bytes": 251000193024},
|
||||||
|
{"device_serial": "serial-disk1", "size_in_bytes": 1000204886016}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"_items": [
|
||||||
|
{"device_serial": "serial-disk2", "size_in_bytes": 512110190592}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}`), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{darwinNvmeProvider: fakeProvider}
|
||||||
|
assert.Equal(t, uint64(251000193024), sm.lookupDarwinNvmeCapacity("serial-disk0"))
|
||||||
|
assert.Equal(t, uint64(1000204886016), sm.lookupDarwinNvmeCapacity("serial-disk1"))
|
||||||
|
assert.Equal(t, uint64(512110190592), sm.lookupDarwinNvmeCapacity("serial-disk2"))
|
||||||
|
assert.Equal(t, uint64(0), sm.lookupDarwinNvmeCapacity("unknown-serial"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLookupDarwinNvmeCapacityProviderError(t *testing.T) {
|
||||||
|
fakeProvider := func() ([]byte, error) {
|
||||||
|
return nil, errors.New("system_profiler not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{darwinNvmeProvider: fakeProvider}
|
||||||
|
assert.Equal(t, uint64(0), sm.lookupDarwinNvmeCapacity("any-serial"))
|
||||||
|
// Cache should be initialized even on error so we don't retry (Once already fired)
|
||||||
|
assert.NotNil(t, sm.darwinNvmeCapacity)
|
||||||
|
}
|
||||||
|
|||||||
@@ -7,12 +7,12 @@ import (
|
|||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
"github.com/henrygd/beszel/agent/battery"
|
"github.com/henrygd/beszel/agent/battery"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
"github.com/henrygd/beszel/agent/zfs"
|
||||||
"github.com/henrygd/beszel/internal/entities/container"
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
@@ -22,13 +22,6 @@ import (
|
|||||||
"github.com/shirou/gopsutil/v4/mem"
|
"github.com/shirou/gopsutil/v4/mem"
|
||||||
)
|
)
|
||||||
|
|
||||||
// prevDisk stores previous per-device disk counters for a given cache interval
|
|
||||||
type prevDisk struct {
|
|
||||||
readBytes uint64
|
|
||||||
writeBytes uint64
|
|
||||||
at time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sets initial / non-changing values about the host system
|
// Sets initial / non-changing values about the host system
|
||||||
func (a *Agent) refreshSystemDetails() {
|
func (a *Agent) refreshSystemDetails() {
|
||||||
a.systemInfo.AgentVersion = beszel.Version
|
a.systemInfo.AgentVersion = beszel.Version
|
||||||
@@ -107,13 +100,33 @@ func (a *Agent) refreshSystemDetails() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// zfs
|
// zfs
|
||||||
if _, err := getARCSize(); err != nil {
|
if _, err := zfs.ARCSize(); err != nil {
|
||||||
slog.Debug("Not monitoring ZFS ARC", "err", err)
|
slog.Debug("Not monitoring ZFS ARC", "err", err)
|
||||||
} else {
|
} else {
|
||||||
a.zfs = true
|
a.zfs = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// attachSystemDetails returns details only for fresh default-interval responses.
|
||||||
|
func (a *Agent) attachSystemDetails(data *system.CombinedData, cacheTimeMs uint16, includeRequested bool) *system.CombinedData {
|
||||||
|
if cacheTimeMs != defaultDataCacheTimeMs || (!includeRequested && !a.detailsDirty) {
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy data to avoid adding details to the original cached struct
|
||||||
|
response := *data
|
||||||
|
response.Details = &a.systemDetails
|
||||||
|
a.detailsDirty = false
|
||||||
|
return &response
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateSystemDetails applies a mutation to the static details payload and marks
|
||||||
|
// it for inclusion on the next fresh default-interval response.
|
||||||
|
func (a *Agent) updateSystemDetails(updateFunc func(details *system.Details)) {
|
||||||
|
updateFunc(&a.systemDetails)
|
||||||
|
a.detailsDirty = true
|
||||||
|
}
|
||||||
|
|
||||||
// Returns current info, stats about the host system
|
// Returns current info, stats about the host system
|
||||||
func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
||||||
var systemStats system.Stats
|
var systemStats system.Stats
|
||||||
@@ -127,13 +140,13 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
|||||||
// cpu metrics
|
// cpu metrics
|
||||||
cpuMetrics, err := getCpuMetrics(cacheTimeMs)
|
cpuMetrics, err := getCpuMetrics(cacheTimeMs)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
systemStats.Cpu = twoDecimals(cpuMetrics.Total)
|
systemStats.Cpu = utils.TwoDecimals(cpuMetrics.Total)
|
||||||
systemStats.CpuBreakdown = []float64{
|
systemStats.CpuBreakdown = []float64{
|
||||||
twoDecimals(cpuMetrics.User),
|
utils.TwoDecimals(cpuMetrics.User),
|
||||||
twoDecimals(cpuMetrics.System),
|
utils.TwoDecimals(cpuMetrics.System),
|
||||||
twoDecimals(cpuMetrics.Iowait),
|
utils.TwoDecimals(cpuMetrics.Iowait),
|
||||||
twoDecimals(cpuMetrics.Steal),
|
utils.TwoDecimals(cpuMetrics.Steal),
|
||||||
twoDecimals(cpuMetrics.Idle),
|
utils.TwoDecimals(cpuMetrics.Idle),
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
slog.Error("Error getting cpu metrics", "err", err)
|
slog.Error("Error getting cpu metrics", "err", err)
|
||||||
@@ -157,8 +170,8 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
|||||||
// memory
|
// memory
|
||||||
if v, err := mem.VirtualMemory(); err == nil {
|
if v, err := mem.VirtualMemory(); err == nil {
|
||||||
// swap
|
// swap
|
||||||
systemStats.Swap = bytesToGigabytes(v.SwapTotal)
|
systemStats.Swap = utils.BytesToGigabytes(v.SwapTotal)
|
||||||
systemStats.SwapUsed = bytesToGigabytes(v.SwapTotal - v.SwapFree - v.SwapCached)
|
systemStats.SwapUsed = utils.BytesToGigabytes(v.SwapTotal - v.SwapFree - v.SwapCached)
|
||||||
// cache + buffers value for default mem calculation
|
// cache + buffers value for default mem calculation
|
||||||
// note: gopsutil automatically adds SReclaimable to v.Cached
|
// note: gopsutil automatically adds SReclaimable to v.Cached
|
||||||
cacheBuff := v.Cached + v.Buffers - v.Shared
|
cacheBuff := v.Cached + v.Buffers - v.Shared
|
||||||
@@ -178,16 +191,16 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
|||||||
// }
|
// }
|
||||||
// subtract ZFS ARC size from used memory and add as its own category
|
// subtract ZFS ARC size from used memory and add as its own category
|
||||||
if a.zfs {
|
if a.zfs {
|
||||||
if arcSize, _ := getARCSize(); arcSize > 0 && arcSize < v.Used {
|
if arcSize, _ := zfs.ARCSize(); arcSize > 0 && arcSize < v.Used {
|
||||||
v.Used = v.Used - arcSize
|
v.Used = v.Used - arcSize
|
||||||
v.UsedPercent = float64(v.Used) / float64(v.Total) * 100.0
|
v.UsedPercent = float64(v.Used) / float64(v.Total) * 100.0
|
||||||
systemStats.MemZfsArc = bytesToGigabytes(arcSize)
|
systemStats.MemZfsArc = utils.BytesToGigabytes(arcSize)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
systemStats.Mem = bytesToGigabytes(v.Total)
|
systemStats.Mem = utils.BytesToGigabytes(v.Total)
|
||||||
systemStats.MemBuffCache = bytesToGigabytes(cacheBuff)
|
systemStats.MemBuffCache = utils.BytesToGigabytes(cacheBuff)
|
||||||
systemStats.MemUsed = bytesToGigabytes(v.Used)
|
systemStats.MemUsed = utils.BytesToGigabytes(v.Used)
|
||||||
systemStats.MemPct = twoDecimals(v.UsedPercent)
|
systemStats.MemPct = utils.TwoDecimals(v.UsedPercent)
|
||||||
}
|
}
|
||||||
|
|
||||||
// disk usage
|
// disk usage
|
||||||
@@ -250,32 +263,6 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
|||||||
return systemStats
|
return systemStats
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns the size of the ZFS ARC memory cache in bytes
|
|
||||||
func getARCSize() (uint64, error) {
|
|
||||||
file, err := os.Open("/proc/spl/kstat/zfs/arcstats")
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
// Scan the lines
|
|
||||||
scanner := bufio.NewScanner(file)
|
|
||||||
for scanner.Scan() {
|
|
||||||
line := scanner.Text()
|
|
||||||
if strings.HasPrefix(line, "size") {
|
|
||||||
// Example line: size 4 15032385536
|
|
||||||
fields := strings.Fields(line)
|
|
||||||
if len(fields) < 3 {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
// Return the size as uint64
|
|
||||||
return strconv.ParseUint(fields[2], 10, 64)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0, fmt.Errorf("failed to parse size field")
|
|
||||||
}
|
|
||||||
|
|
||||||
// getOsPrettyName attempts to get the pretty OS name from /etc/os-release on Linux systems
|
// getOsPrettyName attempts to get the pretty OS name from /etc/os-release on Linux systems
|
||||||
func getOsPrettyName() (string, error) {
|
func getOsPrettyName() (string, error) {
|
||||||
file, err := os.Open("/etc/os-release")
|
file, err := os.Open("/etc/os-release")
|
||||||
|
|||||||
61
agent/system_test.go
Normal file
61
agent/system_test.go
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGatherStatsDoesNotAttachDetailsToCachedRequests(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
cache: NewSystemDataCache(),
|
||||||
|
systemDetails: system.Details{Hostname: "updated-host", Podman: true},
|
||||||
|
detailsDirty: true,
|
||||||
|
}
|
||||||
|
cached := &system.CombinedData{
|
||||||
|
Info: system.Info{Hostname: "cached-host"},
|
||||||
|
}
|
||||||
|
agent.cache.Set(cached, defaultDataCacheTimeMs)
|
||||||
|
|
||||||
|
response := agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||||
|
|
||||||
|
assert.Same(t, cached, response)
|
||||||
|
assert.Nil(t, response.Details)
|
||||||
|
assert.True(t, agent.detailsDirty)
|
||||||
|
assert.Equal(t, "cached-host", response.Info.Hostname)
|
||||||
|
assert.Nil(t, cached.Details)
|
||||||
|
|
||||||
|
secondResponse := agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||||
|
assert.Same(t, cached, secondResponse)
|
||||||
|
assert.Nil(t, secondResponse.Details)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateSystemDetailsMarksDetailsDirty(t *testing.T) {
|
||||||
|
agent := &Agent{}
|
||||||
|
|
||||||
|
agent.updateSystemDetails(func(details *system.Details) {
|
||||||
|
details.Hostname = "updated-host"
|
||||||
|
details.Podman = true
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.True(t, agent.detailsDirty)
|
||||||
|
assert.Equal(t, "updated-host", agent.systemDetails.Hostname)
|
||||||
|
assert.True(t, agent.systemDetails.Podman)
|
||||||
|
|
||||||
|
original := &system.CombinedData{}
|
||||||
|
realTimeResponse := agent.attachSystemDetails(original, 1000, true)
|
||||||
|
assert.Same(t, original, realTimeResponse)
|
||||||
|
assert.Nil(t, realTimeResponse.Details)
|
||||||
|
assert.True(t, agent.detailsDirty)
|
||||||
|
|
||||||
|
response := agent.attachSystemDetails(original, defaultDataCacheTimeMs, false)
|
||||||
|
require.NotNil(t, response.Details)
|
||||||
|
assert.NotSame(t, original, response)
|
||||||
|
assert.Equal(t, "updated-host", response.Details.Hostname)
|
||||||
|
assert.True(t, response.Details.Podman)
|
||||||
|
assert.False(t, agent.detailsDirty)
|
||||||
|
assert.Nil(t, original.Details)
|
||||||
|
}
|
||||||
@@ -15,6 +15,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/coreos/go-systemd/v22/dbus"
|
"github.com/coreos/go-systemd/v22/dbus"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -49,7 +50,7 @@ func isSystemdAvailable() bool {
|
|||||||
|
|
||||||
// newSystemdManager creates a new systemdManager.
|
// newSystemdManager creates a new systemdManager.
|
||||||
func newSystemdManager() (*systemdManager, error) {
|
func newSystemdManager() (*systemdManager, error) {
|
||||||
if skipSystemd, _ := GetEnv("SKIP_SYSTEMD"); skipSystemd == "true" {
|
if skipSystemd, _ := utils.GetEnv("SKIP_SYSTEMD"); skipSystemd == "true" {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -294,13 +295,13 @@ func unescapeServiceName(name string) string {
|
|||||||
// otherwise defaults to "*service".
|
// otherwise defaults to "*service".
|
||||||
func getServicePatterns() []string {
|
func getServicePatterns() []string {
|
||||||
patterns := []string{}
|
patterns := []string{}
|
||||||
if envPatterns, _ := GetEnv("SERVICE_PATTERNS"); envPatterns != "" {
|
if envPatterns, _ := utils.GetEnv("SERVICE_PATTERNS"); envPatterns != "" {
|
||||||
for pattern := range strings.SplitSeq(envPatterns, ",") {
|
for pattern := range strings.SplitSeq(envPatterns, ",") {
|
||||||
pattern = strings.TrimSpace(pattern)
|
pattern = strings.TrimSpace(pattern)
|
||||||
if pattern == "" {
|
if pattern == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !strings.HasSuffix(pattern, ".service") {
|
if !strings.HasSuffix(pattern, "timer") && !strings.HasSuffix(pattern, ".service") {
|
||||||
pattern += ".service"
|
pattern += ".service"
|
||||||
}
|
}
|
||||||
patterns = append(patterns, pattern)
|
patterns = append(patterns, pattern)
|
||||||
|
|||||||
@@ -156,20 +156,23 @@ func TestGetServicePatterns(t *testing.T) {
|
|||||||
expected: []string{"*nginx*.service", "*apache*.service"},
|
expected: []string{"*nginx*.service", "*apache*.service"},
|
||||||
cleanupEnvVars: true,
|
cleanupEnvVars: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "opt into timer monitoring",
|
||||||
|
prefixedEnv: "nginx.service,docker,apache.timer",
|
||||||
|
unprefixedEnv: "",
|
||||||
|
expected: []string{"nginx.service", "docker.service", "apache.timer"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
// Clean up any existing env vars
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SERVICE_PATTERNS")
|
|
||||||
os.Unsetenv("SERVICE_PATTERNS")
|
|
||||||
|
|
||||||
// Set up environment variables
|
// Set up environment variables
|
||||||
if tt.prefixedEnv != "" {
|
if tt.prefixedEnv != "" {
|
||||||
os.Setenv("BESZEL_AGENT_SERVICE_PATTERNS", tt.prefixedEnv)
|
t.Setenv("BESZEL_AGENT_SERVICE_PATTERNS", tt.prefixedEnv)
|
||||||
}
|
}
|
||||||
if tt.unprefixedEnv != "" {
|
if tt.unprefixedEnv != "" {
|
||||||
os.Setenv("SERVICE_PATTERNS", tt.unprefixedEnv)
|
t.Setenv("SERVICE_PATTERNS", tt.unprefixedEnv)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run the function
|
// Run the function
|
||||||
@@ -177,12 +180,6 @@ func TestGetServicePatterns(t *testing.T) {
|
|||||||
|
|
||||||
// Verify results
|
// Verify results
|
||||||
assert.Equal(t, tt.expected, result, "Patterns should match expected values")
|
assert.Equal(t, tt.expected, result, "Patterns should match expected values")
|
||||||
|
|
||||||
// Cleanup
|
|
||||||
if tt.cleanupEnvVars {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SERVICE_PATTERNS")
|
|
||||||
os.Unsetenv("SERVICE_PATTERNS")
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
51
agent/test-data/smart/apple_nvme.json
Normal file
51
agent/test-data/smart/apple_nvme.json
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
{
|
||||||
|
"json_format_version": [1, 0],
|
||||||
|
"smartctl": {
|
||||||
|
"version": [7, 4],
|
||||||
|
"argv": ["smartctl", "-aix", "-j", "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1"],
|
||||||
|
"exit_status": 4
|
||||||
|
},
|
||||||
|
"device": {
|
||||||
|
"name": "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1",
|
||||||
|
"info_name": "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1",
|
||||||
|
"type": "nvme",
|
||||||
|
"protocol": "NVMe"
|
||||||
|
},
|
||||||
|
"model_name": "APPLE SSD AP0256Q",
|
||||||
|
"serial_number": "0ba0147940253c15",
|
||||||
|
"firmware_version": "555",
|
||||||
|
"smart_support": {
|
||||||
|
"available": true,
|
||||||
|
"enabled": true
|
||||||
|
},
|
||||||
|
"smart_status": {
|
||||||
|
"passed": true,
|
||||||
|
"nvme": {
|
||||||
|
"value": 0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nvme_smart_health_information_log": {
|
||||||
|
"critical_warning": 0,
|
||||||
|
"temperature": 42,
|
||||||
|
"available_spare": 100,
|
||||||
|
"available_spare_threshold": 99,
|
||||||
|
"percentage_used": 1,
|
||||||
|
"data_units_read": 270189386,
|
||||||
|
"data_units_written": 166753862,
|
||||||
|
"host_reads": 7543766995,
|
||||||
|
"host_writes": 3761621926,
|
||||||
|
"controller_busy_time": 0,
|
||||||
|
"power_cycles": 366,
|
||||||
|
"power_on_hours": 2850,
|
||||||
|
"unsafe_shutdowns": 195,
|
||||||
|
"media_errors": 0,
|
||||||
|
"num_err_log_entries": 0
|
||||||
|
},
|
||||||
|
"temperature": {
|
||||||
|
"current": 42
|
||||||
|
},
|
||||||
|
"power_cycle_count": 366,
|
||||||
|
"power_on_time": {
|
||||||
|
"hours": 2850
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
package agent
|
|
||||||
|
|
||||||
import "math"
|
|
||||||
|
|
||||||
func bytesToMegabytes(b float64) float64 {
|
|
||||||
return twoDecimals(b / 1048576)
|
|
||||||
}
|
|
||||||
|
|
||||||
func bytesToGigabytes(b uint64) float64 {
|
|
||||||
return twoDecimals(float64(b) / 1073741824)
|
|
||||||
}
|
|
||||||
|
|
||||||
func twoDecimals(value float64) float64 {
|
|
||||||
return math.Round(value*100) / 100
|
|
||||||
}
|
|
||||||
117
agent/utils/utils.go
Normal file
117
agent/utils/utils.go
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
// Package utils provides utility functions for the agent.
|
||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetEnv retrieves an environment variable with a "BESZEL_AGENT_" prefix, or falls back to the unprefixed key.
|
||||||
|
func GetEnv(key string) (value string, exists bool) {
|
||||||
|
if value, exists = os.LookupEnv("BESZEL_AGENT_" + key); exists {
|
||||||
|
return value, exists
|
||||||
|
}
|
||||||
|
return os.LookupEnv(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesToMegabytes converts bytes to megabytes and rounds to two decimal places.
|
||||||
|
func BytesToMegabytes(b float64) float64 {
|
||||||
|
return TwoDecimals(b / 1048576)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesToGigabytes converts bytes to gigabytes and rounds to two decimal places.
|
||||||
|
func BytesToGigabytes(b uint64) float64 {
|
||||||
|
return TwoDecimals(float64(b) / 1073741824)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TwoDecimals rounds a float64 value to two decimal places.
|
||||||
|
func TwoDecimals(value float64) float64 {
|
||||||
|
return math.Round(value*100) / 100
|
||||||
|
}
|
||||||
|
|
||||||
|
// func RoundFloat(val float64, precision uint) float64 {
|
||||||
|
// ratio := math.Pow(10, float64(precision))
|
||||||
|
// return math.Round(val*ratio) / ratio
|
||||||
|
// }
|
||||||
|
|
||||||
|
// ReadStringFile returns trimmed file contents or empty string on error.
|
||||||
|
func ReadStringFile(path string) string {
|
||||||
|
content, _ := ReadStringFileOK(path)
|
||||||
|
return content
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadStringFileOK returns trimmed file contents and read success.
|
||||||
|
func ReadStringFileOK(path string) (string, bool) {
|
||||||
|
b, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
return strings.TrimSpace(string(b)), true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadStringFileLimited reads a file into a string with a maximum size (in bytes) to avoid
|
||||||
|
// allocating large buffers and potential panics with pseudo-files when the size is misreported.
|
||||||
|
func ReadStringFileLimited(path string, maxSize int) (string, error) {
|
||||||
|
f, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
buf := make([]byte, maxSize)
|
||||||
|
n, err := f.Read(buf)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if n < 0 {
|
||||||
|
return "", fmt.Errorf("%s returned negative bytes: %d", path, n)
|
||||||
|
}
|
||||||
|
return strings.TrimSpace(string(buf[:n])), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileExists reports whether the given path exists.
|
||||||
|
func FileExists(path string) bool {
|
||||||
|
_, err := os.Stat(path)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadUintFile parses a decimal uint64 value from a file.
|
||||||
|
func ReadUintFile(path string) (uint64, bool) {
|
||||||
|
raw, ok := ReadStringFileOK(path)
|
||||||
|
if !ok {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
parsed, err := strconv.ParseUint(raw, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
return parsed, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookPathHomebrew is like exec.LookPath but also checks Homebrew paths.
|
||||||
|
func LookPathHomebrew(file string) (string, error) {
|
||||||
|
foundPath, lookPathErr := exec.LookPath(file)
|
||||||
|
if lookPathErr == nil {
|
||||||
|
return foundPath, nil
|
||||||
|
}
|
||||||
|
var homebrewPath string
|
||||||
|
switch runtime.GOOS {
|
||||||
|
case "darwin":
|
||||||
|
homebrewPath = filepath.Join("/opt", "homebrew", "bin", file)
|
||||||
|
case "linux":
|
||||||
|
homebrewPath = filepath.Join("/home", "linuxbrew", ".linuxbrew", "bin", file)
|
||||||
|
}
|
||||||
|
if homebrewPath != "" {
|
||||||
|
if _, err := os.Stat(homebrewPath); err == nil {
|
||||||
|
return homebrewPath, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", lookPathErr
|
||||||
|
}
|
||||||
158
agent/utils/utils_test.go
Normal file
158
agent/utils/utils_test.go
Normal file
@@ -0,0 +1,158 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTwoDecimals(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input float64
|
||||||
|
expected float64
|
||||||
|
}{
|
||||||
|
{"round down", 1.234, 1.23},
|
||||||
|
{"round half up", 1.235, 1.24}, // math.Round rounds half up
|
||||||
|
{"no rounding needed", 1.23, 1.23},
|
||||||
|
{"negative number", -1.235, -1.24}, // math.Round rounds half up (more negative)
|
||||||
|
{"zero", 0.0, 0.0},
|
||||||
|
{"large number", 123.456, 123.46}, // rounds 5 up
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
result := TwoDecimals(tt.input)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBytesToMegabytes(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input float64
|
||||||
|
expected float64
|
||||||
|
}{
|
||||||
|
{"1 MB", 1048576, 1.0},
|
||||||
|
{"512 KB", 524288, 0.5},
|
||||||
|
{"zero", 0, 0},
|
||||||
|
{"large value", 1073741824, 1024}, // 1 GB = 1024 MB
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
result := BytesToMegabytes(tt.input)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBytesToGigabytes(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input uint64
|
||||||
|
expected float64
|
||||||
|
}{
|
||||||
|
{"1 GB", 1073741824, 1.0},
|
||||||
|
{"512 MB", 536870912, 0.5},
|
||||||
|
{"0 GB", 0, 0},
|
||||||
|
{"2 GB", 2147483648, 2.0},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
result := BytesToGigabytes(tt.input)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFileFunctions(t *testing.T) {
|
||||||
|
tmpDir := t.TempDir()
|
||||||
|
testFilePath := filepath.Join(tmpDir, "test.txt")
|
||||||
|
testContent := "hello world"
|
||||||
|
|
||||||
|
// Test FileExists (false)
|
||||||
|
assert.False(t, FileExists(testFilePath))
|
||||||
|
|
||||||
|
// Test ReadStringFileOK (false)
|
||||||
|
content, ok := ReadStringFileOK(testFilePath)
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Empty(t, content)
|
||||||
|
|
||||||
|
// Test ReadStringFile (empty)
|
||||||
|
assert.Empty(t, ReadStringFile(testFilePath))
|
||||||
|
|
||||||
|
// Write file
|
||||||
|
err := os.WriteFile(testFilePath, []byte(testContent+"\n "), 0644)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Test FileExists (true)
|
||||||
|
assert.True(t, FileExists(testFilePath))
|
||||||
|
|
||||||
|
// Test ReadStringFileOK (true)
|
||||||
|
content, ok = ReadStringFileOK(testFilePath)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, testContent, content)
|
||||||
|
|
||||||
|
// Test ReadStringFile (content)
|
||||||
|
assert.Equal(t, testContent, ReadStringFile(testFilePath))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadUintFile(t *testing.T) {
|
||||||
|
tmpDir := t.TempDir()
|
||||||
|
|
||||||
|
t.Run("valid uint", func(t *testing.T) {
|
||||||
|
path := filepath.Join(tmpDir, "uint.txt")
|
||||||
|
os.WriteFile(path, []byte(" 12345\n"), 0644)
|
||||||
|
val, ok := ReadUintFile(path)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, uint64(12345), val)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("invalid uint", func(t *testing.T) {
|
||||||
|
path := filepath.Join(tmpDir, "invalid.txt")
|
||||||
|
os.WriteFile(path, []byte("abc"), 0644)
|
||||||
|
val, ok := ReadUintFile(path)
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Equal(t, uint64(0), val)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("missing file", func(t *testing.T) {
|
||||||
|
path := filepath.Join(tmpDir, "missing.txt")
|
||||||
|
val, ok := ReadUintFile(path)
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Equal(t, uint64(0), val)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetEnv(t *testing.T) {
|
||||||
|
key := "TEST_VAR"
|
||||||
|
prefixedKey := "BESZEL_AGENT_" + key
|
||||||
|
|
||||||
|
t.Run("prefixed variable exists", func(t *testing.T) {
|
||||||
|
t.Setenv(prefixedKey, "prefixed_val")
|
||||||
|
t.Setenv(key, "unprefixed_val")
|
||||||
|
|
||||||
|
val, exists := GetEnv(key)
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "prefixed_val", val)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("only unprefixed variable exists", func(t *testing.T) {
|
||||||
|
t.Setenv(key, "unprefixed_val")
|
||||||
|
|
||||||
|
val, exists := GetEnv(key)
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "unprefixed_val", val)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("neither variable exists", func(t *testing.T) {
|
||||||
|
val, exists := GetEnv(key)
|
||||||
|
assert.False(t, exists)
|
||||||
|
assert.Empty(t, val)
|
||||||
|
})
|
||||||
|
}
|
||||||
11
agent/zfs/zfs_freebsd.go
Normal file
11
agent/zfs/zfs_freebsd.go
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
//go:build freebsd
|
||||||
|
|
||||||
|
package zfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
func ARCSize() (uint64, error) {
|
||||||
|
return unix.SysctlUint64("kstat.zfs.misc.arcstats.size")
|
||||||
|
}
|
||||||
34
agent/zfs/zfs_linux.go
Normal file
34
agent/zfs/zfs_linux.go
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
// Package zfs provides functions to read ZFS statistics.
|
||||||
|
package zfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func ARCSize() (uint64, error) {
|
||||||
|
file, err := os.Open("/proc/spl/kstat/zfs/arcstats")
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(file)
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := scanner.Text()
|
||||||
|
if strings.HasPrefix(line, "size") {
|
||||||
|
fields := strings.Fields(line)
|
||||||
|
if len(fields) < 3 {
|
||||||
|
return 0, fmt.Errorf("unexpected arcstats size format: %s", line)
|
||||||
|
}
|
||||||
|
return strconv.ParseUint(fields[2], 10, 64)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, fmt.Errorf("size field not found in arcstats")
|
||||||
|
}
|
||||||
9
agent/zfs/zfs_unsupported.go
Normal file
9
agent/zfs/zfs_unsupported.go
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
//go:build !linux && !freebsd
|
||||||
|
|
||||||
|
package zfs
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
func ARCSize() (uint64, error) {
|
||||||
|
return 0, errors.ErrUnsupported
|
||||||
|
}
|
||||||
@@ -6,7 +6,7 @@ import "github.com/blang/semver"
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// Version is the current version of the application.
|
// Version is the current version of the application.
|
||||||
Version = "0.18.3"
|
Version = "0.18.7"
|
||||||
// AppName is the name of the application.
|
// AppName is the name of the application.
|
||||||
AppName = "beszel"
|
AppName = "beszel"
|
||||||
)
|
)
|
||||||
|
|||||||
48
go.mod
48
go.mod
@@ -1,28 +1,28 @@
|
|||||||
module github.com/henrygd/beszel
|
module github.com/henrygd/beszel
|
||||||
|
|
||||||
go 1.25.7
|
go 1.26.1
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/blang/semver v3.5.1+incompatible
|
github.com/blang/semver v3.5.1+incompatible
|
||||||
github.com/coreos/go-systemd/v22 v22.7.0
|
github.com/coreos/go-systemd/v22 v22.7.0
|
||||||
github.com/distatus/battery v0.11.0
|
github.com/ebitengine/purego v0.10.0
|
||||||
github.com/ebitengine/purego v0.9.1
|
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0
|
github.com/fxamacker/cbor/v2 v2.9.0
|
||||||
github.com/gliderlabs/ssh v0.3.8
|
github.com/gliderlabs/ssh v0.3.8
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/lxzan/gws v1.8.9
|
github.com/lxzan/gws v1.9.1
|
||||||
github.com/nicholas-fedor/shoutrrr v0.13.1
|
github.com/nicholas-fedor/shoutrrr v0.14.3
|
||||||
github.com/pocketbase/dbx v1.11.0
|
github.com/pocketbase/dbx v1.12.0
|
||||||
github.com/pocketbase/pocketbase v0.36.2
|
github.com/pocketbase/pocketbase v0.36.8
|
||||||
github.com/shirou/gopsutil/v4 v4.26.1
|
github.com/shirou/gopsutil/v4 v4.26.3
|
||||||
github.com/spf13/cast v1.10.0
|
github.com/spf13/cast v1.10.0
|
||||||
github.com/spf13/cobra v1.10.2
|
github.com/spf13/cobra v1.10.2
|
||||||
github.com/spf13/pflag v1.0.10
|
github.com/spf13/pflag v1.0.10
|
||||||
github.com/stretchr/testify v1.11.1
|
github.com/stretchr/testify v1.11.1
|
||||||
golang.org/x/crypto v0.47.0
|
golang.org/x/crypto v0.49.0
|
||||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90
|
||||||
golang.org/x/sys v0.40.0
|
golang.org/x/sys v0.42.0
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
|
howett.net/plist v1.0.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
@@ -30,10 +30,10 @@ require (
|
|||||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
github.com/disintegration/imaging v1.6.2 // indirect
|
github.com/disintegration/imaging v1.6.2 // indirect
|
||||||
github.com/dolthub/maphash v0.1.0 // indirect
|
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
|
github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
|
||||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/fatih/color v1.18.0 // indirect
|
github.com/eclipse/paho.golang v0.23.0 // indirect
|
||||||
|
github.com/fatih/color v1.19.0 // indirect
|
||||||
github.com/gabriel-vasile/mimetype v1.4.13 // indirect
|
github.com/gabriel-vasile/mimetype v1.4.13 // indirect
|
||||||
github.com/ganigeorgiev/fexpr v0.5.0 // indirect
|
github.com/ganigeorgiev/fexpr v0.5.0 // indirect
|
||||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||||
@@ -41,9 +41,10 @@ require (
|
|||||||
github.com/go-sql-driver/mysql v1.9.1 // indirect
|
github.com/go-sql-driver/mysql v1.9.1 // indirect
|
||||||
github.com/godbus/dbus/v5 v5.2.2 // indirect
|
github.com/godbus/dbus/v5 v5.2.2 // indirect
|
||||||
github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
|
github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
|
||||||
|
github.com/gorilla/websocket v1.5.3 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/klauspost/compress v1.18.3 // indirect
|
github.com/klauspost/compress v1.18.5 // indirect
|
||||||
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749 // indirect
|
||||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||||
@@ -54,15 +55,14 @@ require (
|
|||||||
github.com/tklauser/numcpus v0.11.0 // indirect
|
github.com/tklauser/numcpus v0.11.0 // indirect
|
||||||
github.com/x448/float16 v0.8.4 // indirect
|
github.com/x448/float16 v0.8.4 // indirect
|
||||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||||
golang.org/x/image v0.35.0 // indirect
|
golang.org/x/image v0.38.0 // indirect
|
||||||
golang.org/x/net v0.49.0 // indirect
|
golang.org/x/net v0.52.0 // indirect
|
||||||
golang.org/x/oauth2 v0.34.0 // indirect
|
golang.org/x/oauth2 v0.36.0 // indirect
|
||||||
golang.org/x/sync v0.19.0 // indirect
|
golang.org/x/sync v0.20.0 // indirect
|
||||||
golang.org/x/term v0.39.0 // indirect
|
golang.org/x/term v0.41.0 // indirect
|
||||||
golang.org/x/text v0.33.0 // indirect
|
golang.org/x/text v0.35.0 // indirect
|
||||||
howett.net/plist v1.0.1 // indirect
|
modernc.org/libc v1.70.0 // indirect
|
||||||
modernc.org/libc v1.67.6 // indirect
|
|
||||||
modernc.org/mathutil v1.7.1 // indirect
|
modernc.org/mathutil v1.7.1 // indirect
|
||||||
modernc.org/memory v1.11.0 // indirect
|
modernc.org/memory v1.11.0 // indirect
|
||||||
modernc.org/sqlite v1.44.3 // indirect
|
modernc.org/sqlite v1.48.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
126
go.sum
126
go.sum
@@ -17,18 +17,16 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
|||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
|
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
|
||||||
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
||||||
github.com/distatus/battery v0.11.0 h1:KJk89gz90Iq/wJtbjjM9yUzBXV+ASV/EG2WOOL7N8lc=
|
|
||||||
github.com/distatus/battery v0.11.0/go.mod h1:KmVkE8A8hpIX4T78QRdMktYpEp35QfOL8A8dwZBxq2k=
|
|
||||||
github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
|
|
||||||
github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
|
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8=
|
github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8=
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
|
github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
|
||||||
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
github.com/eclipse/paho.golang v0.23.0 h1:KHgl2wz6EJo7cMBmkuhpt7C576vP+kpPv7jjvSyR6Mk=
|
||||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
github.com/eclipse/paho.golang v0.23.0/go.mod h1:nQRhTkoZv8EAiNs5UU0/WdQIx2NrnWUpL9nsGJTQN04=
|
||||||
|
github.com/fatih/color v1.19.0 h1:Zp3PiM21/9Ld6FzSKyL5c/BULoe/ONr9KlbYVOfG8+w=
|
||||||
|
github.com/fatih/color v1.19.0/go.mod h1:zNk67I0ZUT1bEGsSGyCZYZNrHuTkJJB+r6Q9VuMi0LE=
|
||||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||||
@@ -58,10 +56,12 @@ github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArs
|
|||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc=
|
github.com/google/pprof v0.0.0-20260302011040-a15ffb7f9dcc h1:VBbFa1lDYWEeV5FZKUiYKYT0VxCp9twUmmaq9eb8sXw=
|
||||||
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
|
github.com/google/pprof v0.0.0-20260302011040-a15ffb7f9dcc/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||||
|
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
@@ -69,35 +69,35 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf
|
|||||||
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
|
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
|
||||||
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
|
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
|
||||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||||
github.com/klauspost/compress v1.18.3 h1:9PJRvfbmTabkOX8moIpXPbMMbYN60bWImDDU7L+/6zw=
|
github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=
|
||||||
github.com/klauspost/compress v1.18.3/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k=
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749 h1:Qj3hTcdWH8uMZDI41HNuTuJN525C7NBrbtH5kSO6fPk=
|
||||||
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||||
github.com/lxzan/gws v1.8.9 h1:VU3SGUeWlQrEwfUSfokcZep8mdg/BrUF+y73YYshdBM=
|
github.com/lxzan/gws v1.9.1 h1:4lbIp4cW0hOLP3ejFHR/uWRy741AURx7oKkNNi2OT9o=
|
||||||
github.com/lxzan/gws v1.8.9/go.mod h1:d9yHaR1eDTBHagQC6KY7ycUOaz5KWeqQtP3xu7aMK8Y=
|
github.com/lxzan/gws v1.9.1/go.mod h1:gXHSCPmTGryWJ4icuqy8Yho32E4YIMHH0fkDRYJRbdc=
|
||||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.13.1 h1:llEoHNbnMM4GfQ9+2Ns3n6ssvNfi3NPWluM0AQiicoY=
|
github.com/nicholas-fedor/shoutrrr v0.14.3 h1:aBX2iw9a7jl5wfHd3bi9LnS5ucoYIy6KcLH9XVF+gig=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.13.1/go.mod h1:kU4cFJpEAtTzl3iV0l+XUXmM90OlC5T01b7roM4/pYM=
|
github.com/nicholas-fedor/shoutrrr v0.14.3/go.mod h1:U7IywBkLpBV7rgn8iLbQ9/LklJG1gm24bFv5cXXsDKs=
|
||||||
github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8=
|
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
|
||||||
github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
|
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
|
||||||
github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM=
|
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
|
||||||
github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4=
|
github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pocketbase/dbx v1.11.0 h1:LpZezioMfT3K4tLrqA55wWFw1EtH1pM4tzSVa7kgszU=
|
github.com/pocketbase/dbx v1.12.0 h1:/oLErM+A0b4xI0PWTGPqSDVjzix48PqI/bng2l0PzoA=
|
||||||
github.com/pocketbase/dbx v1.11.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
github.com/pocketbase/dbx v1.12.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
||||||
github.com/pocketbase/pocketbase v0.36.2 h1:mzrxnvXKc3yxKlvZdbwoYXkH8kfIETteD0hWdgj0VI4=
|
github.com/pocketbase/pocketbase v0.36.8 h1:gCNqoesZ44saYOD3J7edhi5nDwUWKyQG7boM/kVwz2c=
|
||||||
github.com/pocketbase/pocketbase v0.36.2/go.mod h1:71vSF8whUDzC8mcLFE10+Qatf9JQdeOGIRWawOuLLKM=
|
github.com/pocketbase/pocketbase v0.36.8/go.mod h1:OY4WaXbP0WnF/EXoBbboWJK+ZSZ1A85tiA0sjrTKxTA=
|
||||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||||
@@ -105,8 +105,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
|
|||||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/shirou/gopsutil/v4 v4.26.1 h1:TOkEyriIXk2HX9d4isZJtbjXbEjf5qyKPAzbzY0JWSo=
|
github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc=
|
||||||
github.com/shirou/gopsutil/v4 v4.26.1/go.mod h1:medLI9/UNAb0dOI9Q3/7yWSqKkj00u+1tgY8nvv41pc=
|
github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||||
@@ -115,6 +115,8 @@ github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
|||||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4=
|
||||||
|
github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
@@ -126,44 +128,44 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
|||||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||||
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
|
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
|
||||||
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
|
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
|
||||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA=
|
||||||
golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU=
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ=
|
||||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/image v0.35.0 h1:LKjiHdgMtO8z7Fh18nGY6KDcoEtVfsgLDPeLyguqb7I=
|
golang.org/x/image v0.38.0 h1:5l+q+Y9JDC7mBOMjo4/aPhMDcxEptsX+Tt3GgRQRPuE=
|
||||||
golang.org/x/image v0.35.0/go.mod h1:MwPLTVgvxSASsxdLzKrl8BRFuyqMyGhLwmC+TO1Sybk=
|
golang.org/x/image v0.38.0/go.mod h1:/3f6vaXC+6CEanU4KJxbcUZyEePbyKbaLoDOe4ehFYY=
|
||||||
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
|
golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI=
|
||||||
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY=
|
||||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
|
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
|
||||||
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
|
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
|
||||||
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs=
|
||||||
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q=
|
||||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
|
||||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
|
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
|
||||||
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||||
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
|
golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU=
|
||||||
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
|
golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
|
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
|
||||||
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
|
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s=
|
||||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0=
|
||||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
|
||||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
@@ -175,18 +177,18 @@ howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM=
|
|||||||
howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||||
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||||
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
modernc.org/ccgo/v4 v4.32.0 h1:hjG66bI/kqIPX1b2yT6fr/jt+QedtP2fqojG2VrFuVw=
|
||||||
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
modernc.org/ccgo/v4 v4.32.0/go.mod h1:6F08EBCx5uQc38kMGl+0Nm0oWczoo1c7cgpzEry7Uc0=
|
||||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
modernc.org/fileutil v1.4.0 h1:j6ZzNTftVS054gi281TyLjHPp6CPHr2KCxEXjEbD6SM=
|
||||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
modernc.org/fileutil v1.4.0/go.mod h1:EqdKFDxiByqxLk8ozOxObDSfcVOv/54xDs/DUHdvCUU=
|
||||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||||
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
modernc.org/gc/v3 v3.1.2 h1:ZtDCnhonXSZexk/AYsegNRV1lJGgaNZJuKjJSWKyEqo=
|
||||||
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
modernc.org/gc/v3 v3.1.2/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||||
modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI=
|
modernc.org/libc v1.70.0 h1:U58NawXqXbgpZ/dcdS9kMshu08aiA6b7gusEusqzNkw=
|
||||||
modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE=
|
modernc.org/libc v1.70.0/go.mod h1:OVmxFGP1CI/Z4L3E0Q3Mf1PDE0BucwMkcXjjLntvHJo=
|
||||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||||
@@ -195,8 +197,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
|||||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||||
modernc.org/sqlite v1.44.3 h1:+39JvV/HWMcYslAwRxHb8067w+2zowvFOUrOWIy9PjY=
|
modernc.org/sqlite v1.48.0 h1:ElZyLop3Q2mHYk5IFPPXADejZrlHu7APbpB0sF78bq4=
|
||||||
modernc.org/sqlite v1.44.3/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=
|
modernc.org/sqlite v1.48.0/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
|
||||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||||
|
|||||||
@@ -21,9 +21,9 @@ type hubLike interface {
|
|||||||
|
|
||||||
type AlertManager struct {
|
type AlertManager struct {
|
||||||
hub hubLike
|
hub hubLike
|
||||||
alertQueue chan alertTask
|
stopOnce sync.Once
|
||||||
stopChan chan struct{}
|
|
||||||
pendingAlerts sync.Map
|
pendingAlerts sync.Map
|
||||||
|
alertsCache *AlertsCache
|
||||||
}
|
}
|
||||||
|
|
||||||
type AlertMessageData struct {
|
type AlertMessageData struct {
|
||||||
@@ -40,16 +40,22 @@ type UserNotificationSettings struct {
|
|||||||
Webhooks []string `json:"webhooks"`
|
Webhooks []string `json:"webhooks"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type SystemAlertFsStats struct {
|
||||||
|
DiskTotal float64 `json:"d"`
|
||||||
|
DiskUsed float64 `json:"du"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Values pulled from system_stats.stats that are relevant to alerts.
|
||||||
type SystemAlertStats struct {
|
type SystemAlertStats struct {
|
||||||
Cpu float64 `json:"cpu"`
|
Cpu float64 `json:"cpu"`
|
||||||
Mem float64 `json:"mp"`
|
Mem float64 `json:"mp"`
|
||||||
Disk float64 `json:"dp"`
|
Disk float64 `json:"dp"`
|
||||||
NetSent float64 `json:"ns"`
|
Bandwidth [2]uint64 `json:"b"`
|
||||||
NetRecv float64 `json:"nr"`
|
|
||||||
GPU map[string]SystemAlertGPUData `json:"g"`
|
GPU map[string]SystemAlertGPUData `json:"g"`
|
||||||
Temperatures map[string]float32 `json:"t"`
|
Temperatures map[string]float32 `json:"t"`
|
||||||
LoadAvg [3]float64 `json:"la"`
|
LoadAvg [3]float64 `json:"la"`
|
||||||
Battery [2]uint8 `json:"bat"`
|
Battery [2]uint8 `json:"bat"`
|
||||||
|
ExtraFs map[string]SystemAlertFsStats `json:"efs"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type SystemAlertGPUData struct {
|
type SystemAlertGPUData struct {
|
||||||
@@ -58,7 +64,7 @@ type SystemAlertGPUData struct {
|
|||||||
|
|
||||||
type SystemAlertData struct {
|
type SystemAlertData struct {
|
||||||
systemRecord *core.Record
|
systemRecord *core.Record
|
||||||
alertRecord *core.Record
|
alertData CachedAlertData
|
||||||
name string
|
name string
|
||||||
unit string
|
unit string
|
||||||
val float64
|
val float64
|
||||||
@@ -93,11 +99,9 @@ var supportsTitle = map[string]struct{}{
|
|||||||
func NewAlertManager(app hubLike) *AlertManager {
|
func NewAlertManager(app hubLike) *AlertManager {
|
||||||
am := &AlertManager{
|
am := &AlertManager{
|
||||||
hub: app,
|
hub: app,
|
||||||
alertQueue: make(chan alertTask, 5),
|
alertsCache: NewAlertsCache(app),
|
||||||
stopChan: make(chan struct{}),
|
|
||||||
}
|
}
|
||||||
am.bindEvents()
|
am.bindEvents()
|
||||||
go am.startWorker()
|
|
||||||
return am
|
return am
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -106,6 +110,19 @@ func (am *AlertManager) bindEvents() {
|
|||||||
am.hub.OnRecordAfterUpdateSuccess("alerts").BindFunc(updateHistoryOnAlertUpdate)
|
am.hub.OnRecordAfterUpdateSuccess("alerts").BindFunc(updateHistoryOnAlertUpdate)
|
||||||
am.hub.OnRecordAfterDeleteSuccess("alerts").BindFunc(resolveHistoryOnAlertDelete)
|
am.hub.OnRecordAfterDeleteSuccess("alerts").BindFunc(resolveHistoryOnAlertDelete)
|
||||||
am.hub.OnRecordAfterUpdateSuccess("smart_devices").BindFunc(am.handleSmartDeviceAlert)
|
am.hub.OnRecordAfterUpdateSuccess("smart_devices").BindFunc(am.handleSmartDeviceAlert)
|
||||||
|
|
||||||
|
am.hub.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
||||||
|
// Populate all alerts into cache on startup
|
||||||
|
_ = am.alertsCache.PopulateFromDB(true)
|
||||||
|
|
||||||
|
if err := resolveStatusAlerts(e.App); err != nil {
|
||||||
|
e.App.Logger().Error("Failed to resolve stale status alerts", "err", err)
|
||||||
|
}
|
||||||
|
if err := am.restorePendingStatusAlerts(); err != nil {
|
||||||
|
e.App.Logger().Error("Failed to restore pending status alerts", "err", err)
|
||||||
|
}
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsNotificationSilenced checks if a notification should be silenced based on configured quiet hours
|
// IsNotificationSilenced checks if a notification should be silenced based on configured quiet hours
|
||||||
@@ -259,13 +276,14 @@ func (am *AlertManager) SendShoutrrrAlert(notificationUrl, title, message, link,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add link
|
// Add link
|
||||||
if scheme == "ntfy" {
|
switch scheme {
|
||||||
|
case "ntfy":
|
||||||
queryParams.Add("Actions", fmt.Sprintf("view, %s, %s", linkText, link))
|
queryParams.Add("Actions", fmt.Sprintf("view, %s, %s", linkText, link))
|
||||||
} else if scheme == "lark" {
|
case "lark":
|
||||||
queryParams.Add("link", link)
|
queryParams.Add("link", link)
|
||||||
} else if scheme == "bark" {
|
case "bark":
|
||||||
queryParams.Add("url", link)
|
queryParams.Add("url", link)
|
||||||
} else {
|
default:
|
||||||
message += "\n\n" + link
|
message += "\n\n" + link
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -284,17 +302,12 @@ func (am *AlertManager) SendShoutrrrAlert(notificationUrl, title, message, link,
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (am *AlertManager) SendTestNotification(e *core.RequestEvent) error {
|
// setAlertTriggered updates the "triggered" status of an alert record in the database
|
||||||
var data struct {
|
func (am *AlertManager) setAlertTriggered(alert CachedAlertData, triggered bool) error {
|
||||||
URL string `json:"url"`
|
alertRecord, err := am.hub.FindRecordById("alerts", alert.Id)
|
||||||
}
|
|
||||||
err := e.BindBody(&data)
|
|
||||||
if err != nil || data.URL == "" {
|
|
||||||
return e.BadRequestError("URL is required", err)
|
|
||||||
}
|
|
||||||
err = am.SendShoutrrrAlert(data.URL, "Test Alert", "This is a notification from Beszel.", am.hub.Settings().Meta.AppURL, "View Beszel")
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return e.JSON(200, map[string]string{"err": err.Error()})
|
return err
|
||||||
}
|
}
|
||||||
return e.JSON(200, map[string]bool{"err": false})
|
alertRecord.Set("triggered", triggered)
|
||||||
|
return am.hub.Save(alertRecord)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,11 @@ package alerts
|
|||||||
import (
|
import (
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"errors"
|
"errors"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/pocketbase/dbx"
|
"github.com/pocketbase/dbx"
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
@@ -117,3 +121,72 @@ func DeleteUserAlerts(e *core.RequestEvent) error {
|
|||||||
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"success": true, "count": numDeleted})
|
return e.JSON(http.StatusOK, map[string]any{"success": true, "count": numDeleted})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SendTestNotification handles API request to send a test notification to a specified Shoutrrr URL
|
||||||
|
func (am *AlertManager) SendTestNotification(e *core.RequestEvent) error {
|
||||||
|
var data struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
}
|
||||||
|
err := e.BindBody(&data)
|
||||||
|
if err != nil || data.URL == "" {
|
||||||
|
return e.BadRequestError("URL is required", err)
|
||||||
|
}
|
||||||
|
// Only allow admins to send test notifications to internal URLs
|
||||||
|
if !e.Auth.IsSuperuser() && e.Auth.GetString("role") != "admin" {
|
||||||
|
internalURL, err := isInternalURL(data.URL)
|
||||||
|
if err != nil {
|
||||||
|
return e.BadRequestError(err.Error(), nil)
|
||||||
|
}
|
||||||
|
if internalURL {
|
||||||
|
return e.ForbiddenError("Only admins can send to internal destinations", nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = am.SendShoutrrrAlert(data.URL, "Test Alert", "This is a notification from Beszel.", am.hub.Settings().Meta.AppURL, "View Beszel")
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(200, map[string]string{"err": err.Error()})
|
||||||
|
}
|
||||||
|
return e.JSON(200, map[string]bool{"err": false})
|
||||||
|
}
|
||||||
|
|
||||||
|
// isInternalURL checks if the given shoutrrr URL points to an internal destination (localhost or private IP)
|
||||||
|
func isInternalURL(rawURL string) (bool, error) {
|
||||||
|
parsedURL, err := url.Parse(rawURL)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
host := parsedURL.Hostname()
|
||||||
|
if host == "" {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.EqualFold(host, "localhost") {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ip := net.ParseIP(host); ip != nil {
|
||||||
|
return isInternalIP(ip), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Some Shoutrrr URLs use the host position for service identifiers rather than a
|
||||||
|
// network hostname (for example, discord://token@webhookid). Restrict DNS lookups
|
||||||
|
// to names that look like actual hostnames so valid service URLs keep working.
|
||||||
|
if !strings.Contains(host, ".") {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ips, err := net.LookupIP(host)
|
||||||
|
if err != nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if slices.ContainsFunc(ips, isInternalIP) {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isInternalIP(ip net.IP) bool {
|
||||||
|
return ip.IsPrivate() || ip.IsLoopback() || ip.IsUnspecified()
|
||||||
|
}
|
||||||
|
|||||||
501
internal/alerts/alerts_api_test.go
Normal file
501
internal/alerts/alerts_api_test.go
Normal file
@@ -0,0 +1,501 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||||
|
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
||||||
|
func jsonReader(v any) io.Reader {
|
||||||
|
data, err := json.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return bytes.NewReader(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsInternalURL(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
url string
|
||||||
|
internal bool
|
||||||
|
}{
|
||||||
|
{name: "loopback ipv4", url: "generic://127.0.0.1", internal: true},
|
||||||
|
{name: "localhost hostname", url: "generic://localhost", internal: true},
|
||||||
|
{name: "localhost hostname", url: "generic+http://localhost/api/v1/postStuff", internal: true},
|
||||||
|
{name: "localhost hostname", url: "generic+http://127.0.0.1:8080/api/v1/postStuff", internal: true},
|
||||||
|
{name: "localhost hostname", url: "generic+https://beszel.dev/api/v1/postStuff", internal: false},
|
||||||
|
{name: "public ipv4", url: "generic://8.8.8.8", internal: false},
|
||||||
|
{name: "token style service url", url: "discord://abc123@123456789", internal: false},
|
||||||
|
{name: "single label service url", url: "slack://token@team/channel", internal: false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, testCase := range testCases {
|
||||||
|
t.Run(testCase.name, func(t *testing.T) {
|
||||||
|
internal, err := alerts.IsInternalURL(testCase.url)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, testCase.internal, internal)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUserAlertsApi(t *testing.T) {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
user1, _ := beszelTests.CreateUser(hub, "alertstest@example.com", "password")
|
||||||
|
user1Token, _ := user1.NewAuthToken()
|
||||||
|
|
||||||
|
user2, _ := beszelTests.CreateUser(hub, "alertstest2@example.com", "password")
|
||||||
|
user2Token, _ := user2.NewAuthToken()
|
||||||
|
|
||||||
|
system1, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "system1",
|
||||||
|
"users": []string{user1.Id},
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
})
|
||||||
|
|
||||||
|
system2, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "system2",
|
||||||
|
"users": []string{user1.Id, user2.Id},
|
||||||
|
"host": "127.0.0.2",
|
||||||
|
})
|
||||||
|
|
||||||
|
userRecords, _ := hub.CountRecords("users")
|
||||||
|
assert.EqualValues(t, 2, userRecords, "all users should be created")
|
||||||
|
|
||||||
|
systemRecords, _ := hub.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemRecords, "all systems should be created")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
// {
|
||||||
|
// Name: "GET not implemented - returns index",
|
||||||
|
// Method: http.MethodGet,
|
||||||
|
// URL: "/api/beszel/user-alerts",
|
||||||
|
// ExpectedStatus: 200,
|
||||||
|
// ExpectedContent: []string{"<html ", "globalThis.BESZEL"},
|
||||||
|
// TestAppFactory: testAppFactory,
|
||||||
|
// },
|
||||||
|
{
|
||||||
|
Name: "POST no auth",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST no body",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Bad data"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST bad data",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Bad data"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"invalidField": "this should cause validation error",
|
||||||
|
"threshold": "not a number",
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST malformed JSON",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Bad data"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: strings.NewReader(`{"alertType": "cpu", "threshold": 80, "enabled": true,}`),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST valid alert data multiple systems",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 69,
|
||||||
|
"min": 9,
|
||||||
|
"systems": []string{system1.Id, system2.Id},
|
||||||
|
"overwrite": false,
|
||||||
|
}),
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
// check total alerts
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||||
|
// check alert has correct values
|
||||||
|
matchingAlerts, _ := app.CountRecords("alerts", dbx.HashExp{"name": "CPU", "user": user1.Id, "system": system1.Id, "value": 69, "min": 9})
|
||||||
|
assert.EqualValues(t, 1, matchingAlerts, "should have 1 alert")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST valid alert data single system",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "Memory",
|
||||||
|
"systems": []string{system1.Id},
|
||||||
|
"value": 90,
|
||||||
|
"min": 10,
|
||||||
|
}),
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
user1Alerts, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||||
|
assert.EqualValues(t, 3, user1Alerts, "should have 3 alerts")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "Overwrite: false, should not overwrite existing alert",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 45,
|
||||||
|
"min": 5,
|
||||||
|
"systems": []string{system1.Id},
|
||||||
|
"overwrite": false,
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system1.Id,
|
||||||
|
"user": user1.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||||
|
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user1.Id})
|
||||||
|
assert.EqualValues(t, 80, alert.Get("value"), "should have 80 as value")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "Overwrite: true, should overwrite existing alert",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 45,
|
||||||
|
"min": 5,
|
||||||
|
"systems": []string{system2.Id},
|
||||||
|
"overwrite": true,
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system2.Id,
|
||||||
|
"user": user2.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||||
|
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user2.Id})
|
||||||
|
assert.EqualValues(t, 45, alert.Get("value"), "should have 45 as value")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE no auth",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system1.Id},
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system1.Id,
|
||||||
|
"user": user1.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE alert",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system1.Id},
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system1.Id,
|
||||||
|
"user": user1.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.Zero(t, alerts, "should have 0 alerts")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE alert multiple systems",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"count\":2", "\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "Memory",
|
||||||
|
"systems": []string{system1.Id, system2.Id},
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
for _, systemId := range []string{system1.Id, system2.Id} {
|
||||||
|
_, err := beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "Memory",
|
||||||
|
"system": systemId,
|
||||||
|
"user": user1.Id,
|
||||||
|
"value": 90,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
assert.NoError(t, err, "should create alert")
|
||||||
|
}
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.Zero(t, alerts, "should have 0 alerts")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 2 should not be able to delete alert of user 1",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system2.Id},
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
for _, user := range []string{user1.Id, user2.Id} {
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system2.Id,
|
||||||
|
"user": user,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||||
|
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||||
|
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
||||||
|
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
||||||
|
assert.EqualValues(t, 1, user2AlertCount, "should have 1 alert")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||||
|
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
||||||
|
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
||||||
|
assert.Zero(t, user2AlertCount, "should have 0 alerts")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func TestSendTestNotification(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
userToken, err := user.NewAuthToken()
|
||||||
|
|
||||||
|
adminUser, err := beszelTests.CreateUserWithRole(hub, "admin@example.com", "password123", "admin")
|
||||||
|
assert.NoError(t, err, "Failed to create admin user")
|
||||||
|
adminUserToken, err := adminUser.NewAuthToken()
|
||||||
|
|
||||||
|
superuser, err := beszelTests.CreateSuperuser(hub, "superuser@example.com", "password123")
|
||||||
|
assert.NoError(t, err, "Failed to create superuser")
|
||||||
|
superuserToken, err := superuser.NewAuthToken()
|
||||||
|
assert.NoError(t, err, "Failed to create superuser auth token")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - no auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://127.0.0.1",
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - with external auth should succeed",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://8.8.8.8",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"err\":"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - local url with user auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://localhost:8010",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"Only admins"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - internal url with user auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic+http://192.168.0.5",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"Only admins"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - internal url with admin auth should succeed",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": adminUserToken,
|
||||||
|
},
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://127.0.0.1",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"err\":"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - internal url with superuser auth should succeed",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": superuserToken,
|
||||||
|
},
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://127.0.0.1",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"err\":"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package alerts_test
|
package alerts_test
|
||||||
|
|
||||||
|
|||||||
177
internal/alerts/alerts_cache.go
Normal file
177
internal/alerts/alerts_cache.go
Normal file
@@ -0,0 +1,177 @@
|
|||||||
|
package alerts
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
"github.com/pocketbase/pocketbase/tools/store"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CachedAlertData represents the relevant fields of an alert record for status checking and updates.
|
||||||
|
type CachedAlertData struct {
|
||||||
|
Id string
|
||||||
|
SystemID string
|
||||||
|
UserID string
|
||||||
|
Name string
|
||||||
|
Value float64
|
||||||
|
Triggered bool
|
||||||
|
Min uint8
|
||||||
|
// Created types.DateTime
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *CachedAlertData) PopulateFromRecord(record *core.Record) {
|
||||||
|
a.Id = record.Id
|
||||||
|
a.SystemID = record.GetString("system")
|
||||||
|
a.UserID = record.GetString("user")
|
||||||
|
a.Name = record.GetString("name")
|
||||||
|
a.Value = record.GetFloat("value")
|
||||||
|
a.Triggered = record.GetBool("triggered")
|
||||||
|
a.Min = uint8(record.GetInt("min"))
|
||||||
|
// a.Created = record.GetDateTime("created")
|
||||||
|
}
|
||||||
|
|
||||||
|
// AlertsCache provides an in-memory cache for system alerts.
|
||||||
|
type AlertsCache struct {
|
||||||
|
app core.App
|
||||||
|
store *store.Store[string, *store.Store[string, CachedAlertData]]
|
||||||
|
populated bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAlertsCache creates a new instance of SystemAlertsCache.
|
||||||
|
func NewAlertsCache(app core.App) *AlertsCache {
|
||||||
|
c := AlertsCache{
|
||||||
|
app: app,
|
||||||
|
store: store.New(map[string]*store.Store[string, CachedAlertData]{}),
|
||||||
|
}
|
||||||
|
return c.bindEvents()
|
||||||
|
}
|
||||||
|
|
||||||
|
// bindEvents sets up event listeners to keep the cache in sync with database changes.
|
||||||
|
func (c *AlertsCache) bindEvents() *AlertsCache {
|
||||||
|
c.app.OnRecordAfterUpdateSuccess("alerts").BindFunc(func(e *core.RecordEvent) error {
|
||||||
|
// c.Delete(e.Record.Original()) // this would be needed if the system field on an existing alert was changed, however we don't currently allow that in the UI so we'll leave it commented out
|
||||||
|
c.Update(e.Record)
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
c.app.OnRecordAfterDeleteSuccess("alerts").BindFunc(func(e *core.RecordEvent) error {
|
||||||
|
c.Delete(e.Record)
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
c.app.OnRecordAfterCreateSuccess("alerts").BindFunc(func(e *core.RecordEvent) error {
|
||||||
|
c.Update(e.Record)
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// PopulateFromDB clears current entries and loads all alerts from the database into the cache.
|
||||||
|
func (c *AlertsCache) PopulateFromDB(force bool) error {
|
||||||
|
if !force && c.populated {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
records, err := c.app.FindAllRecords("alerts")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.store.RemoveAll()
|
||||||
|
for _, record := range records {
|
||||||
|
c.Update(record)
|
||||||
|
}
|
||||||
|
c.populated = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update adds or updates an alert record in the cache.
|
||||||
|
func (c *AlertsCache) Update(record *core.Record) {
|
||||||
|
systemID := record.GetString("system")
|
||||||
|
if systemID == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
systemStore, ok := c.store.GetOk(systemID)
|
||||||
|
if !ok {
|
||||||
|
systemStore = store.New(map[string]CachedAlertData{})
|
||||||
|
c.store.Set(systemID, systemStore)
|
||||||
|
}
|
||||||
|
var ca CachedAlertData
|
||||||
|
ca.PopulateFromRecord(record)
|
||||||
|
systemStore.Set(record.Id, ca)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes an alert record from the cache.
|
||||||
|
func (c *AlertsCache) Delete(record *core.Record) {
|
||||||
|
systemID := record.GetString("system")
|
||||||
|
if systemID == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if systemStore, ok := c.store.GetOk(systemID); ok {
|
||||||
|
systemStore.Remove(record.Id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSystemAlerts returns all alerts for the specified system, lazy-loading if necessary.
|
||||||
|
func (c *AlertsCache) GetSystemAlerts(systemID string) []CachedAlertData {
|
||||||
|
systemStore, ok := c.store.GetOk(systemID)
|
||||||
|
if !ok {
|
||||||
|
// Populate cache for this system
|
||||||
|
records, err := c.app.FindAllRecords("alerts", dbx.NewExp("system={:system}", dbx.Params{"system": systemID}))
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
systemStore = store.New(map[string]CachedAlertData{})
|
||||||
|
for _, record := range records {
|
||||||
|
var ca CachedAlertData
|
||||||
|
ca.PopulateFromRecord(record)
|
||||||
|
systemStore.Set(record.Id, ca)
|
||||||
|
}
|
||||||
|
c.store.Set(systemID, systemStore)
|
||||||
|
}
|
||||||
|
all := systemStore.GetAll()
|
||||||
|
alerts := make([]CachedAlertData, 0, len(all))
|
||||||
|
for _, alert := range all {
|
||||||
|
alerts = append(alerts, alert)
|
||||||
|
}
|
||||||
|
return alerts
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAlert returns a specific alert by its ID from the cache.
|
||||||
|
func (c *AlertsCache) GetAlert(systemID, alertID string) (CachedAlertData, bool) {
|
||||||
|
if systemStore, ok := c.store.GetOk(systemID); ok {
|
||||||
|
return systemStore.GetOk(alertID)
|
||||||
|
}
|
||||||
|
return CachedAlertData{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAlertsByName returns all alerts of a specific type for the specified system.
|
||||||
|
func (c *AlertsCache) GetAlertsByName(systemID, alertName string) []CachedAlertData {
|
||||||
|
allAlerts := c.GetSystemAlerts(systemID)
|
||||||
|
var alerts []CachedAlertData
|
||||||
|
for _, record := range allAlerts {
|
||||||
|
if record.Name == alertName {
|
||||||
|
alerts = append(alerts, record)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return alerts
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAlertsExcludingNames returns all alerts for the specified system excluding the given types.
|
||||||
|
func (c *AlertsCache) GetAlertsExcludingNames(systemID string, excludedNames ...string) []CachedAlertData {
|
||||||
|
excludeMap := make(map[string]struct{})
|
||||||
|
for _, name := range excludedNames {
|
||||||
|
excludeMap[name] = struct{}{}
|
||||||
|
}
|
||||||
|
allAlerts := c.GetSystemAlerts(systemID)
|
||||||
|
var alerts []CachedAlertData
|
||||||
|
for _, record := range allAlerts {
|
||||||
|
if _, excluded := excludeMap[record.Name]; !excluded {
|
||||||
|
alerts = append(alerts, record)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return alerts
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refresh returns the latest cached copy for an alert snapshot if it still exists.
|
||||||
|
func (c *AlertsCache) Refresh(alert CachedAlertData) (CachedAlertData, bool) {
|
||||||
|
if alert.Id == "" {
|
||||||
|
return CachedAlertData{}, false
|
||||||
|
}
|
||||||
|
return c.GetAlert(alert.SystemID, alert.Id)
|
||||||
|
}
|
||||||
215
internal/alerts/alerts_cache_test.go
Normal file
215
internal/alerts/alerts_cache_test.go
Normal file
@@ -0,0 +1,215 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSystemAlertsCachePopulateAndFilter(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 2, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system1 := systems[0]
|
||||||
|
system2 := systems[1]
|
||||||
|
|
||||||
|
statusAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system1.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cpuAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system1.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
memoryAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Memory",
|
||||||
|
"system": system2.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 90,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cache := alerts.NewAlertsCache(hub)
|
||||||
|
cache.PopulateFromDB(false)
|
||||||
|
|
||||||
|
statusAlerts := cache.GetAlertsByName(system1.Id, "Status")
|
||||||
|
require.Len(t, statusAlerts, 1)
|
||||||
|
assert.Equal(t, statusAlert.Id, statusAlerts[0].Id)
|
||||||
|
|
||||||
|
nonStatusAlerts := cache.GetAlertsExcludingNames(system1.Id, "Status")
|
||||||
|
require.Len(t, nonStatusAlerts, 1)
|
||||||
|
assert.Equal(t, cpuAlert.Id, nonStatusAlerts[0].Id)
|
||||||
|
|
||||||
|
system2Alerts := cache.GetSystemAlerts(system2.Id)
|
||||||
|
require.Len(t, system2Alerts, 1)
|
||||||
|
assert.Equal(t, memoryAlert.Id, system2Alerts[0].Id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemAlertsCacheLazyLoadUpdateAndDelete(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
systemRecord := systems[0]
|
||||||
|
|
||||||
|
statusAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cache := alerts.NewAlertsCache(hub)
|
||||||
|
require.Len(t, cache.GetSystemAlerts(systemRecord.Id), 1, "first lookup should lazy-load alerts for the system")
|
||||||
|
|
||||||
|
cpuAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cache.Update(cpuAlert)
|
||||||
|
|
||||||
|
nonStatusAlerts := cache.GetAlertsExcludingNames(systemRecord.Id, "Status")
|
||||||
|
require.Len(t, nonStatusAlerts, 1)
|
||||||
|
assert.Equal(t, cpuAlert.Id, nonStatusAlerts[0].Id)
|
||||||
|
|
||||||
|
cache.Delete(statusAlert)
|
||||||
|
assert.Empty(t, cache.GetAlertsByName(systemRecord.Id, "Status"), "deleted alerts should be removed from the in-memory cache")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemAlertsCacheRefreshReturnsLatestCopy(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := systems[0]
|
||||||
|
|
||||||
|
alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
"triggered": false,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cache := alerts.NewAlertsCache(hub)
|
||||||
|
snapshot := cache.GetSystemAlerts(system.Id)[0]
|
||||||
|
assert.False(t, snapshot.Triggered)
|
||||||
|
|
||||||
|
alert.Set("triggered", true)
|
||||||
|
require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
refreshed, ok := cache.Refresh(snapshot)
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, snapshot.Id, refreshed.Id)
|
||||||
|
assert.True(t, refreshed.Triggered, "refresh should return the updated cached value rather than the stale snapshot")
|
||||||
|
|
||||||
|
require.NoError(t, hub.Delete(alert))
|
||||||
|
_, ok = cache.Refresh(snapshot)
|
||||||
|
assert.False(t, ok, "refresh should report false when the cached alert no longer exists")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAlertManagerCacheLifecycle(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := systems[0]
|
||||||
|
|
||||||
|
// Create an alert
|
||||||
|
alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
am := hub.AlertManager
|
||||||
|
cache := am.GetSystemAlertsCache()
|
||||||
|
|
||||||
|
// Verify it's in cache (it should be since CreateRecord triggers the event)
|
||||||
|
assert.Len(t, cache.GetSystemAlerts(system.Id), 1)
|
||||||
|
assert.Equal(t, alert.Id, cache.GetSystemAlerts(system.Id)[0].Id)
|
||||||
|
assert.EqualValues(t, 80, cache.GetSystemAlerts(system.Id)[0].Value)
|
||||||
|
|
||||||
|
// Update the alert through PocketBase to trigger events
|
||||||
|
alert.Set("value", 85)
|
||||||
|
require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
// Check if updated value is reflected (or at least that it's still there)
|
||||||
|
cachedAlerts := cache.GetSystemAlerts(system.Id)
|
||||||
|
assert.Len(t, cachedAlerts, 1)
|
||||||
|
assert.EqualValues(t, 85, cachedAlerts[0].Value)
|
||||||
|
|
||||||
|
// Delete the alert through PocketBase to trigger events
|
||||||
|
require.NoError(t, hub.Delete(alert))
|
||||||
|
|
||||||
|
// Verify it's removed from cache
|
||||||
|
assert.Empty(t, cache.GetSystemAlerts(system.Id), "alert should be removed from cache after PocketBase delete")
|
||||||
|
}
|
||||||
|
|
||||||
|
// func TestAlertManagerCacheMovesAlertToNewSystemOnUpdate(t *testing.T) {
|
||||||
|
// hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
// defer hub.Cleanup()
|
||||||
|
|
||||||
|
// systems, err := beszelTests.CreateSystems(hub, 2, user.Id, "up")
|
||||||
|
// require.NoError(t, err)
|
||||||
|
// system1 := systems[0]
|
||||||
|
// system2 := systems[1]
|
||||||
|
|
||||||
|
// alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
// "name": "CPU",
|
||||||
|
// "system": system1.Id,
|
||||||
|
// "user": user.Id,
|
||||||
|
// "value": 80,
|
||||||
|
// "min": 1,
|
||||||
|
// })
|
||||||
|
// require.NoError(t, err)
|
||||||
|
|
||||||
|
// am := hub.AlertManager
|
||||||
|
// cache := am.GetSystemAlertsCache()
|
||||||
|
|
||||||
|
// // Initially in system1 cache
|
||||||
|
// assert.Len(t, cache.Get(system1.Id), 1)
|
||||||
|
// assert.Empty(t, cache.Get(system2.Id))
|
||||||
|
|
||||||
|
// // Move alert to system2
|
||||||
|
// alert.Set("system", system2.Id)
|
||||||
|
// require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
// // DEBUG: print if it is found
|
||||||
|
// // fmt.Printf("system1 alerts after update: %v\n", cache.Get(system1.Id))
|
||||||
|
|
||||||
|
// // Should be removed from system1 and present in system2
|
||||||
|
// assert.Empty(t, cache.GetType(system1.Id, "CPU"), "updated alerts should be evicted from the previous system cache")
|
||||||
|
// require.Len(t, cache.Get(system2.Id), 1)
|
||||||
|
// assert.Equal(t, alert.Id, cache.Get(system2.Id)[0].Id)
|
||||||
|
// }
|
||||||
155
internal/alerts/alerts_disk_test.go
Normal file
155
internal/alerts/alerts_disk_test.go
Normal file
@@ -0,0 +1,155 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/tools/types"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestDiskAlertExtraFsMultiMinute tests that multi-minute disk alerts correctly use
|
||||||
|
// historical per-minute values for extra (non-root) filesystems, not the current live snapshot.
|
||||||
|
func TestDiskAlertExtraFsMultiMinute(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
systemRecord := systems[0]
|
||||||
|
|
||||||
|
// Disk alert: threshold 80%, min=2 (requires historical averaging)
|
||||||
|
diskAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Disk",
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 80, // threshold: 80%
|
||||||
|
"min": 2, // 2 minutes - requires historical averaging
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, diskAlert.GetBool("triggered"), "Alert should not be triggered initially")
|
||||||
|
|
||||||
|
am := hub.GetAlertManager()
|
||||||
|
now := time.Now().UTC()
|
||||||
|
|
||||||
|
extraFsHigh := map[string]*system.FsStats{
|
||||||
|
"/mnt/data": {DiskTotal: 1000, DiskUsed: 920}, // 92% - above threshold
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert 4 historical records spread over 3 minutes (same pattern as battery tests).
|
||||||
|
// The oldest record must predate (now - 2min) so the alert time window is valid.
|
||||||
|
recordTimes := []time.Duration{
|
||||||
|
-180 * time.Second, // 3 min ago - anchors oldest record before alert.time
|
||||||
|
-90 * time.Second,
|
||||||
|
-60 * time.Second,
|
||||||
|
-30 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, offset := range recordTimes {
|
||||||
|
stats := system.Stats{
|
||||||
|
DiskPct: 30, // root disk at 30% - below threshold
|
||||||
|
ExtraFs: extraFsHigh,
|
||||||
|
}
|
||||||
|
statsJSON, _ := json.Marshal(stats)
|
||||||
|
|
||||||
|
recordTime := now.Add(offset)
|
||||||
|
record, err := beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"type": "1m",
|
||||||
|
"stats": string(statsJSON),
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||||
|
err = hub.SaveNoValidate(record)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
combinedDataHigh := &system.CombinedData{
|
||||||
|
Stats: system.Stats{
|
||||||
|
DiskPct: 30,
|
||||||
|
ExtraFs: extraFsHigh,
|
||||||
|
},
|
||||||
|
Info: system.Info{
|
||||||
|
DiskPct: 30,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
systemRecord.Set("updated", now)
|
||||||
|
err = hub.SaveNoValidate(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = am.HandleSystemAlerts(systemRecord, combinedDataHigh)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
|
||||||
|
diskAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": diskAlert.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, diskAlert.GetBool("triggered"),
|
||||||
|
"Alert SHOULD be triggered when extra disk average (92%%) exceeds threshold (80%%)")
|
||||||
|
|
||||||
|
// --- Resolution: extra disk drops to 50%, alert should resolve ---
|
||||||
|
|
||||||
|
extraFsLow := map[string]*system.FsStats{
|
||||||
|
"/mnt/data": {DiskTotal: 1000, DiskUsed: 500}, // 50% - below threshold
|
||||||
|
}
|
||||||
|
|
||||||
|
newNow := now.Add(2 * time.Minute)
|
||||||
|
recordTimesLow := []time.Duration{
|
||||||
|
-180 * time.Second,
|
||||||
|
-90 * time.Second,
|
||||||
|
-60 * time.Second,
|
||||||
|
-30 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, offset := range recordTimesLow {
|
||||||
|
stats := system.Stats{
|
||||||
|
DiskPct: 30,
|
||||||
|
ExtraFs: extraFsLow,
|
||||||
|
}
|
||||||
|
statsJSON, _ := json.Marshal(stats)
|
||||||
|
|
||||||
|
recordTime := newNow.Add(offset)
|
||||||
|
record, err := beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"type": "1m",
|
||||||
|
"stats": string(statsJSON),
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||||
|
err = hub.SaveNoValidate(record)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
combinedDataLow := &system.CombinedData{
|
||||||
|
Stats: system.Stats{
|
||||||
|
DiskPct: 30,
|
||||||
|
ExtraFs: extraFsLow,
|
||||||
|
},
|
||||||
|
Info: system.Info{
|
||||||
|
DiskPct: 30,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
systemRecord.Set("updated", newNow)
|
||||||
|
err = hub.SaveNoValidate(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = am.HandleSystemAlerts(systemRecord, combinedDataLow)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
|
||||||
|
diskAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": diskAlert.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, diskAlert.GetBool("triggered"),
|
||||||
|
"Alert should be resolved when extra disk average (50%%) drops below threshold (80%%)")
|
||||||
|
}
|
||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package alerts_test
|
package alerts_test
|
||||||
|
|
||||||
@@ -50,7 +49,7 @@ func TestAlertSilencedOneTime(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Test that alert is silenced
|
// Test that alert is silenced
|
||||||
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
||||||
@@ -107,7 +106,7 @@ func TestAlertSilencedDaily(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Get current hour and create a window that includes current time
|
// Get current hour and create a window that includes current time
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -171,7 +170,7 @@ func TestAlertSilencedDailyMidnightCrossing(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a window that crosses midnight: 22:00 - 02:00
|
// Create a window that crosses midnight: 22:00 - 02:00
|
||||||
startTime := time.Date(2000, 1, 1, 22, 0, 0, 0, time.UTC)
|
startTime := time.Date(2000, 1, 1, 22, 0, 0, 0, time.UTC)
|
||||||
@@ -212,7 +211,7 @@ func TestAlertSilencedGlobal(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a global quiet hours window (no system specified)
|
// Create a global quiet hours window (no system specified)
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -251,7 +250,7 @@ func TestAlertSilencedSystemSpecific(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a system-specific quiet hours window for system1 only
|
// Create a system-specific quiet hours window for system1 only
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -297,7 +296,7 @@ func TestAlertSilencedMultiUser(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a quiet hours window for user1 only
|
// Create a quiet hours window for user1 only
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -418,7 +417,7 @@ func TestAlertSilencedNoWindows(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Without any quiet hours windows, alert should NOT be silenced
|
// Without any quiet hours windows, alert should NOT be silenced
|
||||||
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package alerts_test
|
package alerts_test
|
||||||
|
|
||||||
|
|||||||
@@ -5,67 +5,28 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pocketbase/dbx"
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
type alertTask struct {
|
|
||||||
action string // "schedule" or "cancel"
|
|
||||||
systemName string
|
|
||||||
alertRecord *core.Record
|
|
||||||
delay time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
type alertInfo struct {
|
type alertInfo struct {
|
||||||
systemName string
|
systemName string
|
||||||
alertRecord *core.Record
|
alertData CachedAlertData
|
||||||
expireTime time.Time
|
expireTime time.Time
|
||||||
|
timer *time.Timer
|
||||||
}
|
}
|
||||||
|
|
||||||
// startWorker is a long-running goroutine that processes alert tasks
|
// Stop cancels all pending status alert timers.
|
||||||
// every x seconds. It must be running to process status alerts.
|
func (am *AlertManager) Stop() {
|
||||||
func (am *AlertManager) startWorker() {
|
am.stopOnce.Do(func() {
|
||||||
processPendingAlerts := time.Tick(15 * time.Second)
|
am.pendingAlerts.Range(func(key, value any) bool {
|
||||||
|
|
||||||
// check for status alerts that are not resolved when system comes up
|
|
||||||
// (can be removed if we figure out core bug in #1052)
|
|
||||||
checkStatusAlerts := time.Tick(561 * time.Second)
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-am.stopChan:
|
|
||||||
return
|
|
||||||
case task := <-am.alertQueue:
|
|
||||||
switch task.action {
|
|
||||||
case "schedule":
|
|
||||||
am.pendingAlerts.Store(task.alertRecord.Id, &alertInfo{
|
|
||||||
systemName: task.systemName,
|
|
||||||
alertRecord: task.alertRecord,
|
|
||||||
expireTime: time.Now().Add(task.delay),
|
|
||||||
})
|
|
||||||
case "cancel":
|
|
||||||
am.pendingAlerts.Delete(task.alertRecord.Id)
|
|
||||||
}
|
|
||||||
case <-checkStatusAlerts:
|
|
||||||
resolveStatusAlerts(am.hub)
|
|
||||||
case <-processPendingAlerts:
|
|
||||||
// Check for expired alerts every tick
|
|
||||||
now := time.Now()
|
|
||||||
for key, value := range am.pendingAlerts.Range {
|
|
||||||
info := value.(*alertInfo)
|
info := value.(*alertInfo)
|
||||||
if now.After(info.expireTime) {
|
if info.timer != nil {
|
||||||
// Downtime delay has passed, process alert
|
info.timer.Stop()
|
||||||
am.sendStatusAlert("down", info.systemName, info.alertRecord)
|
}
|
||||||
am.pendingAlerts.Delete(key)
|
am.pendingAlerts.Delete(key)
|
||||||
}
|
return true
|
||||||
}
|
})
|
||||||
}
|
})
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// StopWorker shuts down the AlertManager.worker goroutine
|
|
||||||
func (am *AlertManager) StopWorker() {
|
|
||||||
close(am.stopChan)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HandleStatusAlerts manages the logic when system status changes.
|
// HandleStatusAlerts manages the logic when system status changes.
|
||||||
@@ -74,82 +35,116 @@ func (am *AlertManager) HandleStatusAlerts(newStatus string, systemRecord *core.
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
alertRecords, err := am.getSystemStatusAlerts(systemRecord.Id)
|
alerts := am.alertsCache.GetAlertsByName(systemRecord.Id, "Status")
|
||||||
if err != nil {
|
if len(alerts) == 0 {
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(alertRecords) == 0 {
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
systemName := systemRecord.GetString("name")
|
systemName := systemRecord.GetString("name")
|
||||||
if newStatus == "down" {
|
if newStatus == "down" {
|
||||||
am.handleSystemDown(systemName, alertRecords)
|
am.handleSystemDown(systemName, alerts)
|
||||||
} else {
|
} else {
|
||||||
am.handleSystemUp(systemName, alertRecords)
|
am.handleSystemUp(systemName, alerts)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getSystemStatusAlerts retrieves all "Status" alert records for a given system ID.
|
// handleSystemDown manages the logic when a system status changes to "down". It schedules pending alerts for each alert record.
|
||||||
func (am *AlertManager) getSystemStatusAlerts(systemID string) ([]*core.Record, error) {
|
func (am *AlertManager) handleSystemDown(systemName string, alerts []CachedAlertData) {
|
||||||
alertRecords, err := am.hub.FindAllRecords("alerts", dbx.HashExp{
|
for _, alertData := range alerts {
|
||||||
"system": systemID,
|
min := max(1, int(alertData.Min))
|
||||||
"name": "Status",
|
am.schedulePendingStatusAlert(systemName, alertData, time.Duration(min)*time.Minute)
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
return alertRecords, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Schedules delayed "down" alerts for each alert record.
|
// schedulePendingStatusAlert sets up a timer to send a "down" alert after the specified delay if the system is still down.
|
||||||
func (am *AlertManager) handleSystemDown(systemName string, alertRecords []*core.Record) {
|
// It returns true if the alert was scheduled, or false if an alert was already pending for the given alert record.
|
||||||
for _, alertRecord := range alertRecords {
|
func (am *AlertManager) schedulePendingStatusAlert(systemName string, alertData CachedAlertData, delay time.Duration) bool {
|
||||||
// Continue if alert is already scheduled
|
alert := &alertInfo{
|
||||||
if _, exists := am.pendingAlerts.Load(alertRecord.Id); exists {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Schedule by adding to queue
|
|
||||||
min := max(1, alertRecord.GetInt("min"))
|
|
||||||
am.alertQueue <- alertTask{
|
|
||||||
action: "schedule",
|
|
||||||
systemName: systemName,
|
systemName: systemName,
|
||||||
alertRecord: alertRecord,
|
alertData: alertData,
|
||||||
delay: time.Duration(min) * time.Minute,
|
expireTime: time.Now().Add(delay),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
storedAlert, loaded := am.pendingAlerts.LoadOrStore(alertData.Id, alert)
|
||||||
|
if loaded {
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
stored := storedAlert.(*alertInfo)
|
||||||
|
stored.timer = time.AfterFunc(time.Until(stored.expireTime), func() {
|
||||||
|
am.processPendingAlert(alertData.Id)
|
||||||
|
})
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleSystemUp manages the logic when a system status changes to "up".
|
// handleSystemUp manages the logic when a system status changes to "up".
|
||||||
// It cancels any pending alerts and sends "up" alerts.
|
// It cancels any pending alerts and sends "up" alerts.
|
||||||
func (am *AlertManager) handleSystemUp(systemName string, alertRecords []*core.Record) {
|
func (am *AlertManager) handleSystemUp(systemName string, alerts []CachedAlertData) {
|
||||||
for _, alertRecord := range alertRecords {
|
for _, alertData := range alerts {
|
||||||
alertRecordID := alertRecord.Id
|
|
||||||
// If alert exists for record, delete and continue (down alert not sent)
|
// If alert exists for record, delete and continue (down alert not sent)
|
||||||
if _, exists := am.pendingAlerts.Load(alertRecordID); exists {
|
if am.cancelPendingAlert(alertData.Id) {
|
||||||
am.alertQueue <- alertTask{
|
|
||||||
action: "cancel",
|
|
||||||
alertRecord: alertRecord,
|
|
||||||
}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// No alert scheduled for this record, send "up" alert
|
if !alertData.Triggered {
|
||||||
if err := am.sendStatusAlert("up", systemName, alertRecord); err != nil {
|
continue
|
||||||
|
}
|
||||||
|
if err := am.sendStatusAlert("up", systemName, alertData); err != nil {
|
||||||
am.hub.Logger().Error("Failed to send alert", "err", err)
|
am.hub.Logger().Error("Failed to send alert", "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// sendStatusAlert sends a status alert ("up" or "down") to the users associated with the alert records.
|
// cancelPendingAlert stops the timer and removes the pending alert for the given alert ID. Returns true if a pending alert was found and cancelled.
|
||||||
func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, alertRecord *core.Record) error {
|
func (am *AlertManager) cancelPendingAlert(alertID string) bool {
|
||||||
switch alertStatus {
|
value, loaded := am.pendingAlerts.LoadAndDelete(alertID)
|
||||||
case "up":
|
if !loaded {
|
||||||
alertRecord.Set("triggered", false)
|
return false
|
||||||
case "down":
|
}
|
||||||
alertRecord.Set("triggered", true)
|
|
||||||
|
info := value.(*alertInfo)
|
||||||
|
if info.timer != nil {
|
||||||
|
info.timer.Stop()
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// CancelPendingStatusAlerts cancels all pending status alert timers for a given system.
|
||||||
|
// This is called when a system is paused to prevent delayed alerts from firing.
|
||||||
|
func (am *AlertManager) CancelPendingStatusAlerts(systemID string) {
|
||||||
|
am.pendingAlerts.Range(func(key, value any) bool {
|
||||||
|
info := value.(*alertInfo)
|
||||||
|
if info.alertData.SystemID == systemID {
|
||||||
|
am.cancelPendingAlert(key.(string))
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// processPendingAlert sends a "down" alert if the pending alert has expired and the system is still down.
|
||||||
|
func (am *AlertManager) processPendingAlert(alertID string) {
|
||||||
|
value, loaded := am.pendingAlerts.LoadAndDelete(alertID)
|
||||||
|
if !loaded {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
info := value.(*alertInfo)
|
||||||
|
refreshedAlertData, ok := am.alertsCache.Refresh(info.alertData)
|
||||||
|
if !ok || refreshedAlertData.Triggered {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := am.sendStatusAlert("down", info.systemName, refreshedAlertData); err != nil {
|
||||||
|
am.hub.Logger().Error("Failed to send alert", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendStatusAlert sends a status alert ("up" or "down") to the users associated with the alert records.
|
||||||
|
func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, alertData CachedAlertData) error {
|
||||||
|
// Update trigger state for alert record before sending alert
|
||||||
|
triggered := alertStatus == "down"
|
||||||
|
if err := am.setAlertTriggered(alertData, triggered); err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
am.hub.Save(alertRecord)
|
|
||||||
|
|
||||||
var emoji string
|
var emoji string
|
||||||
if alertStatus == "up" {
|
if alertStatus == "up" {
|
||||||
@@ -162,10 +157,10 @@ func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, a
|
|||||||
message := strings.TrimSuffix(title, emoji)
|
message := strings.TrimSuffix(title, emoji)
|
||||||
|
|
||||||
// Get system ID for the link
|
// Get system ID for the link
|
||||||
systemID := alertRecord.GetString("system")
|
systemID := alertData.SystemID
|
||||||
|
|
||||||
return am.SendAlert(AlertMessageData{
|
return am.SendAlert(AlertMessageData{
|
||||||
UserID: alertRecord.GetString("user"),
|
UserID: alertData.UserID,
|
||||||
SystemID: systemID,
|
SystemID: systemID,
|
||||||
Title: title,
|
Title: title,
|
||||||
Message: message,
|
Message: message,
|
||||||
@@ -174,8 +169,8 @@ func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, a
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// resolveStatusAlerts resolves any status alerts that weren't resolved
|
// resolveStatusAlerts resolves any triggered status alerts that weren't resolved
|
||||||
// when system came up (https://github.com/henrygd/beszel/issues/1052)
|
// when system came up (https://github.com/henrygd/beszel/issues/1052).
|
||||||
func resolveStatusAlerts(app core.App) error {
|
func resolveStatusAlerts(app core.App) error {
|
||||||
db := app.DB()
|
db := app.DB()
|
||||||
// Find all active status alerts where the system is actually up
|
// Find all active status alerts where the system is actually up
|
||||||
@@ -205,3 +200,40 @@ func resolveStatusAlerts(app core.App) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// restorePendingStatusAlerts re-queues untriggered status alerts for systems that
|
||||||
|
// are still down after a hub restart. This rebuilds the lost in-memory timer state.
|
||||||
|
func (am *AlertManager) restorePendingStatusAlerts() error {
|
||||||
|
type pendingStatusAlert struct {
|
||||||
|
AlertID string `db:"alert_id"`
|
||||||
|
SystemID string `db:"system_id"`
|
||||||
|
SystemName string `db:"system_name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var pending []pendingStatusAlert
|
||||||
|
err := am.hub.DB().NewQuery(`
|
||||||
|
SELECT a.id AS alert_id, a.system AS system_id, s.name AS system_name
|
||||||
|
FROM alerts a
|
||||||
|
JOIN systems s ON a.system = s.id
|
||||||
|
WHERE a.name = 'Status'
|
||||||
|
AND a.triggered = false
|
||||||
|
AND s.status = 'down'
|
||||||
|
`).All(&pending)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure cache is populated before trying to restore pending alerts
|
||||||
|
_ = am.alertsCache.PopulateFromDB(false)
|
||||||
|
|
||||||
|
for _, item := range pending {
|
||||||
|
alertData, ok := am.alertsCache.GetAlert(item.SystemID, item.AlertID)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
min := max(1, int(alertData.Min))
|
||||||
|
am.schedulePendingStatusAlert(item.SystemName, alertData, time.Duration(min)*time.Minute)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
1008
internal/alerts/alerts_status_test.go
Normal file
1008
internal/alerts/alerts_status_test.go
Normal file
File diff suppressed because it is too large
Load Diff
@@ -11,15 +11,11 @@ import (
|
|||||||
"github.com/pocketbase/dbx"
|
"github.com/pocketbase/dbx"
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
"github.com/pocketbase/pocketbase/tools/types"
|
"github.com/pocketbase/pocketbase/tools/types"
|
||||||
"github.com/spf13/cast"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *system.CombinedData) error {
|
func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *system.CombinedData) error {
|
||||||
alertRecords, err := am.hub.FindAllRecords("alerts",
|
alerts := am.alertsCache.GetAlertsExcludingNames(systemRecord.Id, "Status")
|
||||||
dbx.NewExp("system={:system} AND name!='Status'", dbx.Params{"system": systemRecord.Id}),
|
if len(alerts) == 0 {
|
||||||
)
|
|
||||||
if err != nil || len(alertRecords) == 0 {
|
|
||||||
// log.Println("no alerts found for system")
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -27,8 +23,8 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
now := systemRecord.GetDateTime("updated").Time().UTC()
|
now := systemRecord.GetDateTime("updated").Time().UTC()
|
||||||
oldestTime := now
|
oldestTime := now
|
||||||
|
|
||||||
for _, alertRecord := range alertRecords {
|
for _, alertData := range alerts {
|
||||||
name := alertRecord.GetString("name")
|
name := alertData.Name
|
||||||
var val float64
|
var val float64
|
||||||
unit := "%"
|
unit := "%"
|
||||||
|
|
||||||
@@ -38,7 +34,7 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
case "Memory":
|
case "Memory":
|
||||||
val = data.Info.MemPct
|
val = data.Info.MemPct
|
||||||
case "Bandwidth":
|
case "Bandwidth":
|
||||||
val = data.Info.Bandwidth
|
val = float64(data.Info.BandwidthBytes) / (1024 * 1024)
|
||||||
unit = " MB/s"
|
unit = " MB/s"
|
||||||
case "Disk":
|
case "Disk":
|
||||||
maxUsedPct := data.Info.DiskPct
|
maxUsedPct := data.Info.DiskPct
|
||||||
@@ -73,8 +69,8 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
val = float64(data.Stats.Battery[0])
|
val = float64(data.Stats.Battery[0])
|
||||||
}
|
}
|
||||||
|
|
||||||
triggered := alertRecord.GetBool("triggered")
|
triggered := alertData.Triggered
|
||||||
threshold := alertRecord.GetFloat("value")
|
threshold := alertData.Value
|
||||||
|
|
||||||
// Battery alert has inverted logic: trigger when value is BELOW threshold
|
// Battery alert has inverted logic: trigger when value is BELOW threshold
|
||||||
lowAlert := isLowAlert(name)
|
lowAlert := isLowAlert(name)
|
||||||
@@ -92,11 +88,11 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
min := max(1, cast.ToUint8(alertRecord.Get("min")))
|
min := max(1, alertData.Min)
|
||||||
|
|
||||||
alert := SystemAlertData{
|
alert := SystemAlertData{
|
||||||
systemRecord: systemRecord,
|
systemRecord: systemRecord,
|
||||||
alertRecord: alertRecord,
|
alertData: alertData,
|
||||||
name: name,
|
name: name,
|
||||||
unit: unit,
|
unit: unit,
|
||||||
val: val,
|
val: val,
|
||||||
@@ -129,7 +125,7 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
Created types.DateTime `db:"created"`
|
Created types.DateTime `db:"created"`
|
||||||
}{}
|
}{}
|
||||||
|
|
||||||
err = am.hub.DB().
|
err := am.hub.DB().
|
||||||
Select("stats", "created").
|
Select("stats", "created").
|
||||||
From("system_stats").
|
From("system_stats").
|
||||||
Where(dbx.NewExp(
|
Where(dbx.NewExp(
|
||||||
@@ -192,23 +188,25 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
case "Memory":
|
case "Memory":
|
||||||
alert.val += stats.Mem
|
alert.val += stats.Mem
|
||||||
case "Bandwidth":
|
case "Bandwidth":
|
||||||
alert.val += stats.NetSent + stats.NetRecv
|
alert.val += float64(stats.Bandwidth[0]+stats.Bandwidth[1]) / (1024 * 1024)
|
||||||
case "Disk":
|
case "Disk":
|
||||||
if alert.mapSums == nil {
|
if alert.mapSums == nil {
|
||||||
alert.mapSums = make(map[string]float32, len(data.Stats.ExtraFs)+1)
|
alert.mapSums = make(map[string]float32, len(stats.ExtraFs)+1)
|
||||||
}
|
}
|
||||||
// add root disk
|
// add root disk
|
||||||
if _, ok := alert.mapSums["root"]; !ok {
|
if _, ok := alert.mapSums["root"]; !ok {
|
||||||
alert.mapSums["root"] = 0.0
|
alert.mapSums["root"] = 0.0
|
||||||
}
|
}
|
||||||
alert.mapSums["root"] += float32(stats.Disk)
|
alert.mapSums["root"] += float32(stats.Disk)
|
||||||
// add extra disks
|
// add extra disks from historical record
|
||||||
for key, fs := range data.Stats.ExtraFs {
|
for key, fs := range stats.ExtraFs {
|
||||||
|
if fs.DiskTotal > 0 {
|
||||||
if _, ok := alert.mapSums[key]; !ok {
|
if _, ok := alert.mapSums[key]; !ok {
|
||||||
alert.mapSums[key] = 0.0
|
alert.mapSums[key] = 0.0
|
||||||
}
|
}
|
||||||
alert.mapSums[key] += float32(fs.DiskUsed / fs.DiskTotal * 100)
|
alert.mapSums[key] += float32(fs.DiskUsed / fs.DiskTotal * 100)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
case "Temperature":
|
case "Temperature":
|
||||||
if alert.mapSums == nil {
|
if alert.mapSums == nil {
|
||||||
alert.mapSums = make(map[string]float32, len(stats.Temperatures))
|
alert.mapSums = make(map[string]float32, len(stats.Temperatures))
|
||||||
@@ -342,13 +340,12 @@ func (am *AlertManager) sendSystemAlert(alert SystemAlertData) {
|
|||||||
}
|
}
|
||||||
body := fmt.Sprintf("%s averaged %.2f%s for the previous %v %s.", alert.descriptor, alert.val, alert.unit, alert.min, minutesLabel)
|
body := fmt.Sprintf("%s averaged %.2f%s for the previous %v %s.", alert.descriptor, alert.val, alert.unit, alert.min, minutesLabel)
|
||||||
|
|
||||||
alert.alertRecord.Set("triggered", alert.triggered)
|
if err := am.setAlertTriggered(alert.alertData, alert.triggered); err != nil {
|
||||||
if err := am.hub.Save(alert.alertRecord); err != nil {
|
|
||||||
// app.Logger().Error("failed to save alert record", "err", err)
|
// app.Logger().Error("failed to save alert record", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
am.SendAlert(AlertMessageData{
|
am.SendAlert(AlertMessageData{
|
||||||
UserID: alert.alertRecord.GetString("user"),
|
UserID: alert.alertData.UserID,
|
||||||
SystemID: alert.systemRecord.Id,
|
SystemID: alert.systemRecord.Id,
|
||||||
Title: subject,
|
Title: subject,
|
||||||
Message: body,
|
Message: body,
|
||||||
|
|||||||
218
internal/alerts/alerts_system_test.go
Normal file
218
internal/alerts/alerts_system_test.go
Normal file
@@ -0,0 +1,218 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"testing/synctest"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
type systemAlertValueSetter[T any] func(info *system.Info, stats *system.Stats, value T)
|
||||||
|
|
||||||
|
type systemAlertTestFixture struct {
|
||||||
|
hub *beszelTests.TestHub
|
||||||
|
alertID string
|
||||||
|
submit func(*system.CombinedData) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func createCombinedData[T any](value T, setValue systemAlertValueSetter[T]) *system.CombinedData {
|
||||||
|
var data system.CombinedData
|
||||||
|
setValue(&data.Info, &data.Stats, value)
|
||||||
|
return &data
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSystemAlertTestFixture(t *testing.T, alertName string, min int, threshold float64) *systemAlertTestFixture {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
systemRecord := systems[0]
|
||||||
|
|
||||||
|
sysManagerSystem, err := hub.GetSystemManager().GetSystemFromStore(systemRecord.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, sysManagerSystem)
|
||||||
|
sysManagerSystem.StopUpdater()
|
||||||
|
|
||||||
|
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||||
|
require.NoError(t, hub.Save(userSettings))
|
||||||
|
|
||||||
|
alertRecord, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": alertName,
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": min,
|
||||||
|
"value": threshold,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.False(t, alertRecord.GetBool("triggered"), "Alert should not be triggered initially")
|
||||||
|
|
||||||
|
alertsCache := hub.GetAlertManager().GetSystemAlertsCache()
|
||||||
|
cachedAlerts := alertsCache.GetAlertsExcludingNames(systemRecord.Id, "Status")
|
||||||
|
assert.Len(t, cachedAlerts, 1, "Alert should be in cache")
|
||||||
|
|
||||||
|
return &systemAlertTestFixture{
|
||||||
|
hub: hub,
|
||||||
|
alertID: alertRecord.Id,
|
||||||
|
submit: func(data *system.CombinedData) error {
|
||||||
|
_, err := sysManagerSystem.CreateRecords(data)
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fixture *systemAlertTestFixture) cleanup() {
|
||||||
|
fixture.hub.Cleanup()
|
||||||
|
}
|
||||||
|
|
||||||
|
func submitValue[T any](fixture *systemAlertTestFixture, t *testing.T, value T, setValue systemAlertValueSetter[T]) {
|
||||||
|
t.Helper()
|
||||||
|
require.NoError(t, fixture.submit(createCombinedData(value, setValue)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fixture *systemAlertTestFixture) assertTriggered(t *testing.T, triggered bool, message string) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
alertRecord, err := fixture.hub.FindRecordById("alerts", fixture.alertID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, triggered, alertRecord.GetBool("triggered"), message)
|
||||||
|
}
|
||||||
|
|
||||||
|
func waitForSystemAlert(d time.Duration) {
|
||||||
|
time.Sleep(d)
|
||||||
|
synctest.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func testOneMinuteSystemAlert[T any](t *testing.T, alertName string, threshold float64, setValue systemAlertValueSetter[T], triggerValue, resolveValue T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
fixture := newSystemAlertTestFixture(t, alertName, 1, threshold)
|
||||||
|
defer fixture.cleanup()
|
||||||
|
|
||||||
|
submitValue(fixture, t, triggerValue, setValue)
|
||||||
|
waitForSystemAlert(time.Second)
|
||||||
|
|
||||||
|
fixture.assertTriggered(t, true, "Alert should be triggered")
|
||||||
|
assert.Equal(t, 1, fixture.hub.TestMailer.TotalSend(), "An email should have been sent")
|
||||||
|
|
||||||
|
submitValue(fixture, t, resolveValue, setValue)
|
||||||
|
waitForSystemAlert(time.Second)
|
||||||
|
|
||||||
|
fixture.assertTriggered(t, false, "Alert should be untriggered")
|
||||||
|
assert.Equal(t, 2, fixture.hub.TestMailer.TotalSend(), "A second email should have been sent for untriggering the alert")
|
||||||
|
|
||||||
|
waitForSystemAlert(time.Minute)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testMultiMinuteSystemAlert[T any](t *testing.T, alertName string, threshold float64, min int, setValue systemAlertValueSetter[T], baselineValue, triggerValue, resolveValue T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
fixture := newSystemAlertTestFixture(t, alertName, min, threshold)
|
||||||
|
defer fixture.cleanup()
|
||||||
|
|
||||||
|
submitValue(fixture, t, baselineValue, setValue)
|
||||||
|
waitForSystemAlert(time.Minute + time.Second)
|
||||||
|
fixture.assertTriggered(t, false, "Alert should not be triggered yet")
|
||||||
|
|
||||||
|
submitValue(fixture, t, triggerValue, setValue)
|
||||||
|
waitForSystemAlert(time.Minute)
|
||||||
|
fixture.assertTriggered(t, false, "Alert should not be triggered until the history window is full")
|
||||||
|
|
||||||
|
submitValue(fixture, t, triggerValue, setValue)
|
||||||
|
waitForSystemAlert(time.Second)
|
||||||
|
fixture.assertTriggered(t, true, "Alert should be triggered")
|
||||||
|
assert.Equal(t, 1, fixture.hub.TestMailer.TotalSend(), "An email should have been sent")
|
||||||
|
|
||||||
|
submitValue(fixture, t, resolveValue, setValue)
|
||||||
|
waitForSystemAlert(time.Second)
|
||||||
|
fixture.assertTriggered(t, false, "Alert should be untriggered")
|
||||||
|
assert.Equal(t, 2, fixture.hub.TestMailer.TotalSend(), "A second email should have been sent for untriggering the alert")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func setCPUAlertValue(info *system.Info, stats *system.Stats, value float64) {
|
||||||
|
info.Cpu = value
|
||||||
|
stats.Cpu = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func setMemoryAlertValue(info *system.Info, stats *system.Stats, value float64) {
|
||||||
|
info.MemPct = value
|
||||||
|
stats.MemPct = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func setDiskAlertValue(info *system.Info, stats *system.Stats, value float64) {
|
||||||
|
info.DiskPct = value
|
||||||
|
stats.DiskPct = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func setBandwidthAlertValue(info *system.Info, stats *system.Stats, value [2]uint64) {
|
||||||
|
info.BandwidthBytes = value[0] + value[1]
|
||||||
|
stats.Bandwidth = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func megabytesToBytes(mb uint64) uint64 {
|
||||||
|
return mb * 1024 * 1024
|
||||||
|
}
|
||||||
|
|
||||||
|
func setGPUAlertValue(info *system.Info, stats *system.Stats, value float64) {
|
||||||
|
info.GpuPct = value
|
||||||
|
stats.GPUData = map[string]system.GPUData{
|
||||||
|
"GPU0": {Usage: value},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setTemperatureAlertValue(info *system.Info, stats *system.Stats, value float64) {
|
||||||
|
info.DashboardTemp = value
|
||||||
|
stats.Temperatures = map[string]float64{
|
||||||
|
"Temp0": value,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setLoadAvgAlertValue(info *system.Info, stats *system.Stats, value [3]float64) {
|
||||||
|
info.LoadAvg = value
|
||||||
|
stats.LoadAvg = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func setBatteryAlertValue(info *system.Info, stats *system.Stats, value [2]uint8) {
|
||||||
|
info.Battery = value
|
||||||
|
stats.Battery = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemAlertsOneMin(t *testing.T) {
|
||||||
|
testOneMinuteSystemAlert(t, "CPU", 50, setCPUAlertValue, 51, 49)
|
||||||
|
testOneMinuteSystemAlert(t, "Memory", 50, setMemoryAlertValue, 51, 49)
|
||||||
|
testOneMinuteSystemAlert(t, "Disk", 50, setDiskAlertValue, 51, 49)
|
||||||
|
testOneMinuteSystemAlert(t, "Bandwidth", 50, setBandwidthAlertValue, [2]uint64{megabytesToBytes(26), megabytesToBytes(25)}, [2]uint64{megabytesToBytes(25), megabytesToBytes(24)})
|
||||||
|
testOneMinuteSystemAlert(t, "GPU", 50, setGPUAlertValue, 51, 49)
|
||||||
|
testOneMinuteSystemAlert(t, "Temperature", 70, setTemperatureAlertValue, 71, 69)
|
||||||
|
testOneMinuteSystemAlert(t, "LoadAvg1", 4, setLoadAvgAlertValue, [3]float64{4.1, 0, 0}, [3]float64{3.9, 0, 0})
|
||||||
|
testOneMinuteSystemAlert(t, "LoadAvg5", 4, setLoadAvgAlertValue, [3]float64{0, 4.1, 0}, [3]float64{0, 3.9, 0})
|
||||||
|
testOneMinuteSystemAlert(t, "LoadAvg15", 4, setLoadAvgAlertValue, [3]float64{0, 0, 4.1}, [3]float64{0, 0, 3.9})
|
||||||
|
testOneMinuteSystemAlert(t, "Battery", 20, setBatteryAlertValue, [2]uint8{19, 0}, [2]uint8{21, 0})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemAlertsTwoMin(t *testing.T) {
|
||||||
|
testMultiMinuteSystemAlert(t, "CPU", 50, 2, setCPUAlertValue, 10, 51, 48)
|
||||||
|
testMultiMinuteSystemAlert(t, "Memory", 50, 2, setMemoryAlertValue, 10, 51, 48)
|
||||||
|
testMultiMinuteSystemAlert(t, "Disk", 50, 2, setDiskAlertValue, 10, 51, 48)
|
||||||
|
testMultiMinuteSystemAlert(t, "Bandwidth", 50, 2, setBandwidthAlertValue, [2]uint64{megabytesToBytes(10), megabytesToBytes(10)}, [2]uint64{megabytesToBytes(26), megabytesToBytes(25)}, [2]uint64{megabytesToBytes(10), megabytesToBytes(10)})
|
||||||
|
testMultiMinuteSystemAlert(t, "GPU", 50, 2, setGPUAlertValue, 10, 51, 48)
|
||||||
|
testMultiMinuteSystemAlert(t, "Temperature", 70, 2, setTemperatureAlertValue, 10, 71, 67)
|
||||||
|
testMultiMinuteSystemAlert(t, "LoadAvg1", 4, 2, setLoadAvgAlertValue, [3]float64{0, 0, 0}, [3]float64{4.1, 0, 0}, [3]float64{3.5, 0, 0})
|
||||||
|
testMultiMinuteSystemAlert(t, "LoadAvg5", 4, 2, setLoadAvgAlertValue, [3]float64{0, 2, 0}, [3]float64{0, 4.1, 0}, [3]float64{0, 3.5, 0})
|
||||||
|
testMultiMinuteSystemAlert(t, "LoadAvg15", 4, 2, setLoadAvgAlertValue, [3]float64{0, 0, 2}, [3]float64{0, 0, 4.1}, [3]float64{0, 0, 3.5})
|
||||||
|
testMultiMinuteSystemAlert(t, "Battery", 20, 2, setBatteryAlertValue, [2]uint8{21, 0}, [2]uint8{19, 0}, [2]uint8{25, 1})
|
||||||
|
}
|
||||||
@@ -1,456 +1,19 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package alerts_test
|
package alerts_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
"testing"
|
||||||
"testing/synctest"
|
"testing/synctest"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/alerts"
|
|
||||||
beszelTests "github.com/henrygd/beszel/internal/tests"
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
"github.com/pocketbase/dbx"
|
"github.com/pocketbase/dbx"
|
||||||
"github.com/pocketbase/pocketbase/core"
|
|
||||||
pbTests "github.com/pocketbase/pocketbase/tests"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
|
||||||
func jsonReader(v any) io.Reader {
|
|
||||||
data, err := json.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return bytes.NewReader(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUserAlertsApi(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
user1, _ := beszelTests.CreateUser(hub, "alertstest@example.com", "password")
|
|
||||||
user1Token, _ := user1.NewAuthToken()
|
|
||||||
|
|
||||||
user2, _ := beszelTests.CreateUser(hub, "alertstest2@example.com", "password")
|
|
||||||
user2Token, _ := user2.NewAuthToken()
|
|
||||||
|
|
||||||
system1, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "system1",
|
|
||||||
"users": []string{user1.Id},
|
|
||||||
"host": "127.0.0.1",
|
|
||||||
})
|
|
||||||
|
|
||||||
system2, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "system2",
|
|
||||||
"users": []string{user1.Id, user2.Id},
|
|
||||||
"host": "127.0.0.2",
|
|
||||||
})
|
|
||||||
|
|
||||||
userRecords, _ := hub.CountRecords("users")
|
|
||||||
assert.EqualValues(t, 2, userRecords, "all users should be created")
|
|
||||||
|
|
||||||
systemRecords, _ := hub.CountRecords("systems")
|
|
||||||
assert.EqualValues(t, 2, systemRecords, "all systems should be created")
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
// {
|
|
||||||
// Name: "GET not implemented - returns index",
|
|
||||||
// Method: http.MethodGet,
|
|
||||||
// URL: "/api/beszel/user-alerts",
|
|
||||||
// ExpectedStatus: 200,
|
|
||||||
// ExpectedContent: []string{"<html ", "globalThis.BESZEL"},
|
|
||||||
// TestAppFactory: testAppFactory,
|
|
||||||
// },
|
|
||||||
{
|
|
||||||
Name: "POST no auth",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST no body",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"Bad data"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST bad data",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"Bad data"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"invalidField": "this should cause validation error",
|
|
||||||
"threshold": "not a number",
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST malformed JSON",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"Bad data"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: strings.NewReader(`{"alertType": "cpu", "threshold": 80, "enabled": true,}`),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST valid alert data multiple systems",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 69,
|
|
||||||
"min": 9,
|
|
||||||
"systems": []string{system1.Id, system2.Id},
|
|
||||||
"overwrite": false,
|
|
||||||
}),
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
// check total alerts
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
|
||||||
// check alert has correct values
|
|
||||||
matchingAlerts, _ := app.CountRecords("alerts", dbx.HashExp{"name": "CPU", "user": user1.Id, "system": system1.Id, "value": 69, "min": 9})
|
|
||||||
assert.EqualValues(t, 1, matchingAlerts, "should have 1 alert")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST valid alert data single system",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "Memory",
|
|
||||||
"systems": []string{system1.Id},
|
|
||||||
"value": 90,
|
|
||||||
"min": 10,
|
|
||||||
}),
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
user1Alerts, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
|
||||||
assert.EqualValues(t, 3, user1Alerts, "should have 3 alerts")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "Overwrite: false, should not overwrite existing alert",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 45,
|
|
||||||
"min": 5,
|
|
||||||
"systems": []string{system1.Id},
|
|
||||||
"overwrite": false,
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system1.Id,
|
|
||||||
"user": user1.Id,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
|
||||||
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user1.Id})
|
|
||||||
assert.EqualValues(t, 80, alert.Get("value"), "should have 80 as value")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "Overwrite: true, should overwrite existing alert",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user2Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 45,
|
|
||||||
"min": 5,
|
|
||||||
"systems": []string{system2.Id},
|
|
||||||
"overwrite": true,
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system2.Id,
|
|
||||||
"user": user2.Id,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
|
||||||
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user2.Id})
|
|
||||||
assert.EqualValues(t, 45, alert.Get("value"), "should have 45 as value")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE no auth",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system1.Id},
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system1.Id,
|
|
||||||
"user": user1.Id,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE alert",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system1.Id},
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system1.Id,
|
|
||||||
"user": user1.Id,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.Zero(t, alerts, "should have 0 alerts")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE alert multiple systems",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"count\":2", "\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "Memory",
|
|
||||||
"systems": []string{system1.Id, system2.Id},
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
for _, systemId := range []string{system1.Id, system2.Id} {
|
|
||||||
_, err := beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "Memory",
|
|
||||||
"system": systemId,
|
|
||||||
"user": user1.Id,
|
|
||||||
"value": 90,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
assert.NoError(t, err, "should create alert")
|
|
||||||
}
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.Zero(t, alerts, "should have 0 alerts")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "User 2 should not be able to delete alert of user 1",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user2Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system2.Id},
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
for _, user := range []string{user1.Id, user2.Id} {
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system2.Id,
|
|
||||||
"user": user,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
|
||||||
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
|
||||||
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
|
||||||
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
|
||||||
assert.EqualValues(t, 1, user2AlertCount, "should have 1 alert")
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
|
||||||
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
|
||||||
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
|
||||||
assert.Zero(t, user2AlertCount, "should have 0 alerts")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestStatusAlerts exercises the system "down" (Status) alert pipeline end
// to end: systems transitioning paused -> up -> down, the pendingAlerts
// delay map, the `triggered` flag on alert records, and the notification
// emails captured by TestMailer.
//
// It runs inside synctest, so the time.Sleep calls below advance a fake
// clock instantly instead of blocking the test for real wall-clock minutes.
func TestStatusAlerts(t *testing.T) {
	synctest.Test(t, func(t *testing.T) {
		hub, user := beszelTests.GetHubWithUser(t)
		defer hub.Cleanup()

		// Four systems created in the "paused" state, each with a Status
		// alert whose "min" (minutes of downtime before firing) is 1..4.
		systems, err := beszelTests.CreateSystems(hub, 4, user.Id, "paused")
		assert.NoError(t, err)

		var alerts []*core.Record
		for i, system := range systems {
			alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
				"name":   "Status",
				"system": system.Id,
				"user":   user.Id,
				"min":    i + 1,
			})
			assert.NoError(t, err)
			alerts = append(alerts, alert)
		}

		time.Sleep(10 * time.Millisecond)

		// Creating alerts for paused systems must not trigger anything
		// or send any mail.
		for _, alert := range alerts {
			assert.False(t, alert.GetBool("triggered"), "Alert should not be triggered immediately")
		}
		if hub.TestMailer.TotalSend() != 0 {
			assert.Zero(t, hub.TestMailer.TotalSend(), "Expected 0 messages, got %d", hub.TestMailer.TotalSend())
		}
		for _, system := range systems {
			assert.EqualValues(t, "paused", system.GetString("status"), "System should be paused")
		}
		// paused -> up: should not queue any pending alerts.
		for _, system := range systems {
			system.Set("status", "up")
			err = hub.SaveNoValidate(system)
			assert.NoError(t, err)
		}
		time.Sleep(time.Second)
		assert.EqualValues(t, 0, hub.GetPendingAlertsCount(), "should have 0 alerts in the pendingAlerts map")
		// up -> down: each alert enters the pendingAlerts map and should
		// only fire after its own "min" minutes of downtime.
		for _, system := range systems {
			system.Set("status", "down")
			err = hub.SaveNoValidate(system)
			assert.NoError(t, err)
		}
		// after 30 seconds, should have 4 alerts in the pendingAlerts map, no triggered alerts
		time.Sleep(time.Second * 30)
		assert.EqualValues(t, 4, hub.GetPendingAlertsCount(), "should have 4 alerts in the pendingAlerts map")
		triggeredCount, err := hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
		assert.NoError(t, err)
		assert.EqualValues(t, 0, triggeredCount, "should have 0 alert triggered")
		assert.EqualValues(t, 0, hub.TestMailer.TotalSend(), "should have 0 messages sent")
		// after 1:30 seconds, should have 1 triggered alert and 3 pending alerts
		time.Sleep(time.Second * 60)
		assert.EqualValues(t, 3, hub.GetPendingAlertsCount(), "should have 3 alerts in the pendingAlerts map")
		triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
		assert.NoError(t, err)
		assert.EqualValues(t, 1, triggeredCount, "should have 1 alert triggered")
		assert.EqualValues(t, 1, hub.TestMailer.TotalSend(), "should have 1 messages sent")
		// after 2:30 seconds, should have 2 triggered alerts and 2 pending alerts
		time.Sleep(time.Second * 60)
		assert.EqualValues(t, 2, hub.GetPendingAlertsCount(), "should have 2 alerts in the pendingAlerts map")
		triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
		assert.NoError(t, err)
		assert.EqualValues(t, 2, triggeredCount, "should have 2 alert triggered")
		assert.EqualValues(t, 2, hub.TestMailer.TotalSend(), "should have 2 messages sent")
		// now we will bring the remaining systems back up
		for _, system := range systems {
			system.Set("status", "up")
			err = hub.SaveNoValidate(system)
			assert.NoError(t, err)
		}
		time.Sleep(time.Second)
		// should have 0 alerts in the pendingAlerts map and 0 alerts triggered
		assert.EqualValues(t, 0, hub.GetPendingAlertsCount(), "should have 0 alerts in the pendingAlerts map")
		triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
		assert.NoError(t, err)
		assert.Zero(t, triggeredCount, "should have 0 alert triggered")
		// 4 messages sent, 2 down alerts and 2 up alerts for first 2 systems
		assert.EqualValues(t, 4, hub.TestMailer.TotalSend(), "should have 4 messages sent")
	})
}
|
|
||||||
|
|
||||||
func TestAlertsHistory(t *testing.T) {
|
func TestAlertsHistory(t *testing.T) {
|
||||||
synctest.Test(t, func(t *testing.T) {
|
synctest.Test(t, func(t *testing.T) {
|
||||||
hub, user := beszelTests.GetHubWithUser(t)
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
@@ -579,102 +142,46 @@ func TestAlertsHistory(t *testing.T) {
|
|||||||
assert.EqualValues(t, 2, totalHistoryCount, "Should have 2 total alert history records")
|
assert.EqualValues(t, 2, totalHistoryCount, "Should have 2 total alert history records")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
func TestResolveStatusAlerts(t *testing.T) {
|
|
||||||
hub, user := beszelTests.GetHubWithUser(t)
|
func TestSetAlertTriggered(t *testing.T) {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
defer hub.Cleanup()
|
defer hub.Cleanup()
|
||||||
|
|
||||||
// Create a systemUp
|
hub.StartHub()
|
||||||
systemUp, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
|
user, _ := beszelTests.CreateUser(hub, "test@example.com", "password")
|
||||||
|
system, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
"name": "test-system",
|
"name": "test-system",
|
||||||
"users": []string{user.Id},
|
"users": []string{user.Id},
|
||||||
"host": "127.0.0.1",
|
"host": "127.0.0.1",
|
||||||
"status": "up",
|
|
||||||
})
|
})
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
systemDown, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
alertRecord, _ := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
"name": "test-system-2",
|
"name": "CPU",
|
||||||
"users": []string{user.Id},
|
"system": system.Id,
|
||||||
"host": "127.0.0.2",
|
|
||||||
"status": "up",
|
|
||||||
})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Create a status alertUp for the system
|
|
||||||
alertUp, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
|
||||||
"name": "Status",
|
|
||||||
"system": systemUp.Id,
|
|
||||||
"user": user.Id,
|
"user": user.Id,
|
||||||
"min": 1,
|
"value": 80,
|
||||||
|
"triggered": false,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
am := alerts.NewAlertManager(hub)
|
||||||
|
|
||||||
|
var alert alerts.CachedAlertData
|
||||||
|
alert.PopulateFromRecord(alertRecord)
|
||||||
|
|
||||||
|
// Test triggering the alert
|
||||||
|
err := am.SetAlertTriggered(alert, true)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
alertDown, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
updatedRecord, err := hub.FindRecordById("alerts", alert.Id)
|
||||||
"name": "Status",
|
assert.NoError(t, err)
|
||||||
"system": systemDown.Id,
|
assert.True(t, updatedRecord.GetBool("triggered"))
|
||||||
"user": user.Id,
|
|
||||||
"min": 1,
|
// Test un-triggering the alert
|
||||||
})
|
err = am.SetAlertTriggered(alert, false)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
// Verify alert is not triggered initially
|
updatedRecord, err = hub.FindRecordById("alerts", alert.Id)
|
||||||
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered initially")
|
|
||||||
|
|
||||||
// Set the system to 'up' (this should not trigger the alert)
|
|
||||||
systemUp.Set("status", "up")
|
|
||||||
err = hub.SaveNoValidate(systemUp)
|
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
assert.False(t, updatedRecord.GetBool("triggered"))
|
||||||
systemDown.Set("status", "down")
|
|
||||||
err = hub.SaveNoValidate(systemDown)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Wait a moment for any processing
|
|
||||||
time.Sleep(10 * time.Millisecond)
|
|
||||||
|
|
||||||
// Verify alertUp is still not triggered after setting system to up
|
|
||||||
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered when system is up")
|
|
||||||
|
|
||||||
// Manually set both alerts triggered to true
|
|
||||||
alertUp.Set("triggered", true)
|
|
||||||
err = hub.SaveNoValidate(alertUp)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
alertDown.Set("triggered", true)
|
|
||||||
err = hub.SaveNoValidate(alertDown)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify we have exactly one alert with triggered true
|
|
||||||
triggeredCount, err := hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.EqualValues(t, 2, triggeredCount, "Should have exactly two alerts with triggered true")
|
|
||||||
|
|
||||||
// Verify the specific alertUp is triggered
|
|
||||||
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, alertUp.GetBool("triggered"), "Alert should be triggered")
|
|
||||||
|
|
||||||
// Verify we have two unresolved alert history records
|
|
||||||
alertHistoryCount, err := hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.EqualValues(t, 2, alertHistoryCount, "Should have exactly two unresolved alert history records")
|
|
||||||
|
|
||||||
err = alerts.ResolveStatusAlerts(hub)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify alertUp is not triggered after resolving
|
|
||||||
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered after resolving")
|
|
||||||
// Verify alertDown is still triggered
|
|
||||||
alertDown, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertDown.Id})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, alertDown.GetBool("triggered"), "Alert should still be triggered after resolving")
|
|
||||||
|
|
||||||
// Verify we have one unresolved alert history record
|
|
||||||
alertHistoryCount, err = hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.EqualValues(t, 1, alertHistoryCount, "Should have exactly one unresolved alert history record")
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package alerts
|
package alerts
|
||||||
|
|
||||||
@@ -10,6 +9,18 @@ import (
|
|||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// NewTestAlertManagerWithoutWorker builds an AlertManager wired to the given
// hub with a fresh alerts cache. Per its name it skips whatever background
// worker the normal constructor starts (not visible here — confirm against
// the production constructor), so tests can drive processing manually.
func NewTestAlertManagerWithoutWorker(app hubLike) *AlertManager {
	return &AlertManager{
		hub:         app,
		alertsCache: NewAlertsCache(app),
	}
}
|
||||||
|
|
||||||
|
// GetSystemAlertsCache returns the internal system alerts cache.
// Test-only accessor exposing the unexported alertsCache field.
func (am *AlertManager) GetSystemAlertsCache() *AlertsCache {
	return am.alertsCache
}
|
||||||
|
|
||||||
// GetAlertManager returns the receiver itself. Identity accessor kept so
// that callers holding an interface can reach the concrete *AlertManager
// in tests.
func (am *AlertManager) GetAlertManager() *AlertManager {
	return am
}
|
||||||
@@ -28,19 +39,18 @@ func (am *AlertManager) GetPendingAlertsCount() int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ProcessPendingAlerts manually processes all expired alerts (for testing)
|
// ProcessPendingAlerts manually processes all expired alerts (for testing)
|
||||||
func (am *AlertManager) ProcessPendingAlerts() ([]*core.Record, error) {
|
func (am *AlertManager) ProcessPendingAlerts() ([]CachedAlertData, error) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
var lastErr error
|
var lastErr error
|
||||||
var processedAlerts []*core.Record
|
var processedAlerts []CachedAlertData
|
||||||
am.pendingAlerts.Range(func(key, value any) bool {
|
am.pendingAlerts.Range(func(key, value any) bool {
|
||||||
info := value.(*alertInfo)
|
info := value.(*alertInfo)
|
||||||
if now.After(info.expireTime) {
|
if now.After(info.expireTime) {
|
||||||
// Downtime delay has passed, process alert
|
if info.timer != nil {
|
||||||
if err := am.sendStatusAlert("down", info.systemName, info.alertRecord); err != nil {
|
info.timer.Stop()
|
||||||
lastErr = err
|
|
||||||
}
|
}
|
||||||
processedAlerts = append(processedAlerts, info.alertRecord)
|
am.processPendingAlert(key.(string))
|
||||||
am.pendingAlerts.Delete(key)
|
processedAlerts = append(processedAlerts, info.alertData)
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
@@ -57,6 +67,35 @@ func (am *AlertManager) ForceExpirePendingAlerts() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ResetPendingAlertTimer reschedules the pending (not-yet-fired) alert
// identified by alertID to expire after the given delay from now.
// It reports false when no pending alert with that ID exists, true after
// a successful reschedule. Test helper for manipulating downtime delays.
func (am *AlertManager) ResetPendingAlertTimer(alertID string, delay time.Duration) bool {
	value, loaded := am.pendingAlerts.Load(alertID)
	if !loaded {
		return false
	}
	info := value.(*alertInfo)
	// Stop the previous timer so the alert cannot fire twice.
	if info.timer != nil {
		info.timer.Stop()
	}
	info.expireTime = time.Now().Add(delay)
	info.timer = time.AfterFunc(delay, func() {
		am.processPendingAlert(alertID)
	})
	return true
}
|
||||||
|
|
||||||
// ResolveStatusAlerts exposes the unexported resolveStatusAlerts for tests.
func ResolveStatusAlerts(app core.App) error {
	return resolveStatusAlerts(app)
}
|
||||||
|
|
||||||
|
// RestorePendingStatusAlerts exposes the unexported
// restorePendingStatusAlerts for tests.
func (am *AlertManager) RestorePendingStatusAlerts() error {
	return am.restorePendingStatusAlerts()
}
|
||||||
|
|
||||||
|
// SetAlertTriggered exposes the unexported setAlertTriggered for tests,
// flipping the cached alert's triggered state.
func (am *AlertManager) SetAlertTriggered(alert CachedAlertData, triggered bool) error {
	return am.setAlertTriggered(alert, triggered)
}
|
||||||
|
|
||||||
|
// IsInternalURL exposes the unexported isInternalURL for tests.
func IsInternalURL(rawURL string) (bool, error) {
	return isInternalURL(rawURL)
}
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
"github.com/henrygd/beszel/agent"
|
"github.com/henrygd/beszel/agent"
|
||||||
"github.com/henrygd/beszel/agent/health"
|
"github.com/henrygd/beszel/agent/health"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
@@ -116,12 +117,12 @@ func (opts *cmdOptions) loadPublicKeys() ([]ssh.PublicKey, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Try environment variable
|
// Try environment variable
|
||||||
if key, ok := agent.GetEnv("KEY"); ok && key != "" {
|
if key, ok := utils.GetEnv("KEY"); ok && key != "" {
|
||||||
return agent.ParseKeys(key)
|
return agent.ParseKeys(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try key file
|
// Try key file
|
||||||
keyFile, ok := agent.GetEnv("KEY_FILE")
|
keyFile, ok := utils.GetEnv("KEY_FILE")
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, fmt.Errorf("no key provided: must set -key flag, KEY env var, or KEY_FILE env var. Use 'beszel-agent help' for usage")
|
return nil, fmt.Errorf("no key provided: must set -key flag, KEY env var, or KEY_FILE env var. Use 'beszel-agent help' for usage")
|
||||||
}
|
}
|
||||||
@@ -194,6 +195,6 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := a.Start(serverConfig); err != nil {
|
if err := a.Start(serverConfig); err != nil {
|
||||||
log.Fatal("Failed to start server: ", err)
|
log.Fatal("Failed to start: ", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -28,8 +28,8 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
baseApp := getBaseApp()
|
baseApp := getBaseApp()
|
||||||
h := hub.NewHub(baseApp)
|
hub := hub.NewHub(baseApp)
|
||||||
if err := h.StartHub(); err != nil {
|
if err := hub.StartHub(); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,10 +10,19 @@ type ApiInfo struct {
|
|||||||
Status string
|
Status string
|
||||||
State string
|
State string
|
||||||
Image string
|
Image string
|
||||||
|
Health struct {
|
||||||
|
Status string
|
||||||
|
// FailingStreak int
|
||||||
|
}
|
||||||
|
Ports []struct {
|
||||||
|
// PrivatePort uint16
|
||||||
|
PublicPort uint16
|
||||||
|
IP string
|
||||||
|
// Type string
|
||||||
|
}
|
||||||
// ImageID string
|
// ImageID string
|
||||||
// Command string
|
// Command string
|
||||||
// Created int64
|
// Created int64
|
||||||
// Ports []Port
|
|
||||||
// SizeRw int64 `json:",omitempty"`
|
// SizeRw int64 `json:",omitempty"`
|
||||||
// SizeRootFs int64 `json:",omitempty"`
|
// SizeRootFs int64 `json:",omitempty"`
|
||||||
// Labels map[string]string
|
// Labels map[string]string
|
||||||
@@ -140,6 +149,7 @@ type Stats struct {
|
|||||||
Status string `json:"-" cbor:"6,keyasint"`
|
Status string `json:"-" cbor:"6,keyasint"`
|
||||||
Id string `json:"-" cbor:"7,keyasint"`
|
Id string `json:"-" cbor:"7,keyasint"`
|
||||||
Image string `json:"-" cbor:"8,keyasint"`
|
Image string `json:"-" cbor:"8,keyasint"`
|
||||||
|
Ports string `json:"-" cbor:"10,keyasint"`
|
||||||
// PrevCpu [2]uint64 `json:"-"`
|
// PrevCpu [2]uint64 `json:"-"`
|
||||||
CpuSystem uint64 `json:"-"`
|
CpuSystem uint64 `json:"-"`
|
||||||
CpuContainer uint64 `json:"-"`
|
CpuContainer uint64 `json:"-"`
|
||||||
|
|||||||
@@ -144,7 +144,7 @@ type AtaDeviceStatisticsPage struct {
|
|||||||
|
|
||||||
type AtaDeviceStatisticsEntry struct {
|
type AtaDeviceStatisticsEntry struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Value *uint64 `json:"value,omitempty"`
|
Value *int64 `json:"value,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type AtaSmartAttribute struct {
|
type AtaSmartAttribute struct {
|
||||||
@@ -357,7 +357,7 @@ type SmartInfoForSata struct {
|
|||||||
// AtaSmartData AtaSmartData `json:"ata_smart_data"`
|
// AtaSmartData AtaSmartData `json:"ata_smart_data"`
|
||||||
// AtaSctCapabilities AtaSctCapabilities `json:"ata_sct_capabilities"`
|
// AtaSctCapabilities AtaSctCapabilities `json:"ata_sct_capabilities"`
|
||||||
AtaSmartAttributes AtaSmartAttributes `json:"ata_smart_attributes"`
|
AtaSmartAttributes AtaSmartAttributes `json:"ata_smart_attributes"`
|
||||||
AtaDeviceStatistics AtaDeviceStatistics `json:"ata_device_statistics"`
|
AtaDeviceStatistics json.RawMessage `json:"ata_device_statistics"`
|
||||||
// PowerOnTime PowerOnTimeInfo `json:"power_on_time"`
|
// PowerOnTime PowerOnTimeInfo `json:"power_on_time"`
|
||||||
// PowerCycleCount uint16 `json:"power_cycle_count"`
|
// PowerCycleCount uint16 `json:"power_cycle_count"`
|
||||||
Temperature TemperatureInfo `json:"temperature"`
|
Temperature TemperatureInfo `json:"temperature"`
|
||||||
@@ -494,7 +494,7 @@ type SmartInfoForNvme struct {
|
|||||||
FirmwareVersion string `json:"firmware_version"`
|
FirmwareVersion string `json:"firmware_version"`
|
||||||
// NVMePCIVendor NVMePCIVendor `json:"nvme_pci_vendor"`
|
// NVMePCIVendor NVMePCIVendor `json:"nvme_pci_vendor"`
|
||||||
// NVMeIEEEOUIIdentifier uint32 `json:"nvme_ieee_oui_identifier"`
|
// NVMeIEEEOUIIdentifier uint32 `json:"nvme_ieee_oui_identifier"`
|
||||||
// NVMeTotalCapacity uint64 `json:"nvme_total_capacity"`
|
NVMeTotalCapacity uint64 `json:"nvme_total_capacity"`
|
||||||
// NVMeUnallocatedCapacity uint64 `json:"nvme_unallocated_capacity"`
|
// NVMeUnallocatedCapacity uint64 `json:"nvme_unallocated_capacity"`
|
||||||
// NVMeControllerID uint16 `json:"nvme_controller_id"`
|
// NVMeControllerID uint16 `json:"nvme_controller_id"`
|
||||||
// NVMeVersion VersionStringInfo `json:"nvme_version"`
|
// NVMeVersion VersionStringInfo `json:"nvme_version"`
|
||||||
|
|||||||
@@ -12,8 +12,9 @@ import (
|
|||||||
|
|
||||||
type Stats struct {
|
type Stats struct {
|
||||||
Cpu float64 `json:"cpu" cbor:"0,keyasint"`
|
Cpu float64 `json:"cpu" cbor:"0,keyasint"`
|
||||||
MaxCpu float64 `json:"cpum,omitempty" cbor:"1,keyasint,omitempty"`
|
MaxCpu float64 `json:"cpum,omitempty" cbor:"-"`
|
||||||
Mem float64 `json:"m" cbor:"2,keyasint"`
|
Mem float64 `json:"m" cbor:"2,keyasint"`
|
||||||
|
MaxMem float64 `json:"mm,omitempty" cbor:"-"`
|
||||||
MemUsed float64 `json:"mu" cbor:"3,keyasint"`
|
MemUsed float64 `json:"mu" cbor:"3,keyasint"`
|
||||||
MemPct float64 `json:"mp" cbor:"4,keyasint"`
|
MemPct float64 `json:"mp" cbor:"4,keyasint"`
|
||||||
MemBuffCache float64 `json:"mb" cbor:"5,keyasint"`
|
MemBuffCache float64 `json:"mb" cbor:"5,keyasint"`
|
||||||
@@ -23,31 +24,32 @@ type Stats struct {
|
|||||||
DiskTotal float64 `json:"d" cbor:"9,keyasint"`
|
DiskTotal float64 `json:"d" cbor:"9,keyasint"`
|
||||||
DiskUsed float64 `json:"du" cbor:"10,keyasint"`
|
DiskUsed float64 `json:"du" cbor:"10,keyasint"`
|
||||||
DiskPct float64 `json:"dp" cbor:"11,keyasint"`
|
DiskPct float64 `json:"dp" cbor:"11,keyasint"`
|
||||||
DiskReadPs float64 `json:"dr" cbor:"12,keyasint"`
|
DiskReadPs float64 `json:"dr,omitzero" cbor:"12,keyasint,omitzero"`
|
||||||
DiskWritePs float64 `json:"dw" cbor:"13,keyasint"`
|
DiskWritePs float64 `json:"dw,omitzero" cbor:"13,keyasint,omitzero"`
|
||||||
MaxDiskReadPs float64 `json:"drm,omitempty" cbor:"14,keyasint,omitempty"`
|
MaxDiskReadPs float64 `json:"drm,omitempty" cbor:"-"`
|
||||||
MaxDiskWritePs float64 `json:"dwm,omitempty" cbor:"15,keyasint,omitempty"`
|
MaxDiskWritePs float64 `json:"dwm,omitempty" cbor:"-"`
|
||||||
NetworkSent float64 `json:"ns,omitzero" cbor:"16,keyasint,omitzero"`
|
NetworkSent float64 `json:"ns,omitzero" cbor:"16,keyasint,omitzero"`
|
||||||
NetworkRecv float64 `json:"nr,omitzero" cbor:"17,keyasint,omitzero"`
|
NetworkRecv float64 `json:"nr,omitzero" cbor:"17,keyasint,omitzero"`
|
||||||
MaxNetworkSent float64 `json:"nsm,omitempty" cbor:"18,keyasint,omitempty"`
|
MaxNetworkSent float64 `json:"nsm,omitempty" cbor:"-"`
|
||||||
MaxNetworkRecv float64 `json:"nrm,omitempty" cbor:"19,keyasint,omitempty"`
|
MaxNetworkRecv float64 `json:"nrm,omitempty" cbor:"-"`
|
||||||
Temperatures map[string]float64 `json:"t,omitempty" cbor:"20,keyasint,omitempty"`
|
Temperatures map[string]float64 `json:"t,omitempty" cbor:"20,keyasint,omitempty"`
|
||||||
ExtraFs map[string]*FsStats `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
|
ExtraFs map[string]*FsStats `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
|
||||||
GPUData map[string]GPUData `json:"g,omitempty" cbor:"22,keyasint,omitempty"`
|
GPUData map[string]GPUData `json:"g,omitempty" cbor:"22,keyasint,omitempty"`
|
||||||
LoadAvg1 float64 `json:"l1,omitempty" cbor:"23,keyasint,omitempty"`
|
// LoadAvg1 float64 `json:"l1,omitempty" cbor:"23,keyasint,omitempty"`
|
||||||
LoadAvg5 float64 `json:"l5,omitempty" cbor:"24,keyasint,omitempty"`
|
// LoadAvg5 float64 `json:"l5,omitempty" cbor:"24,keyasint,omitempty"`
|
||||||
LoadAvg15 float64 `json:"l15,omitempty" cbor:"25,keyasint,omitempty"`
|
// LoadAvg15 float64 `json:"l15,omitempty" cbor:"25,keyasint,omitempty"`
|
||||||
Bandwidth [2]uint64 `json:"b,omitzero" cbor:"26,keyasint,omitzero"` // [sent bytes, recv bytes]
|
Bandwidth [2]uint64 `json:"b,omitzero" cbor:"26,keyasint,omitzero"` // [sent bytes, recv bytes]
|
||||||
MaxBandwidth [2]uint64 `json:"bm,omitzero" cbor:"27,keyasint,omitzero"` // [sent bytes, recv bytes]
|
MaxBandwidth [2]uint64 `json:"bm,omitzero" cbor:"-"` // [sent bytes, recv bytes]
|
||||||
// TODO: remove other load fields in future release in favor of load avg array
|
// TODO: remove other load fields in future release in favor of load avg array
|
||||||
LoadAvg [3]float64 `json:"la,omitempty" cbor:"28,keyasint"`
|
LoadAvg [3]float64 `json:"la,omitempty" cbor:"28,keyasint"`
|
||||||
Battery [2]uint8 `json:"bat,omitzero" cbor:"29,keyasint,omitzero"` // [percent, charge state, current]
|
Battery [2]uint8 `json:"bat,omitzero" cbor:"29,keyasint,omitzero"` // [percent, charge state, current]
|
||||||
MaxMem float64 `json:"mm,omitempty" cbor:"30,keyasint,omitempty"`
|
|
||||||
NetworkInterfaces map[string][4]uint64 `json:"ni,omitempty" cbor:"31,keyasint,omitempty"` // [upload bytes, download bytes, total upload, total download]
|
NetworkInterfaces map[string][4]uint64 `json:"ni,omitempty" cbor:"31,keyasint,omitempty"` // [upload bytes, download bytes, total upload, total download]
|
||||||
DiskIO [2]uint64 `json:"dio,omitzero" cbor:"32,keyasint,omitzero"` // [read bytes, write bytes]
|
DiskIO [2]uint64 `json:"dio,omitzero" cbor:"32,keyasint,omitzero"` // [read bytes, write bytes]
|
||||||
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
|
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
|
||||||
CpuBreakdown []float64 `json:"cpub,omitempty" cbor:"33,keyasint,omitempty"` // [user, system, iowait, steal, idle]
|
CpuBreakdown []float64 `json:"cpub,omitempty" cbor:"33,keyasint,omitempty"` // [user, system, iowait, steal, idle]
|
||||||
CpuCoresUsage Uint8Slice `json:"cpus,omitempty" cbor:"34,keyasint,omitempty"` // per-core busy usage [CPU0..]
|
CpuCoresUsage Uint8Slice `json:"cpus,omitempty" cbor:"34,keyasint,omitempty"` // per-core busy usage [CPU0..]
|
||||||
|
DiskIoStats [6]float64 `json:"dios,omitzero" cbor:"35,keyasint,omitzero"` // [read time %, write time %, io utilization %, r_await ms, w_await ms, weighted io %]
|
||||||
|
MaxDiskIoStats [6]float64 `json:"diosm,omitzero" cbor:"-"` // max values for DiskIoStats
|
||||||
}
|
}
|
||||||
|
|
||||||
// Uint8Slice wraps []uint8 to customize JSON encoding while keeping CBOR efficient.
|
// Uint8Slice wraps []uint8 to customize JSON encoding while keeping CBOR efficient.
|
||||||
@@ -90,13 +92,15 @@ type FsStats struct {
|
|||||||
TotalWrite uint64 `json:"-"`
|
TotalWrite uint64 `json:"-"`
|
||||||
DiskReadPs float64 `json:"r" cbor:"2,keyasint"`
|
DiskReadPs float64 `json:"r" cbor:"2,keyasint"`
|
||||||
DiskWritePs float64 `json:"w" cbor:"3,keyasint"`
|
DiskWritePs float64 `json:"w" cbor:"3,keyasint"`
|
||||||
MaxDiskReadPS float64 `json:"rm,omitempty" cbor:"4,keyasint,omitempty"`
|
MaxDiskReadPS float64 `json:"rm,omitempty" cbor:"-"`
|
||||||
MaxDiskWritePS float64 `json:"wm,omitempty" cbor:"5,keyasint,omitempty"`
|
MaxDiskWritePS float64 `json:"wm,omitempty" cbor:"-"`
|
||||||
// TODO: remove DiskReadPs and DiskWritePs in future release in favor of DiskReadBytes and DiskWriteBytes
|
// TODO: remove DiskReadPs and DiskWritePs in future release in favor of DiskReadBytes and DiskWriteBytes
|
||||||
DiskReadBytes uint64 `json:"rb" cbor:"6,keyasint,omitempty"`
|
DiskReadBytes uint64 `json:"rb" cbor:"6,keyasint,omitempty"`
|
||||||
DiskWriteBytes uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
|
DiskWriteBytes uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
|
||||||
MaxDiskReadBytes uint64 `json:"rbm,omitempty" cbor:"-"`
|
MaxDiskReadBytes uint64 `json:"rbm,omitempty" cbor:"-"`
|
||||||
MaxDiskWriteBytes uint64 `json:"wbm,omitempty" cbor:"-"`
|
MaxDiskWriteBytes uint64 `json:"wbm,omitempty" cbor:"-"`
|
||||||
|
DiskIoStats [6]float64 `json:"dios,omitzero" cbor:"8,keyasint,omitzero"` // [read time %, write time %, io utilization %, r_await ms, w_await ms, weighted io %]
|
||||||
|
MaxDiskIoStats [6]float64 `json:"diosm,omitzero" cbor:"-"` // max values for DiskIoStats
|
||||||
}
|
}
|
||||||
|
|
||||||
type NetIoStats struct {
|
type NetIoStats struct {
|
||||||
@@ -135,17 +139,17 @@ type Info struct {
|
|||||||
Cpu float64 `json:"cpu" cbor:"6,keyasint"`
|
Cpu float64 `json:"cpu" cbor:"6,keyasint"`
|
||||||
MemPct float64 `json:"mp" cbor:"7,keyasint"`
|
MemPct float64 `json:"mp" cbor:"7,keyasint"`
|
||||||
DiskPct float64 `json:"dp" cbor:"8,keyasint"`
|
DiskPct float64 `json:"dp" cbor:"8,keyasint"`
|
||||||
Bandwidth float64 `json:"b" cbor:"9,keyasint"`
|
Bandwidth float64 `json:"b,omitzero" cbor:"9,keyasint"` // deprecated in favor of BandwidthBytes
|
||||||
AgentVersion string `json:"v" cbor:"10,keyasint"`
|
AgentVersion string `json:"v" cbor:"10,keyasint"`
|
||||||
Podman bool `json:"p,omitempty" cbor:"11,keyasint,omitempty"` // deprecated - moved to Details struct
|
Podman bool `json:"p,omitempty" cbor:"11,keyasint,omitempty"` // deprecated - moved to Details struct
|
||||||
GpuPct float64 `json:"g,omitempty" cbor:"12,keyasint,omitempty"`
|
GpuPct float64 `json:"g,omitempty" cbor:"12,keyasint,omitempty"`
|
||||||
DashboardTemp float64 `json:"dt,omitempty" cbor:"13,keyasint,omitempty"`
|
DashboardTemp float64 `json:"dt,omitempty" cbor:"13,keyasint,omitempty"`
|
||||||
Os Os `json:"os,omitempty" cbor:"14,keyasint,omitempty"` // deprecated - moved to Details struct
|
Os Os `json:"os,omitempty" cbor:"14,keyasint,omitempty"` // deprecated - moved to Details struct
|
||||||
LoadAvg1 float64 `json:"l1,omitempty" cbor:"15,keyasint,omitempty"` // deprecated - use `la` array instead
|
// LoadAvg1 float64 `json:"l1,omitempty" cbor:"15,keyasint,omitempty"` // deprecated - use `la` array instead
|
||||||
LoadAvg5 float64 `json:"l5,omitempty" cbor:"16,keyasint,omitempty"` // deprecated - use `la` array instead
|
// LoadAvg5 float64 `json:"l5,omitempty" cbor:"16,keyasint,omitempty"` // deprecated - use `la` array instead
|
||||||
LoadAvg15 float64 `json:"l15,omitempty" cbor:"17,keyasint,omitempty"` // deprecated - use `la` array instead
|
// LoadAvg15 float64 `json:"l15,omitempty" cbor:"17,keyasint,omitempty"` // deprecated - use `la` array instead
|
||||||
BandwidthBytes uint64 `json:"bb" cbor:"18,keyasint"`
|
|
||||||
|
|
||||||
|
BandwidthBytes uint64 `json:"bb" cbor:"18,keyasint"`
|
||||||
LoadAvg [3]float64 `json:"la,omitempty" cbor:"19,keyasint"`
|
LoadAvg [3]float64 `json:"la,omitempty" cbor:"19,keyasint"`
|
||||||
ConnectionType ConnectionType `json:"ct,omitempty" cbor:"20,keyasint,omitempty,omitzero"`
|
ConnectionType ConnectionType `json:"ct,omitempty" cbor:"20,keyasint,omitempty,omitzero"`
|
||||||
ExtraFsPct map[string]float64 `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
|
ExtraFsPct map[string]float64 `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ func ColorPrint(color, text string) {
|
|||||||
fmt.Println(color + text + colorReset)
|
fmt.Println(color + text + colorReset)
|
||||||
}
|
}
|
||||||
|
|
||||||
func ColorPrintf(color, format string, args ...interface{}) {
|
func ColorPrintf(color, format string, args ...any) {
|
||||||
fmt.Printf(color+format+colorReset+"\n", args...)
|
fmt.Printf(color+format+colorReset+"\n", args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -110,21 +110,13 @@ func (p *updater) update() (updated bool, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var latest *release
|
var latest *release
|
||||||
var useMirror bool
|
|
||||||
|
|
||||||
// Determine the API endpoint based on UseMirror flag
|
apiURL := getApiURL(p.config.UseMirror, p.config.Owner, p.config.Repo)
|
||||||
apiURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", p.config.Owner, p.config.Repo)
|
|
||||||
if p.config.UseMirror {
|
if p.config.UseMirror {
|
||||||
useMirror = true
|
|
||||||
apiURL = fmt.Sprintf("https://gh.beszel.dev/repos/%s/%s/releases/latest?api=true", p.config.Owner, p.config.Repo)
|
|
||||||
ColorPrint(ColorYellow, "Using mirror for update.")
|
ColorPrint(ColorYellow, "Using mirror for update.")
|
||||||
}
|
}
|
||||||
|
|
||||||
latest, err = fetchLatestRelease(
|
latest, err = FetchLatestRelease(p.config.Context, p.config.HttpClient, apiURL)
|
||||||
p.config.Context,
|
|
||||||
p.config.HttpClient,
|
|
||||||
apiURL,
|
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -150,7 +142,7 @@ func (p *updater) update() (updated bool, err error) {
|
|||||||
|
|
||||||
// download the release asset
|
// download the release asset
|
||||||
assetPath := filepath.Join(releaseDir, asset.Name)
|
assetPath := filepath.Join(releaseDir, asset.Name)
|
||||||
if err := downloadFile(p.config.Context, p.config.HttpClient, asset.DownloadUrl, assetPath, useMirror); err != nil {
|
if err := downloadFile(p.config.Context, p.config.HttpClient, asset.DownloadUrl, assetPath, p.config.UseMirror); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -226,11 +218,11 @@ func (p *updater) update() (updated bool, err error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func fetchLatestRelease(
|
func FetchLatestRelease(ctx context.Context, client HttpClient, url string) (*release, error) {
|
||||||
ctx context.Context,
|
if url == "" {
|
||||||
client HttpClient,
|
url = getApiURL(false, "henrygd", "beszel")
|
||||||
url string,
|
}
|
||||||
) (*release, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -375,3 +367,10 @@ func isGlibc() bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getApiURL(useMirror bool, owner, repo string) string {
|
||||||
|
if useMirror {
|
||||||
|
return fmt.Sprintf("https://gh.beszel.dev/repos/%s/%s/releases/latest?api=true", owner, repo)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", owner, repo)
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package hub
|
package hub
|
||||||
|
|
||||||
@@ -10,6 +9,7 @@ import (
|
|||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
@@ -32,7 +32,27 @@ func createTestHub(t testing.TB) (*Hub, *pbtests.TestApp, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
return NewHub(testApp), testApp, nil
|
return NewHub(testApp), testApp, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// cleanupTestHub stops background system goroutines before tearing down the app.
|
||||||
|
func cleanupTestHub(hub *Hub, testApp *pbtests.TestApp) {
|
||||||
|
if hub != nil {
|
||||||
|
sm := hub.GetSystemManager()
|
||||||
|
sm.RemoveAllSystems()
|
||||||
|
// Give updater goroutines a brief window to observe cancellation before DB teardown.
|
||||||
|
for range 20 {
|
||||||
|
if sm.GetSystemCount() == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
runtime.Gosched()
|
||||||
|
time.Sleep(5 * time.Millisecond)
|
||||||
|
}
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
}
|
||||||
|
if testApp != nil {
|
||||||
|
testApp.Cleanup()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Helper function to create a test record
|
// Helper function to create a test record
|
||||||
@@ -64,7 +84,7 @@ func TestValidateAgentHeaders(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer testApp.Cleanup()
|
defer cleanupTestHub(hub, testApp)
|
||||||
|
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -145,7 +165,7 @@ func TestGetAllFingerprintRecordsByToken(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer testApp.Cleanup()
|
defer cleanupTestHub(hub, testApp)
|
||||||
|
|
||||||
// create test user
|
// create test user
|
||||||
userRecord, err := createTestUser(testApp)
|
userRecord, err := createTestUser(testApp)
|
||||||
@@ -235,7 +255,7 @@ func TestSetFingerprint(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer testApp.Cleanup()
|
defer cleanupTestHub(hub, testApp)
|
||||||
|
|
||||||
// Create test user
|
// Create test user
|
||||||
userRecord, err := createTestUser(testApp)
|
userRecord, err := createTestUser(testApp)
|
||||||
@@ -315,7 +335,7 @@ func TestCreateSystemFromAgentData(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer testApp.Cleanup()
|
defer cleanupTestHub(hub, testApp)
|
||||||
|
|
||||||
// Create test user
|
// Create test user
|
||||||
userRecord, err := createTestUser(testApp)
|
userRecord, err := createTestUser(testApp)
|
||||||
@@ -425,7 +445,7 @@ func TestUniversalTokenFlow(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer testApp.Cleanup()
|
defer cleanupTestHub(nil, testApp)
|
||||||
|
|
||||||
// Create test user
|
// Create test user
|
||||||
userRecord, err := createTestUser(testApp)
|
userRecord, err := createTestUser(testApp)
|
||||||
@@ -493,7 +513,7 @@ func TestAgentConnect(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer testApp.Cleanup()
|
defer cleanupTestHub(hub, testApp)
|
||||||
|
|
||||||
// Create test user
|
// Create test user
|
||||||
userRecord, err := createTestUser(testApp)
|
userRecord, err := createTestUser(testApp)
|
||||||
@@ -652,7 +672,7 @@ func TestHandleAgentConnect(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer testApp.Cleanup()
|
defer cleanupTestHub(hub, testApp)
|
||||||
|
|
||||||
// Create test user
|
// Create test user
|
||||||
userRecord, err := createTestUser(testApp)
|
userRecord, err := createTestUser(testApp)
|
||||||
@@ -737,7 +757,7 @@ func TestAgentWebSocketIntegration(t *testing.T) {
|
|||||||
// Create hub and test app
|
// Create hub and test app
|
||||||
hub, testApp, err := createTestHub(t)
|
hub, testApp, err := createTestHub(t)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer testApp.Cleanup()
|
defer cleanupTestHub(hub, testApp)
|
||||||
|
|
||||||
// Get the hub's SSH key
|
// Get the hub's SSH key
|
||||||
hubSigner, err := hub.GetSSHKey("")
|
hubSigner, err := hub.GetSSHKey("")
|
||||||
@@ -877,12 +897,8 @@ func TestAgentWebSocketIntegration(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Set up environment variables for the agent
|
// Set up environment variables for the agent
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", tc.agentToken)
|
t.Setenv("BESZEL_AGENT_TOKEN", tc.agentToken)
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Start agent in background
|
// Start agent in background
|
||||||
done := make(chan error, 1)
|
done := make(chan error, 1)
|
||||||
@@ -897,7 +913,7 @@ func TestAgentWebSocketIntegration(t *testing.T) {
|
|||||||
|
|
||||||
// Wait for connection result
|
// Wait for connection result
|
||||||
maxWait := 2 * time.Second
|
maxWait := 2 * time.Second
|
||||||
time.Sleep(20 * time.Millisecond)
|
time.Sleep(40 * time.Millisecond)
|
||||||
checkInterval := 20 * time.Millisecond
|
checkInterval := 20 * time.Millisecond
|
||||||
timeout := time.After(maxWait)
|
timeout := time.After(maxWait)
|
||||||
ticker := time.Tick(checkInterval)
|
ticker := time.Tick(checkInterval)
|
||||||
@@ -942,6 +958,8 @@ func TestAgentWebSocketIntegration(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
|
||||||
// Verify fingerprint state by re-reading the specific record
|
// Verify fingerprint state by re-reading the specific record
|
||||||
updatedFingerprintRecord, err := testApp.FindRecordById("fingerprints", fingerprintRecord.Id)
|
updatedFingerprintRecord, err := testApp.FindRecordById("fingerprints", fingerprintRecord.Id)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -976,7 +994,7 @@ func TestMultipleSystemsWithSameUniversalToken(t *testing.T) {
|
|||||||
// Create hub and test app
|
// Create hub and test app
|
||||||
hub, testApp, err := createTestHub(t)
|
hub, testApp, err := createTestHub(t)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer testApp.Cleanup()
|
defer cleanupTestHub(hub, testApp)
|
||||||
|
|
||||||
// Get the hub's SSH key
|
// Get the hub's SSH key
|
||||||
hubSigner, err := hub.GetSSHKey("")
|
hubSigner, err := hub.GetSSHKey("")
|
||||||
@@ -1058,12 +1076,8 @@ func TestMultipleSystemsWithSameUniversalToken(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Set up environment variables for the agent
|
// Set up environment variables for the agent
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", universalToken)
|
t.Setenv("BESZEL_AGENT_TOKEN", universalToken)
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Count systems before connection
|
// Count systems before connection
|
||||||
systemsBefore, err := testApp.FindRecordsByFilter("systems", "users ~ {:userId}", "", -1, 0, map[string]any{"userId": userRecord.Id})
|
systemsBefore, err := testApp.FindRecordsByFilter("systems", "users ~ {:userId}", "", -1, 0, map[string]any{"userId": userRecord.Id})
|
||||||
@@ -1144,6 +1158,8 @@ func TestMultipleSystemsWithSameUniversalToken(t *testing.T) {
|
|||||||
assert.Equal(t, systemCount, systemsAfterCount, "Total system count should remain the same")
|
assert.Equal(t, systemCount, systemsAfterCount, "Total system count should remain the same")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
|
||||||
// Verify that a fingerprint record exists for this fingerprint
|
// Verify that a fingerprint record exists for this fingerprint
|
||||||
fingerprints, err := testApp.FindRecordsByFilter("fingerprints", "token = {:token} && fingerprint = {:fingerprint}", "", -1, 0, map[string]any{
|
fingerprints, err := testApp.FindRecordsByFilter("fingerprints", "token = {:token} && fingerprint = {:fingerprint}", "", -1, 0, map[string]any{
|
||||||
"token": universalToken,
|
"token": universalToken,
|
||||||
@@ -1176,7 +1192,7 @@ func TestPermanentUniversalTokenFromDB(t *testing.T) {
|
|||||||
// Create hub and test app
|
// Create hub and test app
|
||||||
hub, testApp, err := createTestHub(t)
|
hub, testApp, err := createTestHub(t)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer testApp.Cleanup()
|
defer cleanupTestHub(hub, testApp)
|
||||||
|
|
||||||
// Get the hub's SSH key
|
// Get the hub's SSH key
|
||||||
hubSigner, err := hub.GetSSHKey("")
|
hubSigner, err := hub.GetSSHKey("")
|
||||||
@@ -1219,12 +1235,8 @@ func TestPermanentUniversalTokenFromDB(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Set up environment variables for the agent
|
// Set up environment variables for the agent
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", universalToken)
|
t.Setenv("BESZEL_AGENT_TOKEN", universalToken)
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Start agent in background
|
// Start agent in background
|
||||||
done := make(chan error, 1)
|
done := make(chan error, 1)
|
||||||
@@ -1273,7 +1285,7 @@ verify:
|
|||||||
func TestFindOrCreateSystemForToken(t *testing.T) {
|
func TestFindOrCreateSystemForToken(t *testing.T) {
|
||||||
hub, testApp, err := createTestHub(t)
|
hub, testApp, err := createTestHub(t)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
defer testApp.Cleanup()
|
defer cleanupTestHub(hub, testApp)
|
||||||
|
|
||||||
// Create test user
|
// Create test user
|
||||||
userRecord, err := createTestUser(testApp)
|
userRecord, err := createTestUser(testApp)
|
||||||
|
|||||||
391
internal/hub/api.go
Normal file
391
internal/hub/api.go
Normal file
@@ -0,0 +1,391 @@
|
|||||||
|
package hub
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/blang/semver"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
|
"github.com/henrygd/beszel/internal/ghupdate"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/config"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/systems"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/apis"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UpdateInfo holds information about the latest update check
|
||||||
|
type UpdateInfo struct {
|
||||||
|
lastCheck time.Time
|
||||||
|
Version string `json:"v"`
|
||||||
|
Url string `json:"url"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var containerIDPattern = regexp.MustCompile(`^[a-fA-F0-9]{12,64}$`)
|
||||||
|
|
||||||
|
// Middleware to allow only admin role users
|
||||||
|
var requireAdminRole = customAuthMiddleware(func(e *core.RequestEvent) bool {
|
||||||
|
return e.Auth.GetString("role") == "admin"
|
||||||
|
})
|
||||||
|
|
||||||
|
// Middleware to exclude readonly users
|
||||||
|
var excludeReadOnlyRole = customAuthMiddleware(func(e *core.RequestEvent) bool {
|
||||||
|
return e.Auth.GetString("role") != "readonly"
|
||||||
|
})
|
||||||
|
|
||||||
|
// customAuthMiddleware handles boilerplate for custom authentication middlewares. fn should
|
||||||
|
// return true if the request is allowed, false otherwise. e.Auth is guaranteed to be non-nil.
|
||||||
|
func customAuthMiddleware(fn func(*core.RequestEvent) bool) func(*core.RequestEvent) error {
|
||||||
|
return func(e *core.RequestEvent) error {
|
||||||
|
if e.Auth == nil {
|
||||||
|
return e.UnauthorizedError("The request requires valid record authorization token.", nil)
|
||||||
|
}
|
||||||
|
if !fn(e) {
|
||||||
|
return e.ForbiddenError("The authorized record is not allowed to perform this action.", nil)
|
||||||
|
}
|
||||||
|
return e.Next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// registerMiddlewares registers custom middlewares
|
||||||
|
func (h *Hub) registerMiddlewares(se *core.ServeEvent) {
|
||||||
|
// authorizes request with user matching the provided email
|
||||||
|
authorizeRequestWithEmail := func(e *core.RequestEvent, email string) (err error) {
|
||||||
|
if e.Auth != nil || email == "" {
|
||||||
|
return e.Next()
|
||||||
|
}
|
||||||
|
isAuthRefresh := e.Request.URL.Path == "/api/collections/users/auth-refresh" && e.Request.Method == http.MethodPost
|
||||||
|
e.Auth, err = e.App.FindAuthRecordByEmail("users", email)
|
||||||
|
if err != nil || !isAuthRefresh {
|
||||||
|
return e.Next()
|
||||||
|
}
|
||||||
|
// auth refresh endpoint, make sure token is set in header
|
||||||
|
token, _ := e.Auth.NewAuthToken()
|
||||||
|
e.Request.Header.Set("Authorization", token)
|
||||||
|
return e.Next()
|
||||||
|
}
|
||||||
|
// authenticate with trusted header
|
||||||
|
if autoLogin, _ := utils.GetEnv("AUTO_LOGIN"); autoLogin != "" {
|
||||||
|
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
||||||
|
return authorizeRequestWithEmail(e, autoLogin)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
// authenticate with trusted header
|
||||||
|
if trustedHeader, _ := utils.GetEnv("TRUSTED_AUTH_HEADER"); trustedHeader != "" {
|
||||||
|
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
||||||
|
return authorizeRequestWithEmail(e, e.Request.Header.Get(trustedHeader))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// registerApiRoutes registers custom API routes
|
||||||
|
func (h *Hub) registerApiRoutes(se *core.ServeEvent) error {
|
||||||
|
// auth protected routes
|
||||||
|
apiAuth := se.Router.Group("/api/beszel")
|
||||||
|
apiAuth.Bind(apis.RequireAuth())
|
||||||
|
// auth optional routes
|
||||||
|
apiNoAuth := se.Router.Group("/api/beszel")
|
||||||
|
|
||||||
|
// create first user endpoint only needed if no users exist
|
||||||
|
if totalUsers, _ := se.App.CountRecords("users"); totalUsers == 0 {
|
||||||
|
apiNoAuth.POST("/create-user", h.um.CreateFirstUser)
|
||||||
|
}
|
||||||
|
// check if first time setup on login page
|
||||||
|
apiNoAuth.GET("/first-run", func(e *core.RequestEvent) error {
|
||||||
|
total, err := e.App.CountRecords("users")
|
||||||
|
return e.JSON(http.StatusOK, map[string]bool{"firstRun": err == nil && total == 0})
|
||||||
|
})
|
||||||
|
// get public key and version
|
||||||
|
apiAuth.GET("/info", h.getInfo)
|
||||||
|
apiAuth.GET("/getkey", h.getInfo) // deprecated - keep for compatibility w/ integrations
|
||||||
|
// check for updates
|
||||||
|
if optIn, _ := utils.GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||||
|
var updateInfo UpdateInfo
|
||||||
|
apiAuth.GET("/update", updateInfo.getUpdate)
|
||||||
|
}
|
||||||
|
// send test notification
|
||||||
|
apiAuth.POST("/test-notification", h.SendTestNotification)
|
||||||
|
// heartbeat status and test
|
||||||
|
apiAuth.GET("/heartbeat-status", h.getHeartbeatStatus).BindFunc(requireAdminRole)
|
||||||
|
apiAuth.POST("/test-heartbeat", h.testHeartbeat).BindFunc(requireAdminRole)
|
||||||
|
// get config.yml content
|
||||||
|
apiAuth.GET("/config-yaml", config.GetYamlConfig).BindFunc(requireAdminRole)
|
||||||
|
// handle agent websocket connection
|
||||||
|
apiNoAuth.GET("/agent-connect", h.handleAgentConnect)
|
||||||
|
// get or create universal tokens
|
||||||
|
apiAuth.GET("/universal-token", h.getUniversalToken).BindFunc(excludeReadOnlyRole)
|
||||||
|
// update / delete user alerts
|
||||||
|
apiAuth.POST("/user-alerts", alerts.UpsertUserAlerts)
|
||||||
|
apiAuth.DELETE("/user-alerts", alerts.DeleteUserAlerts)
|
||||||
|
// refresh SMART devices for a system
|
||||||
|
apiAuth.POST("/smart/refresh", h.refreshSmartData).BindFunc(excludeReadOnlyRole)
|
||||||
|
// get systemd service details
|
||||||
|
apiAuth.GET("/systemd/info", h.getSystemdInfo)
|
||||||
|
// /containers routes
|
||||||
|
if enabled, _ := utils.GetEnv("CONTAINER_DETAILS"); enabled != "false" {
|
||||||
|
// get container logs
|
||||||
|
apiAuth.GET("/containers/logs", h.getContainerLogs)
|
||||||
|
// get container info
|
||||||
|
apiAuth.GET("/containers/info", h.getContainerInfo)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getInfo returns data needed by authenticated users, such as the public key and current version
|
||||||
|
func (h *Hub) getInfo(e *core.RequestEvent) error {
|
||||||
|
type infoResponse struct {
|
||||||
|
Key string `json:"key"`
|
||||||
|
Version string `json:"v"`
|
||||||
|
CheckUpdate bool `json:"cu"`
|
||||||
|
}
|
||||||
|
info := infoResponse{
|
||||||
|
Key: h.pubKey,
|
||||||
|
Version: beszel.Version,
|
||||||
|
}
|
||||||
|
if optIn, _ := utils.GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||||
|
info.CheckUpdate = true
|
||||||
|
}
|
||||||
|
return e.JSON(http.StatusOK, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getUpdate checks for the latest release on GitHub and returns update info if a newer version is available
|
||||||
|
func (info *UpdateInfo) getUpdate(e *core.RequestEvent) error {
|
||||||
|
if time.Since(info.lastCheck) < 6*time.Hour {
|
||||||
|
return e.JSON(http.StatusOK, info)
|
||||||
|
}
|
||||||
|
info.lastCheck = time.Now()
|
||||||
|
latestRelease, err := ghupdate.FetchLatestRelease(context.Background(), http.DefaultClient, "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
currentVersion, err := semver.Parse(strings.TrimPrefix(beszel.Version, "v"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
latestVersion, err := semver.Parse(strings.TrimPrefix(latestRelease.Tag, "v"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if latestVersion.GT(currentVersion) {
|
||||||
|
info.Version = strings.TrimPrefix(latestRelease.Tag, "v")
|
||||||
|
info.Url = latestRelease.Url
|
||||||
|
}
|
||||||
|
return e.JSON(http.StatusOK, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUniversalToken handles the universal token API endpoint (create, read, delete)
|
||||||
|
// getUniversalToken returns, enables, or disables the universal token for
// the authenticated user. A universal token lets agents register themselves
// with the hub without a per-system token.
//
// Query parameters:
//   - token:     explicit token value (optional; a UUID is generated when empty)
//   - enable:    "1" to activate a token, "0" to deactivate all of the user's tokens
//   - permanent: "1" (with enable=1) to persist the token across hub restarts
//
// With no enable parameter, the handler only reports the current token state.
// Superusers are rejected because universal tokens are tied to a regular user
// record.
func (h *Hub) getUniversalToken(e *core.RequestEvent) error {
	if e.Auth.IsSuperuser() {
		return e.ForbiddenError("Superusers cannot use universal tokens", nil)
	}

	// Ephemeral tokens live in this in-memory map (keyed by token value);
	// permanent tokens are stored in the "universal_tokens" collection.
	tokenMap := universalTokenMap.GetMap()
	userID := e.Auth.Id
	query := e.Request.URL.Query()
	token := query.Get("token")
	enable := query.Get("enable")
	permanent := query.Get("permanent")

	// helper for deleting any existing permanent token record for this user
	deletePermanent := func() error {
		rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
		if err != nil {
			return nil // no record
		}
		return h.Delete(rec)
	}

	// helper for upserting a permanent token record for this user
	upsertPermanent := func(token string) error {
		rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
		if err == nil {
			// existing record: only the token value changes
			rec.Set("token", token)
			return h.Save(rec)
		}

		// no record yet: create one in the universal_tokens collection
		col, err := h.FindCachedCollectionByNameOrId("universal_tokens")
		if err != nil {
			return err
		}
		newRec := core.NewRecord(col)
		newRec.Set("user", userID)
		newRec.Set("token", token)
		return h.Save(newRec)
	}

	// Disable universal tokens (both ephemeral and permanent)
	if enable == "0" {
		tokenMap.RemovebyValue(userID)
		_ = deletePermanent() // best-effort; a missing record is not an error
		return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
	}

	// Enable universal token (ephemeral or permanent)
	if enable == "1" {
		if token == "" {
			token = uuid.New().String()
		}

		if permanent == "1" {
			// make token permanent (persist across restarts)
			tokenMap.RemovebyValue(userID)
			if err := upsertPermanent(token); err != nil {
				return err
			}
			return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": true})
		}

		// default: ephemeral mode (1 hour)
		// switching to ephemeral drops any existing permanent record
		_ = deletePermanent()
		tokenMap.Set(token, userID, time.Hour)
		return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
	}

	// Read current state
	// Prefer permanent token if it exists.
	if rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID}); err == nil {
		dbToken := rec.GetString("token")
		// If no token was provided, or the caller is asking about their permanent token, return it.
		if token == "" || token == dbToken {
			return e.JSON(http.StatusOK, map[string]any{"token": dbToken, "active": true, "permanent": true})
		}
		// Token doesn't match their permanent token (avoid leaking other info)
		return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
	}

	// No permanent token; fall back to ephemeral token map.
	if token == "" {
		// return existing token if it exists
		if token, _, ok := tokenMap.GetByValue(userID); ok {
			return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
		}
		// if no token is provided, generate a new one
		token = uuid.New().String()
	}

	// Token is considered active only if it belongs to the current user.
	activeUser, ok := tokenMap.GetOk(token)
	active := ok && activeUser == userID
	response := map[string]any{"token": token, "active": active, "permanent": false}
	return e.JSON(http.StatusOK, response)
}
|
||||||
|
|
||||||
|
// getHeartbeatStatus returns current heartbeat configuration and whether it's enabled
|
||||||
|
func (h *Hub) getHeartbeatStatus(e *core.RequestEvent) error {
|
||||||
|
if h.hb == nil {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{
|
||||||
|
"enabled": false,
|
||||||
|
"msg": "Set HEARTBEAT_URL to enable outbound heartbeat monitoring",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
cfg := h.hb.GetConfig()
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{
|
||||||
|
"enabled": true,
|
||||||
|
"url": cfg.URL,
|
||||||
|
"interval": cfg.Interval,
|
||||||
|
"method": cfg.Method,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// testHeartbeat triggers a single heartbeat ping and returns the result
|
||||||
|
func (h *Hub) testHeartbeat(e *core.RequestEvent) error {
|
||||||
|
if h.hb == nil {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{
|
||||||
|
"err": "Heartbeat not configured. Set HEARTBEAT_URL environment variable.",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err := h.hb.Send(); err != nil {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"err": err.Error()})
|
||||||
|
}
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"err": false})
|
||||||
|
}
|
||||||
|
|
||||||
|
// containerRequestHandler handles both container logs and info requests
|
||||||
|
func (h *Hub) containerRequestHandler(e *core.RequestEvent, fetchFunc func(*systems.System, string) (string, error), responseKey string) error {
|
||||||
|
systemID := e.Request.URL.Query().Get("system")
|
||||||
|
containerID := e.Request.URL.Query().Get("container")
|
||||||
|
|
||||||
|
if systemID == "" || containerID == "" || !containerIDPattern.MatchString(containerID) {
|
||||||
|
return e.BadRequestError("Invalid system or container parameter", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||||
|
return e.NotFoundError("", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := fetchFunc(system, containerID)
|
||||||
|
if err != nil {
|
||||||
|
return e.InternalServerError("", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.JSON(http.StatusOK, map[string]string{responseKey: data})
|
||||||
|
}
|
||||||
|
|
||||||
|
// getContainerLogs handles GET /api/beszel/containers/logs requests
|
||||||
|
func (h *Hub) getContainerLogs(e *core.RequestEvent) error {
|
||||||
|
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
||||||
|
return system.FetchContainerLogsFromAgent(containerID)
|
||||||
|
}, "logs")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Hub) getContainerInfo(e *core.RequestEvent) error {
|
||||||
|
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
||||||
|
return system.FetchContainerInfoFromAgent(containerID)
|
||||||
|
}, "info")
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSystemdInfo handles GET /api/beszel/systemd/info requests
|
||||||
|
func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
||||||
|
query := e.Request.URL.Query()
|
||||||
|
systemID := query.Get("system")
|
||||||
|
serviceName := query.Get("service")
|
||||||
|
|
||||||
|
if systemID == "" || serviceName == "" {
|
||||||
|
return e.BadRequestError("Invalid system or service parameter", nil)
|
||||||
|
}
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||||
|
return e.NotFoundError("", nil)
|
||||||
|
}
|
||||||
|
// verify service exists before fetching details
|
||||||
|
_, err = e.App.FindFirstRecordByFilter("systemd_services", "system = {:system} && name = {:name}", dbx.Params{
|
||||||
|
"system": systemID,
|
||||||
|
"name": serviceName,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return e.NotFoundError("", err)
|
||||||
|
}
|
||||||
|
details, err := system.FetchSystemdInfoFromAgent(serviceName)
|
||||||
|
if err != nil {
|
||||||
|
return e.InternalServerError("", err)
|
||||||
|
}
|
||||||
|
e.Response.Header().Set("Cache-Control", "public, max-age=60")
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"details": details})
|
||||||
|
}
|
||||||
|
|
||||||
|
// refreshSmartData handles POST /api/beszel/smart/refresh requests
|
||||||
|
// Fetches fresh SMART data from the agent and updates the collection
|
||||||
|
func (h *Hub) refreshSmartData(e *core.RequestEvent) error {
|
||||||
|
systemID := e.Request.URL.Query().Get("system")
|
||||||
|
if systemID == "" {
|
||||||
|
return e.BadRequestError("Invalid system parameter", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||||
|
return e.NotFoundError("", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := system.FetchAndSaveSmartDevices(); err != nil {
|
||||||
|
return e.InternalServerError("", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.JSON(http.StatusOK, map[string]string{"status": "ok"})
|
||||||
|
}
|
||||||
972
internal/hub/api_test.go
Normal file
972
internal/hub/api_test.go
Normal file
@@ -0,0 +1,972 @@
|
|||||||
|
package hub_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/migrations"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
||||||
|
func jsonReader(v any) io.Reader {
|
||||||
|
data, err := json.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return bytes.NewReader(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestApiRoutesAuthentication(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
userToken, err := user.NewAuthToken()
|
||||||
|
require.NoError(t, err, "Failed to create auth token")
|
||||||
|
|
||||||
|
// Create test user and get auth token
|
||||||
|
user2, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
||||||
|
require.NoError(t, err, "Failed to create test user")
|
||||||
|
user2Token, err := user2.NewAuthToken()
|
||||||
|
require.NoError(t, err, "Failed to create user2 auth token")
|
||||||
|
|
||||||
|
adminUser, err := beszelTests.CreateUserWithRole(hub, "admin@example.com", "password123", "admin")
|
||||||
|
require.NoError(t, err, "Failed to create admin user")
|
||||||
|
adminUserToken, err := adminUser.NewAuthToken()
|
||||||
|
|
||||||
|
readOnlyUser, err := beszelTests.CreateUserWithRole(hub, "readonly@example.com", "password123", "readonly")
|
||||||
|
require.NoError(t, err, "Failed to create readonly user")
|
||||||
|
readOnlyUserToken, err := readOnlyUser.NewAuthToken()
|
||||||
|
require.NoError(t, err, "Failed to create readonly user auth token")
|
||||||
|
|
||||||
|
superuser, err := beszelTests.CreateSuperuser(hub, "superuser@example.com", "password123")
|
||||||
|
require.NoError(t, err, "Failed to create superuser")
|
||||||
|
superuserToken, err := superuser.NewAuthToken()
|
||||||
|
require.NoError(t, err, "Failed to create superuser auth token")
|
||||||
|
|
||||||
|
// Create test system
|
||||||
|
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "test-system",
|
||||||
|
"users": []string{user.Id},
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
})
|
||||||
|
require.NoError(t, err, "Failed to create test system")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
// Auth Protected Routes - Should require authentication
|
||||||
|
{
|
||||||
|
Name: "GET /config-yaml - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/config-yaml",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /config-yaml - with user auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/config-yaml",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /config-yaml - with admin auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/config-yaml",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": adminUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"test-system"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /heartbeat-status - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/heartbeat-status",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /heartbeat-status - with user auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/heartbeat-status",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /heartbeat-status - with admin auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/heartbeat-status",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": adminUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{`"enabled":false`},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-heartbeat - with user auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-heartbeat",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-heartbeat - with admin auth should report disabled state",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-heartbeat",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": adminUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"Heartbeat not configured"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - with auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"active", "token", "permanent"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - enable permanent should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token?enable=1&permanent=1&token=permanent-token-123",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"permanent\":true", "permanent-token-123"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - superuser should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": superuserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"Superusers cannot use universal tokens"},
|
||||||
|
TestAppFactory: func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - with readonly auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": readOnlyUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /smart/refresh - missing system should fail 400 with user auth",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/smart/refresh",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "system", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /smart/refresh - with readonly auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": readOnlyUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /smart/refresh - non-user system should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /smart/refresh - good user should pass validation",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 500,
|
||||||
|
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /user-alerts - no auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /user-alerts - with auth should succeed",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE /user-alerts - no auth should fail",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE /user-alerts - with auth should succeed",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
// Create an alert to delete
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=test-system&container=abababababab",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - request for valid non-user system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/containers/logs?system=%s&container=abababababab", system.Id),
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - request for valid non-user system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/containers/info?system=%s&container=abababababab", system.Id),
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - SHARE_ALL_SYSTEMS allows non-member user",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/containers/info?system=%s&container=abababababab", system.Id),
|
||||||
|
ExpectedStatus: 500,
|
||||||
|
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but missing system param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?container=abababababab",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but missing container param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=test-system",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but invalid system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=invalid-system&container=0123456789ab",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - traversal container should fail validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=" + system.Id + "&container=..%2F..%2Fversion",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - traversal container should fail validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=../../version?x=",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - non-hex container should fail validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=container_name",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - good user should pass validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=" + system.Id + "&container=0123456789ab",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 500,
|
||||||
|
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - good user should pass validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=0123456789ab",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 500,
|
||||||
|
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
// /systemd routes
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - request for valid non-user system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - with auth but missing system param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/systemd/info?service=nginx.service",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - with auth but missing service param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - with auth but invalid system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/systemd/info?system=invalid-system&service=nginx.service",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - service not in systemd_services collection should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=notregistered.service", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - with auth and existing service record should pass validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 500,
|
||||||
|
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.CreateRecord(app, "systemd_services", map[string]any{
|
||||||
|
"system": system.Id,
|
||||||
|
"name": "nginx.service",
|
||||||
|
"state": 0,
|
||||||
|
"sub": 1,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Auth Optional Routes - Should work without authentication
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - with auth should also succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /info - should return the same as /getkey",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/info",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /first-run - no auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/first-run",
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"firstRun\":false"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /first-run - with auth should also succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/first-run",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"firstRun\":false"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /agent-connect - no auth should succeed (websocket upgrade fails but route is accessible)",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/agent-connect",
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - invalid auth token should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://127.0.0.1",
|
||||||
|
}),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "invalid-token",
|
||||||
|
},
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /user-alerts - invalid auth token should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "invalid-token",
|
||||||
|
},
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
// this works but diff behavior on prod vs dev.
|
||||||
|
// dev returns 502; prod returns 200 with static html page 404
|
||||||
|
// TODO: align dev and prod behavior and re-enable this test
|
||||||
|
// {
|
||||||
|
// Name: "GET /update - shouldn't exist without CHECK_UPDATES env var",
|
||||||
|
// Method: http.MethodGet,
|
||||||
|
// URL: "/api/beszel/update",
|
||||||
|
// NotExpectedContent: []string{"v:", "\"v\":"},
|
||||||
|
// ExpectedStatus: 502,
|
||||||
|
// TestAppFactory: testAppFactory,
|
||||||
|
// },
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestFirstUserCreation covers the bootstrap /create-user endpoint:
// it creates the first user when the database is empty (and the superuser
// record ends up with the same email — see the AfterTestFunc assertions),
// refuses once a user exists, and is not registered at all when the initial
// user is supplied via BESZEL_HUB_USER_EMAIL / BESZEL_HUB_USER_PASSWORD.
func TestFirstUserCreation(t *testing.T) {
	t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
		hub, _ := beszelTests.NewTestHub(t.TempDir())
		defer hub.Cleanup()

		hub.StartHub()

		// Both scenarios share the same hub instance, so the second scenario
		// observes the user created by the first.
		testAppFactoryExisting := func(t testing.TB) *pbTests.TestApp {
			return hub.TestApp
		}

		scenarios := []beszelTests.ApiScenario{
			{
				Name:   "POST /create-user - should be available when no users exist",
				Method: http.MethodPost,
				URL:    "/api/beszel/create-user",
				Body: jsonReader(map[string]any{
					"email":    "firstuser@example.com",
					"password": "password123",
				}),
				ExpectedStatus:  200,
				ExpectedContent: []string{"User created"},
				TestAppFactory:  testAppFactoryExisting,
				BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
					// Precondition: no users yet, and exactly one temporary
					// superuser whose email matches migrations.TempAdminEmail.
					userCount, err := hub.CountRecords("users")
					require.NoError(t, err)
					require.Zero(t, userCount, "Should start with no users")
					superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
					require.NoError(t, err)
					require.EqualValues(t, 1, len(superusers), "Should start with one temporary superuser")
					require.EqualValues(t, migrations.TempAdminEmail, superusers[0].GetString("email"), "Should have created one temporary superuser")
				},
				AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
					// Postcondition: one user exists and the sole superuser now
					// carries the new user's email (the temporary one is gone).
					userCount, err := hub.CountRecords("users")
					require.NoError(t, err)
					require.EqualValues(t, 1, userCount, "Should have created one user")
					superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
					require.NoError(t, err)
					require.EqualValues(t, 1, len(superusers), "Should have created one superuser")
					require.EqualValues(t, "firstuser@example.com", superusers[0].GetString("email"), "Should have created one superuser")
				},
			},
			{
				// Same hub as above: a user now exists, so the bootstrap route
				// should no longer be registered and the router returns 404.
				Name:   "POST /create-user - should not be available when users exist",
				Method: http.MethodPost,
				URL:    "/api/beszel/create-user",
				Body: jsonReader(map[string]any{
					"email":    "firstuser@example.com",
					"password": "password123",
				}),
				ExpectedStatus:  404,
				ExpectedContent: []string{"wasn't found"},
				TestAppFactory:  testAppFactoryExisting,
			},
		}

		for _, scenario := range scenarios {
			scenario.Test(t)
		}
	})

	t.Run("CreateUserEndpoint not available when USER_EMAIL, USER_PASSWORD are set", func(t *testing.T) {
		// With these env vars set, the hub provisions the initial user itself
		// at startup, so the bootstrap endpoint must not be exposed.
		t.Setenv("BESZEL_HUB_USER_EMAIL", "me@example.com")
		t.Setenv("BESZEL_HUB_USER_PASSWORD", "password123")

		hub, _ := beszelTests.NewTestHub(t.TempDir())
		defer hub.Cleanup()

		hub.StartHub()

		testAppFactory := func(t testing.TB) *pbTests.TestApp {
			return hub.TestApp
		}

		scenario := beszelTests.ApiScenario{
			Name:            "POST /create-user - should not be available when USER_EMAIL, USER_PASSWORD are set",
			Method:          http.MethodPost,
			URL:             "/api/beszel/create-user",
			ExpectedStatus:  404,
			ExpectedContent: []string{"wasn't found"},
			TestAppFactory:  testAppFactory,
			BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
				// Precondition: the env-var user already exists as both the
				// single user and the single superuser.
				users, err := hub.FindAllRecords("users")
				require.NoError(t, err)
				require.EqualValues(t, 1, len(users), "Should start with one user")
				require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
				superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
				require.NoError(t, err)
				require.EqualValues(t, 1, len(superusers), "Should start with one superuser")
				require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
			},
			AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
				// Postcondition: the rejected request changed nothing.
				users, err := hub.FindAllRecords("users")
				require.NoError(t, err)
				require.EqualValues(t, 1, len(users), "Should still have one user")
				require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
				superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
				require.NoError(t, err)
				require.EqualValues(t, 1, len(superusers), "Should still have one superuser")
				require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
			},
		}

		scenario.Test(t)
	})
}
|
||||||
|
|
||||||
|
// TestCreateUserEndpointAvailability checks the registration condition of
// the bootstrap /create-user route in isolation: it answers when the users
// table is empty and 404s when a user already exists before startup.
func TestCreateUserEndpointAvailability(t *testing.T) {
	t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
		hub, _ := beszelTests.NewTestHub(t.TempDir())
		defer hub.Cleanup()

		// Ensure no users exist
		userCount, err := hub.CountRecords("users")
		require.NoError(t, err)
		require.Zero(t, userCount, "Should start with no users")

		hub.StartHub()

		testAppFactory := func(t testing.TB) *pbTests.TestApp {
			return hub.TestApp
		}

		scenario := beszelTests.ApiScenario{
			Name:   "POST /create-user - should be available when no users exist",
			Method: http.MethodPost,
			URL:    "/api/beszel/create-user",
			Body: jsonReader(map[string]any{
				"email":    "firstuser@example.com",
				"password": "password123",
			}),
			ExpectedStatus:  200,
			ExpectedContent: []string{"User created"},
			TestAppFactory:  testAppFactory,
		}

		scenario.Test(t)

		// Verify user was created
		userCount, err = hub.CountRecords("users")
		require.NoError(t, err)
		require.EqualValues(t, 1, userCount, "Should have created one user")
	})

	t.Run("CreateUserEndpoint not available when users exist", func(t *testing.T) {
		hub, _ := beszelTests.NewTestHub(t.TempDir())
		defer hub.Cleanup()

		// Create a user first, before StartHub, so the route is never
		// registered for this hub instance.
		_, err := beszelTests.CreateUser(hub, "existing@example.com", "password")
		require.NoError(t, err)

		hub.StartHub()

		testAppFactory := func(t testing.TB) *pbTests.TestApp {
			return hub.TestApp
		}

		scenario := beszelTests.ApiScenario{
			Name:   "POST /create-user - should not be available when users exist",
			Method: http.MethodPost,
			URL:    "/api/beszel/create-user",
			Body: jsonReader(map[string]any{
				"email":    "another@example.com",
				"password": "password123",
			}),
			ExpectedStatus:  404,
			ExpectedContent: []string{"wasn't found"},
			TestAppFactory:  testAppFactory,
		}

		scenario.Test(t)
	})
}
|
||||||
|
|
||||||
|
func TestAutoLoginMiddleware(t *testing.T) {
|
||||||
|
var hubs []*beszelTests.TestHub
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
for _, hub := range hubs {
|
||||||
|
hub.Cleanup()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Setenv("AUTO_LOGIN", "user@test.com")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
hubs = append(hubs, hub)
|
||||||
|
hub.StartHub()
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - without auto login should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - with auto login should fail if no matching user",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - with auto login should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.CreateUser(app, "user@test.com", "password123")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestTrustedHeaderMiddleware(t *testing.T) {
|
||||||
|
var hubs []*beszelTests.TestHub
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
for _, hub := range hubs {
|
||||||
|
hub.Cleanup()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Setenv("TRUSTED_AUTH_HEADER", "X-Beszel-Trusted")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
hubs = append(hubs, hub)
|
||||||
|
hub.StartHub()
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - without trusted header should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - with trusted header should fail if no matching user",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"X-Beszel-Trusted": "user@test.com",
|
||||||
|
},
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - with trusted header should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"X-Beszel-Trusted": "user@test.com",
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.CreateUser(app, "user@test.com", "password123")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateEndpoint(t *testing.T) {
|
||||||
|
t.Setenv("CHECK_UPDATES", "true")
|
||||||
|
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
// Create test user and get auth token
|
||||||
|
// user, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
||||||
|
// require.NoError(t, err, "Failed to create test user")
|
||||||
|
// userToken, err := user.NewAuthToken()
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "update endpoint shouldn't work without auth",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/update",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
// leave this out for now since it actually makes a request to github
|
||||||
|
// {
|
||||||
|
// Name: "GET /update - with valid auth should succeed",
|
||||||
|
// Method: http.MethodGet,
|
||||||
|
// URL: "/api/beszel/update",
|
||||||
|
// Headers: map[string]string{
|
||||||
|
// "Authorization": userToken,
|
||||||
|
// },
|
||||||
|
// ExpectedStatus: 200,
|
||||||
|
// ExpectedContent: []string{`"v":`},
|
||||||
|
// TestAppFactory: testAppFactory,
|
||||||
|
// },
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
131
internal/hub/collections.go
Normal file
131
internal/hub/collections.go
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
package hub
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// collectionRules bundles the per-action API access rules to apply to a
// collection. A non-nil field is the PocketBase filter expression that
// grants access for that action; a nil field clears the rule, which
// PocketBase treats as superuser-only access.
type collectionRules struct {
	list   *string // rule for listing records (ListRule)
	view   *string // rule for viewing a single record (ViewRule)
	create *string // rule for creating records (CreateRule)
	update *string // rule for updating records (UpdateRule)
	delete *string // rule for deleting records (DeleteRule)
}
|
||||||
|
|
||||||
|
// setCollectionAuthSettings applies Beszel's collection auth settings.
|
||||||
|
func setCollectionAuthSettings(app core.App) error {
|
||||||
|
usersCollection, err := app.FindCollectionByNameOrId("users")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
superusersCollection, err := app.FindCollectionByNameOrId(core.CollectionNameSuperusers)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// disable email auth if DISABLE_PASSWORD_AUTH env var is set
|
||||||
|
disablePasswordAuth, _ := utils.GetEnv("DISABLE_PASSWORD_AUTH")
|
||||||
|
usersCollection.PasswordAuth.Enabled = disablePasswordAuth != "true"
|
||||||
|
usersCollection.PasswordAuth.IdentityFields = []string{"email"}
|
||||||
|
// allow oauth user creation if USER_CREATION is set
|
||||||
|
if userCreation, _ := utils.GetEnv("USER_CREATION"); userCreation == "true" {
|
||||||
|
cr := "@request.context = 'oauth2'"
|
||||||
|
usersCollection.CreateRule = &cr
|
||||||
|
} else {
|
||||||
|
usersCollection.CreateRule = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// enable mfaOtp mfa if MFA_OTP env var is set
|
||||||
|
mfaOtp, _ := utils.GetEnv("MFA_OTP")
|
||||||
|
usersCollection.OTP.Length = 6
|
||||||
|
superusersCollection.OTP.Length = 6
|
||||||
|
usersCollection.OTP.Enabled = mfaOtp == "true"
|
||||||
|
usersCollection.MFA.Enabled = mfaOtp == "true"
|
||||||
|
superusersCollection.OTP.Enabled = mfaOtp == "true" || mfaOtp == "superusers"
|
||||||
|
superusersCollection.MFA.Enabled = mfaOtp == "true" || mfaOtp == "superusers"
|
||||||
|
if err := app.Save(superusersCollection); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := app.Save(usersCollection); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// When SHARE_ALL_SYSTEMS is enabled, any authenticated user can read
|
||||||
|
// system-scoped data. Write rules continue to block readonly users.
|
||||||
|
shareAllSystems, _ := utils.GetEnv("SHARE_ALL_SYSTEMS")
|
||||||
|
|
||||||
|
authenticatedRule := "@request.auth.id != \"\""
|
||||||
|
systemsMemberRule := authenticatedRule + " && users.id ?= @request.auth.id"
|
||||||
|
systemMemberRule := authenticatedRule + " && system.users.id ?= @request.auth.id"
|
||||||
|
|
||||||
|
systemsReadRule := systemsMemberRule
|
||||||
|
systemScopedReadRule := systemMemberRule
|
||||||
|
if shareAllSystems == "true" {
|
||||||
|
systemsReadRule = authenticatedRule
|
||||||
|
systemScopedReadRule = authenticatedRule
|
||||||
|
}
|
||||||
|
systemsWriteRule := systemsReadRule + " && @request.auth.role != \"readonly\""
|
||||||
|
systemScopedWriteRule := systemScopedReadRule + " && @request.auth.role != \"readonly\""
|
||||||
|
|
||||||
|
if err := applyCollectionRules(app, []string{"systems"}, collectionRules{
|
||||||
|
list: &systemsReadRule,
|
||||||
|
view: &systemsReadRule,
|
||||||
|
create: &systemsWriteRule,
|
||||||
|
update: &systemsWriteRule,
|
||||||
|
delete: &systemsWriteRule,
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := applyCollectionRules(app, []string{"containers", "container_stats", "system_stats", "systemd_services"}, collectionRules{
|
||||||
|
list: &systemScopedReadRule,
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := applyCollectionRules(app, []string{"smart_devices"}, collectionRules{
|
||||||
|
list: &systemScopedReadRule,
|
||||||
|
view: &systemScopedReadRule,
|
||||||
|
delete: &systemScopedWriteRule,
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := applyCollectionRules(app, []string{"fingerprints"}, collectionRules{
|
||||||
|
list: &systemScopedReadRule,
|
||||||
|
view: &systemScopedReadRule,
|
||||||
|
create: &systemScopedWriteRule,
|
||||||
|
update: &systemScopedWriteRule,
|
||||||
|
delete: &systemScopedWriteRule,
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := applyCollectionRules(app, []string{"system_details"}, collectionRules{
|
||||||
|
list: &systemScopedReadRule,
|
||||||
|
view: &systemScopedReadRule,
|
||||||
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func applyCollectionRules(app core.App, collectionNames []string, rules collectionRules) error {
|
||||||
|
for _, collectionName := range collectionNames {
|
||||||
|
collection, err := app.FindCollectionByNameOrId(collectionName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
collection.ListRule = rules.list
|
||||||
|
collection.ViewRule = rules.view
|
||||||
|
collection.CreateRule = rules.create
|
||||||
|
collection.UpdateRule = rules.update
|
||||||
|
collection.DeleteRule = rules.delete
|
||||||
|
if err := app.Save(collection); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
527
internal/hub/collections_test.go
Normal file
527
internal/hub/collections_test.go
Normal file
@@ -0,0 +1,527 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package hub_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestCollectionRulesDefault asserts the auth settings and API access rules
// a freshly created test hub applies to every collection when no sharing or
// auth env vars are set. A nil rule means the action is not exposed through
// the record API (PocketBase treats nil rules as superuser-only).
func TestCollectionRulesDefault(t *testing.T) {
	hub, _ := beszelTests.NewTestHub(t.TempDir())
	defer hub.Cleanup()

	// Expected rule expressions, keyed by how a record is tied to the
	// requesting user (direct `user` field, `users` relation on the record,
	// or `users` relation on the linked system).
	const isUserMatchesUser = `@request.auth.id != "" && user = @request.auth.id`

	const isUserInUsers = `@request.auth.id != "" && users.id ?= @request.auth.id`
	const isUserInUsersNotReadonly = `@request.auth.id != "" && users.id ?= @request.auth.id && @request.auth.role != "readonly"`

	const isUserInSystemUsers = `@request.auth.id != "" && system.users.id ?= @request.auth.id`
	const isUserInSystemUsersNotReadonly = `@request.auth.id != "" && system.users.id ?= @request.auth.id && @request.auth.role != "readonly"`

	// users collection
	usersCollection, err := hub.FindCollectionByNameOrId("users")
	assert.NoError(t, err, "Failed to find users collection")
	assert.True(t, usersCollection.PasswordAuth.Enabled)
	assert.Equal(t, usersCollection.PasswordAuth.IdentityFields, []string{"email"})
	assert.Nil(t, usersCollection.CreateRule)
	assert.False(t, usersCollection.MFA.Enabled)

	// superusers collection
	superusersCollection, err := hub.FindCollectionByNameOrId(core.CollectionNameSuperusers)
	assert.NoError(t, err, "Failed to find superusers collection")
	assert.True(t, superusersCollection.PasswordAuth.Enabled)
	assert.Equal(t, superusersCollection.PasswordAuth.IdentityFields, []string{"email"})
	assert.Nil(t, superusersCollection.CreateRule)
	assert.False(t, superusersCollection.MFA.Enabled)

	// alerts collection
	alertsCollection, err := hub.FindCollectionByNameOrId("alerts")
	require.NoError(t, err, "Failed to find alerts collection")
	assert.Equal(t, isUserMatchesUser, *alertsCollection.ListRule)
	assert.Nil(t, alertsCollection.ViewRule)
	assert.Equal(t, isUserMatchesUser, *alertsCollection.CreateRule)
	assert.Equal(t, isUserMatchesUser, *alertsCollection.UpdateRule)
	assert.Equal(t, isUserMatchesUser, *alertsCollection.DeleteRule)

	// alerts_history collection
	alertsHistoryCollection, err := hub.FindCollectionByNameOrId("alerts_history")
	require.NoError(t, err, "Failed to find alerts_history collection")
	assert.Equal(t, isUserMatchesUser, *alertsHistoryCollection.ListRule)
	assert.Nil(t, alertsHistoryCollection.ViewRule)
	assert.Nil(t, alertsHistoryCollection.CreateRule)
	assert.Nil(t, alertsHistoryCollection.UpdateRule)
	assert.Equal(t, isUserMatchesUser, *alertsHistoryCollection.DeleteRule)

	// containers collection
	containersCollection, err := hub.FindCollectionByNameOrId("containers")
	require.NoError(t, err, "Failed to find containers collection")
	assert.Equal(t, isUserInSystemUsers, *containersCollection.ListRule)
	assert.Nil(t, containersCollection.ViewRule)
	assert.Nil(t, containersCollection.CreateRule)
	assert.Nil(t, containersCollection.UpdateRule)
	assert.Nil(t, containersCollection.DeleteRule)

	// container_stats collection
	containerStatsCollection, err := hub.FindCollectionByNameOrId("container_stats")
	require.NoError(t, err, "Failed to find container_stats collection")
	assert.Equal(t, isUserInSystemUsers, *containerStatsCollection.ListRule)
	assert.Nil(t, containerStatsCollection.ViewRule)
	assert.Nil(t, containerStatsCollection.CreateRule)
	assert.Nil(t, containerStatsCollection.UpdateRule)
	assert.Nil(t, containerStatsCollection.DeleteRule)

	// fingerprints collection
	fingerprintsCollection, err := hub.FindCollectionByNameOrId("fingerprints")
	require.NoError(t, err, "Failed to find fingerprints collection")
	assert.Equal(t, isUserInSystemUsers, *fingerprintsCollection.ListRule)
	assert.Equal(t, isUserInSystemUsers, *fingerprintsCollection.ViewRule)
	assert.Equal(t, isUserInSystemUsersNotReadonly, *fingerprintsCollection.CreateRule)
	assert.Equal(t, isUserInSystemUsersNotReadonly, *fingerprintsCollection.UpdateRule)
	assert.Equal(t, isUserInSystemUsersNotReadonly, *fingerprintsCollection.DeleteRule)

	// quiet_hours collection
	quietHoursCollection, err := hub.FindCollectionByNameOrId("quiet_hours")
	require.NoError(t, err, "Failed to find quiet_hours collection")
	assert.Equal(t, isUserMatchesUser, *quietHoursCollection.ListRule)
	assert.Equal(t, isUserMatchesUser, *quietHoursCollection.ViewRule)
	assert.Equal(t, isUserMatchesUser, *quietHoursCollection.CreateRule)
	assert.Equal(t, isUserMatchesUser, *quietHoursCollection.UpdateRule)
	assert.Equal(t, isUserMatchesUser, *quietHoursCollection.DeleteRule)

	// smart_devices collection
	smartDevicesCollection, err := hub.FindCollectionByNameOrId("smart_devices")
	require.NoError(t, err, "Failed to find smart_devices collection")
	assert.Equal(t, isUserInSystemUsers, *smartDevicesCollection.ListRule)
	assert.Equal(t, isUserInSystemUsers, *smartDevicesCollection.ViewRule)
	assert.Nil(t, smartDevicesCollection.CreateRule)
	assert.Nil(t, smartDevicesCollection.UpdateRule)
	assert.Equal(t, isUserInSystemUsersNotReadonly, *smartDevicesCollection.DeleteRule)

	// system_details collection
	systemDetailsCollection, err := hub.FindCollectionByNameOrId("system_details")
	require.NoError(t, err, "Failed to find system_details collection")
	assert.Equal(t, isUserInSystemUsers, *systemDetailsCollection.ListRule)
	assert.Equal(t, isUserInSystemUsers, *systemDetailsCollection.ViewRule)
	assert.Nil(t, systemDetailsCollection.CreateRule)
	assert.Nil(t, systemDetailsCollection.UpdateRule)
	assert.Nil(t, systemDetailsCollection.DeleteRule)

	// system_stats collection
	systemStatsCollection, err := hub.FindCollectionByNameOrId("system_stats")
	require.NoError(t, err, "Failed to find system_stats collection")
	assert.Equal(t, isUserInSystemUsers, *systemStatsCollection.ListRule)
	assert.Nil(t, systemStatsCollection.ViewRule)
	assert.Nil(t, systemStatsCollection.CreateRule)
	assert.Nil(t, systemStatsCollection.UpdateRule)
	assert.Nil(t, systemStatsCollection.DeleteRule)

	// systemd_services collection
	systemdServicesCollection, err := hub.FindCollectionByNameOrId("systemd_services")
	require.NoError(t, err, "Failed to find systemd_services collection")
	assert.Equal(t, isUserInSystemUsers, *systemdServicesCollection.ListRule)
	assert.Nil(t, systemdServicesCollection.ViewRule)
	assert.Nil(t, systemdServicesCollection.CreateRule)
	assert.Nil(t, systemdServicesCollection.UpdateRule)
	assert.Nil(t, systemdServicesCollection.DeleteRule)

	// systems collection
	systemsCollection, err := hub.FindCollectionByNameOrId("systems")
	require.NoError(t, err, "Failed to find systems collection")
	assert.Equal(t, isUserInUsers, *systemsCollection.ListRule)
	assert.Equal(t, isUserInUsers, *systemsCollection.ViewRule)
	assert.Equal(t, isUserInUsersNotReadonly, *systemsCollection.CreateRule)
	assert.Equal(t, isUserInUsersNotReadonly, *systemsCollection.UpdateRule)
	assert.Equal(t, isUserInUsersNotReadonly, *systemsCollection.DeleteRule)

	// universal_tokens collection: no record API access at all.
	universalTokensCollection, err := hub.FindCollectionByNameOrId("universal_tokens")
	require.NoError(t, err, "Failed to find universal_tokens collection")
	assert.Nil(t, universalTokensCollection.ListRule)
	assert.Nil(t, universalTokensCollection.ViewRule)
	assert.Nil(t, universalTokensCollection.CreateRule)
	assert.Nil(t, universalTokensCollection.UpdateRule)
	assert.Nil(t, universalTokensCollection.DeleteRule)

	// user_settings collection
	userSettingsCollection, err := hub.FindCollectionByNameOrId("user_settings")
	require.NoError(t, err, "Failed to find user_settings collection")
	assert.Equal(t, isUserMatchesUser, *userSettingsCollection.ListRule)
	assert.Nil(t, userSettingsCollection.ViewRule)
	assert.Equal(t, isUserMatchesUser, *userSettingsCollection.CreateRule)
	assert.Equal(t, isUserMatchesUser, *userSettingsCollection.UpdateRule)
	assert.Nil(t, userSettingsCollection.DeleteRule)
}
|
||||||
|
|
||||||
|
func TestCollectionRulesShareAllSystems(t *testing.T) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
const isUser = `@request.auth.id != ""`
|
||||||
|
const isUserNotReadonly = `@request.auth.id != "" && @request.auth.role != "readonly"`
|
||||||
|
|
||||||
|
const isUserMatchesUser = `@request.auth.id != "" && user = @request.auth.id`
|
||||||
|
|
||||||
|
// alerts collection
|
||||||
|
alertsCollection, err := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
require.NoError(t, err, "Failed to find alerts collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.ListRule)
|
||||||
|
assert.Nil(t, alertsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// alerts_history collection
|
||||||
|
alertsHistoryCollection, err := hub.FindCollectionByNameOrId("alerts_history")
|
||||||
|
require.NoError(t, err, "Failed to find alerts_history collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsHistoryCollection.ListRule)
|
||||||
|
assert.Nil(t, alertsHistoryCollection.ViewRule)
|
||||||
|
assert.Nil(t, alertsHistoryCollection.CreateRule)
|
||||||
|
assert.Nil(t, alertsHistoryCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsHistoryCollection.DeleteRule)
|
||||||
|
|
||||||
|
// containers collection
|
||||||
|
containersCollection, err := hub.FindCollectionByNameOrId("containers")
|
||||||
|
require.NoError(t, err, "Failed to find containers collection")
|
||||||
|
assert.Equal(t, isUser, *containersCollection.ListRule)
|
||||||
|
assert.Nil(t, containersCollection.ViewRule)
|
||||||
|
assert.Nil(t, containersCollection.CreateRule)
|
||||||
|
assert.Nil(t, containersCollection.UpdateRule)
|
||||||
|
assert.Nil(t, containersCollection.DeleteRule)
|
||||||
|
|
||||||
|
// container_stats collection
|
||||||
|
containerStatsCollection, err := hub.FindCollectionByNameOrId("container_stats")
|
||||||
|
require.NoError(t, err, "Failed to find container_stats collection")
|
||||||
|
assert.Equal(t, isUser, *containerStatsCollection.ListRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.ViewRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.CreateRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// fingerprints collection
|
||||||
|
fingerprintsCollection, err := hub.FindCollectionByNameOrId("fingerprints")
|
||||||
|
require.NoError(t, err, "Failed to find fingerprints collection")
|
||||||
|
assert.Equal(t, isUser, *fingerprintsCollection.ListRule)
|
||||||
|
assert.Equal(t, isUser, *fingerprintsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *fingerprintsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *fingerprintsCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *fingerprintsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// quiet_hours collection
|
||||||
|
quietHoursCollection, err := hub.FindCollectionByNameOrId("quiet_hours")
|
||||||
|
require.NoError(t, err, "Failed to find quiet_hours collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.ListRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.DeleteRule)
|
||||||
|
|
||||||
|
// smart_devices collection
|
||||||
|
smartDevicesCollection, err := hub.FindCollectionByNameOrId("smart_devices")
|
||||||
|
require.NoError(t, err, "Failed to find smart_devices collection")
|
||||||
|
assert.Equal(t, isUser, *smartDevicesCollection.ListRule)
|
||||||
|
assert.Equal(t, isUser, *smartDevicesCollection.ViewRule)
|
||||||
|
assert.Nil(t, smartDevicesCollection.CreateRule)
|
||||||
|
assert.Nil(t, smartDevicesCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *smartDevicesCollection.DeleteRule)
|
||||||
|
|
||||||
|
// system_details collection
|
||||||
|
systemDetailsCollection, err := hub.FindCollectionByNameOrId("system_details")
|
||||||
|
require.NoError(t, err, "Failed to find system_details collection")
|
||||||
|
assert.Equal(t, isUser, *systemDetailsCollection.ListRule)
|
||||||
|
assert.Equal(t, isUser, *systemDetailsCollection.ViewRule)
|
||||||
|
assert.Nil(t, systemDetailsCollection.CreateRule)
|
||||||
|
assert.Nil(t, systemDetailsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, systemDetailsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// system_stats collection
|
||||||
|
systemStatsCollection, err := hub.FindCollectionByNameOrId("system_stats")
|
||||||
|
require.NoError(t, err, "Failed to find system_stats collection")
|
||||||
|
assert.Equal(t, isUser, *systemStatsCollection.ListRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.ViewRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.CreateRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// systemd_services collection
|
||||||
|
systemdServicesCollection, err := hub.FindCollectionByNameOrId("systemd_services")
|
||||||
|
require.NoError(t, err, "Failed to find systemd_services collection")
|
||||||
|
assert.Equal(t, isUser, *systemdServicesCollection.ListRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.ViewRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.CreateRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.UpdateRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.DeleteRule)
|
||||||
|
|
||||||
|
// systems collection
|
||||||
|
systemsCollection, err := hub.FindCollectionByNameOrId("systems")
|
||||||
|
require.NoError(t, err, "Failed to find systems collection")
|
||||||
|
assert.Equal(t, isUser, *systemsCollection.ListRule)
|
||||||
|
assert.Equal(t, isUser, *systemsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *systemsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *systemsCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *systemsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// universal_tokens collection
|
||||||
|
universalTokensCollection, err := hub.FindCollectionByNameOrId("universal_tokens")
|
||||||
|
require.NoError(t, err, "Failed to find universal_tokens collection")
|
||||||
|
assert.Nil(t, universalTokensCollection.ListRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.ViewRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.CreateRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.UpdateRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.DeleteRule)
|
||||||
|
|
||||||
|
// user_settings collection
|
||||||
|
userSettingsCollection, err := hub.FindCollectionByNameOrId("user_settings")
|
||||||
|
require.NoError(t, err, "Failed to find user_settings collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *userSettingsCollection.ListRule)
|
||||||
|
assert.Nil(t, userSettingsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *userSettingsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *userSettingsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, userSettingsCollection.DeleteRule)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDisablePasswordAuth(t *testing.T) {
|
||||||
|
t.Setenv("DISABLE_PASSWORD_AUTH", "true")
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
usersCollection, err := hub.FindCollectionByNameOrId("users")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.False(t, usersCollection.PasswordAuth.Enabled)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUserCreation(t *testing.T) {
|
||||||
|
t.Setenv("USER_CREATION", "true")
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
usersCollection, err := hub.FindCollectionByNameOrId("users")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, "@request.context = 'oauth2'", *usersCollection.CreateRule)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMFAOtp(t *testing.T) {
|
||||||
|
t.Setenv("MFA_OTP", "true")
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
usersCollection, err := hub.FindCollectionByNameOrId("users")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, usersCollection.OTP.Enabled)
|
||||||
|
assert.True(t, usersCollection.MFA.Enabled)
|
||||||
|
|
||||||
|
superusersCollection, err := hub.FindCollectionByNameOrId(core.CollectionNameSuperusers)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, superusersCollection.OTP.Enabled)
|
||||||
|
assert.True(t, superusersCollection.MFA.Enabled)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestApiCollectionsAuthRules(t *testing.T) {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
user1, _ := beszelTests.CreateUser(hub, "user1@example.com", "password")
|
||||||
|
user1Token, _ := user1.NewAuthToken()
|
||||||
|
|
||||||
|
user2, _ := beszelTests.CreateUser(hub, "user2@example.com", "password")
|
||||||
|
// user2Token, _ := user2.NewAuthToken()
|
||||||
|
|
||||||
|
userReadonly, _ := beszelTests.CreateUserWithRole(hub, "userreadonly@example.com", "password", "readonly")
|
||||||
|
userReadonlyToken, _ := userReadonly.NewAuthToken()
|
||||||
|
|
||||||
|
userOneSystem, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "system1",
|
||||||
|
"users": []string{user1.Id},
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
})
|
||||||
|
|
||||||
|
sharedSystem, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "system2",
|
||||||
|
"users": []string{user1.Id, user2.Id},
|
||||||
|
"host": "127.0.0.2",
|
||||||
|
})
|
||||||
|
|
||||||
|
userTwoSystem, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "system3",
|
||||||
|
"users": []string{user2.Id},
|
||||||
|
"host": "127.0.0.2",
|
||||||
|
})
|
||||||
|
|
||||||
|
userRecords, _ := hub.CountRecords("users")
|
||||||
|
assert.EqualValues(t, 3, userRecords, "all users should be created")
|
||||||
|
|
||||||
|
systemRecords, _ := hub.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 3, systemRecords, "all systems should be created")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "Unauthorized user cannot list systems",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/collections/systems/records",
|
||||||
|
ExpectedStatus: 200, // https://github.com/pocketbase/pocketbase/discussions/1570
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
ExpectedContent: []string{`"items":[]`, `"totalItems":0`},
|
||||||
|
NotExpectedContent: []string{userOneSystem.Id, sharedSystem.Id, userTwoSystem.Id},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "Unauthorized user cannot delete a system",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: fmt.Sprintf("/api/collections/systems/records/%s", userOneSystem.Id),
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
ExpectedContent: []string{"resource wasn't found"},
|
||||||
|
NotExpectedContent: []string{userOneSystem.Id},
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 3, systemsCount, "should have 3 systems before deletion")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 3, systemsCount, "should still have 3 systems after failed deletion")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 can list their own systems",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/collections/systems/records",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{userOneSystem.Id, sharedSystem.Id},
|
||||||
|
NotExpectedContent: []string{userTwoSystem.Id},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 cannot list user 2's system",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/collections/systems/records",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{userOneSystem.Id, sharedSystem.Id},
|
||||||
|
NotExpectedContent: []string{userTwoSystem.Id},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 can see user 2's system if SHARE_ALL_SYSTEMS is enabled",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/collections/systems/records",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{userOneSystem.Id, sharedSystem.Id, userTwoSystem.Id},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 can delete their own system",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: fmt.Sprintf("/api/collections/systems/records/%s", userOneSystem.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 204,
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 3, systemsCount, "should have 3 systems before deletion")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount, "should have 2 systems after deletion")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 cannot delete user 2's system",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: fmt.Sprintf("/api/collections/systems/records/%s", userTwoSystem.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
ExpectedContent: []string{"resource wasn't found"},
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount)
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "Readonly cannot delete a system even if SHARE_ALL_SYSTEMS is enabled",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: fmt.Sprintf("/api/collections/systems/records/%s", sharedSystem.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userReadonlyToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"resource wasn't found"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount)
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 can delete user 2's system if SHARE_ALL_SYSTEMS is enabled",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: fmt.Sprintf("/api/collections/systems/records/%s", userTwoSystem.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 204,
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount)
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 1, systemsCount)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -279,9 +279,6 @@ func createFingerprintRecord(app core.App, systemID, token string) error {
|
|||||||
|
|
||||||
// Returns the current config.yml file as a JSON object
|
// Returns the current config.yml file as a JSON object
|
||||||
func GetYamlConfig(e *core.RequestEvent) error {
|
func GetYamlConfig(e *core.RequestEvent) error {
|
||||||
if e.Auth.GetString("role") != "admin" {
|
|
||||||
return e.ForbiddenError("Requires admin role", nil)
|
|
||||||
}
|
|
||||||
configContent, err := generateYAML(e.App)
|
configContent, err := generateYAML(e.App)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package config_test
|
package config_test
|
||||||
|
|
||||||
|
|||||||
@@ -1,35 +1,39 @@
|
|||||||
|
// Package expirymap provides a thread-safe map with expiring entries.
|
||||||
|
// It supports TTL-based expiration with both lazy cleanup on access
|
||||||
|
// and periodic background cleanup.
|
||||||
package expirymap
|
package expirymap
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"reflect"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pocketbase/pocketbase/tools/store"
|
"github.com/pocketbase/pocketbase/tools/store"
|
||||||
)
|
)
|
||||||
|
|
||||||
// val wraps a stored value together with its expiration deadline.
type val[T comparable] struct {
	value   T
	expires time.Time // absolute time after which the entry is stale
}
|
||||||
|
|
||||||
// ExpiryMap is a thread-safe, string-keyed map whose entries expire after a
// per-entry TTL. A background goroutine periodically removes stale entries;
// StopCleaner shuts it down.
type ExpiryMap[T comparable] struct {
	store    *store.Store[string, val[T]]
	stopChan chan struct{} // closed by StopCleaner to end the cleanup goroutine
	stopOnce sync.Once     // guards stopChan so StopCleaner is safe to call twice
}
|
||||||
|
|
||||||
// New creates a new expiry map with custom cleanup interval
|
// New creates a new expiry map with custom cleanup interval
|
||||||
func New[T any](cleanupInterval time.Duration) *ExpiryMap[T] {
|
func New[T comparable](cleanupInterval time.Duration) *ExpiryMap[T] {
|
||||||
m := &ExpiryMap[T]{
|
m := &ExpiryMap[T]{
|
||||||
store: store.New(map[string]*val[T]{}),
|
store: store.New(map[string]val[T]{}),
|
||||||
cleanupInterval: cleanupInterval,
|
stopChan: make(chan struct{}),
|
||||||
}
|
}
|
||||||
m.startCleaner()
|
go m.startCleaner(cleanupInterval)
|
||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set stores a value with the given TTL
|
// Set stores a value with the given TTL
|
||||||
func (m *ExpiryMap[T]) Set(key string, value T, ttl time.Duration) {
|
func (m *ExpiryMap[T]) Set(key string, value T, ttl time.Duration) {
|
||||||
m.store.Set(key, &val[T]{
|
m.store.Set(key, val[T]{
|
||||||
value: value,
|
value: value,
|
||||||
expires: time.Now().Add(ttl),
|
expires: time.Now().Add(ttl),
|
||||||
})
|
})
|
||||||
@@ -55,7 +59,7 @@ func (m *ExpiryMap[T]) GetOk(key string) (T, bool) {
|
|||||||
// GetByValue retrieves a value by value
|
// GetByValue retrieves a value by value
|
||||||
func (m *ExpiryMap[T]) GetByValue(val T) (key string, value T, ok bool) {
|
func (m *ExpiryMap[T]) GetByValue(val T) (key string, value T, ok bool) {
|
||||||
for key, v := range m.store.GetAll() {
|
for key, v := range m.store.GetAll() {
|
||||||
if reflect.DeepEqual(v.value, val) {
|
if v.value == val {
|
||||||
// check if expired
|
// check if expired
|
||||||
if v.expires.Before(time.Now()) {
|
if v.expires.Before(time.Now()) {
|
||||||
m.store.Remove(key)
|
m.store.Remove(key)
|
||||||
@@ -75,7 +79,7 @@ func (m *ExpiryMap[T]) Remove(key string) {
|
|||||||
// RemovebyValue removes a value by value
|
// RemovebyValue removes a value by value
|
||||||
func (m *ExpiryMap[T]) RemovebyValue(value T) (T, bool) {
|
func (m *ExpiryMap[T]) RemovebyValue(value T) (T, bool) {
|
||||||
for key, val := range m.store.GetAll() {
|
for key, val := range m.store.GetAll() {
|
||||||
if reflect.DeepEqual(val.value, value) {
|
if val.value == value {
|
||||||
m.store.Remove(key)
|
m.store.Remove(key)
|
||||||
return val.value, true
|
return val.value, true
|
||||||
}
|
}
|
||||||
@@ -84,13 +88,23 @@ func (m *ExpiryMap[T]) RemovebyValue(value T) (T, bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// startCleaner runs the background cleanup process
|
// startCleaner runs the background cleanup process
|
||||||
func (m *ExpiryMap[T]) startCleaner() {
|
func (m *ExpiryMap[T]) startCleaner(interval time.Duration) {
|
||||||
go func() {
|
tick := time.Tick(interval)
|
||||||
tick := time.Tick(m.cleanupInterval)
|
for {
|
||||||
for range tick {
|
select {
|
||||||
|
case <-tick:
|
||||||
m.cleanup()
|
m.cleanup()
|
||||||
|
case <-m.stopChan:
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}()
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StopCleaner stops the background cleanup process
|
||||||
|
func (m *ExpiryMap[T]) StopCleaner() {
|
||||||
|
m.stopOnce.Do(func() {
|
||||||
|
close(m.stopChan)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// cleanup removes all expired entries
|
// cleanup removes all expired entries
|
||||||
@@ -102,3 +116,12 @@ func (m *ExpiryMap[T]) cleanup() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpdateExpiration updates the expiration time of a key
|
||||||
|
func (m *ExpiryMap[T]) UpdateExpiration(key string, ttl time.Duration) {
|
||||||
|
value, ok := m.store.GetOk(key)
|
||||||
|
if ok {
|
||||||
|
value.expires = time.Now().Add(ttl)
|
||||||
|
m.store.Set(key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,10 +1,10 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package expirymap
|
package expirymap
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
"testing/synctest"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -178,6 +178,33 @@ func TestExpiryMap_GenericTypes(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestExpiryMap_UpdateExpiration(t *testing.T) {
|
||||||
|
em := New[string](time.Hour)
|
||||||
|
|
||||||
|
// Set a value with short TTL
|
||||||
|
em.Set("key1", "value1", time.Millisecond*50)
|
||||||
|
|
||||||
|
// Verify it exists
|
||||||
|
assert.True(t, em.Has("key1"))
|
||||||
|
|
||||||
|
// Update expiration to a longer TTL
|
||||||
|
em.UpdateExpiration("key1", time.Hour)
|
||||||
|
|
||||||
|
// Wait for the original TTL to pass
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
|
||||||
|
// Should still exist because expiration was updated
|
||||||
|
assert.True(t, em.Has("key1"))
|
||||||
|
value, ok := em.GetOk("key1")
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "value1", value)
|
||||||
|
|
||||||
|
// Try updating non-existent key (should not panic)
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
em.UpdateExpiration("nonexistent", time.Hour)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestExpiryMap_ZeroValues(t *testing.T) {
|
func TestExpiryMap_ZeroValues(t *testing.T) {
|
||||||
em := New[string](time.Hour)
|
em := New[string](time.Hour)
|
||||||
|
|
||||||
@@ -474,3 +501,52 @@ func TestExpiryMap_ValueOperations_Integration(t *testing.T) {
|
|||||||
assert.Equal(t, "unique", value)
|
assert.Equal(t, "unique", value)
|
||||||
assert.Equal(t, "key2", key)
|
assert.Equal(t, "key2", key)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestExpiryMap_Cleaner(t *testing.T) {
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
em := New[string](time.Second)
|
||||||
|
defer em.StopCleaner()
|
||||||
|
|
||||||
|
em.Set("test", "value", 500*time.Millisecond)
|
||||||
|
|
||||||
|
// Wait 600ms, value is expired but cleaner hasn't run yet (interval is 1s)
|
||||||
|
time.Sleep(600 * time.Millisecond)
|
||||||
|
synctest.Wait()
|
||||||
|
|
||||||
|
// Map should still hold the value in its internal store before lazy access or cleaner
|
||||||
|
assert.Equal(t, 1, len(em.store.GetAll()), "store should still have 1 item before cleaner runs")
|
||||||
|
|
||||||
|
// Wait another 500ms so cleaner (1s interval) runs
|
||||||
|
time.Sleep(500 * time.Millisecond)
|
||||||
|
synctest.Wait() // Wait for background goroutine to process the tick
|
||||||
|
|
||||||
|
assert.Equal(t, 0, len(em.store.GetAll()), "store should be empty after cleaner runs")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExpiryMap_StopCleaner(t *testing.T) {
|
||||||
|
em := New[string](time.Hour)
|
||||||
|
|
||||||
|
// Initially, stopChan is open, reading would block
|
||||||
|
select {
|
||||||
|
case <-em.stopChan:
|
||||||
|
t.Fatal("stopChan should be open initially")
|
||||||
|
default:
|
||||||
|
// success
|
||||||
|
}
|
||||||
|
|
||||||
|
em.StopCleaner()
|
||||||
|
|
||||||
|
// After StopCleaner, stopChan is closed, reading returns immediately
|
||||||
|
select {
|
||||||
|
case <-em.stopChan:
|
||||||
|
// success
|
||||||
|
default:
|
||||||
|
t.Fatal("stopChan was not closed by StopCleaner")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calling StopCleaner again should NOT panic thanks to sync.Once
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
em.StopCleaner()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
303
internal/hub/heartbeat/heartbeat.go
Normal file
303
internal/hub/heartbeat/heartbeat.go
Normal file
@@ -0,0 +1,303 @@
|
|||||||
|
// Package heartbeat sends periodic outbound pings to an external monitoring
|
||||||
|
// endpoint (e.g. BetterStack, Uptime Kuma, Healthchecks.io) so operators can
|
||||||
|
// monitor Beszel without exposing it to the internet.
|
||||||
|
package heartbeat
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Default values for heartbeat configuration.
const (
	// defaultInterval is used when HEARTBEAT_INTERVAL is unset or invalid.
	defaultInterval = 60 // seconds
	// httpTimeout bounds each outbound heartbeat HTTP request.
	httpTimeout = 10 * time.Second
)

// Payload is the JSON body sent with each heartbeat request.
type Payload struct {
	// Status is "ok" when all non-paused systems are up, "warn" when alerts
	// are triggered but no systems are down, and "error" when any system is down.
	Status    string         `json:"status"`
	Timestamp string         `json:"timestamp"`
	Msg       string         `json:"msg"`
	Systems   SystemsSummary `json:"systems"`
	Down      []SystemInfo   `json:"down_systems,omitempty"`
	Alerts    []AlertInfo    `json:"triggered_alerts,omitempty"`
	Version   string         `json:"beszel_version"`
}

// SystemsSummary contains counts of systems by status.
type SystemsSummary struct {
	Total   int `json:"total"`
	Up      int `json:"up"`
	Down    int `json:"down"`
	Paused  int `json:"paused"`
	Pending int `json:"pending"`
}

// SystemInfo identifies a system that is currently down.
type SystemInfo struct {
	ID   string `json:"id" db:"id"`
	Name string `json:"name" db:"name"`
	Host string `json:"host" db:"host"`
}

// AlertInfo describes a currently triggered alert.
type AlertInfo struct {
	SystemID   string  `json:"system_id"`
	SystemName string  `json:"system_name"`
	AlertName  string  `json:"alert_name"`
	Threshold  float64 `json:"threshold"`
}

// Config holds heartbeat settings read from environment variables.
type Config struct {
	URL      string // endpoint to ping
	Interval int    // seconds between pings
	Method   string // HTTP method (GET or POST, default POST)
}

// Heartbeat manages the periodic outbound health check.
type Heartbeat struct {
	app    core.App
	config Config
	client *http.Client // shared client with httpTimeout applied
}
|
||||||
|
|
||||||
|
// New creates a Heartbeat if configuration is present.
|
||||||
|
// Returns nil if HEARTBEAT_URL is not set (feature disabled).
|
||||||
|
func New(app core.App, getEnv func(string) (string, bool)) *Heartbeat {
|
||||||
|
url, _ := getEnv("HEARTBEAT_URL")
|
||||||
|
url = strings.TrimSpace(url)
|
||||||
|
if app == nil || url == "" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
interval := defaultInterval
|
||||||
|
if v, ok := getEnv("HEARTBEAT_INTERVAL"); ok {
|
||||||
|
if parsed, err := strconv.Atoi(v); err == nil && parsed > 0 {
|
||||||
|
interval = parsed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
method := http.MethodPost
|
||||||
|
if v, ok := getEnv("HEARTBEAT_METHOD"); ok {
|
||||||
|
v = strings.ToUpper(strings.TrimSpace(v))
|
||||||
|
if v == http.MethodGet || v == http.MethodHead {
|
||||||
|
method = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Heartbeat{
|
||||||
|
app: app,
|
||||||
|
config: Config{
|
||||||
|
URL: url,
|
||||||
|
Interval: interval,
|
||||||
|
Method: method,
|
||||||
|
},
|
||||||
|
client: &http.Client{Timeout: httpTimeout},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start begins the heartbeat loop. It blocks and should be called in a goroutine.
|
||||||
|
// The loop runs until the provided stop channel is closed.
|
||||||
|
func (hb *Heartbeat) Start(stop <-chan struct{}) {
|
||||||
|
sanitizedURL := sanitizeHeartbeatURL(hb.config.URL)
|
||||||
|
hb.app.Logger().Info("Heartbeat enabled",
|
||||||
|
"url", sanitizedURL,
|
||||||
|
"interval", fmt.Sprintf("%ds", hb.config.Interval),
|
||||||
|
"method", hb.config.Method,
|
||||||
|
)
|
||||||
|
|
||||||
|
// Send an initial heartbeat immediately on startup.
|
||||||
|
hb.send()
|
||||||
|
|
||||||
|
ticker := time.NewTicker(time.Duration(hb.config.Interval) * time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-stop:
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
hb.send()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send performs a single heartbeat ping. Exposed for the test-heartbeat API endpoint.
|
||||||
|
func (hb *Heartbeat) Send() error {
|
||||||
|
return hb.send()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetConfig returns the current heartbeat configuration.
|
||||||
|
func (hb *Heartbeat) GetConfig() Config {
|
||||||
|
return hb.config
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hb *Heartbeat) send() error {
|
||||||
|
var req *http.Request
|
||||||
|
var err error
|
||||||
|
method := normalizeMethod(hb.config.Method)
|
||||||
|
|
||||||
|
if method == http.MethodGet || method == http.MethodHead {
|
||||||
|
req, err = http.NewRequest(method, hb.config.URL, nil)
|
||||||
|
} else {
|
||||||
|
payload, payloadErr := hb.buildPayload()
|
||||||
|
if payloadErr != nil {
|
||||||
|
hb.app.Logger().Error("Heartbeat: failed to build payload", "err", payloadErr)
|
||||||
|
return payloadErr
|
||||||
|
}
|
||||||
|
|
||||||
|
body, jsonErr := json.Marshal(payload)
|
||||||
|
if jsonErr != nil {
|
||||||
|
hb.app.Logger().Error("Heartbeat: failed to marshal payload", "err", jsonErr)
|
||||||
|
return jsonErr
|
||||||
|
}
|
||||||
|
req, err = http.NewRequest(http.MethodPost, hb.config.URL, bytes.NewReader(body))
|
||||||
|
if err == nil {
|
||||||
|
req.Header.Set("Content-Type", "application/json")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
hb.app.Logger().Error("Heartbeat: failed to create request", "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
req.Header.Set("User-Agent", "Beszel-Heartbeat")
|
||||||
|
|
||||||
|
resp, err := hb.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
hb.app.Logger().Error("Heartbeat: request failed", "url", sanitizeHeartbeatURL(hb.config.URL), "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode >= 400 {
|
||||||
|
hb.app.Logger().Warn("Heartbeat: non-success response",
|
||||||
|
"url", sanitizeHeartbeatURL(hb.config.URL),
|
||||||
|
"status", resp.StatusCode,
|
||||||
|
)
|
||||||
|
return fmt.Errorf("heartbeat endpoint returned status %d", resp.StatusCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hb *Heartbeat) buildPayload() (*Payload, error) {
|
||||||
|
db := hb.app.DB()
|
||||||
|
|
||||||
|
// Count systems by status.
|
||||||
|
var systemCounts []struct {
|
||||||
|
Status string `db:"status"`
|
||||||
|
Count int `db:"cnt"`
|
||||||
|
}
|
||||||
|
err := db.NewQuery("SELECT status, COUNT(*) as cnt FROM systems GROUP BY status").All(&systemCounts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("query system counts: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
summary := SystemsSummary{}
|
||||||
|
for _, sc := range systemCounts {
|
||||||
|
switch sc.Status {
|
||||||
|
case "up":
|
||||||
|
summary.Up = sc.Count
|
||||||
|
case "down":
|
||||||
|
summary.Down = sc.Count
|
||||||
|
case "paused":
|
||||||
|
summary.Paused = sc.Count
|
||||||
|
case "pending":
|
||||||
|
summary.Pending = sc.Count
|
||||||
|
}
|
||||||
|
summary.Total += sc.Count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get names of down systems.
|
||||||
|
var downSystems []SystemInfo
|
||||||
|
if summary.Down > 0 {
|
||||||
|
err = db.NewQuery("SELECT id, name, host FROM systems WHERE status = 'down'").All(&downSystems)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("query down systems: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get triggered alerts with system names.
|
||||||
|
var triggeredAlerts []struct {
|
||||||
|
SystemID string `db:"system"`
|
||||||
|
SystemName string `db:"system_name"`
|
||||||
|
AlertName string `db:"name"`
|
||||||
|
Value float64 `db:"value"`
|
||||||
|
}
|
||||||
|
err = db.NewQuery(`
|
||||||
|
SELECT a.system, s.name as system_name, a.name, a.value
|
||||||
|
FROM alerts a
|
||||||
|
JOIN systems s ON a.system = s.id
|
||||||
|
WHERE a.triggered = true
|
||||||
|
`).All(&triggeredAlerts)
|
||||||
|
if err != nil {
|
||||||
|
// Non-fatal: alerts info is supplementary.
|
||||||
|
triggeredAlerts = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
alerts := make([]AlertInfo, 0, len(triggeredAlerts))
|
||||||
|
for _, ta := range triggeredAlerts {
|
||||||
|
alerts = append(alerts, AlertInfo{
|
||||||
|
SystemID: ta.SystemID,
|
||||||
|
SystemName: ta.SystemName,
|
||||||
|
AlertName: ta.AlertName,
|
||||||
|
Threshold: ta.Value,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine overall status.
|
||||||
|
status := "ok"
|
||||||
|
msg := "All systems operational"
|
||||||
|
if summary.Down > 0 {
|
||||||
|
status = "error"
|
||||||
|
names := make([]string, len(downSystems))
|
||||||
|
for i, ds := range downSystems {
|
||||||
|
names[i] = ds.Name
|
||||||
|
}
|
||||||
|
msg = fmt.Sprintf("%d system(s) down: %s", summary.Down, strings.Join(names, ", "))
|
||||||
|
} else if len(alerts) > 0 {
|
||||||
|
status = "warn"
|
||||||
|
msg = fmt.Sprintf("%d alert(s) triggered", len(alerts))
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Payload{
|
||||||
|
Status: status,
|
||||||
|
Timestamp: time.Now().UTC().Format(time.RFC3339),
|
||||||
|
Msg: msg,
|
||||||
|
Systems: summary,
|
||||||
|
Down: downSystems,
|
||||||
|
Alerts: alerts,
|
||||||
|
Version: beszel.Version,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeMethod trims and upper-cases the configured HTTP method, accepting
// only GET, HEAD, and POST. Any other value (including empty) falls back to
// POST.
func normalizeMethod(method string) string {
	switch m := strings.ToUpper(strings.TrimSpace(method)); m {
	case http.MethodGet, http.MethodHead, http.MethodPost:
		return m
	default:
		return http.MethodPost
	}
}
|
||||||
|
|
||||||
|
// sanitizeHeartbeatURL reduces a configured heartbeat URL to "scheme://host"
// for safe logging, so credentials, paths, and query strings never leak into
// log output. Unparsable values, or ones missing a scheme or host, are
// reported as "<invalid-url>".
func sanitizeHeartbeatURL(rawURL string) string {
	u, err := url.Parse(strings.TrimSpace(rawURL))
	if err != nil {
		return "<invalid-url>"
	}
	if u.Scheme == "" || u.Host == "" {
		return "<invalid-url>"
	}
	return u.Scheme + "://" + u.Host
}
|
||||||
257
internal/hub/heartbeat/heartbeat_test.go
Normal file
257
internal/hub/heartbeat/heartbeat_test.go
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package heartbeat_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/hub/heartbeat"
|
||||||
|
beszeltests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestNew covers the constructor guards and config parsing of heartbeat.New:
// it must return nil without an app or a configured URL, and must normalize
// the URL (trim), interval (parse int), and method (upper-case) from env.
func TestNew(t *testing.T) {
	t.Run("returns nil when app is missing", func(t *testing.T) {
		hb := heartbeat.New(nil, envGetter(map[string]string{
			"HEARTBEAT_URL": "https://heartbeat.example.com/ping",
		}))
		assert.Nil(t, hb)
	})

	t.Run("returns nil when URL is missing", func(t *testing.T) {
		app := newTestHub(t)
		// Env getter that reports every key as unset.
		hb := heartbeat.New(app.App, func(string) (string, bool) {
			return "", false
		})
		assert.Nil(t, hb)
	})

	t.Run("parses and normalizes config values", func(t *testing.T) {
		app := newTestHub(t)
		// URL has surrounding spaces, method is lowercase — both should be normalized.
		env := map[string]string{
			"HEARTBEAT_URL":      " https://heartbeat.example.com/ping ",
			"HEARTBEAT_INTERVAL": "90",
			"HEARTBEAT_METHOD":   "head",
		}
		getEnv := func(key string) (string, bool) {
			v, ok := env[key]
			return v, ok
		}

		hb := heartbeat.New(app.App, getEnv)
		require.NotNil(t, hb)
		cfg := hb.GetConfig()
		assert.Equal(t, "https://heartbeat.example.com/ping", cfg.URL)
		assert.Equal(t, 90, cfg.Interval)
		assert.Equal(t, http.MethodHead, cfg.Method)
	})
}
|
||||||
|
|
||||||
|
// TestSendGETDoesNotRequireAppOrDB verifies that a GET heartbeat reaches the
// endpoint with the expected method and User-Agent header and that Send
// returns nil on a 200 response.
func TestSendGETDoesNotRequireAppOrDB(t *testing.T) {
	app := newTestHub(t)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Assert request shape inside the handler; failures surface via t.
		assert.Equal(t, http.MethodGet, r.Method)
		assert.Equal(t, "Beszel-Heartbeat", r.Header.Get("User-Agent"))
		w.WriteHeader(http.StatusOK)
	}))
	defer server.Close()

	hb := heartbeat.New(app.App, envGetter(map[string]string{
		"HEARTBEAT_URL":    server.URL,
		"HEARTBEAT_METHOD": "GET",
	}))
	require.NotNil(t, hb)

	require.NoError(t, hb.Send())
}
|
||||||
|
|
||||||
|
// TestSendReturnsErrorOnHTTPFailureStatus verifies that Send surfaces an
// error (mentioning the status code) when the endpoint responds with >= 400.
func TestSendReturnsErrorOnHTTPFailureStatus(t *testing.T) {
	app := newTestHub(t)
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Always fail so Send must report the status.
		w.WriteHeader(http.StatusInternalServerError)
	}))
	defer server.Close()

	hb := heartbeat.New(app.App, envGetter(map[string]string{
		"HEARTBEAT_URL":    server.URL,
		"HEARTBEAT_METHOD": "GET",
	}))
	require.NotNil(t, hb)

	err := hb.Send()
	require.Error(t, err)
	assert.ErrorContains(t, err, "heartbeat endpoint returned status 500")
}
|
||||||
|
|
||||||
|
// TestSendPOSTBuildsExpectedStatuses exercises the full POST heartbeat path:
// each case seeds systems/alerts in the test hub's DB, lets Send POST to a
// capture server, and asserts the decoded payload's status, message, and
// summary counts match the seeded state.
func TestSendPOSTBuildsExpectedStatuses(t *testing.T) {
	tests := []struct {
		name  string
		setup func(t *testing.T, app *beszeltests.TestHub, user *core.Record)
		// Expected payload fields after Send.
		expectStatus   string
		expectMsgPart  string
		expectDown     int
		expectAlerts   int
		expectTotal    int
		expectUp       int
		expectPaused   int
		expectPending  int
		expectDownSumm int
	}{
		{
			name: "error when at least one system is down",
			setup: func(t *testing.T, app *beszeltests.TestHub, user *core.Record) {
				downSystem := createTestSystem(t, app, user.Id, "db-1", "10.0.0.1", "down")
				_ = createTestSystem(t, app, user.Id, "web-1", "10.0.0.2", "up")
				createTriggeredAlert(t, app, user.Id, downSystem.Id, "CPU", 95)
			},
			expectStatus:   "error",
			expectMsgPart:  "1 system(s) down",
			expectDown:     1,
			expectAlerts:   1,
			expectTotal:    2,
			expectUp:       1,
			expectDownSumm: 1,
		},
		{
			name: "warn when only alerts are triggered",
			setup: func(t *testing.T, app *beszeltests.TestHub, user *core.Record) {
				system := createTestSystem(t, app, user.Id, "api-1", "10.1.0.1", "up")
				createTriggeredAlert(t, app, user.Id, system.Id, "CPU", 90)
			},
			expectStatus:   "warn",
			expectMsgPart:  "1 alert(s) triggered",
			expectDown:     0,
			expectAlerts:   1,
			expectTotal:    1,
			expectUp:       1,
			expectDownSumm: 0,
		},
		{
			name: "ok when no down systems and no alerts",
			setup: func(t *testing.T, app *beszeltests.TestHub, user *core.Record) {
				_ = createTestSystem(t, app, user.Id, "node-1", "10.2.0.1", "up")
				_ = createTestSystem(t, app, user.Id, "node-2", "10.2.0.2", "paused")
				_ = createTestSystem(t, app, user.Id, "node-3", "10.2.0.3", "pending")
			},
			expectStatus:   "ok",
			expectMsgPart:  "All systems operational",
			expectDown:     0,
			expectAlerts:   0,
			expectTotal:    3,
			expectUp:       1,
			expectPaused:   1,
			expectPending:  1,
			expectDownSumm: 0,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			app := newTestHub(t)
			user := createTestUser(t, app)
			tt.setup(t, app, user)

			// requestCapture records what the heartbeat actually sent.
			type requestCapture struct {
				method      string
				userAgent   string
				contentType string
				payload     heartbeat.Payload
			}

			// Buffered channel so the handler never blocks the server goroutine.
			captured := make(chan requestCapture, 1)
			server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				defer r.Body.Close()
				body, err := io.ReadAll(r.Body)
				require.NoError(t, err)

				var payload heartbeat.Payload
				require.NoError(t, json.Unmarshal(body, &payload))
				captured <- requestCapture{
					method:      r.Method,
					userAgent:   r.Header.Get("User-Agent"),
					contentType: r.Header.Get("Content-Type"),
					payload:     payload,
				}
				w.WriteHeader(http.StatusNoContent)
			}))
			defer server.Close()

			hb := heartbeat.New(app.App, envGetter(map[string]string{
				"HEARTBEAT_URL":    server.URL,
				"HEARTBEAT_METHOD": "POST",
			}))
			require.NotNil(t, hb)
			require.NoError(t, hb.Send())

			// Send is synchronous here, so one capture is guaranteed to be queued.
			req := <-captured
			assert.Equal(t, http.MethodPost, req.method)
			assert.Equal(t, "Beszel-Heartbeat", req.userAgent)
			assert.Equal(t, "application/json", req.contentType)

			assert.Equal(t, tt.expectStatus, req.payload.Status)
			assert.Contains(t, req.payload.Msg, tt.expectMsgPart)
			assert.Equal(t, tt.expectDown, len(req.payload.Down))
			assert.Equal(t, tt.expectAlerts, len(req.payload.Alerts))
			assert.Equal(t, tt.expectTotal, req.payload.Systems.Total)
			assert.Equal(t, tt.expectUp, req.payload.Systems.Up)
			assert.Equal(t, tt.expectDownSumm, req.payload.Systems.Down)
			assert.Equal(t, tt.expectPaused, req.payload.Systems.Paused)
			assert.Equal(t, tt.expectPending, req.payload.Systems.Pending)
		})
	}
}
|
||||||
|
|
||||||
|
func newTestHub(t *testing.T) *beszeltests.TestHub {
|
||||||
|
t.Helper()
|
||||||
|
app, err := beszeltests.NewTestHub(t.TempDir())
|
||||||
|
require.NoError(t, err)
|
||||||
|
t.Cleanup(app.Cleanup)
|
||||||
|
return app
|
||||||
|
}
|
||||||
|
|
||||||
|
func createTestUser(t *testing.T, app *beszeltests.TestHub) *core.Record {
|
||||||
|
t.Helper()
|
||||||
|
user, err := beszeltests.CreateUser(app.App, "admin@example.com", "password123")
|
||||||
|
require.NoError(t, err)
|
||||||
|
return user
|
||||||
|
}
|
||||||
|
|
||||||
|
func createTestSystem(t *testing.T, app *beszeltests.TestHub, userID, name, host, status string) *core.Record {
|
||||||
|
t.Helper()
|
||||||
|
system, err := beszeltests.CreateRecord(app.App, "systems", map[string]any{
|
||||||
|
"name": name,
|
||||||
|
"host": host,
|
||||||
|
"port": "45876",
|
||||||
|
"users": []string{userID},
|
||||||
|
"status": status,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
return system
|
||||||
|
}
|
||||||
|
|
||||||
|
func createTriggeredAlert(t *testing.T, app *beszeltests.TestHub, userID, systemID, name string, threshold float64) *core.Record {
|
||||||
|
t.Helper()
|
||||||
|
alert, err := beszeltests.CreateRecord(app.App, "alerts", map[string]any{
|
||||||
|
"name": name,
|
||||||
|
"system": systemID,
|
||||||
|
"user": userID,
|
||||||
|
"value": threshold,
|
||||||
|
"min": 0,
|
||||||
|
"triggered": true,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
return alert
|
||||||
|
}
|
||||||
|
|
||||||
|
// envGetter adapts a plain map to the lookup-function signature used by
// heartbeat.New, mimicking os.LookupEnv semantics over fixed test values.
func envGetter(values map[string]string) func(string) (string, bool) {
	lookup := func(name string) (string, bool) {
		value, present := values[name]
		return value, present
	}
	return lookup
}
|
||||||
@@ -4,35 +4,35 @@ package hub
|
|||||||
import (
|
import (
|
||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
|
||||||
"github.com/henrygd/beszel/internal/alerts"
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
"github.com/henrygd/beszel/internal/hub/config"
|
"github.com/henrygd/beszel/internal/hub/config"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/heartbeat"
|
||||||
"github.com/henrygd/beszel/internal/hub/systems"
|
"github.com/henrygd/beszel/internal/hub/systems"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
"github.com/henrygd/beszel/internal/records"
|
"github.com/henrygd/beszel/internal/records"
|
||||||
"github.com/henrygd/beszel/internal/users"
|
"github.com/henrygd/beszel/internal/users"
|
||||||
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/pocketbase/dbx"
|
|
||||||
"github.com/pocketbase/pocketbase"
|
"github.com/pocketbase/pocketbase"
|
||||||
"github.com/pocketbase/pocketbase/apis"
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Hub is the application. It embeds the PocketBase app and keeps references to subcomponents.
|
||||||
type Hub struct {
|
type Hub struct {
|
||||||
core.App
|
core.App
|
||||||
*alerts.AlertManager
|
*alerts.AlertManager
|
||||||
um *users.UserManager
|
um *users.UserManager
|
||||||
rm *records.RecordManager
|
rm *records.RecordManager
|
||||||
sm *systems.SystemManager
|
sm *systems.SystemManager
|
||||||
|
hb *heartbeat.Heartbeat
|
||||||
|
hbStop chan struct{}
|
||||||
pubKey string
|
pubKey string
|
||||||
signer ssh.Signer
|
signer ssh.Signer
|
||||||
appURL string
|
appURL string
|
||||||
@@ -40,32 +40,41 @@ type Hub struct {
|
|||||||
|
|
||||||
// NewHub creates a new Hub instance with default configuration
|
// NewHub creates a new Hub instance with default configuration
|
||||||
func NewHub(app core.App) *Hub {
|
func NewHub(app core.App) *Hub {
|
||||||
hub := &Hub{}
|
hub := &Hub{App: app}
|
||||||
hub.App = app
|
|
||||||
|
|
||||||
hub.AlertManager = alerts.NewAlertManager(hub)
|
hub.AlertManager = alerts.NewAlertManager(hub)
|
||||||
hub.um = users.NewUserManager(hub)
|
hub.um = users.NewUserManager(hub)
|
||||||
hub.rm = records.NewRecordManager(hub)
|
hub.rm = records.NewRecordManager(hub)
|
||||||
hub.sm = systems.NewSystemManager(hub)
|
hub.sm = systems.NewSystemManager(hub)
|
||||||
hub.appURL, _ = GetEnv("APP_URL")
|
hub.hb = heartbeat.New(app, utils.GetEnv)
|
||||||
|
if hub.hb != nil {
|
||||||
|
hub.hbStop = make(chan struct{})
|
||||||
|
}
|
||||||
|
_ = onAfterBootstrapAndMigrations(app, hub.initialize)
|
||||||
return hub
|
return hub
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetEnv retrieves an environment variable with a "BESZEL_HUB_" prefix, or falls back to the unprefixed key.
|
// onAfterBootstrapAndMigrations ensures the provided function runs after the database is set up and migrations are applied.
|
||||||
func GetEnv(key string) (value string, exists bool) {
|
// This is a workaround for behavior in PocketBase where onBootstrap runs before migrations, forcing use of onServe for this purpose.
|
||||||
if value, exists = os.LookupEnv("BESZEL_HUB_" + key); exists {
|
// However, PB's tests.TestApp is already bootstrapped, generally doesn't serve, but does handle migrations.
|
||||||
return value, exists
|
// So this ensures that the provided function runs at the right time either way, after DB is ready and migrations are done.
|
||||||
|
func onAfterBootstrapAndMigrations(app core.App, fn func(app core.App) error) error {
|
||||||
|
// pb tests.TestApp is already bootstrapped and doesn't serve
|
||||||
|
if app.IsBootstrapped() {
|
||||||
|
return fn(app)
|
||||||
}
|
}
|
||||||
// Fallback to the old unprefixed key
|
// Must use OnServe because OnBootstrap appears to run before migrations, even if calling e.Next() before anything else
|
||||||
return os.LookupEnv(key)
|
app.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
||||||
}
|
if err := fn(e.App); err != nil {
|
||||||
|
|
||||||
func (h *Hub) StartHub() error {
|
|
||||||
h.App.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
|
||||||
// initialize settings / collections
|
|
||||||
if err := h.initialize(e); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartHub sets up event handlers and starts the PocketBase server
|
||||||
|
func (h *Hub) StartHub() error {
|
||||||
|
h.App.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
||||||
// sync systems with config
|
// sync systems with config
|
||||||
if err := config.SyncSystems(e); err != nil {
|
if err := config.SyncSystems(e); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -88,6 +97,10 @@ func (h *Hub) StartHub() error {
|
|||||||
if err := h.sm.Initialize(); err != nil {
|
if err := h.sm.Initialize(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
// start heartbeat if configured
|
||||||
|
if h.hb != nil {
|
||||||
|
go h.hb.Start(h.hbStop)
|
||||||
|
}
|
||||||
return e.Next()
|
return e.Next()
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -96,132 +109,29 @@ func (h *Hub) StartHub() error {
|
|||||||
h.App.OnRecordCreate("users").BindFunc(h.um.InitializeUserRole)
|
h.App.OnRecordCreate("users").BindFunc(h.um.InitializeUserRole)
|
||||||
h.App.OnRecordCreate("user_settings").BindFunc(h.um.InitializeUserSettings)
|
h.App.OnRecordCreate("user_settings").BindFunc(h.um.InitializeUserSettings)
|
||||||
|
|
||||||
if pb, ok := h.App.(*pocketbase.PocketBase); ok {
|
pb, ok := h.App.(*pocketbase.PocketBase)
|
||||||
// log.Println("Starting pocketbase")
|
if !ok {
|
||||||
err := pb.Start()
|
return errors.New("not a pocketbase app")
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
}
|
return pb.Start()
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// initialize sets up initial configuration (collections, settings, etc.)
|
// initialize sets up initial configuration (collections, settings, etc.)
|
||||||
func (h *Hub) initialize(e *core.ServeEvent) error {
|
func (h *Hub) initialize(app core.App) error {
|
||||||
// set general settings
|
// set general settings
|
||||||
settings := e.App.Settings()
|
settings := app.Settings()
|
||||||
// batch requests (for global alerts)
|
// batch requests (for alerts)
|
||||||
settings.Batch.Enabled = true
|
settings.Batch.Enabled = true
|
||||||
// set URL if BASE_URL env is set
|
// set URL if APP_URL env is set
|
||||||
if h.appURL != "" {
|
if appURL, isSet := utils.GetEnv("APP_URL"); isSet {
|
||||||
settings.Meta.AppURL = h.appURL
|
h.appURL = appURL
|
||||||
|
settings.Meta.AppURL = appURL
|
||||||
}
|
}
|
||||||
if err := e.App.Save(settings); err != nil {
|
if err := app.Save(settings); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// set auth settings
|
// set auth settings
|
||||||
if err := setCollectionAuthSettings(e.App); err != nil {
|
return setCollectionAuthSettings(app)
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// setCollectionAuthSettings sets up default authentication settings for the app
|
|
||||||
func setCollectionAuthSettings(app core.App) error {
|
|
||||||
usersCollection, err := app.FindCollectionByNameOrId("users")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
superusersCollection, err := app.FindCollectionByNameOrId(core.CollectionNameSuperusers)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// disable email auth if DISABLE_PASSWORD_AUTH env var is set
|
|
||||||
disablePasswordAuth, _ := GetEnv("DISABLE_PASSWORD_AUTH")
|
|
||||||
usersCollection.PasswordAuth.Enabled = disablePasswordAuth != "true"
|
|
||||||
usersCollection.PasswordAuth.IdentityFields = []string{"email"}
|
|
||||||
// allow oauth user creation if USER_CREATION is set
|
|
||||||
if userCreation, _ := GetEnv("USER_CREATION"); userCreation == "true" {
|
|
||||||
cr := "@request.context = 'oauth2'"
|
|
||||||
usersCollection.CreateRule = &cr
|
|
||||||
} else {
|
|
||||||
usersCollection.CreateRule = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// enable mfaOtp mfa if MFA_OTP env var is set
|
|
||||||
mfaOtp, _ := GetEnv("MFA_OTP")
|
|
||||||
usersCollection.OTP.Length = 6
|
|
||||||
superusersCollection.OTP.Length = 6
|
|
||||||
usersCollection.OTP.Enabled = mfaOtp == "true"
|
|
||||||
usersCollection.MFA.Enabled = mfaOtp == "true"
|
|
||||||
superusersCollection.OTP.Enabled = mfaOtp == "true" || mfaOtp == "superusers"
|
|
||||||
superusersCollection.MFA.Enabled = mfaOtp == "true" || mfaOtp == "superusers"
|
|
||||||
if err := app.Save(superusersCollection); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := app.Save(usersCollection); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
shareAllSystems, _ := GetEnv("SHARE_ALL_SYSTEMS")
|
|
||||||
|
|
||||||
// allow all users to access systems if SHARE_ALL_SYSTEMS is set
|
|
||||||
systemsCollection, err := app.FindCollectionByNameOrId("systems")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var systemsReadRule string
|
|
||||||
if shareAllSystems == "true" {
|
|
||||||
systemsReadRule = "@request.auth.id != \"\""
|
|
||||||
} else {
|
|
||||||
systemsReadRule = "@request.auth.id != \"\" && users.id ?= @request.auth.id"
|
|
||||||
}
|
|
||||||
updateDeleteRule := systemsReadRule + " && @request.auth.role != \"readonly\""
|
|
||||||
systemsCollection.ListRule = &systemsReadRule
|
|
||||||
systemsCollection.ViewRule = &systemsReadRule
|
|
||||||
systemsCollection.UpdateRule = &updateDeleteRule
|
|
||||||
systemsCollection.DeleteRule = &updateDeleteRule
|
|
||||||
if err := app.Save(systemsCollection); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// allow all users to access all containers if SHARE_ALL_SYSTEMS is set
|
|
||||||
containersCollection, err := app.FindCollectionByNameOrId("containers")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
containersListRule := strings.Replace(systemsReadRule, "users.id", "system.users.id", 1)
|
|
||||||
containersCollection.ListRule = &containersListRule
|
|
||||||
if err := app.Save(containersCollection); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// allow all users to access system-related collections if SHARE_ALL_SYSTEMS is set
|
|
||||||
// these collections all have a "system" relation field
|
|
||||||
systemRelatedCollections := []string{"system_details", "smart_devices", "systemd_services"}
|
|
||||||
for _, collectionName := range systemRelatedCollections {
|
|
||||||
collection, err := app.FindCollectionByNameOrId(collectionName)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
collection.ListRule = &containersListRule
|
|
||||||
// set viewRule for collections that need it (system_details, smart_devices)
|
|
||||||
if collection.ViewRule != nil {
|
|
||||||
collection.ViewRule = &containersListRule
|
|
||||||
}
|
|
||||||
// set deleteRule for smart_devices (allows user to dismiss disk warnings)
|
|
||||||
if collectionName == "smart_devices" {
|
|
||||||
deleteRule := containersListRule + " && @request.auth.role != \"readonly\""
|
|
||||||
collection.DeleteRule = &deleteRule
|
|
||||||
}
|
|
||||||
if err := app.Save(collection); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// registerCronJobs sets up scheduled tasks
|
// registerCronJobs sets up scheduled tasks
|
||||||
@@ -233,254 +143,7 @@ func (h *Hub) registerCronJobs(_ *core.ServeEvent) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// custom middlewares
|
// GetSSHKey generates key pair if it doesn't exist and returns signer
|
||||||
func (h *Hub) registerMiddlewares(se *core.ServeEvent) {
|
|
||||||
// authorizes request with user matching the provided email
|
|
||||||
authorizeRequestWithEmail := func(e *core.RequestEvent, email string) (err error) {
|
|
||||||
if e.Auth != nil || email == "" {
|
|
||||||
return e.Next()
|
|
||||||
}
|
|
||||||
isAuthRefresh := e.Request.URL.Path == "/api/collections/users/auth-refresh" && e.Request.Method == http.MethodPost
|
|
||||||
e.Auth, err = e.App.FindFirstRecordByData("users", "email", email)
|
|
||||||
if err != nil || !isAuthRefresh {
|
|
||||||
return e.Next()
|
|
||||||
}
|
|
||||||
// auth refresh endpoint, make sure token is set in header
|
|
||||||
token, _ := e.Auth.NewAuthToken()
|
|
||||||
e.Request.Header.Set("Authorization", token)
|
|
||||||
return e.Next()
|
|
||||||
}
|
|
||||||
// authenticate with trusted header
|
|
||||||
if autoLogin, _ := GetEnv("AUTO_LOGIN"); autoLogin != "" {
|
|
||||||
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
|
||||||
return authorizeRequestWithEmail(e, autoLogin)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
// authenticate with trusted header
|
|
||||||
if trustedHeader, _ := GetEnv("TRUSTED_AUTH_HEADER"); trustedHeader != "" {
|
|
||||||
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
|
||||||
return authorizeRequestWithEmail(e, e.Request.Header.Get(trustedHeader))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// registerApiRoutes registers the custom /api/beszel endpoints: an
// auth-required group for hub operations and an auth-optional group for
// first-run/agent endpoints. Returns an error only from route setup (none of
// the current registrations fail, so it always returns nil here).
func (h *Hub) registerApiRoutes(se *core.ServeEvent) error {
	// auth protected routes
	apiAuth := se.Router.Group("/api/beszel")
	apiAuth.Bind(apis.RequireAuth())
	// auth optional routes (same prefix, no auth middleware)
	apiNoAuth := se.Router.Group("/api/beszel")

	// create first user endpoint only needed if no users exist
	if totalUsers, _ := se.App.CountRecords("users"); totalUsers == 0 {
		apiNoAuth.POST("/create-user", h.um.CreateFirstUser)
	}
	// check if first time setup on login page
	apiNoAuth.GET("/first-run", func(e *core.RequestEvent) error {
		total, err := e.App.CountRecords("users")
		return e.JSON(http.StatusOK, map[string]bool{"firstRun": err == nil && total == 0})
	})
	// get public key and version
	apiAuth.GET("/getkey", func(e *core.RequestEvent) error {
		return e.JSON(http.StatusOK, map[string]string{"key": h.pubKey, "v": beszel.Version})
	})
	// send test notification
	apiAuth.POST("/test-notification", h.SendTestNotification)
	// get config.yml content
	apiAuth.GET("/config-yaml", config.GetYamlConfig)
	// handle agent websocket connection (agents authenticate separately)
	apiNoAuth.GET("/agent-connect", h.handleAgentConnect)
	// get or create universal tokens
	apiAuth.GET("/universal-token", h.getUniversalToken)
	// update / delete user alerts
	apiAuth.POST("/user-alerts", alerts.UpsertUserAlerts)
	apiAuth.DELETE("/user-alerts", alerts.DeleteUserAlerts)
	// refresh SMART devices for a system
	apiAuth.POST("/smart/refresh", h.refreshSmartData)
	// get systemd service details
	apiAuth.GET("/systemd/info", h.getSystemdInfo)
	// /containers routes — enabled unless CONTAINER_DETAILS is explicitly "false"
	if enabled, _ := GetEnv("CONTAINER_DETAILS"); enabled != "false" {
		// get container logs
		apiAuth.GET("/containers/logs", h.getContainerLogs)
		// get container info
		apiAuth.GET("/containers/info", h.getContainerInfo)
	}
	return nil
}
|
|
||||||
|
|
||||||
// getUniversalToken handles the universal token API endpoint
// (create / read / disable). Query parameters:
//   - token:     a candidate token to inspect (optional)
//   - enable:    "1" to enable, "0" to disable; absent means read-only
//   - permanent: "1" (with enable=1) persists the token in the DB across
//     restarts; otherwise the token is ephemeral (in-memory, 1 hour TTL)
//
// Responses always carry {token, active, permanent}. Requires auth
// (e.Auth is set by the RequireAuth middleware on this route group).
func (h *Hub) getUniversalToken(e *core.RequestEvent) error {
	tokenMap := universalTokenMap.GetMap()
	userID := e.Auth.Id
	query := e.Request.URL.Query()
	token := query.Get("token")
	enable := query.Get("enable")
	permanent := query.Get("permanent")

	// helper for deleting any existing permanent token record for this user
	deletePermanent := func() error {
		rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
		if err != nil {
			return nil // no record — nothing to delete
		}
		return h.Delete(rec)
	}

	// helper for upserting a permanent token record for this user
	upsertPermanent := func(token string) error {
		rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
		if err == nil {
			// record exists — overwrite its token
			rec.Set("token", token)
			return h.Save(rec)
		}

		// no record yet — create one
		col, err := h.FindCachedCollectionByNameOrId("universal_tokens")
		if err != nil {
			return err
		}
		newRec := core.NewRecord(col)
		newRec.Set("user", userID)
		newRec.Set("token", token)
		return h.Save(newRec)
	}

	// Disable universal tokens (both ephemeral and permanent)
	if enable == "0" {
		tokenMap.RemovebyValue(userID)
		// best-effort: a missing permanent record is not an error
		_ = deletePermanent()
		return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
	}

	// Enable universal token (ephemeral or permanent)
	if enable == "1" {
		if token == "" {
			token = uuid.New().String()
		}

		if permanent == "1" {
			// make token permanent (persist across restarts);
			// drop any ephemeral token so only one form is active
			tokenMap.RemovebyValue(userID)
			if err := upsertPermanent(token); err != nil {
				return err
			}
			return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": true})
		}

		// default: ephemeral mode (1 hour); remove any permanent record
		_ = deletePermanent()
		tokenMap.Set(token, userID, time.Hour)
		return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
	}

	// Read current state
	// Prefer permanent token if it exists.
	if rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID}); err == nil {
		dbToken := rec.GetString("token")
		// If no token was provided, or the caller is asking about their permanent token, return it.
		if token == "" || token == dbToken {
			return e.JSON(http.StatusOK, map[string]any{"token": dbToken, "active": true, "permanent": true})
		}
		// Token doesn't match their permanent token (avoid leaking other info)
		return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
	}

	// No permanent token; fall back to ephemeral token map.
	if token == "" {
		// return existing token if it exists
		if token, _, ok := tokenMap.GetByValue(userID); ok {
			return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
		}
		// if no token is provided, generate a new one
		token = uuid.New().String()
	}

	// Token is considered active only if it belongs to the current user.
	activeUser, ok := tokenMap.GetOk(token)
	active := ok && activeUser == userID
	response := map[string]any{"token": token, "active": active, "permanent": false}
	return e.JSON(http.StatusOK, response)
}
|
|
||||||
|
|
||||||
// containerRequestHandler handles both container logs and info requests
|
|
||||||
func (h *Hub) containerRequestHandler(e *core.RequestEvent, fetchFunc func(*systems.System, string) (string, error), responseKey string) error {
|
|
||||||
systemID := e.Request.URL.Query().Get("system")
|
|
||||||
containerID := e.Request.URL.Query().Get("container")
|
|
||||||
|
|
||||||
if systemID == "" || containerID == "" {
|
|
||||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and container parameters are required"})
|
|
||||||
}
|
|
||||||
|
|
||||||
system, err := h.sm.GetSystem(systemID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := fetchFunc(system, containerID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
|
||||||
}
|
|
||||||
|
|
||||||
return e.JSON(http.StatusOK, map[string]string{responseKey: data})
|
|
||||||
}
|
|
||||||
|
|
||||||
// getContainerLogs handles GET /api/beszel/containers/logs requests
|
|
||||||
func (h *Hub) getContainerLogs(e *core.RequestEvent) error {
|
|
||||||
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
|
||||||
return system.FetchContainerLogsFromAgent(containerID)
|
|
||||||
}, "logs")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Hub) getContainerInfo(e *core.RequestEvent) error {
|
|
||||||
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
|
||||||
return system.FetchContainerInfoFromAgent(containerID)
|
|
||||||
}, "info")
|
|
||||||
}
|
|
||||||
|
|
||||||
// getSystemdInfo handles GET /api/beszel/systemd/info requests
|
|
||||||
func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
|
||||||
query := e.Request.URL.Query()
|
|
||||||
systemID := query.Get("system")
|
|
||||||
serviceName := query.Get("service")
|
|
||||||
|
|
||||||
if systemID == "" || serviceName == "" {
|
|
||||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and service parameters are required"})
|
|
||||||
}
|
|
||||||
system, err := h.sm.GetSystem(systemID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
|
||||||
}
|
|
||||||
details, err := system.FetchSystemdInfoFromAgent(serviceName)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
|
||||||
}
|
|
||||||
e.Response.Header().Set("Cache-Control", "public, max-age=60")
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"details": details})
|
|
||||||
}
|
|
||||||
|
|
||||||
// refreshSmartData handles POST /api/beszel/smart/refresh requests
|
|
||||||
// Fetches fresh SMART data from the agent and updates the collection
|
|
||||||
func (h *Hub) refreshSmartData(e *core.RequestEvent) error {
|
|
||||||
systemID := e.Request.URL.Query().Get("system")
|
|
||||||
if systemID == "" {
|
|
||||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system parameter is required"})
|
|
||||||
}
|
|
||||||
|
|
||||||
system, err := h.sm.GetSystem(systemID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch and save SMART devices
|
|
||||||
if err := system.FetchAndSaveSmartDevices(); err != nil {
|
|
||||||
return e.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
|
||||||
}
|
|
||||||
|
|
||||||
return e.JSON(http.StatusOK, map[string]string{"status": "ok"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// generates key pair if it doesn't exist and returns signer
|
|
||||||
func (h *Hub) GetSSHKey(dataDir string) (ssh.Signer, error) {
|
func (h *Hub) GetSSHKey(dataDir string) (ssh.Signer, error) {
|
||||||
if h.signer != nil {
|
if h.signer != nil {
|
||||||
return h.signer, nil
|
return h.signer, nil
|
||||||
|
|||||||
@@ -1,39 +1,22 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package hub_test
|
package hub_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
"encoding/json"
|
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/migrations"
|
|
||||||
beszelTests "github.com/henrygd/beszel/internal/tests"
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
|
||||||
pbTests "github.com/pocketbase/pocketbase/tests"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
|
||||||
func jsonReader(v any) io.Reader {
|
|
||||||
data, err := json.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return bytes.NewReader(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMakeLink(t *testing.T) {
|
func TestMakeLink(t *testing.T) {
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
|
||||||
@@ -266,614 +249,20 @@ func TestGetSSHKey(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestApiRoutesAuthentication(t *testing.T) {
|
func TestAppUrl(t *testing.T) {
|
||||||
|
t.Run("no APP_URL does't change app url", func(t *testing.T) {
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
defer hub.Cleanup()
|
defer hub.Cleanup()
|
||||||
|
|
||||||
hub.StartHub()
|
settings := hub.Settings()
|
||||||
|
assert.Equal(t, "http://localhost:8090", settings.Meta.AppURL)
|
||||||
// Create test user and get auth token
|
|
||||||
user, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
|
||||||
require.NoError(t, err, "Failed to create test user")
|
|
||||||
|
|
||||||
adminUser, err := beszelTests.CreateRecord(hub, "users", map[string]any{
|
|
||||||
"email": "admin@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
"role": "admin",
|
|
||||||
})
|
})
|
||||||
require.NoError(t, err, "Failed to create admin user")
|
t.Run("APP_URL changes app url", func(t *testing.T) {
|
||||||
adminUserToken, err := adminUser.NewAuthToken()
|
t.Setenv("APP_URL", "http://example.com/app")
|
||||||
|
|
||||||
// superUser, err := beszelTests.CreateRecord(hub, core.CollectionNameSuperusers, map[string]any{
|
|
||||||
// "email": "superuser@example.com",
|
|
||||||
// "password": "password123",
|
|
||||||
// })
|
|
||||||
// require.NoError(t, err, "Failed to create superuser")
|
|
||||||
|
|
||||||
userToken, err := user.NewAuthToken()
|
|
||||||
require.NoError(t, err, "Failed to create auth token")
|
|
||||||
|
|
||||||
// Create test system for user-alerts endpoints
|
|
||||||
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"users": []string{user.Id},
|
|
||||||
"host": "127.0.0.1",
|
|
||||||
})
|
|
||||||
require.NoError(t, err, "Failed to create test system")
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
// Auth Protected Routes - Should require authentication
|
|
||||||
{
|
|
||||||
Name: "POST /test-notification - no auth should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-notification",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"url": "generic://127.0.0.1",
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /test-notification - with auth should succeed",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-notification",
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"url": "generic://127.0.0.1",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"sending message"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /config-yaml - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/config-yaml",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /config-yaml - with user auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/config-yaml",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 403,
|
|
||||||
ExpectedContent: []string{"Requires admin"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /config-yaml - with admin auth should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/config-yaml",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": adminUserToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"test-system"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /universal-token - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/universal-token",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /universal-token - with auth should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/universal-token",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"active", "token", "permanent"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /universal-token - enable permanent should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/universal-token?enable=1&permanent=1&token=permanent-token-123",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"permanent\":true", "permanent-token-123"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /user-alerts - no auth should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /user-alerts - with auth should succeed",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE /user-alerts - no auth should fail",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE /user-alerts - with auth should succeed",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
// Create an alert to delete
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system.Id,
|
|
||||||
"user": user.Id,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?system=test-system&container=test-container",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - with auth but missing system param should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?container=test-container",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"system and container parameters are required"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - with auth but missing container param should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?system=test-system",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"system and container parameters are required"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - with auth but invalid system should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?system=invalid-system&container=test-container",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"system not found"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
|
|
||||||
// Auth Optional Routes - Should work without authentication
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with auth should also succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /first-run - no auth should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/first-run",
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"firstRun\":false"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /first-run - with auth should also succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/first-run",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"firstRun\":false"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /agent-connect - no auth should succeed (websocket upgrade fails but route is accessible)",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/agent-connect",
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /test-notification - invalid auth token should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-notification",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"url": "generic://127.0.0.1",
|
|
||||||
}),
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": "invalid-token",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /user-alerts - invalid auth token should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": "invalid-token",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFirstUserCreation(t *testing.T) {
|
|
||||||
t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
defer hub.Cleanup()
|
defer hub.Cleanup()
|
||||||
|
|
||||||
hub.StartHub()
|
settings := hub.Settings()
|
||||||
|
assert.Equal(t, "http://example.com/app", settings.Meta.AppURL)
|
||||||
testAppFactoryExisting := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
{
|
|
||||||
Name: "POST /create-user - should be available when no users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "firstuser@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"User created"},
|
|
||||||
TestAppFactory: testAppFactoryExisting,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
userCount, err := hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Zero(t, userCount, "Should start with no users")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should start with one temporary superuser")
|
|
||||||
require.EqualValues(t, migrations.TempAdminEmail, superusers[0].GetString("email"), "Should have created one temporary superuser")
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
userCount, err := hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, userCount, "Should have created one user")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should have created one superuser")
|
|
||||||
require.EqualValues(t, "firstuser@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /create-user - should not be available when users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "firstuser@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"wasn't found"},
|
|
||||||
TestAppFactory: testAppFactoryExisting,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("CreateUserEndpoint not available when USER_EMAIL, USER_PASSWORD are set", func(t *testing.T) {
|
|
||||||
os.Setenv("BESZEL_HUB_USER_EMAIL", "me@example.com")
|
|
||||||
os.Setenv("BESZEL_HUB_USER_PASSWORD", "password123")
|
|
||||||
defer os.Unsetenv("BESZEL_HUB_USER_EMAIL")
|
|
||||||
defer os.Unsetenv("BESZEL_HUB_USER_PASSWORD")
|
|
||||||
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario := beszelTests.ApiScenario{
|
|
||||||
Name: "POST /create-user - should not be available when USER_EMAIL, USER_PASSWORD are set",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"wasn't found"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
users, err := hub.FindAllRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(users), "Should start with one user")
|
|
||||||
require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should start with one superuser")
|
|
||||||
require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
users, err := hub.FindAllRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(users), "Should still have one user")
|
|
||||||
require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should still have one superuser")
|
|
||||||
require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario.Test(t)
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCreateUserEndpointAvailability(t *testing.T) {
|
|
||||||
t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
// Ensure no users exist
|
|
||||||
userCount, err := hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Zero(t, userCount, "Should start with no users")
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario := beszelTests.ApiScenario{
|
|
||||||
Name: "POST /create-user - should be available when no users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "firstuser@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"User created"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario.Test(t)
|
|
||||||
|
|
||||||
// Verify user was created
|
|
||||||
userCount, err = hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, userCount, "Should have created one user")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("CreateUserEndpoint not available when users exist", func(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
// Create a user first
|
|
||||||
_, err := beszelTests.CreateUser(hub, "existing@example.com", "password")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario := beszelTests.ApiScenario{
|
|
||||||
Name: "POST /create-user - should not be available when users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "another@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"wasn't found"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario.Test(t)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAutoLoginMiddleware(t *testing.T) {
|
|
||||||
var hubs []*beszelTests.TestHub
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
defer os.Unsetenv("AUTO_LOGIN")
|
|
||||||
for _, hub := range hubs {
|
|
||||||
hub.Cleanup()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
os.Setenv("AUTO_LOGIN", "user@test.com")
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
hubs = append(hubs, hub)
|
|
||||||
hub.StartHub()
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - without auto login should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with auto login should fail if no matching user",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with auto login should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.CreateUser(app, "user@test.com", "password123")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTrustedHeaderMiddleware(t *testing.T) {
|
|
||||||
var hubs []*beszelTests.TestHub
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
defer os.Unsetenv("TRUSTED_AUTH_HEADER")
|
|
||||||
for _, hub := range hubs {
|
|
||||||
hub.Cleanup()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
os.Setenv("TRUSTED_AUTH_HEADER", "X-Beszel-Trusted")
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
hubs = append(hubs, hub)
|
|
||||||
hub.StartHub()
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - without trusted header should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with trusted header should fail if no matching user",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"X-Beszel-Trusted": "user@test.com",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with trusted header should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"X-Beszel-Trusted": "user@test.com",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.CreateUser(app, "user@test.com", "password123")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,9 +1,10 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package hub
|
package hub
|
||||||
|
|
||||||
import "github.com/henrygd/beszel/internal/hub/systems"
|
import (
|
||||||
|
"github.com/henrygd/beszel/internal/hub/systems"
|
||||||
|
)
|
||||||
|
|
||||||
// TESTING ONLY: GetSystemManager returns the system manager
|
// TESTING ONLY: GetSystemManager returns the system manager
|
||||||
func (h *Hub) GetSystemManager() *systems.SystemManager {
|
func (h *Hub) GetSystemManager() *systems.SystemManager {
|
||||||
@@ -19,3 +20,7 @@ func (h *Hub) GetPubkey() string {
|
|||||||
func (h *Hub) SetPubkey(pubkey string) {
|
func (h *Hub) SetPubkey(pubkey string) {
|
||||||
h.pubKey = pubkey
|
h.pubKey = pubkey
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (h *Hub) SetCollectionAuthSettings() error {
|
||||||
|
return setCollectionAuthSettings(h)
|
||||||
|
}
|
||||||
|
|||||||
42
internal/hub/server.go
Normal file
42
internal/hub/server.go
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
package hub
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PublicAppInfo defines the structure of the public app information that will be injected into the HTML
|
||||||
|
type PublicAppInfo struct {
|
||||||
|
BASE_PATH string
|
||||||
|
HUB_VERSION string
|
||||||
|
HUB_URL string
|
||||||
|
OAUTH_DISABLE_POPUP bool `json:"OAUTH_DISABLE_POPUP,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// modifyIndexHTML injects the public app information into the index.html content
|
||||||
|
func modifyIndexHTML(hub *Hub, html []byte) string {
|
||||||
|
info := getPublicAppInfo(hub)
|
||||||
|
content, err := json.Marshal(info)
|
||||||
|
if err != nil {
|
||||||
|
return string(html)
|
||||||
|
}
|
||||||
|
htmlContent := strings.ReplaceAll(string(html), "./", info.BASE_PATH)
|
||||||
|
return strings.Replace(htmlContent, "\"{info}\"", string(content), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getPublicAppInfo(hub *Hub) PublicAppInfo {
|
||||||
|
parsedURL, _ := url.Parse(hub.appURL)
|
||||||
|
info := PublicAppInfo{
|
||||||
|
BASE_PATH: strings.TrimSuffix(parsedURL.Path, "/") + "/",
|
||||||
|
HUB_VERSION: beszel.Version,
|
||||||
|
HUB_URL: hub.appURL,
|
||||||
|
}
|
||||||
|
if val, _ := utils.GetEnv("OAUTH_DISABLE_POPUP"); val == "true" {
|
||||||
|
info.OAUTH_DISABLE_POPUP = true
|
||||||
|
}
|
||||||
|
return info
|
||||||
|
}
|
||||||
@@ -5,14 +5,11 @@ package hub
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log/slog"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httputil"
|
"net/http/httputil"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
|
||||||
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
"github.com/pocketbase/pocketbase/tools/osutils"
|
"github.com/pocketbase/pocketbase/tools/osutils"
|
||||||
)
|
)
|
||||||
@@ -39,7 +36,7 @@ func (rm *responseModifier) RoundTrip(req *http.Request) (*http.Response, error)
|
|||||||
}
|
}
|
||||||
resp.Body.Close()
|
resp.Body.Close()
|
||||||
// Create a new response with the modified body
|
// Create a new response with the modified body
|
||||||
modifiedBody := rm.modifyHTML(string(body))
|
modifiedBody := modifyIndexHTML(rm.hub, body)
|
||||||
resp.Body = io.NopCloser(strings.NewReader(modifiedBody))
|
resp.Body = io.NopCloser(strings.NewReader(modifiedBody))
|
||||||
resp.ContentLength = int64(len(modifiedBody))
|
resp.ContentLength = int64(len(modifiedBody))
|
||||||
resp.Header.Set("Content-Length", fmt.Sprintf("%d", len(modifiedBody)))
|
resp.Header.Set("Content-Length", fmt.Sprintf("%d", len(modifiedBody)))
|
||||||
@@ -47,22 +44,8 @@ func (rm *responseModifier) RoundTrip(req *http.Request) (*http.Response, error)
|
|||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rm *responseModifier) modifyHTML(html string) string {
|
|
||||||
parsedURL, err := url.Parse(rm.hub.appURL)
|
|
||||||
if err != nil {
|
|
||||||
return html
|
|
||||||
}
|
|
||||||
// fix base paths in html if using subpath
|
|
||||||
basePath := strings.TrimSuffix(parsedURL.Path, "/") + "/"
|
|
||||||
html = strings.ReplaceAll(html, "./", basePath)
|
|
||||||
html = strings.Replace(html, "{{V}}", beszel.Version, 1)
|
|
||||||
html = strings.Replace(html, "{{HUB_URL}}", rm.hub.appURL, 1)
|
|
||||||
return html
|
|
||||||
}
|
|
||||||
|
|
||||||
// startServer sets up the development server for Beszel
|
// startServer sets up the development server for Beszel
|
||||||
func (h *Hub) startServer(se *core.ServeEvent) error {
|
func (h *Hub) startServer(se *core.ServeEvent) error {
|
||||||
slog.Info("starting server", "appURL", h.appURL)
|
|
||||||
proxy := httputil.NewSingleHostReverseProxy(&url.URL{
|
proxy := httputil.NewSingleHostReverseProxy(&url.URL{
|
||||||
Scheme: "http",
|
Scheme: "http",
|
||||||
Host: "localhost:5173",
|
Host: "localhost:5173",
|
||||||
|
|||||||
@@ -5,10 +5,9 @@ package hub
|
|||||||
import (
|
import (
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
"github.com/henrygd/beszel/internal/site"
|
"github.com/henrygd/beszel/internal/site"
|
||||||
|
|
||||||
"github.com/pocketbase/pocketbase/apis"
|
"github.com/pocketbase/pocketbase/apis"
|
||||||
@@ -17,22 +16,13 @@ import (
|
|||||||
|
|
||||||
// startServer sets up the production server for Beszel
|
// startServer sets up the production server for Beszel
|
||||||
func (h *Hub) startServer(se *core.ServeEvent) error {
|
func (h *Hub) startServer(se *core.ServeEvent) error {
|
||||||
// parse app url
|
|
||||||
parsedURL, err := url.Parse(h.appURL)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// fix base paths in html if using subpath
|
|
||||||
basePath := strings.TrimSuffix(parsedURL.Path, "/") + "/"
|
|
||||||
indexFile, _ := fs.ReadFile(site.DistDirFS, "index.html")
|
indexFile, _ := fs.ReadFile(site.DistDirFS, "index.html")
|
||||||
html := strings.ReplaceAll(string(indexFile), "./", basePath)
|
html := modifyIndexHTML(h, indexFile)
|
||||||
html = strings.Replace(html, "{{V}}", beszel.Version, 1)
|
|
||||||
html = strings.Replace(html, "{{HUB_URL}}", h.appURL, 1)
|
|
||||||
// set up static asset serving
|
// set up static asset serving
|
||||||
staticPaths := [2]string{"/static/", "/assets/"}
|
staticPaths := [2]string{"/static/", "/assets/"}
|
||||||
serveStatic := apis.Static(site.DistDirFS, false)
|
serveStatic := apis.Static(site.DistDirFS, false)
|
||||||
// get CSP configuration
|
// get CSP configuration
|
||||||
csp, cspExists := GetEnv("CSP")
|
csp, cspExists := utils.GetEnv("CSP")
|
||||||
// add route
|
// add route
|
||||||
se.Router.GET("/{path...}", func(e *core.RequestEvent) error {
|
se.Router.GET("/{path...}", func(e *core.RequestEvent) error {
|
||||||
// serve static assets if path is in staticPaths
|
// serve static assets if path is in staticPaths
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ import (
|
|||||||
|
|
||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
"github.com/henrygd/beszel/internal/hub/transport"
|
"github.com/henrygd/beszel/internal/hub/transport"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
"github.com/henrygd/beszel/internal/hub/ws"
|
"github.com/henrygd/beszel/internal/hub/ws"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/container"
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
@@ -48,7 +49,6 @@ type System struct {
|
|||||||
detailsFetched atomic.Bool // True if static system details have been fetched and saved
|
detailsFetched atomic.Bool // True if static system details have been fetched and saved
|
||||||
smartFetching atomic.Bool // True if SMART devices are currently being fetched
|
smartFetching atomic.Bool // True if SMART devices are currently being fetched
|
||||||
smartInterval time.Duration // Interval for periodic SMART data updates
|
smartInterval time.Duration // Interval for periodic SMART data updates
|
||||||
lastSmartFetch atomic.Int64 // Unix milliseconds of last SMART data fetch
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SystemManager) NewSystem(systemId string) *System {
|
func (sm *SystemManager) NewSystem(systemId string) *System {
|
||||||
@@ -134,19 +134,34 @@ func (sys *System) update() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ensure deprecated fields from older agents are migrated to current fields
|
||||||
|
migrateDeprecatedFields(data, !sys.detailsFetched.Load())
|
||||||
|
|
||||||
// create system records
|
// create system records
|
||||||
_, err = sys.createRecords(data)
|
_, err = sys.createRecords(data)
|
||||||
|
|
||||||
|
// if details were included and fetched successfully, mark details as fetched and update smart interval if set by agent
|
||||||
|
if err == nil && data.Details != nil {
|
||||||
|
sys.detailsFetched.Store(true)
|
||||||
|
// update smart interval if it's set on the agent side
|
||||||
|
if data.Details.SmartInterval > 0 {
|
||||||
|
sys.smartInterval = data.Details.SmartInterval
|
||||||
|
sys.manager.hub.Logger().Info("SMART interval updated from agent details", "system", sys.Id, "interval", sys.smartInterval.String())
|
||||||
|
// make sure we reset expiration of lastFetch to remain as long as the new smart interval
|
||||||
|
// to prevent premature expiration leading to new fetch if interval is different.
|
||||||
|
sys.manager.smartFetchMap.UpdateExpiration(sys.Id, sys.smartInterval+time.Minute)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Fetch and save SMART devices when system first comes online or at intervals
|
// Fetch and save SMART devices when system first comes online or at intervals
|
||||||
if backgroundSmartFetchEnabled() {
|
if backgroundSmartFetchEnabled() && sys.detailsFetched.Load() {
|
||||||
if sys.smartInterval <= 0 {
|
if sys.smartInterval <= 0 {
|
||||||
sys.smartInterval = time.Hour
|
sys.smartInterval = time.Hour
|
||||||
}
|
}
|
||||||
lastFetch := sys.lastSmartFetch.Load()
|
if sys.shouldFetchSmart() && sys.smartFetching.CompareAndSwap(false, true) {
|
||||||
if time.Since(time.UnixMilli(lastFetch)) >= sys.smartInterval && sys.smartFetching.CompareAndSwap(false, true) {
|
sys.manager.hub.Logger().Info("SMART fetch", "system", sys.Id, "interval", sys.smartInterval.String())
|
||||||
go func() {
|
go func() {
|
||||||
defer sys.smartFetching.Store(false)
|
defer sys.smartFetching.Store(false)
|
||||||
sys.lastSmartFetch.Store(time.Now().UnixMilli())
|
|
||||||
_ = sys.FetchAndSaveSmartDevices()
|
_ = sys.FetchAndSaveSmartDevices()
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
@@ -170,7 +185,7 @@ func (sys *System) handlePaused() {
|
|||||||
|
|
||||||
// createRecords updates the system record and adds system_stats and container_stats records
|
// createRecords updates the system record and adds system_stats and container_stats records
|
||||||
func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error) {
|
func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error) {
|
||||||
systemRecord, err := sys.getRecord()
|
systemRecord, err := sys.getRecord(sys.manager.hub)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -221,11 +236,6 @@ func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error
|
|||||||
if err := createSystemDetailsRecord(txApp, data.Details, sys.Id); err != nil {
|
if err := createSystemDetailsRecord(txApp, data.Details, sys.Id); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
sys.detailsFetched.Store(true)
|
|
||||||
// update smart interval if it's set on the agent side
|
|
||||||
if data.Details.SmartInterval > 0 {
|
|
||||||
sys.smartInterval = data.Details.SmartInterval
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// update system record (do this last because it triggers alerts and we need above records to be inserted first)
|
// update system record (do this last because it triggers alerts and we need above records to be inserted first)
|
||||||
@@ -309,10 +319,11 @@ func createContainerRecords(app core.App, data []*container.Stats, systemId stri
|
|||||||
valueStrings := make([]string, 0, len(data))
|
valueStrings := make([]string, 0, len(data))
|
||||||
for i, container := range data {
|
for i, container := range data {
|
||||||
suffix := fmt.Sprintf("%d", i)
|
suffix := fmt.Sprintf("%d", i)
|
||||||
valueStrings = append(valueStrings, fmt.Sprintf("({:id%[1]s}, {:system}, {:name%[1]s}, {:image%[1]s}, {:status%[1]s}, {:health%[1]s}, {:cpu%[1]s}, {:memory%[1]s}, {:net%[1]s}, {:updated})", suffix))
|
valueStrings = append(valueStrings, fmt.Sprintf("({:id%[1]s}, {:system}, {:name%[1]s}, {:image%[1]s}, {:ports%[1]s}, {:status%[1]s}, {:health%[1]s}, {:cpu%[1]s}, {:memory%[1]s}, {:net%[1]s}, {:updated})", suffix))
|
||||||
params["id"+suffix] = container.Id
|
params["id"+suffix] = container.Id
|
||||||
params["name"+suffix] = container.Name
|
params["name"+suffix] = container.Name
|
||||||
params["image"+suffix] = container.Image
|
params["image"+suffix] = container.Image
|
||||||
|
params["ports"+suffix] = container.Ports
|
||||||
params["status"+suffix] = container.Status
|
params["status"+suffix] = container.Status
|
||||||
params["health"+suffix] = container.Health
|
params["health"+suffix] = container.Health
|
||||||
params["cpu"+suffix] = container.Cpu
|
params["cpu"+suffix] = container.Cpu
|
||||||
@@ -324,7 +335,7 @@ func createContainerRecords(app core.App, data []*container.Stats, systemId stri
|
|||||||
params["net"+suffix] = netBytes
|
params["net"+suffix] = netBytes
|
||||||
}
|
}
|
||||||
queryString := fmt.Sprintf(
|
queryString := fmt.Sprintf(
|
||||||
"INSERT INTO containers (id, system, name, image, status, health, cpu, memory, net, updated) VALUES %s ON CONFLICT(id) DO UPDATE SET system = excluded.system, name = excluded.name, image = excluded.image, status = excluded.status, health = excluded.health, cpu = excluded.cpu, memory = excluded.memory, net = excluded.net, updated = excluded.updated",
|
"INSERT INTO containers (id, system, name, image, ports, status, health, cpu, memory, net, updated) VALUES %s ON CONFLICT(id) DO UPDATE SET system = excluded.system, name = excluded.name, image = excluded.image, ports = excluded.ports, status = excluded.status, health = excluded.health, cpu = excluded.cpu, memory = excluded.memory, net = excluded.net, updated = excluded.updated",
|
||||||
strings.Join(valueStrings, ","),
|
strings.Join(valueStrings, ","),
|
||||||
)
|
)
|
||||||
_, err := app.DB().NewQuery(queryString).Bind(params).Execute()
|
_, err := app.DB().NewQuery(queryString).Bind(params).Execute()
|
||||||
@@ -333,8 +344,8 @@ func createContainerRecords(app core.App, data []*container.Stats, systemId stri
|
|||||||
|
|
||||||
// getRecord retrieves the system record from the database.
|
// getRecord retrieves the system record from the database.
|
||||||
// If the record is not found, it removes the system from the manager.
|
// If the record is not found, it removes the system from the manager.
|
||||||
func (sys *System) getRecord() (*core.Record, error) {
|
func (sys *System) getRecord(app core.App) (*core.Record, error) {
|
||||||
record, err := sys.manager.hub.FindRecordById("systems", sys.Id)
|
record, err := app.FindRecordById("systems", sys.Id)
|
||||||
if err != nil || record == nil {
|
if err != nil || record == nil {
|
||||||
_ = sys.manager.RemoveSystem(sys.Id)
|
_ = sys.manager.RemoveSystem(sys.Id)
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -342,6 +353,27 @@ func (sys *System) getRecord() (*core.Record, error) {
|
|||||||
return record, nil
|
return record, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasUser checks if the given user is in the system's users list.
|
||||||
|
// Returns true if SHARE_ALL_SYSTEMS is enabled (any authenticated user can access any system).
|
||||||
|
func (sys *System) HasUser(app core.App, user *core.Record) bool {
|
||||||
|
if user == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if v, _ := utils.GetEnv("SHARE_ALL_SYSTEMS"); v == "true" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
var recordData = struct {
|
||||||
|
Users string
|
||||||
|
}{}
|
||||||
|
err := app.DB().NewQuery("SELECT users FROM systems WHERE id={:id}").
|
||||||
|
Bind(dbx.Params{"id": sys.Id}).
|
||||||
|
One(&recordData)
|
||||||
|
if err != nil || recordData.Users == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return strings.Contains(recordData.Users, user.Id)
|
||||||
|
}
|
||||||
|
|
||||||
// setDown marks a system as down in the database.
|
// setDown marks a system as down in the database.
|
||||||
// It takes the original error that caused the system to go down and returns any error
|
// It takes the original error that caused the system to go down and returns any error
|
||||||
// encountered during the process of updating the system status.
|
// encountered during the process of updating the system status.
|
||||||
@@ -349,7 +381,7 @@ func (sys *System) setDown(originalError error) error {
|
|||||||
if sys.Status == down || sys.Status == paused {
|
if sys.Status == down || sys.Status == paused {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
record, err := sys.getRecord()
|
record, err := sys.getRecord(sys.manager.hub)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -633,6 +665,7 @@ func (s *System) createSSHClient() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
s.agentVersion, _ = extractAgentVersion(string(s.client.Conn.ServerVersion()))
|
s.agentVersion, _ = extractAgentVersion(string(s.client.Conn.ServerVersion()))
|
||||||
|
s.manager.resetFailedSmartFetchState(s.Id)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -703,3 +736,50 @@ func getJitter() <-chan time.Time {
|
|||||||
msDelay := (interval * minPercent / 100) + rand.Intn(interval*jitterRange/100)
|
msDelay := (interval * minPercent / 100) + rand.Intn(interval*jitterRange/100)
|
||||||
return time.After(time.Duration(msDelay) * time.Millisecond)
|
return time.After(time.Duration(msDelay) * time.Millisecond)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// migrateDeprecatedFields moves values from deprecated fields to their new locations if the new
|
||||||
|
// fields are not already populated. Deprecated fields and refs may be removed at least 30 days
|
||||||
|
// and one minor version release after the release that includes the migration.
|
||||||
|
//
|
||||||
|
// This is run when processing incoming system data from agents, which may be on older versions.
|
||||||
|
func migrateDeprecatedFields(cd *system.CombinedData, createDetails bool) {
|
||||||
|
// migration added 0.19.0
|
||||||
|
if cd.Stats.Bandwidth[0] == 0 && cd.Stats.Bandwidth[1] == 0 {
|
||||||
|
cd.Stats.Bandwidth[0] = uint64(cd.Stats.NetworkSent * 1024 * 1024)
|
||||||
|
cd.Stats.Bandwidth[1] = uint64(cd.Stats.NetworkRecv * 1024 * 1024)
|
||||||
|
cd.Stats.NetworkSent, cd.Stats.NetworkRecv = 0, 0
|
||||||
|
}
|
||||||
|
// migration added 0.19.0
|
||||||
|
if cd.Info.BandwidthBytes == 0 {
|
||||||
|
cd.Info.BandwidthBytes = uint64(cd.Info.Bandwidth * 1024 * 1024)
|
||||||
|
cd.Info.Bandwidth = 0
|
||||||
|
}
|
||||||
|
// migration added 0.19.0
|
||||||
|
if cd.Stats.DiskIO[0] == 0 && cd.Stats.DiskIO[1] == 0 {
|
||||||
|
cd.Stats.DiskIO[0] = uint64(cd.Stats.DiskReadPs * 1024 * 1024)
|
||||||
|
cd.Stats.DiskIO[1] = uint64(cd.Stats.DiskWritePs * 1024 * 1024)
|
||||||
|
cd.Stats.DiskReadPs, cd.Stats.DiskWritePs = 0, 0
|
||||||
|
}
|
||||||
|
// migration added 0.19.0 - Move deprecated Info fields to Details struct
|
||||||
|
if cd.Details == nil && cd.Info.Hostname != "" {
|
||||||
|
if createDetails {
|
||||||
|
cd.Details = &system.Details{
|
||||||
|
Hostname: cd.Info.Hostname,
|
||||||
|
Kernel: cd.Info.KernelVersion,
|
||||||
|
Cores: cd.Info.Cores,
|
||||||
|
Threads: cd.Info.Threads,
|
||||||
|
CpuModel: cd.Info.CpuModel,
|
||||||
|
Podman: cd.Info.Podman,
|
||||||
|
Os: cd.Info.Os,
|
||||||
|
MemoryTotal: uint64(cd.Stats.Mem * 1024 * 1024 * 1024),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// zero the deprecated fields to prevent saving them in systems.info DB json payload
|
||||||
|
cd.Info.Hostname = ""
|
||||||
|
cd.Info.KernelVersion = ""
|
||||||
|
cd.Info.Cores = 0
|
||||||
|
cd.Info.CpuModel = ""
|
||||||
|
cd.Info.Podman = false
|
||||||
|
cd.Info.Os = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user