mirror of
https://github.com/henrygd/beszel.git
synced 2026-04-25 14:01:49 +02:00
Compare commits
92 Commits
v0.18.5
...
dev-probes
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9f7c1b22bb | ||
|
|
0d440e5fb9 | ||
|
|
5fc774666f | ||
|
|
8f03cbf11c | ||
|
|
1c5808f430 | ||
|
|
a35cc6ef39 | ||
|
|
16e0f6c4a2 | ||
|
|
6472af1ba4 | ||
|
|
e931165566 | ||
|
|
48fe407292 | ||
|
|
a95376b4a2 | ||
|
|
732983493a | ||
|
|
264b17f429 | ||
|
|
cef5ab10a5 | ||
|
|
3a881e1d5e | ||
|
|
209bb4ebb4 | ||
|
|
e71ffd4d2a | ||
|
|
ea19ef6334 | ||
|
|
40da2b4358 | ||
|
|
d0d5912d85 | ||
|
|
4162186ae0 | ||
|
|
a71617e058 | ||
|
|
578ba985e9 | ||
|
|
e5507fa106 | ||
|
|
a024c3cfd0 | ||
|
|
07466804e7 | ||
|
|
981c788d6f | ||
|
|
f5576759de | ||
|
|
be0b708064 | ||
|
|
485830452e | ||
|
|
2fd00cd0b5 | ||
|
|
853a294157 | ||
|
|
aa9ab49654 | ||
|
|
9a5959b57e | ||
|
|
50f8548479 | ||
|
|
bc0581ea61 | ||
|
|
ab3a3de46c | ||
|
|
1556e53926 | ||
|
|
e3ade3aeb8 | ||
|
|
b013f06956 | ||
|
|
fab5e8a656 | ||
|
|
3a0896e57e | ||
|
|
7fdc403470 | ||
|
|
e833d44c43 | ||
|
|
77dd4bdaf5 | ||
|
|
ecba63c4bb | ||
|
|
f9feaf5343 | ||
|
|
ddf5e925c8 | ||
|
|
865e6db90f | ||
|
|
a42d899e64 | ||
|
|
3eaf12a7d5 | ||
|
|
3793b27958 | ||
|
|
5b02158228 | ||
|
|
0ae8c42ae0 | ||
|
|
ea80f3c5a2 | ||
|
|
c3dffff5e4 | ||
|
|
06fdd0e7a8 | ||
|
|
6e3fd90834 | ||
|
|
5ab82183fa | ||
|
|
a68e02ca84 | ||
|
|
0f2e16c63c | ||
|
|
c4009f2b43 | ||
|
|
ef0c1420d1 | ||
|
|
eb9a8e1ef9 | ||
|
|
6b5e6ffa9a | ||
|
|
d656036d3b | ||
|
|
80b73c7faf | ||
|
|
afe9eb7a70 | ||
|
|
7f565a3086 | ||
|
|
77862d4cb1 | ||
|
|
e158a9001b | ||
|
|
f670e868e4 | ||
|
|
0fff699bf6 | ||
|
|
ba10da1b9f | ||
|
|
7f4f14b505 | ||
|
|
2fda4ff264 | ||
|
|
20b0b40ec8 | ||
|
|
d548a012b4 | ||
|
|
ce5d1217dd | ||
|
|
cef09d7cb1 | ||
|
|
f6440acb43 | ||
|
|
5463a38f0f | ||
|
|
80135fdad3 | ||
|
|
5db4eb4346 | ||
|
|
f6c5e2928a | ||
|
|
6a207c33fa | ||
|
|
9f19afccde | ||
|
|
f25f2469e3 | ||
|
|
5bd43ed461 | ||
|
|
afdc3f7779 | ||
|
|
a227c77526 | ||
|
|
8202d746af |
@@ -19,6 +19,8 @@ import (
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
const defaultDataCacheTimeMs uint16 = 60_000
|
||||
|
||||
type Agent struct {
|
||||
sync.Mutex // Used to lock agent while collecting data
|
||||
debug bool // true if LOG_LEVEL is set to debug
|
||||
@@ -36,6 +38,7 @@ type Agent struct {
|
||||
sensorConfig *SensorConfig // Sensors config
|
||||
systemInfo system.Info // Host system info (dynamic)
|
||||
systemDetails system.Details // Host system details (static, once-per-connection)
|
||||
detailsDirty bool // Whether system details have changed and need to be resent
|
||||
gpuManager *GPUManager // Manages GPU data
|
||||
cache *systemDataCache // Cache for system stats based on cache time
|
||||
connectionManager *ConnectionManager // Channel to signal connection events
|
||||
@@ -45,6 +48,7 @@ type Agent struct {
|
||||
keys []gossh.PublicKey // SSH public keys
|
||||
smartManager *SmartManager // Manages SMART data
|
||||
systemdManager *systemdManager // Manages systemd services
|
||||
probeManager *ProbeManager // Manages network probes
|
||||
}
|
||||
|
||||
// NewAgent creates a new agent with the given data directory for persisting data.
|
||||
@@ -97,7 +101,7 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
||||
slog.Debug(beszel.Version)
|
||||
|
||||
// initialize docker manager
|
||||
agent.dockerManager = newDockerManager()
|
||||
agent.dockerManager = newDockerManager(agent)
|
||||
|
||||
// initialize system info
|
||||
agent.refreshSystemDetails()
|
||||
@@ -118,6 +122,9 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
||||
// initialize handler registry
|
||||
agent.handlerRegistry = NewHandlerRegistry()
|
||||
|
||||
// initialize probe manager
|
||||
agent.probeManager = newProbeManager()
|
||||
|
||||
// initialize disk info
|
||||
agent.initializeDiskInfo()
|
||||
|
||||
@@ -142,7 +149,7 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
||||
|
||||
// if debugging, print stats
|
||||
if agent.debug {
|
||||
slog.Debug("Stats", "data", agent.gatherStats(common.DataRequestOptions{CacheTimeMs: 60_000, IncludeDetails: true}))
|
||||
slog.Debug("Stats", "data", agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs, IncludeDetails: true}))
|
||||
}
|
||||
|
||||
return agent, nil
|
||||
@@ -164,11 +171,6 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
|
||||
Info: a.systemInfo,
|
||||
}
|
||||
|
||||
// Include static system details only when requested
|
||||
if options.IncludeDetails {
|
||||
data.Details = &a.systemDetails
|
||||
}
|
||||
|
||||
// slog.Info("System data", "data", data, "cacheTimeMs", cacheTimeMs)
|
||||
|
||||
if a.dockerManager != nil {
|
||||
@@ -180,8 +182,13 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
|
||||
}
|
||||
}
|
||||
|
||||
if a.probeManager != nil {
|
||||
data.Probes = a.probeManager.GetResults(cacheTimeMs)
|
||||
slog.Debug("Probes", "data", data.Probes)
|
||||
}
|
||||
|
||||
// skip updating systemd services if cache time is not the default 60sec interval
|
||||
if a.systemdManager != nil && cacheTimeMs == 60_000 {
|
||||
if a.systemdManager != nil && cacheTimeMs == defaultDataCacheTimeMs {
|
||||
totalCount := uint16(a.systemdManager.getServiceStatsCount())
|
||||
if totalCount > 0 {
|
||||
numFailed := a.systemdManager.getFailedServiceCount()
|
||||
@@ -212,7 +219,8 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
|
||||
slog.Debug("Extra FS", "data", data.Stats.ExtraFs)
|
||||
|
||||
a.cache.Set(data, cacheTimeMs)
|
||||
return data
|
||||
|
||||
return a.attachSystemDetails(data, cacheTimeMs, options.IncludeDetails)
|
||||
}
|
||||
|
||||
// Start initializes and starts the agent with optional WebSocket connection
|
||||
|
||||
@@ -1,84 +1,11 @@
|
||||
//go:build !freebsd
|
||||
|
||||
// Package battery provides functions to check if the system has a battery and to get the battery stats.
|
||||
// Package battery provides functions to check if the system has a battery and return the charge state and percentage.
|
||||
package battery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"math"
|
||||
|
||||
"github.com/distatus/battery"
|
||||
const (
|
||||
stateUnknown uint8 = iota
|
||||
stateEmpty
|
||||
stateFull
|
||||
stateCharging
|
||||
stateDischarging
|
||||
stateIdle
|
||||
)
|
||||
|
||||
var (
|
||||
systemHasBattery = false
|
||||
haveCheckedBattery = false
|
||||
)
|
||||
|
||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||
func HasReadableBattery() bool {
|
||||
if haveCheckedBattery {
|
||||
return systemHasBattery
|
||||
}
|
||||
haveCheckedBattery = true
|
||||
batteries, err := battery.GetAll()
|
||||
for _, bat := range batteries {
|
||||
if bat != nil && (bat.Full > 0 || bat.Design > 0) {
|
||||
systemHasBattery = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !systemHasBattery {
|
||||
slog.Debug("No battery found", "err", err)
|
||||
}
|
||||
return systemHasBattery
|
||||
}
|
||||
|
||||
// GetBatteryStats returns the current battery percent and charge state
|
||||
// percent = (current charge of all batteries) / (sum of designed/full capacity of all batteries)
|
||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||
if !HasReadableBattery() {
|
||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||
}
|
||||
batteries, err := battery.GetAll()
|
||||
// we'll handle errors later by skipping batteries with errors, rather
|
||||
// than skipping everything because of the presence of some errors.
|
||||
if len(batteries) == 0 {
|
||||
return batteryPercent, batteryState, errors.New("no batteries")
|
||||
}
|
||||
|
||||
totalCapacity := float64(0)
|
||||
totalCharge := float64(0)
|
||||
errs, partialErrs := err.(battery.Errors)
|
||||
|
||||
batteryState = math.MaxUint8
|
||||
|
||||
for i, bat := range batteries {
|
||||
if partialErrs && errs[i] != nil {
|
||||
// if there were some errors, like missing data, skip it
|
||||
continue
|
||||
}
|
||||
if bat == nil || bat.Full == 0 {
|
||||
// skip batteries with no capacity. Charge is unlikely to ever be zero, but
|
||||
// we can't guarantee that, so don't skip based on charge.
|
||||
continue
|
||||
}
|
||||
totalCapacity += bat.Full
|
||||
totalCharge += min(bat.Current, bat.Full)
|
||||
if bat.State.Raw >= 0 {
|
||||
batteryState = uint8(bat.State.Raw)
|
||||
}
|
||||
}
|
||||
|
||||
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
||||
// for macs there's sometimes a ghost battery with 0 capacity
|
||||
// https://github.com/distatus/battery/issues/34
|
||||
// Instead of skipping over those batteries, we'll check for total 0 capacity
|
||||
// and return an error. This also prevents a divide by zero.
|
||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||
}
|
||||
|
||||
batteryPercent = uint8(totalCharge / totalCapacity * 100)
|
||||
return batteryPercent, batteryState, nil
|
||||
}
|
||||
|
||||
96
agent/battery/battery_darwin.go
Normal file
96
agent/battery/battery_darwin.go
Normal file
@@ -0,0 +1,96 @@
|
||||
//go:build darwin
|
||||
|
||||
package battery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"math"
|
||||
"os/exec"
|
||||
"sync"
|
||||
|
||||
"howett.net/plist"
|
||||
)
|
||||
|
||||
// macBattery mirrors the subset of AppleSmartBattery properties we read
// from the ioreg plist output (see readMacBatteries).
type macBattery struct {
	CurrentCapacity   int  `plist:"CurrentCapacity"`   // current charge, same unit as MaxCapacity
	MaxCapacity       int  `plist:"MaxCapacity"`       // full-charge capacity; 0 means no usable battery
	FullyCharged      bool `plist:"FullyCharged"`      // true once charging has completed
	IsCharging        bool `plist:"IsCharging"`        // true while actively charging
	ExternalConnected bool `plist:"ExternalConnected"` // true when on external (AC) power
}
|
||||
|
||||
func readMacBatteries() ([]macBattery, error) {
|
||||
out, err := exec.Command("ioreg", "-n", "AppleSmartBattery", "-r", "-a").Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(out) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var batteries []macBattery
|
||||
if _, err := plist.Unmarshal(out, &batteries); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return batteries, nil
|
||||
}
|
||||
|
||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||
var HasReadableBattery = sync.OnceValue(func() bool {
|
||||
systemHasBattery := false
|
||||
batteries, err := readMacBatteries()
|
||||
slog.Debug("Batteries", "batteries", batteries, "err", err)
|
||||
for _, bat := range batteries {
|
||||
if bat.MaxCapacity > 0 {
|
||||
systemHasBattery = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return systemHasBattery
|
||||
})
|
||||
|
||||
// GetBatteryStats returns the current battery percent and charge state.
|
||||
// Uses CurrentCapacity/MaxCapacity to match the value macOS displays.
|
||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||
if !HasReadableBattery() {
|
||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||
}
|
||||
batteries, err := readMacBatteries()
|
||||
if len(batteries) == 0 {
|
||||
return batteryPercent, batteryState, errors.New("no batteries")
|
||||
}
|
||||
|
||||
totalCapacity := 0
|
||||
totalCharge := 0
|
||||
batteryState = math.MaxUint8
|
||||
|
||||
for _, bat := range batteries {
|
||||
if bat.MaxCapacity == 0 {
|
||||
// skip ghost batteries with 0 capacity
|
||||
// https://github.com/distatus/battery/issues/34
|
||||
continue
|
||||
}
|
||||
totalCapacity += bat.MaxCapacity
|
||||
totalCharge += min(bat.CurrentCapacity, bat.MaxCapacity)
|
||||
|
||||
switch {
|
||||
case !bat.ExternalConnected:
|
||||
batteryState = stateDischarging
|
||||
case bat.IsCharging:
|
||||
batteryState = stateCharging
|
||||
case bat.CurrentCapacity == 0:
|
||||
batteryState = stateEmpty
|
||||
case !bat.FullyCharged:
|
||||
batteryState = stateIdle
|
||||
default:
|
||||
batteryState = stateFull
|
||||
}
|
||||
}
|
||||
|
||||
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||
}
|
||||
|
||||
batteryPercent = uint8(float64(totalCharge) / float64(totalCapacity) * 100)
|
||||
return batteryPercent, batteryState, nil
|
||||
}
|
||||
120
agent/battery/battery_linux.go
Normal file
120
agent/battery/battery_linux.go
Normal file
@@ -0,0 +1,120 @@
|
||||
//go:build linux
|
||||
|
||||
package battery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/henrygd/beszel/agent/utils"
|
||||
)
|
||||
|
||||
// getBatteryPaths returns the paths of all batteries in /sys/class/power_supply.
// Cached via sync.OnceValues; reinstalled by resetBatteryState.
var getBatteryPaths func() ([]string, error)

// HasReadableBattery checks if the system has a battery and returns true if it does.
// Cached via sync.OnceValue; reinstalled by resetBatteryState.
var HasReadableBattery func() bool

func init() {
	// production default: the real kernel power-supply tree
	resetBatteryState("/sys/class/power_supply")
}

// resetBatteryState (re)installs the cached lookup functions against the
// given power-supply directory. Tests call this with a temp dir so the
// sync.Once caches are rebuilt against the fake tree.
func resetBatteryState(sysfsPowerSupplyPath string) {
	getBatteryPaths = sync.OnceValues(func() ([]string, error) {
		entries, err := os.ReadDir(sysfsPowerSupplyPath)
		if err != nil {
			return nil, err
		}
		var paths []string
		for _, e := range entries {
			path := filepath.Join(sysfsPowerSupplyPath, e.Name())
			// only entries whose "type" file says Battery (skips Mains/USB)
			if utils.ReadStringFile(filepath.Join(path, "type")) == "Battery" {
				paths = append(paths, path)
			}
		}
		return paths, nil
	})
	HasReadableBattery = sync.OnceValue(func() bool {
		systemHasBattery := false
		paths, err := getBatteryPaths()
		for _, path := range paths {
			// a battery is "readable" only if its capacity file exists
			if _, ok := utils.ReadStringFileOK(filepath.Join(path, "capacity")); ok {
				systemHasBattery = true
				break
			}
		}
		if !systemHasBattery {
			slog.Debug("No battery found", "err", err)
		}
		return systemHasBattery
	})
}
|
||||
|
||||
func parseSysfsState(status string) uint8 {
|
||||
switch status {
|
||||
case "Empty":
|
||||
return stateEmpty
|
||||
case "Full":
|
||||
return stateFull
|
||||
case "Charging":
|
||||
return stateCharging
|
||||
case "Discharging":
|
||||
return stateDischarging
|
||||
case "Not charging":
|
||||
return stateIdle
|
||||
default:
|
||||
return stateUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// GetBatteryStats returns the current battery percent and charge state.
|
||||
// Reads /sys/class/power_supply/*/capacity directly so the kernel-reported
|
||||
// value is used, which is always 0-100 and matches what the OS displays.
|
||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||
if !HasReadableBattery() {
|
||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||
}
|
||||
paths, err := getBatteryPaths()
|
||||
if err != nil {
|
||||
return batteryPercent, batteryState, err
|
||||
}
|
||||
if len(paths) == 0 {
|
||||
return batteryPercent, batteryState, errors.New("no batteries")
|
||||
}
|
||||
|
||||
batteryState = math.MaxUint8
|
||||
totalPercent := 0
|
||||
count := 0
|
||||
|
||||
for _, path := range paths {
|
||||
capStr, ok := utils.ReadStringFileOK(filepath.Join(path, "capacity"))
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
cap, parseErr := strconv.Atoi(capStr)
|
||||
if parseErr != nil {
|
||||
continue
|
||||
}
|
||||
totalPercent += cap
|
||||
count++
|
||||
|
||||
state := parseSysfsState(utils.ReadStringFile(filepath.Join(path, "status")))
|
||||
if state != stateUnknown {
|
||||
batteryState = state
|
||||
}
|
||||
}
|
||||
|
||||
if count == 0 || batteryState == math.MaxUint8 {
|
||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||
}
|
||||
|
||||
batteryPercent = uint8(totalPercent / count)
|
||||
return batteryPercent, batteryState, nil
|
||||
}
|
||||
201
agent/battery/battery_linux_test.go
Normal file
201
agent/battery/battery_linux_test.go
Normal file
@@ -0,0 +1,201 @@
|
||||
//go:build testing && linux
|
||||
|
||||
package battery
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// setupFakeSysfs creates a temporary sysfs-like tree under t.TempDir() and
// points the package's cached battery lookups at it via resetBatteryState.
// Returns the tree root and a helper that creates one battery directory.
// NOTE(review): nothing restores the real /sys path afterwards — the next
// test is expected to call setupFakeSysfs again before touching the package.
func setupFakeSysfs(t *testing.T) (tmpDir string, addBattery func(name, capacity, status string)) {
	t.Helper()

	tmp := t.TempDir()
	resetBatteryState(tmp)

	// write creates parent directories as needed and fails the test on error.
	write := func(path, content string) {
		t.Helper()
		dir := filepath.Dir(path)
		if err := os.MkdirAll(dir, 0o755); err != nil {
			t.Fatal(err)
		}
		if err := os.WriteFile(path, []byte(content), 0o644); err != nil {
			t.Fatal(err)
		}
	}

	// addBattery lays out the three files the linux implementation reads.
	addBattery = func(name, capacity, status string) {
		t.Helper()
		batDir := filepath.Join(tmp, name)
		write(filepath.Join(batDir, "type"), "Battery")
		write(filepath.Join(batDir, "capacity"), capacity)
		write(filepath.Join(batDir, "status"), status)
	}

	return tmp, addBattery
}
|
||||
|
||||
// TestParseSysfsState checks that every recognized sysfs status string maps
// to its state constant and that anything else falls back to stateUnknown.
func TestParseSysfsState(t *testing.T) {
	tests := []struct {
		input string
		want  uint8
	}{
		{"Empty", stateEmpty},
		{"Full", stateFull},
		{"Charging", stateCharging},
		{"Discharging", stateDischarging},
		{"Not charging", stateIdle},
		{"", stateUnknown},
		{"SomethingElse", stateUnknown},
	}
	for _, tt := range tests {
		assert.Equal(t, tt.want, parseSysfsState(tt.input), "parseSysfsState(%q)", tt.input)
	}
}
|
||||
|
||||
// A single battery's kernel-reported percent is passed through unchanged.
func TestGetBatteryStats_SingleBattery(t *testing.T) {
	_, addBattery := setupFakeSysfs(t)
	addBattery("BAT0", "72", "Discharging")

	pct, state, err := GetBatteryStats()
	assert.NoError(t, err)
	assert.Equal(t, uint8(72), pct)
	assert.Equal(t, stateDischarging, state)
}
|
||||
|
||||
// Multiple batteries are combined by averaging their percentages.
func TestGetBatteryStats_MultipleBatteries(t *testing.T) {
	_, addBattery := setupFakeSysfs(t)
	addBattery("BAT0", "80", "Charging")
	addBattery("BAT1", "40", "Charging")

	pct, state, err := GetBatteryStats()
	assert.NoError(t, err)
	// average of 80 and 40 = 60
	assert.EqualValues(t, 60, pct)
	assert.Equal(t, stateCharging, state)
}
|
||||
|
||||
// A "Full" status yields 100% and stateFull.
func TestGetBatteryStats_FullBattery(t *testing.T) {
	_, addBattery := setupFakeSysfs(t)
	addBattery("BAT0", "100", "Full")

	pct, state, err := GetBatteryStats()
	assert.NoError(t, err)
	assert.Equal(t, uint8(100), pct)
	assert.Equal(t, stateFull, state)
}
|
||||
|
||||
// Zero capacity with an "Empty" status is valid (not an error) on linux,
// since percent comes straight from the capacity file.
func TestGetBatteryStats_EmptyBattery(t *testing.T) {
	_, addBattery := setupFakeSysfs(t)
	addBattery("BAT0", "0", "Empty")

	pct, state, err := GetBatteryStats()
	assert.NoError(t, err)
	assert.Equal(t, uint8(0), pct)
	assert.Equal(t, stateEmpty, state)
}
|
||||
|
||||
// The kernel's "Not charging" status maps to stateIdle.
func TestGetBatteryStats_NotCharging(t *testing.T) {
	_, addBattery := setupFakeSysfs(t)
	addBattery("BAT0", "80", "Not charging")

	pct, state, err := GetBatteryStats()
	assert.NoError(t, err)
	assert.Equal(t, uint8(80), pct)
	assert.Equal(t, stateIdle, state)
}
|
||||
|
||||
// An empty power-supply directory must produce an error, not zero values.
func TestGetBatteryStats_NoBatteries(t *testing.T) {
	setupFakeSysfs(t) // empty directory, no batteries

	_, _, err := GetBatteryStats()
	assert.Error(t, err)
}
|
||||
|
||||
// Power-supply entries whose type is not "Battery" (e.g. AC adapters)
// must not contribute to the result.
func TestGetBatteryStats_NonBatterySupplyIgnored(t *testing.T) {
	tmp, addBattery := setupFakeSysfs(t)

	// Add a real battery
	addBattery("BAT0", "55", "Charging")

	// Add an AC adapter (type != Battery) - should be ignored
	acDir := filepath.Join(tmp, "AC0")
	if err := os.MkdirAll(acDir, 0o755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(acDir, "type"), []byte("Mains"), 0o644); err != nil {
		t.Fatal(err)
	}

	pct, state, err := GetBatteryStats()
	assert.NoError(t, err)
	assert.Equal(t, uint8(55), pct)
	assert.Equal(t, stateCharging, state)
}
|
||||
|
||||
// A battery whose capacity file is not a number is skipped; the remaining
// battery alone determines the result.
func TestGetBatteryStats_InvalidCapacitySkipped(t *testing.T) {
	tmp, addBattery := setupFakeSysfs(t)

	// One battery with valid capacity
	addBattery("BAT0", "90", "Discharging")

	// Another with invalid capacity text
	badDir := filepath.Join(tmp, "BAT1")
	if err := os.MkdirAll(badDir, 0o755); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(badDir, "type"), []byte("Battery"), 0o644); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(badDir, "capacity"), []byte("not-a-number"), 0o644); err != nil {
		t.Fatal(err)
	}
	if err := os.WriteFile(filepath.Join(badDir, "status"), []byte("Discharging"), 0o644); err != nil {
		t.Fatal(err)
	}

	pct, _, err := GetBatteryStats()
	assert.NoError(t, err)
	// Only BAT0 counted
	assert.Equal(t, uint8(90), pct)
}
|
||||
|
||||
// If no battery reports a recognizable status, GetBatteryStats errors out
// (batteryState never moves off its MaxUint8 sentinel).
func TestGetBatteryStats_UnknownStatusOnly(t *testing.T) {
	_, addBattery := setupFakeSysfs(t)
	addBattery("BAT0", "50", "SomethingWeird")

	_, _, err := GetBatteryStats()
	assert.Error(t, err)
}
|
||||
|
||||
// A battery directory with a capacity file makes HasReadableBattery true.
func TestHasReadableBattery_True(t *testing.T) {
	_, addBattery := setupFakeSysfs(t)
	addBattery("BAT0", "50", "Charging")

	assert.True(t, HasReadableBattery())
}
|
||||
|
||||
// An empty power-supply tree makes HasReadableBattery false.
func TestHasReadableBattery_False(t *testing.T) {
	setupFakeSysfs(t) // no batteries

	assert.False(t, HasReadableBattery())
}
|
||||
|
||||
// A battery-typed entry without a capacity file is not "readable".
func TestHasReadableBattery_NoCapacityFile(t *testing.T) {
	tmp, _ := setupFakeSysfs(t)

	// Battery dir with type file but no capacity file
	batDir := filepath.Join(tmp, "BAT0")
	err := os.MkdirAll(batDir, 0o755)
	assert.NoError(t, err)
	err = os.WriteFile(filepath.Join(batDir, "type"), []byte("Battery"), 0o644)
	assert.NoError(t, err)

	assert.False(t, HasReadableBattery())
}
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build freebsd
|
||||
//go:build !darwin && !linux && !windows
|
||||
|
||||
package battery
|
||||
|
||||
298
agent/battery/battery_windows.go
Normal file
298
agent/battery/battery_windows.go
Normal file
@@ -0,0 +1,298 @@
|
||||
//go:build windows
|
||||
|
||||
// Most of the Windows battery code is based on
|
||||
// distatus/battery by Karol 'Kenji Takahashi' Woźniak
|
||||
|
||||
package battery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"math"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// batteryQueryInformation mirrors the Win32 BATTERY_QUERY_INFORMATION
// struct passed to IOCTL_BATTERY_QUERY_INFORMATION.
type batteryQueryInformation struct {
	BatteryTag       uint32
	InformationLevel int32
	AtRate           int32
}

// batteryInformation mirrors the Win32 BATTERY_INFORMATION struct; only
// FullChargedCapacity is consumed by this package.
type batteryInformation struct {
	Capabilities        uint32
	Technology          uint8
	Reserved            [3]uint8
	Chemistry           [4]uint8
	DesignedCapacity    uint32
	FullChargedCapacity uint32
	DefaultAlert1       uint32
	DefaultAlert2       uint32
	CriticalBias        uint32
	CycleCount          uint32
}

// batteryWaitStatus mirrors the Win32 BATTERY_WAIT_STATUS struct, the
// input of IOCTL_BATTERY_QUERY_STATUS.
type batteryWaitStatus struct {
	BatteryTag   uint32
	Timeout      uint32
	PowerState   uint32
	LowCapacity  uint32
	HighCapacity uint32
}

// batteryStatus mirrors the Win32 BATTERY_STATUS struct returned by
// IOCTL_BATTERY_QUERY_STATUS.
type batteryStatus struct {
	PowerState uint32
	Capacity   uint32
	Voltage    uint32
	Rate       int32
}

// winGUID matches the memory layout of the Win32 GUID type.
type winGUID struct {
	Data1 uint32
	Data2 uint16
	Data3 uint16
	Data4 [8]byte
}

// spDeviceInterfaceData mirrors SP_DEVICE_INTERFACE_DATA from setupapi.h;
// cbSize must be set to the struct size before the first SetupDi call
// (winBatteryGet does this).
type spDeviceInterfaceData struct {
	cbSize             uint32
	InterfaceClassGuid winGUID
	Flags              uint32
	Reserved           uint
}
|
||||
|
||||
// guidDeviceBattery is GUID_DEVICE_BATTERY
// ({72631E54-78A4-11D0-BCF7-00AA00B7B32A}), the device interface class
// used to enumerate batteries via setupapi.
var guidDeviceBattery = winGUID{
	0x72631e54,
	0x78A4,
	0x11d0,
	[8]byte{0xbc, 0xf7, 0x00, 0xaa, 0x00, 0xb7, 0xb3, 0x2a},
}

// Lazily loaded setupapi.dll procedures used for device enumeration.
var (
	setupapi                         = &windows.LazyDLL{Name: "setupapi.dll", System: true}
	setupDiGetClassDevsW             = setupapi.NewProc("SetupDiGetClassDevsW")
	setupDiEnumDeviceInterfaces      = setupapi.NewProc("SetupDiEnumDeviceInterfaces")
	setupDiGetDeviceInterfaceDetailW = setupapi.NewProc("SetupDiGetDeviceInterfaceDetailW")
	setupDiDestroyDeviceInfoList     = setupapi.NewProc("SetupDiDestroyDeviceInfoList")
)

// errNotFound is returned by winBatteryGet (which reads one battery by
// index and returns fullCapacity, currentCapacity, state, error) when the
// index is past the last battery, i.e. ERROR_NO_MORE_ITEMS.
var errNotFound = errors.New("no more batteries")
|
||||
|
||||
func setupDiSetup(proc *windows.LazyProc, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, error) {
|
||||
_ = nargs
|
||||
r1, _, errno := syscall.SyscallN(proc.Addr(), a1, a2, a3, a4, a5, a6)
|
||||
if windows.Handle(r1) == windows.InvalidHandle {
|
||||
if errno != 0 {
|
||||
return 0, error(errno)
|
||||
}
|
||||
return 0, syscall.EINVAL
|
||||
}
|
||||
return r1, nil
|
||||
}
|
||||
|
||||
func setupDiCall(proc *windows.LazyProc, nargs, a1, a2, a3, a4, a5, a6 uintptr) syscall.Errno {
|
||||
_ = nargs
|
||||
r1, _, errno := syscall.SyscallN(proc.Addr(), a1, a2, a3, a4, a5, a6)
|
||||
if r1 == 0 {
|
||||
if errno != 0 {
|
||||
return errno
|
||||
}
|
||||
return syscall.EINVAL
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// readWinBatteryState maps BATTERY_STATUS.PowerState flag bits onto the
// package's battery-state constants. Charging takes precedence over the
// other flags; no recognized bit yields stateUnknown.
func readWinBatteryState(powerState uint32) uint8 {
	switch {
	case powerState&0x00000004 != 0: // BATTERY_CHARGING
		return stateCharging
	case powerState&0x00000008 != 0: // BATTERY_CRITICAL — reported here as empty
		return stateEmpty
	case powerState&0x00000002 != 0: // BATTERY_DISCHARGING
		return stateDischarging
	case powerState&0x00000001 != 0: // BATTERY_POWER_ON_LINE — on AC, treated as full
		return stateFull
	default:
		return stateUnknown
	}
}
|
||||
|
||||
// winBatteryGet reads one battery by index via setupapi device-interface
// enumeration. Returns (fullCapacity, currentCapacity, state, error);
// err == errNotFound signals that idx is past the last battery.
func winBatteryGet(idx int) (full, current uint32, state uint8, err error) {
	// Open the device-information set for the battery interface class.
	hdev, err := setupDiSetup(
		setupDiGetClassDevsW,
		4,
		uintptr(unsafe.Pointer(&guidDeviceBattery)),
		0, 0,
		2|16, // DIGCF_PRESENT|DIGCF_DEVICEINTERFACE
		0, 0,
	)
	if err != nil {
		return 0, 0, stateUnknown, err
	}
	// best-effort cleanup of the device-info set handle
	defer syscall.SyscallN(setupDiDestroyDeviceInfoList.Addr(), hdev)

	var did spDeviceInterfaceData
	did.cbSize = uint32(unsafe.Sizeof(did))
	errno := setupDiCall(
		setupDiEnumDeviceInterfaces,
		5,
		hdev, 0,
		uintptr(unsafe.Pointer(&guidDeviceBattery)),
		uintptr(idx),
		uintptr(unsafe.Pointer(&did)),
		0,
	)
	if errno == 259 { // ERROR_NO_MORE_ITEMS
		return 0, 0, stateUnknown, errNotFound
	}
	if errno != 0 {
		return 0, 0, stateUnknown, errno
	}

	// First detail call with a nil buffer just reports the required size.
	var cbRequired uint32
	errno = setupDiCall(
		setupDiGetDeviceInterfaceDetailW,
		6,
		hdev,
		uintptr(unsafe.Pointer(&did)),
		0, 0,
		uintptr(unsafe.Pointer(&cbRequired)),
		0,
	)
	if errno != 0 && errno != 122 { // ERROR_INSUFFICIENT_BUFFER
		return 0, 0, stateUnknown, errno
	}
	// SP_DEVICE_INTERFACE_DETAIL_DATA_W: a uint32 cbSize followed by the
	// device path in UTF-16. cbSize is 8 on 64-bit and 6 on 32-bit builds.
	// NOTE(review): assumes cbRequired >= 4 here — a zero cbRequired would
	// panic on &didd[0] below; confirm the sizing call always sets it.
	didd := make([]uint16, cbRequired/2)
	cbSize := (*uint32)(unsafe.Pointer(&didd[0]))
	if unsafe.Sizeof(uint(0)) == 8 {
		*cbSize = 8
	} else {
		*cbSize = 6
	}
	errno = setupDiCall(
		setupDiGetDeviceInterfaceDetailW,
		6,
		hdev,
		uintptr(unsafe.Pointer(&did)),
		uintptr(unsafe.Pointer(&didd[0])),
		uintptr(cbRequired),
		uintptr(unsafe.Pointer(&cbRequired)),
		0,
	)
	if errno != 0 {
		return 0, 0, stateUnknown, errno
	}
	// The device path starts after the two uint16s that hold cbSize.
	devicePath := &didd[2:][0]

	handle, err := windows.CreateFile(
		devicePath,
		windows.GENERIC_READ|windows.GENERIC_WRITE,
		windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE,
		nil,
		windows.OPEN_EXISTING,
		windows.FILE_ATTRIBUTE_NORMAL,
		0,
	)
	if err != nil {
		return 0, 0, stateUnknown, err
	}
	defer windows.CloseHandle(handle)

	// A battery tag is required for the subsequent battery IOCTLs.
	var dwOut uint32
	var dwWait uint32
	var bqi batteryQueryInformation
	err = windows.DeviceIoControl(
		handle,
		2703424, // IOCTL_BATTERY_QUERY_TAG
		(*byte)(unsafe.Pointer(&dwWait)),
		uint32(unsafe.Sizeof(dwWait)),
		(*byte)(unsafe.Pointer(&bqi.BatteryTag)),
		uint32(unsafe.Sizeof(bqi.BatteryTag)),
		&dwOut, nil,
	)
	if err != nil || bqi.BatteryTag == 0 {
		return 0, 0, stateUnknown, errors.New("battery tag not returned")
	}

	// Static information: full-charge capacity lives here.
	var bi batteryInformation
	if err = windows.DeviceIoControl(
		handle,
		2703428, // IOCTL_BATTERY_QUERY_INFORMATION
		(*byte)(unsafe.Pointer(&bqi)),
		uint32(unsafe.Sizeof(bqi)),
		(*byte)(unsafe.Pointer(&bi)),
		uint32(unsafe.Sizeof(bi)),
		&dwOut, nil,
	); err != nil {
		return 0, 0, stateUnknown, err
	}

	// Dynamic status: current capacity and power-state flags.
	bws := batteryWaitStatus{BatteryTag: bqi.BatteryTag}
	var bs batteryStatus
	if err = windows.DeviceIoControl(
		handle,
		2703436, // IOCTL_BATTERY_QUERY_STATUS
		(*byte)(unsafe.Pointer(&bws)),
		uint32(unsafe.Sizeof(bws)),
		(*byte)(unsafe.Pointer(&bs)),
		uint32(unsafe.Sizeof(bs)),
		&dwOut, nil,
	); err != nil {
		return 0, 0, stateUnknown, err
	}

	if bs.Capacity == 0xffffffff { // BATTERY_UNKNOWN_CAPACITY
		return 0, 0, stateUnknown, errors.New("battery capacity unknown")
	}

	return bi.FullChargedCapacity, bs.Capacity, readWinBatteryState(bs.PowerState), nil
}
|
||||
|
||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||
var HasReadableBattery = sync.OnceValue(func() bool {
|
||||
systemHasBattery := false
|
||||
full, _, _, err := winBatteryGet(0)
|
||||
if err == nil && full > 0 {
|
||||
systemHasBattery = true
|
||||
}
|
||||
if !systemHasBattery {
|
||||
slog.Debug("No battery found", "err", err)
|
||||
}
|
||||
return systemHasBattery
|
||||
})
|
||||
|
||||
// GetBatteryStats returns the current battery percent and charge state.
|
||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||
if !HasReadableBattery() {
|
||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||
}
|
||||
|
||||
totalFull := uint32(0)
|
||||
totalCurrent := uint32(0)
|
||||
batteryState = math.MaxUint8
|
||||
|
||||
for i := 0; ; i++ {
|
||||
full, current, state, bErr := winBatteryGet(i)
|
||||
if errors.Is(bErr, errNotFound) {
|
||||
break
|
||||
}
|
||||
if bErr != nil || full == 0 {
|
||||
continue
|
||||
}
|
||||
totalFull += full
|
||||
totalCurrent += min(current, full)
|
||||
batteryState = state
|
||||
}
|
||||
|
||||
if totalFull == 0 || batteryState == math.MaxUint8 {
|
||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||
}
|
||||
|
||||
batteryPercent = uint8(float64(totalCurrent) / float64(totalFull) * 100)
|
||||
return batteryPercent, batteryState, nil
|
||||
}
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/lxzan/gws"
|
||||
"golang.org/x/crypto/ssh"
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -104,6 +105,11 @@ func (client *WebSocketClient) getOptions() *gws.ClientOption {
|
||||
}
|
||||
client.hubURL.Path = path.Join(client.hubURL.Path, "api/beszel/agent-connect")
|
||||
|
||||
// make sure BESZEL_AGENT_ALL_PROXY works (GWS only checks ALL_PROXY)
|
||||
if val := os.Getenv("BESZEL_AGENT_ALL_PROXY"); val != "" {
|
||||
os.Setenv("ALL_PROXY", val)
|
||||
}
|
||||
|
||||
client.options = &gws.ClientOption{
|
||||
Addr: client.hubURL.String(),
|
||||
TlsConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
@@ -112,6 +118,9 @@ func (client *WebSocketClient) getOptions() *gws.ClientOption {
|
||||
"X-Token": []string{client.token},
|
||||
"X-Beszel": []string{beszel.Version},
|
||||
},
|
||||
NewDialer: func() (gws.Dialer, error) {
|
||||
return proxy.FromEnvironment(), nil
|
||||
},
|
||||
}
|
||||
return client.options
|
||||
}
|
||||
|
||||
@@ -4,11 +4,15 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"log/slog"
|
||||
"net"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/agent/health"
|
||||
"github.com/henrygd/beszel/agent/utils"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
)
|
||||
|
||||
@@ -111,11 +115,35 @@ func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
||||
_ = health.Update()
|
||||
case <-sigCtx.Done():
|
||||
slog.Info("Shutting down", "cause", context.Cause(sigCtx))
|
||||
return c.stop()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// stop does not stop the connection manager itself, just any active connections. The manager will attempt to reconnect after stopping, so this should only be called immediately before shutting down the entire agent.
|
||||
//
|
||||
// If we need or want to expose a graceful Stop method in the future, do something like this to actually stop the manager:
|
||||
//
|
||||
// func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// c.cancel = cancel
|
||||
//
|
||||
// for {
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// return c.stop()
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// func (c *ConnectionManager) Stop() {
|
||||
// c.cancel()
|
||||
// }
|
||||
func (c *ConnectionManager) stop() error {
|
||||
_ = c.agent.StopServer()
|
||||
c.agent.probeManager.Stop()
|
||||
c.closeWebSocket()
|
||||
return health.CleanUp()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleEvent processes connection events and updates the connection state accordingly.
|
||||
@@ -185,10 +213,17 @@ func (c *ConnectionManager) connect() {
|
||||
|
||||
// Try WebSocket first, if it fails, start SSH server
|
||||
err := c.startWebSocketConnection()
|
||||
if err != nil && c.State == Disconnected {
|
||||
if err != nil {
|
||||
if shouldExitOnErr(err) {
|
||||
time.Sleep(2 * time.Second) // prevent tight restart loop
|
||||
_ = c.stop()
|
||||
os.Exit(1)
|
||||
}
|
||||
if c.State == Disconnected {
|
||||
c.startSSHServer()
|
||||
c.startWsTicker()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// startWebSocketConnection attempts to establish a WebSocket connection to the hub.
|
||||
@@ -224,3 +259,14 @@ func (c *ConnectionManager) closeWebSocket() {
|
||||
c.wsClient.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// shouldExitOnErr checks if the error is a DNS resolution failure and if the
|
||||
// EXIT_ON_DNS_ERROR env var is set. https://github.com/henrygd/beszel/issues/1924.
|
||||
func shouldExitOnErr(err error) bool {
|
||||
if val, _ := utils.GetEnv("EXIT_ON_DNS_ERROR"); val == "true" {
|
||||
if opErr, ok := errors.AsType[*net.OpError](err); ok {
|
||||
return strings.Contains(opErr.Err.Error(), "lookup")
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ package agent
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
@@ -298,3 +299,65 @@ func TestConnectionManager_ConnectFlow(t *testing.T) {
|
||||
cm.connect()
|
||||
}, "Connect should not panic without WebSocket client")
|
||||
}
|
||||
|
||||
func TestShouldExitOnErr(t *testing.T) {
|
||||
createDialErr := func(msg string) error {
|
||||
return &net.OpError{
|
||||
Op: "dial",
|
||||
Net: "tcp",
|
||||
Err: errors.New(msg),
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
err error
|
||||
envValue string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "no env var",
|
||||
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||
envValue: "",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "env var false",
|
||||
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||
envValue: "false",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "env var true, matching error",
|
||||
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||
envValue: "true",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "env var true, matching error with extra context",
|
||||
err: createDialErr("lookup beszel.server.lan on [::1]:53: read udp [::1]:44557->[::1]:53: read: connection refused"),
|
||||
envValue: "true",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "env var true, non-matching error",
|
||||
err: errors.New("connection refused"),
|
||||
envValue: "true",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "env var true, dial but not lookup",
|
||||
err: createDialErr("connection timeout"),
|
||||
envValue: "true",
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Setenv("EXIT_ON_DNS_ERROR", tt.envValue)
|
||||
result := shouldExitOnErr(tt.err)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
111
agent/disk.go
111
agent/disk.go
@@ -1,6 +1,7 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -33,6 +34,34 @@ type diskDiscovery struct {
|
||||
ctx fsRegistrationContext
|
||||
}
|
||||
|
||||
// prevDisk stores previous per-device disk counters for a given cache interval
|
||||
type prevDisk struct {
|
||||
readBytes uint64
|
||||
writeBytes uint64
|
||||
readTime uint64 // cumulative ms spent on reads (from ReadTime)
|
||||
writeTime uint64 // cumulative ms spent on writes (from WriteTime)
|
||||
ioTime uint64 // cumulative ms spent doing I/O (from IoTime)
|
||||
weightedIO uint64 // cumulative weighted ms (queue-depth × ms, from WeightedIO)
|
||||
readCount uint64 // cumulative read operation count
|
||||
writeCount uint64 // cumulative write operation count
|
||||
at time.Time
|
||||
}
|
||||
|
||||
// prevDiskFromCounter creates a prevDisk snapshot from a disk.IOCountersStat at time t.
|
||||
func prevDiskFromCounter(d disk.IOCountersStat, t time.Time) prevDisk {
|
||||
return prevDisk{
|
||||
readBytes: d.ReadBytes,
|
||||
writeBytes: d.WriteBytes,
|
||||
readTime: d.ReadTime,
|
||||
writeTime: d.WriteTime,
|
||||
ioTime: d.IoTime,
|
||||
weightedIO: d.WeightedIO,
|
||||
readCount: d.ReadCount,
|
||||
writeCount: d.WriteCount,
|
||||
at: t,
|
||||
}
|
||||
}
|
||||
|
||||
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
||||
// Returns the device/filesystem part and the custom name part
|
||||
func parseFilesystemEntry(entry string) (device, customName string) {
|
||||
@@ -238,9 +267,11 @@ func (d *diskDiscovery) addConfiguredExtraFilesystems(extraFilesystems string) {
|
||||
|
||||
// addPartitionExtraFs registers partitions mounted under /extra-filesystems so
|
||||
// their display names can come from the folder name while their I/O keys still
|
||||
// prefer the underlying partition device.
|
||||
// prefer the underlying partition device. Only direct children are matched to
|
||||
// avoid registering nested virtual mounts (e.g. /proc, /sys) that are returned by
|
||||
// disk.Partitions(true) when the host root is bind-mounted in /extra-filesystems.
|
||||
func (d *diskDiscovery) addPartitionExtraFs(p disk.PartitionStat) {
|
||||
if !strings.HasPrefix(p.Mountpoint, d.ctx.efPath) {
|
||||
if filepath.Dir(p.Mountpoint) != d.ctx.efPath {
|
||||
return
|
||||
}
|
||||
device, customName := extraFilesystemPartitionInfo(p)
|
||||
@@ -273,7 +304,7 @@ func (a *Agent) initializeDiskInfo() {
|
||||
hasRoot := false
|
||||
isWindows := runtime.GOOS == "windows"
|
||||
|
||||
partitions, err := disk.Partitions(false)
|
||||
partitions, err := disk.PartitionsWithContext(context.Background(), true)
|
||||
if err != nil {
|
||||
slog.Error("Error getting disk partitions", "err", err)
|
||||
}
|
||||
@@ -578,16 +609,29 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
||||
prev, hasPrev := a.diskPrev[cacheTimeMs][name]
|
||||
if !hasPrev {
|
||||
// Seed from agent-level fsStats if present, else seed from current
|
||||
prev = prevDisk{readBytes: stats.TotalRead, writeBytes: stats.TotalWrite, at: stats.Time}
|
||||
prev = prevDisk{
|
||||
readBytes: stats.TotalRead,
|
||||
writeBytes: stats.TotalWrite,
|
||||
readTime: d.ReadTime,
|
||||
writeTime: d.WriteTime,
|
||||
ioTime: d.IoTime,
|
||||
weightedIO: d.WeightedIO,
|
||||
readCount: d.ReadCount,
|
||||
writeCount: d.WriteCount,
|
||||
at: stats.Time,
|
||||
}
|
||||
if prev.at.IsZero() {
|
||||
prev = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
prev = prevDiskFromCounter(d, now)
|
||||
}
|
||||
}
|
||||
|
||||
msElapsed := uint64(now.Sub(prev.at).Milliseconds())
|
||||
|
||||
// Update per-interval snapshot
|
||||
a.diskPrev[cacheTimeMs][name] = prevDiskFromCounter(d, now)
|
||||
|
||||
// Avoid division by zero or clock issues
|
||||
if msElapsed < 100 {
|
||||
// Avoid division by zero or clock issues; update snapshot and continue
|
||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -599,15 +643,38 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
||||
// validate values
|
||||
if readMbPerSecond > 50_000 || writeMbPerSecond > 50_000 {
|
||||
slog.Warn("Invalid disk I/O. Resetting.", "name", d.Name, "read", readMbPerSecond, "write", writeMbPerSecond)
|
||||
// Reset interval snapshot and seed from current
|
||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
// also refresh agent baseline to avoid future negatives
|
||||
a.initializeDiskIoStats(ioCounters)
|
||||
continue
|
||||
}
|
||||
|
||||
// Update per-interval snapshot
|
||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
// These properties are calculated differently on different platforms,
|
||||
// but generally represent cumulative time spent doing reads/writes on the device.
|
||||
// This can surpass 100% if there are multiple concurrent I/O operations.
|
||||
// Linux kernel docs:
|
||||
// This is the total number of milliseconds spent by all reads (as
|
||||
// measured from __make_request() to end_that_request_last()).
|
||||
// https://www.kernel.org/doc/Documentation/iostats.txt (fields 4, 8)
|
||||
diskReadTime := utils.TwoDecimals(float64(d.ReadTime-prev.readTime) / float64(msElapsed) * 100)
|
||||
diskWriteTime := utils.TwoDecimals(float64(d.WriteTime-prev.writeTime) / float64(msElapsed) * 100)
|
||||
|
||||
// I/O utilization %: fraction of wall time the device had any I/O in progress (0-100).
|
||||
diskIoUtilPct := utils.TwoDecimals(float64(d.IoTime-prev.ioTime) / float64(msElapsed) * 100)
|
||||
|
||||
// Weighted I/O: queue-depth weighted I/O time, normalized to interval (can exceed 100%).
|
||||
// Linux kernel field 11: incremented by iops_in_progress × ms_since_last_update.
|
||||
// Used to display queue depth. Multipled by 100 to increase accuracy of digit truncation (divided by 100 in UI).
|
||||
diskWeightedIO := utils.TwoDecimals(float64(d.WeightedIO-prev.weightedIO) / float64(msElapsed) * 100)
|
||||
|
||||
// r_await / w_await: average time per read/write operation in milliseconds.
|
||||
// Equivalent to r_await and w_await in iostat.
|
||||
var rAwait, wAwait float64
|
||||
if deltaReadCount := d.ReadCount - prev.readCount; deltaReadCount > 0 {
|
||||
rAwait = utils.TwoDecimals(float64(d.ReadTime-prev.readTime) / float64(deltaReadCount))
|
||||
}
|
||||
if deltaWriteCount := d.WriteCount - prev.writeCount; deltaWriteCount > 0 {
|
||||
wAwait = utils.TwoDecimals(float64(d.WriteTime-prev.writeTime) / float64(deltaWriteCount))
|
||||
}
|
||||
|
||||
// Update global fsStats baseline for cross-interval correctness
|
||||
stats.Time = now
|
||||
@@ -617,20 +684,40 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
||||
stats.DiskWritePs = writeMbPerSecond
|
||||
stats.DiskReadBytes = diskIORead
|
||||
stats.DiskWriteBytes = diskIOWrite
|
||||
stats.DiskIoStats[0] = diskReadTime
|
||||
stats.DiskIoStats[1] = diskWriteTime
|
||||
stats.DiskIoStats[2] = diskIoUtilPct
|
||||
stats.DiskIoStats[3] = rAwait
|
||||
stats.DiskIoStats[4] = wAwait
|
||||
stats.DiskIoStats[5] = diskWeightedIO
|
||||
|
||||
if stats.Root {
|
||||
systemStats.DiskReadPs = stats.DiskReadPs
|
||||
systemStats.DiskWritePs = stats.DiskWritePs
|
||||
systemStats.DiskIO[0] = diskIORead
|
||||
systemStats.DiskIO[1] = diskIOWrite
|
||||
systemStats.DiskIoStats[0] = diskReadTime
|
||||
systemStats.DiskIoStats[1] = diskWriteTime
|
||||
systemStats.DiskIoStats[2] = diskIoUtilPct
|
||||
systemStats.DiskIoStats[3] = rAwait
|
||||
systemStats.DiskIoStats[4] = wAwait
|
||||
systemStats.DiskIoStats[5] = diskWeightedIO
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getRootMountPoint returns the appropriate root mount point for the system
|
||||
// getRootMountPoint returns the appropriate root mount point for the system.
|
||||
// On Windows it returns the system drive (e.g. "C:").
|
||||
// For immutable systems like Fedora Silverblue, it returns /sysroot instead of /
|
||||
func (a *Agent) getRootMountPoint() string {
|
||||
if runtime.GOOS == "windows" {
|
||||
if sd := os.Getenv("SystemDrive"); sd != "" {
|
||||
return sd
|
||||
}
|
||||
return "C:"
|
||||
}
|
||||
|
||||
// 1. Check if /etc/os-release contains indicators of an immutable system
|
||||
if osReleaseContent, err := os.ReadFile("/etc/os-release"); err == nil {
|
||||
content := string(osReleaseContent)
|
||||
|
||||
@@ -530,6 +530,87 @@ func TestAddExtraFilesystemFolders(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestAddPartitionExtraFs(t *testing.T) {
|
||||
makeDiscovery := func(agent *Agent) diskDiscovery {
|
||||
return diskDiscovery{
|
||||
agent: agent,
|
||||
ctx: fsRegistrationContext{
|
||||
isWindows: false,
|
||||
efPath: "/extra-filesystems",
|
||||
diskIoCounters: map[string]disk.IOCountersStat{
|
||||
"nvme0n1p1": {Name: "nvme0n1p1"},
|
||||
"nvme1n1": {Name: "nvme1n1"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("registers direct child of extra-filesystems", func(t *testing.T) {
|
||||
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||
d := makeDiscovery(agent)
|
||||
|
||||
d.addPartitionExtraFs(disk.PartitionStat{
|
||||
Device: "/dev/nvme0n1p1",
|
||||
Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root",
|
||||
})
|
||||
|
||||
stats, exists := agent.fsStats["nvme0n1p1"]
|
||||
assert.True(t, exists)
|
||||
assert.Equal(t, "/extra-filesystems/nvme0n1p1__caddy1-root", stats.Mountpoint)
|
||||
assert.Equal(t, "caddy1-root", stats.Name)
|
||||
})
|
||||
|
||||
t.Run("skips nested mount under extra-filesystem bind mount", func(t *testing.T) {
|
||||
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||
d := makeDiscovery(agent)
|
||||
|
||||
// These simulate the virtual mounts that appear when host / is bind-mounted
|
||||
// with disk.Partitions(all=true) — e.g. /proc, /sys, /dev visible under the mount.
|
||||
for _, nested := range []string{
|
||||
"/extra-filesystems/nvme0n1p1__caddy1-root/proc",
|
||||
"/extra-filesystems/nvme0n1p1__caddy1-root/sys",
|
||||
"/extra-filesystems/nvme0n1p1__caddy1-root/dev",
|
||||
"/extra-filesystems/nvme0n1p1__caddy1-root/run",
|
||||
} {
|
||||
d.addPartitionExtraFs(disk.PartitionStat{Device: "tmpfs", Mountpoint: nested})
|
||||
}
|
||||
|
||||
assert.Empty(t, agent.fsStats)
|
||||
})
|
||||
|
||||
t.Run("registers both direct children, skips their nested mounts", func(t *testing.T) {
|
||||
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||
d := makeDiscovery(agent)
|
||||
|
||||
partitions := []disk.PartitionStat{
|
||||
{Device: "/dev/nvme0n1p1", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root"},
|
||||
{Device: "/dev/nvme1n1", Mountpoint: "/extra-filesystems/nvme1n1__caddy1-docker"},
|
||||
{Device: "proc", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/proc"},
|
||||
{Device: "sysfs", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/sys"},
|
||||
{Device: "overlay", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/var/lib/docker"},
|
||||
}
|
||||
for _, p := range partitions {
|
||||
d.addPartitionExtraFs(p)
|
||||
}
|
||||
|
||||
assert.Len(t, agent.fsStats, 2)
|
||||
assert.Equal(t, "caddy1-root", agent.fsStats["nvme0n1p1"].Name)
|
||||
assert.Equal(t, "caddy1-docker", agent.fsStats["nvme1n1"].Name)
|
||||
})
|
||||
|
||||
t.Run("skips partition not under extra-filesystems", func(t *testing.T) {
|
||||
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||
d := makeDiscovery(agent)
|
||||
|
||||
d.addPartitionExtraFs(disk.PartitionStat{
|
||||
Device: "/dev/nvme0n1p1",
|
||||
Mountpoint: "/",
|
||||
})
|
||||
|
||||
assert.Empty(t, agent.fsStats)
|
||||
})
|
||||
}
|
||||
|
||||
func TestFindIoDevice(t *testing.T) {
|
||||
t.Run("matches by device name", func(t *testing.T) {
|
||||
ioCounters := map[string]disk.IOCountersStat{
|
||||
|
||||
132
agent/docker.go
132
agent/docker.go
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/henrygd/beszel/agent/deltatracker"
|
||||
"github.com/henrygd/beszel/agent/utils"
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
|
||||
"github.com/blang/semver"
|
||||
)
|
||||
@@ -52,6 +53,7 @@ const (
|
||||
)
|
||||
|
||||
type dockerManager struct {
|
||||
agent *Agent // Used to propagate system detail changes back to the agent
|
||||
client *http.Client // Client to query Docker API
|
||||
wg sync.WaitGroup // WaitGroup to wait for all goroutines to finish
|
||||
sem chan struct{} // Semaphore to limit concurrent container requests
|
||||
@@ -60,6 +62,7 @@ type dockerManager struct {
|
||||
containerStatsMap map[string]*container.Stats // Keeps track of container stats
|
||||
validIds map[string]struct{} // Map of valid container ids, used to prune invalid containers from containerStatsMap
|
||||
goodDockerVersion bool // Whether docker version is at least 25.0.0 (one-shot works correctly)
|
||||
dockerVersionChecked bool // Whether a version probe has completed successfully
|
||||
isWindows bool // Whether the Docker Engine API is running on Windows
|
||||
buf *bytes.Buffer // Buffer to store and read response bodies
|
||||
decoder *json.Decoder // Reusable JSON decoder that reads from buf
|
||||
@@ -78,7 +81,6 @@ type dockerManager struct {
|
||||
networkSentTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||
networkRecvTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||
lastNetworkReadTime map[uint16]map[string]time.Time // cacheTimeMs -> containerId -> last network read time
|
||||
retrySleep func(time.Duration)
|
||||
}
|
||||
|
||||
// userAgentRoundTripper is a custom http.RoundTripper that adds a User-Agent header to all requests
|
||||
@@ -87,6 +89,14 @@ type userAgentRoundTripper struct {
|
||||
userAgent string
|
||||
}
|
||||
|
||||
// dockerVersionResponse contains the /version fields used for engine checks.
|
||||
type dockerVersionResponse struct {
|
||||
Version string `json:"Version"`
|
||||
Components []struct {
|
||||
Name string `json:"Name"`
|
||||
} `json:"Components"`
|
||||
}
|
||||
|
||||
// RoundTrip implements the http.RoundTripper interface
|
||||
func (u *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
req.Header.Set("User-Agent", u.userAgent)
|
||||
@@ -134,7 +144,14 @@ func (dm *dockerManager) getDockerStats(cacheTimeMs uint16) ([]*container.Stats,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dm.isWindows = strings.Contains(resp.Header.Get("Server"), "windows")
|
||||
// Detect Podman and Windows from Server header
|
||||
serverHeader := resp.Header.Get("Server")
|
||||
if !dm.usingPodman && detectPodmanFromHeader(serverHeader) {
|
||||
dm.setIsPodman()
|
||||
}
|
||||
dm.isWindows = strings.Contains(serverHeader, "windows")
|
||||
|
||||
dm.ensureDockerVersionChecked()
|
||||
|
||||
containersLength := len(dm.apiContainerList)
|
||||
|
||||
@@ -588,7 +605,7 @@ func (dm *dockerManager) deleteContainerStatsSync(id string) {
|
||||
}
|
||||
|
||||
// Creates a new http client for Docker or Podman API
|
||||
func newDockerManager() *dockerManager {
|
||||
func newDockerManager(agent *Agent) *dockerManager {
|
||||
dockerHost, exists := utils.GetEnv("DOCKER_HOST")
|
||||
if exists {
|
||||
// return nil if set to empty string
|
||||
@@ -654,6 +671,7 @@ func newDockerManager() *dockerManager {
|
||||
}
|
||||
|
||||
manager := &dockerManager{
|
||||
agent: agent,
|
||||
client: &http.Client{
|
||||
Timeout: timeout,
|
||||
Transport: userAgentTransport,
|
||||
@@ -671,51 +689,54 @@ func newDockerManager() *dockerManager {
|
||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||
retrySleep: time.Sleep,
|
||||
}
|
||||
|
||||
// If using podman, return client
|
||||
if strings.Contains(dockerHost, "podman") {
|
||||
manager.usingPodman = true
|
||||
manager.goodDockerVersion = true
|
||||
return manager
|
||||
}
|
||||
|
||||
// run version check in goroutine to avoid blocking (server may not be ready and requires retries)
|
||||
go manager.checkDockerVersion()
|
||||
|
||||
// give version check a chance to complete before returning
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
// Best-effort startup probe. If the engine is not ready yet, getDockerStats will
|
||||
// retry after the first successful /containers/json request.
|
||||
_, _ = manager.checkDockerVersion()
|
||||
|
||||
return manager
|
||||
}
|
||||
|
||||
// checkDockerVersion checks Docker version and sets goodDockerVersion if at least 25.0.0.
|
||||
// Versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch.
|
||||
func (dm *dockerManager) checkDockerVersion() {
|
||||
var err error
|
||||
var resp *http.Response
|
||||
var versionInfo struct {
|
||||
Version string `json:"Version"`
|
||||
func (dm *dockerManager) checkDockerVersion() (bool, error) {
|
||||
resp, err := dm.client.Get("http://localhost/version")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
const versionMaxTries = 2
|
||||
for i := 1; i <= versionMaxTries; i++ {
|
||||
resp, err = dm.client.Get("http://localhost/version")
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
break
|
||||
}
|
||||
if resp != nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
status := resp.Status
|
||||
resp.Body.Close()
|
||||
return false, fmt.Errorf("docker version request failed: %s", status)
|
||||
}
|
||||
if i < versionMaxTries {
|
||||
slog.Debug("Failed to get Docker version; retrying", "attempt", i, "err", err, "response", resp)
|
||||
dm.retrySleep(5 * time.Second)
|
||||
|
||||
var versionInfo dockerVersionResponse
|
||||
serverHeader := resp.Header.Get("Server")
|
||||
if err := dm.decode(resp, &versionInfo); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
if err != nil || resp.StatusCode != http.StatusOK {
|
||||
|
||||
dm.applyDockerVersionInfo(serverHeader, &versionInfo)
|
||||
dm.dockerVersionChecked = true
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// ensureDockerVersionChecked retries the version probe after a successful
|
||||
// container list request.
|
||||
func (dm *dockerManager) ensureDockerVersionChecked() {
|
||||
if dm.dockerVersionChecked {
|
||||
return
|
||||
}
|
||||
if err := dm.decode(resp, &versionInfo); err != nil {
|
||||
if _, err := dm.checkDockerVersion(); err != nil {
|
||||
slog.Debug("Failed to get Docker version", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// applyDockerVersionInfo updates version-dependent behavior from engine metadata.
|
||||
func (dm *dockerManager) applyDockerVersionInfo(serverHeader string, versionInfo *dockerVersionResponse) {
|
||||
if detectPodmanEngine(serverHeader, versionInfo) {
|
||||
dm.setIsPodman()
|
||||
return
|
||||
}
|
||||
// if version > 24, one-shot works correctly and we can limit concurrent operations
|
||||
@@ -941,3 +962,46 @@ func (dm *dockerManager) GetHostInfo() (info container.HostInfo, err error) {
|
||||
func (dm *dockerManager) IsPodman() bool {
|
||||
return dm.usingPodman
|
||||
}
|
||||
|
||||
// setIsPodman sets the manager to Podman mode and updates system details accordingly.
|
||||
func (dm *dockerManager) setIsPodman() {
|
||||
if dm.usingPodman {
|
||||
return
|
||||
}
|
||||
dm.usingPodman = true
|
||||
dm.goodDockerVersion = true
|
||||
dm.dockerVersionChecked = true
|
||||
// keep system details updated - this may be detected late if server isn't ready when
|
||||
// agent starts, so make sure we notify the hub if this happens later.
|
||||
if dm.agent != nil {
|
||||
dm.agent.updateSystemDetails(func(details *system.Details) {
|
||||
details.Podman = true
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// detectPodmanFromHeader identifies Podman from the Docker API server header.
|
||||
func detectPodmanFromHeader(server string) bool {
|
||||
return strings.HasPrefix(server, "Libpod")
|
||||
}
|
||||
|
||||
// detectPodmanFromVersion identifies Podman from the version payload.
|
||||
func detectPodmanFromVersion(versionInfo *dockerVersionResponse) bool {
|
||||
if versionInfo == nil {
|
||||
return false
|
||||
}
|
||||
for _, component := range versionInfo.Components {
|
||||
if strings.HasPrefix(component.Name, "Podman") {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// detectPodmanEngine checks both header and version metadata for Podman.
|
||||
func detectPodmanEngine(serverHeader string, versionInfo *dockerVersionResponse) bool {
|
||||
if detectPodmanFromHeader(serverHeader) {
|
||||
return true
|
||||
}
|
||||
return detectPodmanFromVersion(versionInfo)
|
||||
}
|
||||
|
||||
@@ -540,58 +540,52 @@ func TestDockerManagerCreation(t *testing.T) {
|
||||
func TestCheckDockerVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
responses []struct {
|
||||
statusCode int
|
||||
body string
|
||||
}
|
||||
server string
|
||||
expectSuccess bool
|
||||
expectedGood bool
|
||||
expectedRequests int
|
||||
expectedPodman bool
|
||||
expectError bool
|
||||
expectedRequest string
|
||||
}{
|
||||
{
|
||||
name: "200 with good version on first try",
|
||||
responses: []struct {
|
||||
statusCode int
|
||||
body string
|
||||
}{
|
||||
{http.StatusOK, `{"Version":"25.0.1"}`},
|
||||
},
|
||||
name: "good docker version",
|
||||
statusCode: http.StatusOK,
|
||||
body: `{"Version":"25.0.1"}`,
|
||||
expectSuccess: true,
|
||||
expectedGood: true,
|
||||
expectedRequests: 1,
|
||||
expectedPodman: false,
|
||||
expectedRequest: "/version",
|
||||
},
|
||||
{
|
||||
name: "200 with old version on first try",
|
||||
responses: []struct {
|
||||
statusCode int
|
||||
body string
|
||||
}{
|
||||
{http.StatusOK, `{"Version":"24.0.7"}`},
|
||||
},
|
||||
name: "old docker version",
|
||||
statusCode: http.StatusOK,
|
||||
body: `{"Version":"24.0.7"}`,
|
||||
expectSuccess: true,
|
||||
expectedGood: false,
|
||||
expectedRequests: 1,
|
||||
expectedPodman: false,
|
||||
expectedRequest: "/version",
|
||||
},
|
||||
{
|
||||
name: "non-200 then 200 with good version",
|
||||
responses: []struct {
|
||||
statusCode int
|
||||
body string
|
||||
}{
|
||||
{http.StatusServiceUnavailable, `"not ready"`},
|
||||
{http.StatusOK, `{"Version":"25.1.0"}`},
|
||||
},
|
||||
name: "podman from server header",
|
||||
statusCode: http.StatusOK,
|
||||
body: `{"Version":"5.5.0"}`,
|
||||
server: "Libpod/5.5.0",
|
||||
expectSuccess: true,
|
||||
expectedGood: true,
|
||||
expectedRequests: 2,
|
||||
expectedPodman: true,
|
||||
expectedRequest: "/version",
|
||||
},
|
||||
{
|
||||
name: "non-200 on all retries",
|
||||
responses: []struct {
|
||||
statusCode int
|
||||
body string
|
||||
}{
|
||||
{http.StatusInternalServerError, `"error"`},
|
||||
{http.StatusUnauthorized, `"error"`},
|
||||
},
|
||||
name: "non-200 response",
|
||||
statusCode: http.StatusServiceUnavailable,
|
||||
body: `"not ready"`,
|
||||
expectSuccess: false,
|
||||
expectedGood: false,
|
||||
expectedRequests: 2,
|
||||
expectedPodman: false,
|
||||
expectError: true,
|
||||
expectedRequest: "/version",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -599,13 +593,13 @@ func TestCheckDockerVersion(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
requestCount := 0
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
idx := requestCount
|
||||
requestCount++
|
||||
if idx >= len(tt.responses) {
|
||||
idx = len(tt.responses) - 1
|
||||
assert.Equal(t, tt.expectedRequest, r.URL.EscapedPath())
|
||||
if tt.server != "" {
|
||||
w.Header().Set("Server", tt.server)
|
||||
}
|
||||
w.WriteHeader(tt.responses[idx].statusCode)
|
||||
fmt.Fprint(w, tt.responses[idx].body)
|
||||
w.WriteHeader(tt.statusCode)
|
||||
fmt.Fprint(w, tt.body)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
@@ -617,17 +611,24 @@ func TestCheckDockerVersion(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
retrySleep: func(time.Duration) {},
|
||||
}
|
||||
|
||||
dm.checkDockerVersion()
|
||||
success, err := dm.checkDockerVersion()
|
||||
|
||||
assert.Equal(t, tt.expectSuccess, success)
|
||||
assert.Equal(t, tt.expectSuccess, dm.dockerVersionChecked)
|
||||
assert.Equal(t, tt.expectedGood, dm.goodDockerVersion)
|
||||
assert.Equal(t, tt.expectedRequests, requestCount)
|
||||
assert.Equal(t, tt.expectedPodman, dm.usingPodman)
|
||||
assert.Equal(t, 1, requestCount)
|
||||
if tt.expectError {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("request error on all retries", func(t *testing.T) {
|
||||
t.Run("request error", func(t *testing.T) {
|
||||
requestCount := 0
|
||||
dm := &dockerManager{
|
||||
client: &http.Client{
|
||||
@@ -638,16 +639,171 @@ func TestCheckDockerVersion(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
retrySleep: func(time.Duration) {},
|
||||
}
|
||||
|
||||
dm.checkDockerVersion()
|
||||
success, err := dm.checkDockerVersion()
|
||||
|
||||
assert.False(t, success)
|
||||
require.Error(t, err)
|
||||
assert.False(t, dm.dockerVersionChecked)
|
||||
assert.False(t, dm.goodDockerVersion)
|
||||
assert.Equal(t, 2, requestCount)
|
||||
assert.False(t, dm.usingPodman)
|
||||
assert.Equal(t, 1, requestCount)
|
||||
})
|
||||
}
|
||||
|
||||
// newDockerManagerForVersionTest creates a dockerManager wired to a test server.
|
||||
func newDockerManagerForVersionTest(server *httptest.Server) *dockerManager {
|
||||
return &dockerManager{
|
||||
client: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
DialContext: func(_ context.Context, network, _ string) (net.Conn, error) {
|
||||
return net.Dial(network, server.Listener.Addr().String())
|
||||
},
|
||||
},
|
||||
},
|
||||
containerStatsMap: make(map[string]*container.Stats),
|
||||
lastCpuContainer: make(map[uint16]map[string]uint64),
|
||||
lastCpuSystem: make(map[uint16]map[string]uint64),
|
||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetDockerStatsChecksDockerVersionAfterContainerList(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
containerServer string
|
||||
versionServer string
|
||||
versionBody string
|
||||
expectedGood bool
|
||||
expectedPodman bool
|
||||
}{
|
||||
{
|
||||
name: "200 with good version on first try",
|
||||
versionBody: `{"Version":"25.0.1"}`,
|
||||
expectedGood: true,
|
||||
expectedPodman: false,
|
||||
},
|
||||
{
|
||||
name: "200 with old version on first try",
|
||||
versionBody: `{"Version":"24.0.7"}`,
|
||||
expectedGood: false,
|
||||
expectedPodman: false,
|
||||
},
|
||||
{
|
||||
name: "podman detected from server header",
|
||||
containerServer: "Libpod/5.5.0",
|
||||
expectedGood: true,
|
||||
expectedPodman: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
requestCounts := map[string]int{}
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
requestCounts[r.URL.EscapedPath()]++
|
||||
switch r.URL.EscapedPath() {
|
||||
case "/containers/json":
|
||||
if tt.containerServer != "" {
|
||||
w.Header().Set("Server", tt.containerServer)
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprint(w, `[]`)
|
||||
case "/version":
|
||||
if tt.versionServer != "" {
|
||||
w.Header().Set("Server", tt.versionServer)
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprint(w, tt.versionBody)
|
||||
default:
|
||||
t.Fatalf("unexpected path: %s", r.URL.EscapedPath())
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
dm := newDockerManagerForVersionTest(server)
|
||||
|
||||
stats, err := dm.getDockerStats(defaultCacheTimeMs)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, stats)
|
||||
assert.True(t, dm.dockerVersionChecked)
|
||||
assert.Equal(t, tt.expectedGood, dm.goodDockerVersion)
|
||||
assert.Equal(t, tt.expectedPodman, dm.usingPodman)
|
||||
assert.Equal(t, 1, requestCounts["/containers/json"])
|
||||
if tt.expectedPodman {
|
||||
assert.Equal(t, 0, requestCounts["/version"])
|
||||
} else {
|
||||
assert.Equal(t, 1, requestCounts["/version"])
|
||||
}
|
||||
|
||||
stats, err = dm.getDockerStats(defaultCacheTimeMs)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, stats)
|
||||
assert.Equal(t, tt.expectedGood, dm.goodDockerVersion)
|
||||
assert.Equal(t, tt.expectedPodman, dm.usingPodman)
|
||||
assert.Equal(t, 2, requestCounts["/containers/json"])
|
||||
if tt.expectedPodman {
|
||||
assert.Equal(t, 0, requestCounts["/version"])
|
||||
} else {
|
||||
assert.Equal(t, 1, requestCounts["/version"])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGetDockerStatsRetriesVersionCheckUntilSuccess(t *testing.T) {
|
||||
requestCounts := map[string]int{}
|
||||
versionStatuses := []int{http.StatusServiceUnavailable, http.StatusOK}
|
||||
versionBodies := []string{`"not ready"`, `{"Version":"25.1.0"}`}
|
||||
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
requestCounts[r.URL.EscapedPath()]++
|
||||
switch r.URL.EscapedPath() {
|
||||
case "/containers/json":
|
||||
w.WriteHeader(http.StatusOK)
|
||||
fmt.Fprint(w, `[]`)
|
||||
case "/version":
|
||||
idx := requestCounts["/version"] - 1
|
||||
if idx >= len(versionStatuses) {
|
||||
idx = len(versionStatuses) - 1
|
||||
}
|
||||
w.WriteHeader(versionStatuses[idx])
|
||||
fmt.Fprint(w, versionBodies[idx])
|
||||
default:
|
||||
t.Fatalf("unexpected path: %s", r.URL.EscapedPath())
|
||||
}
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
dm := newDockerManagerForVersionTest(server)
|
||||
|
||||
stats, err := dm.getDockerStats(defaultCacheTimeMs)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, stats)
|
||||
assert.False(t, dm.dockerVersionChecked)
|
||||
assert.False(t, dm.goodDockerVersion)
|
||||
assert.Equal(t, 1, requestCounts["/version"])
|
||||
|
||||
stats, err = dm.getDockerStats(defaultCacheTimeMs)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, stats)
|
||||
assert.True(t, dm.dockerVersionChecked)
|
||||
assert.True(t, dm.goodDockerVersion)
|
||||
assert.Equal(t, 2, requestCounts["/containers/json"])
|
||||
assert.Equal(t, 2, requestCounts["/version"])
|
||||
|
||||
stats, err = dm.getDockerStats(defaultCacheTimeMs)
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, stats)
|
||||
assert.Equal(t, 3, requestCounts["/containers/json"])
|
||||
assert.Equal(t, 2, requestCounts["/version"])
|
||||
}
|
||||
|
||||
func TestCycleCpuDeltas(t *testing.T) {
|
||||
dm := &dockerManager{
|
||||
lastCpuContainer: map[uint16]map[string]uint64{
|
||||
|
||||
11
agent/gpu.go
11
agent/gpu.go
@@ -542,7 +542,7 @@ func (gm *GPUManager) collectorDefinitions(caps gpuCapabilities) map[collectorSo
|
||||
return map[collectorSource]collectorDefinition{
|
||||
collectorSourceNVML: {
|
||||
group: collectorGroupNvidia,
|
||||
available: caps.hasNvidiaSmi,
|
||||
available: true,
|
||||
start: func(_ func()) bool {
|
||||
return gm.startNvmlCollector()
|
||||
},
|
||||
@@ -734,9 +734,6 @@ func NewGPUManager() (*GPUManager, error) {
|
||||
}
|
||||
var gm GPUManager
|
||||
caps := gm.discoverGpuCapabilities()
|
||||
if !hasAnyGpuCollector(caps) {
|
||||
return nil, fmt.Errorf(noGPUFoundMsg)
|
||||
}
|
||||
gm.GpuDataMap = make(map[string]*system.GPUData)
|
||||
|
||||
// Jetson devices should always use tegrastats (ignore GPU_COLLECTOR).
|
||||
@@ -745,7 +742,7 @@ func NewGPUManager() (*GPUManager, error) {
|
||||
return &gm, nil
|
||||
}
|
||||
|
||||
// if GPU_COLLECTOR is set, start user-defined collectors.
|
||||
// Respect explicit collector selection before capability auto-detection.
|
||||
if collectorConfig, ok := utils.GetEnv("GPU_COLLECTOR"); ok && strings.TrimSpace(collectorConfig) != "" {
|
||||
priorities := parseCollectorPriority(collectorConfig)
|
||||
if gm.startCollectorsByPriority(priorities, caps) == 0 {
|
||||
@@ -754,6 +751,10 @@ func NewGPUManager() (*GPUManager, error) {
|
||||
return &gm, nil
|
||||
}
|
||||
|
||||
if !hasAnyGpuCollector(caps) {
|
||||
return nil, fmt.Errorf(noGPUFoundMsg)
|
||||
}
|
||||
|
||||
// auto-detect and start collectors when GPU_COLLECTOR is unset.
|
||||
if gm.startCollectorsByPriority(gm.resolveLegacyCollectorPriority(caps), caps) == 0 {
|
||||
return nil, fmt.Errorf(noGPUFoundMsg)
|
||||
|
||||
@@ -156,6 +156,7 @@ func (gm *GPUManager) updateAmdGpuData(cardPath string) bool {
|
||||
func readSysfsFloat(path string) (float64, error) {
|
||||
val, err := utils.ReadStringFileLimited(path, 64)
|
||||
if err != nil {
|
||||
slog.Debug("Failed to read sysfs value", "path", path, "error", err)
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseFloat(val, 64)
|
||||
|
||||
@@ -1461,6 +1461,25 @@ func TestNewGPUManagerConfiguredCollectorsMustStart(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestCollectorDefinitionsNvmlDoesNotRequireNvidiaSmi(t *testing.T) {
|
||||
gm := &GPUManager{}
|
||||
definitions := gm.collectorDefinitions(gpuCapabilities{})
|
||||
require.Contains(t, definitions, collectorSourceNVML)
|
||||
assert.True(t, definitions[collectorSourceNVML].available)
|
||||
}
|
||||
|
||||
func TestNewGPUManagerConfiguredNvmlBypassesCapabilityGate(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
t.Setenv("PATH", dir)
|
||||
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvml")
|
||||
|
||||
gm, err := NewGPUManager()
|
||||
require.Nil(t, gm)
|
||||
require.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "no configured GPU collectors are available")
|
||||
assert.NotContains(t, err.Error(), noGPUFoundMsg)
|
||||
}
|
||||
|
||||
func TestNewGPUManagerJetsonIgnoresCollectorConfig(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
t.Setenv("PATH", dir)
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
|
||||
"log/slog"
|
||||
@@ -51,6 +52,7 @@ func NewHandlerRegistry() *HandlerRegistry {
|
||||
registry.Register(common.GetContainerInfo, &GetContainerInfoHandler{})
|
||||
registry.Register(common.GetSmartData, &GetSmartDataHandler{})
|
||||
registry.Register(common.GetSystemdInfo, &GetSystemdInfoHandler{})
|
||||
registry.Register(common.SyncNetworkProbes, &SyncNetworkProbesHandler{})
|
||||
|
||||
return registry
|
||||
}
|
||||
@@ -203,3 +205,22 @@ func (h *GetSystemdInfoHandler) Handle(hctx *HandlerContext) error {
|
||||
|
||||
return hctx.SendResponse(details, hctx.RequestID)
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// SyncNetworkProbesHandler handles probe configuration sync from hub
|
||||
type SyncNetworkProbesHandler struct{}
|
||||
|
||||
func (h *SyncNetworkProbesHandler) Handle(hctx *HandlerContext) error {
|
||||
var req probe.SyncRequest
|
||||
if err := cbor.Unmarshal(hctx.Request.Data, &req); err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := hctx.Agent.probeManager.ApplySync(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
slog.Info("network probes synced", "action", req.Action)
|
||||
return hctx.SendResponse(resp, hctx.RequestID)
|
||||
}
|
||||
|
||||
@@ -8,6 +8,6 @@
|
||||
</PropertyGroup>
|
||||
|
||||
<ItemGroup>
|
||||
<PackageReference Include="LibreHardwareMonitorLib" Version="0.9.5" />
|
||||
<PackageReference Include="LibreHardwareMonitorLib" Version="0.9.6" />
|
||||
</ItemGroup>
|
||||
</Project>
|
||||
|
||||
494
agent/probe.go
Normal file
494
agent/probe.go
Normal file
@@ -0,0 +1,494 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"log/slog"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
)
|
||||
|
||||
// Probe functionality overview:
|
||||
// Probes run at user-defined intervals (e.g., every 10s).
|
||||
// To keep memory usage low and constant, data is stored in two layers:
|
||||
// 1. Raw samples: The most recent individual results (kept for probeRawRetention).
|
||||
// 2. Minute buckets: A fixed-size ring buffer of 61 buckets, each representing one
|
||||
// wall-clock minute. Samples collected within the same minute are aggregated
|
||||
// (sum, min, max, count) into a single bucket.
|
||||
//
|
||||
// Short-term requests (<= 2m) use raw samples for perfect accuracy.
|
||||
// Long-term requests (up to 1h) use the minute buckets to avoid storing thousands
|
||||
// of individual data points.
|
||||
|
||||
const (
|
||||
// probeRawRetention is the duration to keep individual samples for high-precision short-term requests
|
||||
probeRawRetention = 80 * time.Second
|
||||
// probeMinuteBucketLen is the number of 1-minute buckets to keep (1 hour + 1 for partials)
|
||||
probeMinuteBucketLen int32 = 61
|
||||
)
|
||||
|
||||
// ProbeManager manages network probe tasks.
|
||||
type ProbeManager struct {
|
||||
mu sync.RWMutex
|
||||
probes map[string]*probeTask // key = probe.Config.Key()
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// probeTask owns retention buffers and cancellation for a single probe config.
|
||||
type probeTask struct {
|
||||
config probe.Config
|
||||
cancel chan struct{}
|
||||
mu sync.Mutex
|
||||
samples []probeSample
|
||||
buckets [probeMinuteBucketLen]probeBucket
|
||||
}
|
||||
|
||||
// probeSample stores one probe attempt and its collection time.
|
||||
type probeSample struct {
|
||||
responseMs float64 // -1 means loss
|
||||
timestamp time.Time
|
||||
}
|
||||
|
||||
// probeBucket stores one minute of aggregated probe data.
|
||||
type probeBucket struct {
|
||||
minute int32
|
||||
filled bool
|
||||
stats probeAggregate
|
||||
}
|
||||
|
||||
// probeAggregate accumulates successful response stats and total sample counts.
|
||||
type probeAggregate struct {
|
||||
sumMs float64
|
||||
minMs float64
|
||||
maxMs float64
|
||||
totalCount int
|
||||
successCount int
|
||||
}
|
||||
|
||||
func newProbeManager() *ProbeManager {
|
||||
return &ProbeManager{
|
||||
probes: make(map[string]*probeTask),
|
||||
httpClient: &http.Client{Timeout: 10 * time.Second},
|
||||
}
|
||||
}
|
||||
|
||||
func newProbeTask(config probe.Config) *probeTask {
|
||||
return &probeTask{
|
||||
config: config,
|
||||
cancel: make(chan struct{}),
|
||||
samples: make([]probeSample, 0, 64),
|
||||
}
|
||||
}
|
||||
|
||||
// newProbeAggregate initializes an aggregate with an unset minimum value.
|
||||
func newProbeAggregate() probeAggregate {
|
||||
return probeAggregate{minMs: math.MaxFloat64}
|
||||
}
|
||||
|
||||
// addResponse folds a single probe sample into the aggregate.
|
||||
func (agg *probeAggregate) addResponse(responseMs float64) {
|
||||
agg.totalCount++
|
||||
if responseMs < 0 {
|
||||
return
|
||||
}
|
||||
agg.successCount++
|
||||
agg.sumMs += responseMs
|
||||
if responseMs < agg.minMs {
|
||||
agg.minMs = responseMs
|
||||
}
|
||||
if responseMs > agg.maxMs {
|
||||
agg.maxMs = responseMs
|
||||
}
|
||||
}
|
||||
|
||||
// addAggregate merges another aggregate into this one.
|
||||
func (agg *probeAggregate) addAggregate(other probeAggregate) {
|
||||
if other.totalCount == 0 {
|
||||
return
|
||||
}
|
||||
agg.totalCount += other.totalCount
|
||||
agg.successCount += other.successCount
|
||||
agg.sumMs += other.sumMs
|
||||
if other.successCount == 0 {
|
||||
return
|
||||
}
|
||||
if agg.minMs == math.MaxFloat64 || other.minMs < agg.minMs {
|
||||
agg.minMs = other.minMs
|
||||
}
|
||||
if other.maxMs > agg.maxMs {
|
||||
agg.maxMs = other.maxMs
|
||||
}
|
||||
}
|
||||
|
||||
// hasData reports whether the aggregate contains any samples.
|
||||
func (agg probeAggregate) hasData() bool {
|
||||
return agg.totalCount > 0
|
||||
}
|
||||
|
||||
// result converts the aggregate into the probe result slice format.
|
||||
func (agg probeAggregate) result() probe.Result {
|
||||
avg := agg.avgResponse()
|
||||
minMs := 0.0
|
||||
if agg.successCount > 0 {
|
||||
minMs = math.Round(agg.minMs*100) / 100
|
||||
}
|
||||
return probe.Result{
|
||||
avg,
|
||||
minMs,
|
||||
math.Round(agg.maxMs*100) / 100,
|
||||
agg.lossPercentage(),
|
||||
}
|
||||
}
|
||||
|
||||
// avgResponse returns the rounded average of successful samples.
|
||||
func (agg probeAggregate) avgResponse() float64 {
|
||||
if agg.successCount == 0 {
|
||||
return 0
|
||||
}
|
||||
return math.Round(agg.sumMs/float64(agg.successCount)*100) / 100
|
||||
}
|
||||
|
||||
// lossPercentage returns the rounded failure rate for the aggregate.
|
||||
func (agg probeAggregate) lossPercentage() float64 {
|
||||
if agg.totalCount == 0 {
|
||||
return 0
|
||||
}
|
||||
return math.Round(float64(agg.totalCount-agg.successCount)/float64(agg.totalCount)*10000) / 100
|
||||
}
|
||||
|
||||
// SyncProbes replaces all probe tasks with the given configs.
|
||||
func (pm *ProbeManager) SyncProbes(configs []probe.Config) {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
|
||||
// Build set of new keys
|
||||
newKeys := make(map[string]probe.Config, len(configs))
|
||||
for _, cfg := range configs {
|
||||
if cfg.ID == "" {
|
||||
continue
|
||||
}
|
||||
newKeys[cfg.ID] = cfg
|
||||
}
|
||||
|
||||
// Stop removed probes
|
||||
for key, task := range pm.probes {
|
||||
if _, exists := newKeys[key]; !exists {
|
||||
close(task.cancel)
|
||||
delete(pm.probes, key)
|
||||
}
|
||||
}
|
||||
|
||||
// Start new probes and restart tasks whose config changed.
|
||||
for key, cfg := range newKeys {
|
||||
task, exists := pm.probes[key]
|
||||
if exists && task.config == cfg {
|
||||
continue
|
||||
}
|
||||
if exists {
|
||||
close(task.cancel)
|
||||
}
|
||||
task = newProbeTask(cfg)
|
||||
pm.probes[key] = task
|
||||
go pm.runProbe(task, true)
|
||||
}
|
||||
}
|
||||
|
||||
// ApplySync applies a full or incremental probe sync request.
|
||||
func (pm *ProbeManager) ApplySync(req probe.SyncRequest) (probe.SyncResponse, error) {
|
||||
switch req.Action {
|
||||
case probe.SyncActionReplace:
|
||||
pm.SyncProbes(req.Configs)
|
||||
return probe.SyncResponse{}, nil
|
||||
case probe.SyncActionUpsert:
|
||||
result, err := pm.UpsertProbe(req.Config, req.RunNow)
|
||||
if err != nil {
|
||||
return probe.SyncResponse{}, err
|
||||
}
|
||||
if result == nil {
|
||||
return probe.SyncResponse{}, nil
|
||||
}
|
||||
return probe.SyncResponse{Result: *result}, nil
|
||||
case probe.SyncActionDelete:
|
||||
if req.Config.ID == "" {
|
||||
return probe.SyncResponse{}, errors.New("missing probe ID for delete action")
|
||||
}
|
||||
pm.DeleteProbe(req.Config.ID)
|
||||
return probe.SyncResponse{}, nil
|
||||
default:
|
||||
return probe.SyncResponse{}, fmt.Errorf("unknown probe sync action: %d", req.Action)
|
||||
}
|
||||
}
|
||||
|
||||
// UpsertProbe creates or replaces a single probe task.
|
||||
func (pm *ProbeManager) UpsertProbe(config probe.Config, runNow bool) (*probe.Result, error) {
|
||||
if config.ID == "" {
|
||||
return nil, errors.New("missing probe ID")
|
||||
}
|
||||
|
||||
pm.mu.Lock()
|
||||
task, exists := pm.probes[config.ID]
|
||||
startTask := false
|
||||
if exists && task.config == config {
|
||||
pm.mu.Unlock()
|
||||
if !runNow {
|
||||
return nil, nil
|
||||
}
|
||||
return pm.runProbeNow(task), nil
|
||||
}
|
||||
if exists {
|
||||
close(task.cancel)
|
||||
}
|
||||
task = newProbeTask(config)
|
||||
pm.probes[config.ID] = task
|
||||
startTask = true
|
||||
pm.mu.Unlock()
|
||||
|
||||
if runNow {
|
||||
result := pm.runProbeNow(task)
|
||||
if startTask {
|
||||
go pm.runProbe(task, false)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
if startTask {
|
||||
go pm.runProbe(task, true)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// DeleteProbe stops and removes a single probe task.
|
||||
func (pm *ProbeManager) DeleteProbe(id string) {
|
||||
if id == "" {
|
||||
return
|
||||
}
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
if task, exists := pm.probes[id]; exists {
|
||||
close(task.cancel)
|
||||
delete(pm.probes, id)
|
||||
}
|
||||
}
|
||||
|
||||
// GetResults returns aggregated results for all probes over the last supplied duration in ms.
|
||||
func (pm *ProbeManager) GetResults(durationMs uint16) map[string]probe.Result {
|
||||
pm.mu.RLock()
|
||||
defer pm.mu.RUnlock()
|
||||
|
||||
results := make(map[string]probe.Result, len(pm.probes))
|
||||
now := time.Now()
|
||||
duration := time.Duration(durationMs) * time.Millisecond
|
||||
|
||||
for _, task := range pm.probes {
|
||||
task.mu.Lock()
|
||||
result, ok := task.resultLocked(duration, now)
|
||||
task.mu.Unlock()
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
results[task.config.ID] = result
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// Stop stops all probe tasks.
|
||||
func (pm *ProbeManager) Stop() {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
for key, task := range pm.probes {
|
||||
close(task.cancel)
|
||||
delete(pm.probes, key)
|
||||
}
|
||||
}
|
||||
|
||||
// runProbe executes a single probe task in a loop.
|
||||
func (pm *ProbeManager) runProbe(task *probeTask, runImmediately bool) {
|
||||
interval := time.Duration(task.config.Interval) * time.Second
|
||||
if interval < time.Second {
|
||||
interval = 10 * time.Second
|
||||
}
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
if runImmediately {
|
||||
pm.executeProbe(task)
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-task.cancel:
|
||||
return
|
||||
case <-ticker.C:
|
||||
pm.executeProbe(task)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *ProbeManager) runProbeNow(task *probeTask) *probe.Result {
|
||||
pm.executeProbe(task)
|
||||
task.mu.Lock()
|
||||
defer task.mu.Unlock()
|
||||
result, ok := task.resultLocked(time.Minute, time.Now())
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return &result
|
||||
}
|
||||
|
||||
// aggregateLocked collects probe data for the requested time window.
|
||||
func (task *probeTask) aggregateLocked(duration time.Duration, now time.Time) probeAggregate {
|
||||
cutoff := now.Add(-duration)
|
||||
// Keep short windows exact; longer windows read from minute buckets to avoid raw-sample retention.
|
||||
if duration <= probeRawRetention {
|
||||
return aggregateSamplesSince(task.samples, cutoff)
|
||||
}
|
||||
return aggregateBucketsSince(task.buckets[:], cutoff, now)
|
||||
}
|
||||
|
||||
func (task *probeTask) resultLocked(duration time.Duration, now time.Time) (probe.Result, bool) {
|
||||
agg := task.aggregateLocked(duration, now)
|
||||
hourAgg := task.aggregateLocked(time.Hour, now)
|
||||
if !agg.hasData() {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
result := agg.result()
|
||||
hourAvg := hourAgg.avgResponse()
|
||||
hourLoss := hourAgg.lossPercentage()
|
||||
if hourAgg.successCount > 0 {
|
||||
return probe.Result{
|
||||
result[0],
|
||||
hourAvg,
|
||||
math.Round(hourAgg.minMs*100) / 100,
|
||||
math.Round(hourAgg.maxMs*100) / 100,
|
||||
hourLoss,
|
||||
}, true
|
||||
}
|
||||
return probe.Result{result[0], hourAvg, 0, 0, hourLoss}, true
|
||||
}
|
||||
|
||||
// aggregateSamplesSince aggregates raw samples newer than the cutoff.
|
||||
func aggregateSamplesSince(samples []probeSample, cutoff time.Time) probeAggregate {
|
||||
agg := newProbeAggregate()
|
||||
for _, sample := range samples {
|
||||
if sample.timestamp.Before(cutoff) {
|
||||
continue
|
||||
}
|
||||
agg.addResponse(sample.responseMs)
|
||||
}
|
||||
return agg
|
||||
}
|
||||
|
||||
// aggregateBucketsSince aggregates minute buckets overlapping the requested window.
|
||||
func aggregateBucketsSince(buckets []probeBucket, cutoff, now time.Time) probeAggregate {
|
||||
agg := newProbeAggregate()
|
||||
startMinute := int32(cutoff.Unix() / 60)
|
||||
endMinute := int32(now.Unix() / 60)
|
||||
for _, bucket := range buckets {
|
||||
if !bucket.filled || bucket.minute < startMinute || bucket.minute > endMinute {
|
||||
continue
|
||||
}
|
||||
agg.addAggregate(bucket.stats)
|
||||
}
|
||||
return agg
|
||||
}
|
||||
|
||||
// addSampleLocked stores a fresh sample in both raw and per-minute retention buffers.
|
||||
func (task *probeTask) addSampleLocked(sample probeSample) {
|
||||
cutoff := sample.timestamp.Add(-probeRawRetention)
|
||||
start := 0
|
||||
for i := range task.samples {
|
||||
if !task.samples[i].timestamp.Before(cutoff) {
|
||||
start = i
|
||||
break
|
||||
}
|
||||
if i == len(task.samples)-1 {
|
||||
start = len(task.samples)
|
||||
}
|
||||
}
|
||||
if start > 0 {
|
||||
size := copy(task.samples, task.samples[start:])
|
||||
task.samples = task.samples[:size]
|
||||
}
|
||||
task.samples = append(task.samples, sample)
|
||||
|
||||
minute := int32(sample.timestamp.Unix() / 60)
|
||||
// Each slot stores one wall-clock minute, so the ring stays fixed-size at ~1h per probe.
|
||||
bucket := &task.buckets[minute%probeMinuteBucketLen]
|
||||
if !bucket.filled || bucket.minute != minute {
|
||||
bucket.minute = minute
|
||||
bucket.filled = true
|
||||
bucket.stats = newProbeAggregate()
|
||||
}
|
||||
bucket.stats.addResponse(sample.responseMs)
|
||||
}
|
||||
|
||||
// executeProbe runs the configured probe and records the sample.
|
||||
func (pm *ProbeManager) executeProbe(task *probeTask) {
|
||||
var responseMs float64
|
||||
|
||||
switch task.config.Protocol {
|
||||
case "icmp":
|
||||
responseMs = probeICMP(task.config.Target)
|
||||
case "tcp":
|
||||
responseMs = probeTCP(task.config.Target, task.config.Port)
|
||||
case "http":
|
||||
responseMs = probeHTTP(pm.httpClient, task.config.Target)
|
||||
default:
|
||||
slog.Warn("unknown probe protocol", "protocol", task.config.Protocol)
|
||||
return
|
||||
}
|
||||
|
||||
sample := probeSample{
|
||||
responseMs: responseMs,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
|
||||
task.mu.Lock()
|
||||
task.addSampleLocked(sample)
|
||||
task.mu.Unlock()
|
||||
}
|
||||
|
||||
// probeTCP measures pure TCP handshake response (excluding DNS resolution).
|
||||
// Returns -1 on failure.
|
||||
func probeTCP(target string, port uint16) float64 {
|
||||
// Resolve DNS first, outside the timing window
|
||||
ips, err := net.LookupHost(target)
|
||||
if err != nil || len(ips) == 0 {
|
||||
return -1
|
||||
}
|
||||
addr := net.JoinHostPort(ips[0], fmt.Sprintf("%d", port))
|
||||
|
||||
// Measure only the TCP handshake
|
||||
start := time.Now()
|
||||
conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
conn.Close()
|
||||
return float64(time.Since(start).Microseconds()) / 1000.0
|
||||
}
|
||||
|
||||
// probeHTTP measures HTTP GET request response. Returns -1 on failure.
|
||||
func probeHTTP(client *http.Client, url string) float64 {
|
||||
if client == nil {
|
||||
client = http.DefaultClient
|
||||
}
|
||||
start := time.Now()
|
||||
resp, err := client.Get(url)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode >= 400 {
|
||||
return -1
|
||||
}
|
||||
return float64(time.Since(start).Microseconds()) / 1000.0
|
||||
}
|
||||
242
agent/probe_ping.go
Normal file
242
agent/probe_ping.go
Normal file
@@ -0,0 +1,242 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/icmp"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
var pingTimeRegex = regexp.MustCompile(`time[=<]([\d.]+)\s*ms`)
|
||||
|
||||
type icmpPacketConn interface {
|
||||
Close() error
|
||||
}
|
||||
|
||||
// icmpMethod tracks which ICMP approach to use. Once a method succeeds or
|
||||
// all native methods fail, the choice is cached so subsequent probes skip
|
||||
// the trial-and-error overhead.
|
||||
type icmpMethod int
|
||||
|
||||
const (
|
||||
icmpUntried icmpMethod = iota // haven't tried yet
|
||||
icmpRaw // privileged raw socket
|
||||
icmpDatagram // unprivileged datagram socket
|
||||
icmpExecFallback // shell out to system ping command
|
||||
)
|
||||
|
||||
// icmpFamily holds the network parameters and cached detection result for one address family.
|
||||
type icmpFamily struct {
|
||||
rawNetwork string // e.g. "ip4:icmp" or "ip6:ipv6-icmp"
|
||||
dgramNetwork string // e.g. "udp4" or "udp6"
|
||||
listenAddr string // "0.0.0.0" or "::"
|
||||
echoType icmp.Type // outgoing echo request type
|
||||
replyType icmp.Type // expected echo reply type
|
||||
proto int // IANA protocol number for parsing replies
|
||||
isIPv6 bool
|
||||
mode icmpMethod // cached detection result (guarded by icmpModeMu)
|
||||
}
|
||||
|
||||
var (
|
||||
icmpV4 = icmpFamily{
|
||||
rawNetwork: "ip4:icmp",
|
||||
dgramNetwork: "udp4",
|
||||
listenAddr: "0.0.0.0",
|
||||
echoType: ipv4.ICMPTypeEcho,
|
||||
replyType: ipv4.ICMPTypeEchoReply,
|
||||
proto: 1,
|
||||
}
|
||||
icmpV6 = icmpFamily{
|
||||
rawNetwork: "ip6:ipv6-icmp",
|
||||
dgramNetwork: "udp6",
|
||||
listenAddr: "::",
|
||||
echoType: ipv6.ICMPTypeEchoRequest,
|
||||
replyType: ipv6.ICMPTypeEchoReply,
|
||||
proto: 58,
|
||||
isIPv6: true,
|
||||
}
|
||||
icmpModeMu sync.Mutex
|
||||
icmpListen = func(network, listenAddr string) (icmpPacketConn, error) {
|
||||
return icmp.ListenPacket(network, listenAddr)
|
||||
}
|
||||
)
|
||||
|
||||
// probeICMP sends an ICMP echo request and measures round-trip response.
|
||||
// Supports both IPv4 and IPv6 targets. The ICMP method (raw socket,
|
||||
// unprivileged datagram, or exec fallback) is detected once per address
|
||||
// family and cached for subsequent probes.
|
||||
// Returns response in milliseconds, or -1 on failure.
|
||||
func probeICMP(target string) float64 {
|
||||
family, ip := resolveICMPTarget(target)
|
||||
if family == nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
icmpModeMu.Lock()
|
||||
if family.mode == icmpUntried {
|
||||
family.mode = detectICMPMode(family, icmpListen)
|
||||
}
|
||||
mode := family.mode
|
||||
icmpModeMu.Unlock()
|
||||
|
||||
switch mode {
|
||||
case icmpRaw:
|
||||
return probeICMPNative(family.rawNetwork, family, &net.IPAddr{IP: ip})
|
||||
case icmpDatagram:
|
||||
return probeICMPNative(family.dgramNetwork, family, &net.UDPAddr{IP: ip})
|
||||
case icmpExecFallback:
|
||||
return probeICMPExec(target, family.isIPv6)
|
||||
default:
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// resolveICMPTarget resolves a target hostname or IP to determine the address
|
||||
// family and concrete IP address. Prefers IPv4 for dual-stack hostnames.
|
||||
func resolveICMPTarget(target string) (*icmpFamily, net.IP) {
|
||||
if ip := net.ParseIP(target); ip != nil {
|
||||
if ip.To4() != nil {
|
||||
return &icmpV4, ip.To4()
|
||||
}
|
||||
return &icmpV6, ip
|
||||
}
|
||||
|
||||
ips, err := net.LookupIP(target)
|
||||
if err != nil || len(ips) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
for _, ip := range ips {
|
||||
if v4 := ip.To4(); v4 != nil {
|
||||
return &icmpV4, v4
|
||||
}
|
||||
}
|
||||
return &icmpV6, ips[0]
|
||||
}
|
||||
|
||||
func detectICMPMode(family *icmpFamily, listen func(network, listenAddr string) (icmpPacketConn, error)) icmpMethod {
|
||||
label := "IPv4"
|
||||
if family.isIPv6 {
|
||||
label = "IPv6"
|
||||
}
|
||||
|
||||
if conn, err := listen(family.rawNetwork, family.listenAddr); err == nil {
|
||||
conn.Close()
|
||||
slog.Info("ICMP probe using raw socket", "family", label)
|
||||
return icmpRaw
|
||||
} else {
|
||||
slog.Debug("ICMP raw socket unavailable", "family", label, "err", err)
|
||||
}
|
||||
|
||||
if conn, err := listen(family.dgramNetwork, family.listenAddr); err == nil {
|
||||
conn.Close()
|
||||
slog.Info("ICMP probe using unprivileged datagram socket", "family", label)
|
||||
return icmpDatagram
|
||||
} else {
|
||||
slog.Debug("ICMP datagram socket unavailable", "family", label, "err", err)
|
||||
}
|
||||
|
||||
slog.Info("ICMP probe falling back to system ping command", "family", label)
|
||||
return icmpExecFallback
|
||||
}
|
||||
|
||||
// probeICMPNative sends an ICMP echo request using Go's x/net/icmp package.
|
||||
func probeICMPNative(network string, family *icmpFamily, dst net.Addr) float64 {
|
||||
conn, err := icmp.ListenPacket(network, family.listenAddr)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Build ICMP echo request
|
||||
msg := &icmp.Message{
|
||||
Type: family.echoType,
|
||||
Code: 0,
|
||||
Body: &icmp.Echo{
|
||||
ID: os.Getpid() & 0xffff,
|
||||
Seq: 1,
|
||||
Data: []byte("beszel-probe"),
|
||||
},
|
||||
}
|
||||
msgBytes, err := msg.Marshal(nil)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
// Set deadline before sending
|
||||
conn.SetDeadline(time.Now().Add(3 * time.Second))
|
||||
|
||||
start := time.Now()
|
||||
if _, err := conn.WriteTo(msgBytes, dst); err != nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
// Read reply
|
||||
buf := make([]byte, 1500)
|
||||
for {
|
||||
n, _, err := conn.ReadFrom(buf)
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
reply, err := icmp.ParseMessage(family.proto, buf[:n])
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
if reply.Type == family.replyType {
|
||||
return float64(time.Since(start).Microseconds()) / 1000.0
|
||||
}
|
||||
// Ignore non-echo-reply messages (e.g. destination unreachable) and keep reading
|
||||
}
|
||||
}
|
||||
|
||||
// probeICMPExec falls back to the system ping command. Returns -1 on failure.
|
||||
func probeICMPExec(target string, isIPv6 bool) float64 {
|
||||
var cmd *exec.Cmd
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
if isIPv6 {
|
||||
cmd = exec.Command("ping", "-6", "-n", "1", "-w", "3000", target)
|
||||
} else {
|
||||
cmd = exec.Command("ping", "-n", "1", "-w", "3000", target)
|
||||
}
|
||||
default: // linux, darwin, freebsd
|
||||
if isIPv6 {
|
||||
cmd = exec.Command("ping", "-6", "-c", "1", "-W", "3", target)
|
||||
} else {
|
||||
cmd = exec.Command("ping", "-c", "1", "-W", "3", target)
|
||||
}
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
// If ping fails but we got output, still try to parse
|
||||
if len(output) == 0 {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
matches := pingTimeRegex.FindSubmatch(output)
|
||||
if len(matches) >= 2 {
|
||||
if ms, err := strconv.ParseFloat(string(matches[1]), 64); err == nil {
|
||||
return ms
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: use wall clock time if ping succeeded but parsing failed
|
||||
if err == nil {
|
||||
return float64(time.Since(start).Microseconds()) / 1000.0
|
||||
}
|
||||
return -1
|
||||
}
|
||||
118
agent/probe_ping_test.go
Normal file
118
agent/probe_ping_test.go
Normal file
@@ -0,0 +1,118 @@
|
||||
//go:build testing
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type testICMPPacketConn struct{}
|
||||
|
||||
func (testICMPPacketConn) Close() error { return nil }
|
||||
|
||||
func TestDetectICMPMode(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
family *icmpFamily
|
||||
rawErr error
|
||||
udpErr error
|
||||
want icmpMethod
|
||||
wantNetworks []string
|
||||
}{
|
||||
{
|
||||
name: "IPv4 prefers raw socket when available",
|
||||
family: &icmpV4,
|
||||
want: icmpRaw,
|
||||
wantNetworks: []string{"ip4:icmp"},
|
||||
},
|
||||
{
|
||||
name: "IPv4 uses datagram when raw unavailable",
|
||||
family: &icmpV4,
|
||||
rawErr: errors.New("operation not permitted"),
|
||||
want: icmpDatagram,
|
||||
wantNetworks: []string{"ip4:icmp", "udp4"},
|
||||
},
|
||||
{
|
||||
name: "IPv4 falls back to exec when both unavailable",
|
||||
family: &icmpV4,
|
||||
rawErr: errors.New("operation not permitted"),
|
||||
udpErr: errors.New("protocol not supported"),
|
||||
want: icmpExecFallback,
|
||||
wantNetworks: []string{"ip4:icmp", "udp4"},
|
||||
},
|
||||
{
|
||||
name: "IPv6 prefers raw socket when available",
|
||||
family: &icmpV6,
|
||||
want: icmpRaw,
|
||||
wantNetworks: []string{"ip6:ipv6-icmp"},
|
||||
},
|
||||
{
|
||||
name: "IPv6 uses datagram when raw unavailable",
|
||||
family: &icmpV6,
|
||||
rawErr: errors.New("operation not permitted"),
|
||||
want: icmpDatagram,
|
||||
wantNetworks: []string{"ip6:ipv6-icmp", "udp6"},
|
||||
},
|
||||
{
|
||||
name: "IPv6 falls back to exec when both unavailable",
|
||||
family: &icmpV6,
|
||||
rawErr: errors.New("operation not permitted"),
|
||||
udpErr: errors.New("protocol not supported"),
|
||||
want: icmpExecFallback,
|
||||
wantNetworks: []string{"ip6:ipv6-icmp", "udp6"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
calls := make([]string, 0, 2)
|
||||
listen := func(network, listenAddr string) (icmpPacketConn, error) {
|
||||
require.Equal(t, tt.family.listenAddr, listenAddr)
|
||||
calls = append(calls, network)
|
||||
switch network {
|
||||
case tt.family.rawNetwork:
|
||||
if tt.rawErr != nil {
|
||||
return nil, tt.rawErr
|
||||
}
|
||||
case tt.family.dgramNetwork:
|
||||
if tt.udpErr != nil {
|
||||
return nil, tt.udpErr
|
||||
}
|
||||
default:
|
||||
t.Fatalf("unexpected network %q", network)
|
||||
}
|
||||
return testICMPPacketConn{}, nil
|
||||
}
|
||||
|
||||
assert.Equal(t, tt.want, detectICMPMode(tt.family, listen))
|
||||
assert.Equal(t, tt.wantNetworks, calls)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResolveICMPTarget(t *testing.T) {
|
||||
t.Run("IPv4 literal", func(t *testing.T) {
|
||||
family, ip := resolveICMPTarget("127.0.0.1")
|
||||
require.NotNil(t, family)
|
||||
assert.False(t, family.isIPv6)
|
||||
assert.Equal(t, "127.0.0.1", ip.String())
|
||||
})
|
||||
|
||||
t.Run("IPv6 literal", func(t *testing.T) {
|
||||
family, ip := resolveICMPTarget("::1")
|
||||
require.NotNil(t, family)
|
||||
assert.True(t, family.isIPv6)
|
||||
assert.Equal(t, "::1", ip.String())
|
||||
})
|
||||
|
||||
t.Run("IPv4-mapped IPv6 resolves as IPv4", func(t *testing.T) {
|
||||
family, ip := resolveICMPTarget("::ffff:127.0.0.1")
|
||||
require.NotNil(t, family)
|
||||
assert.False(t, family.isIPv6)
|
||||
assert.Equal(t, "127.0.0.1", ip.String())
|
||||
})
|
||||
}
|
||||
293
agent/probe_test.go
Normal file
293
agent/probe_test.go
Normal file
@@ -0,0 +1,293 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestProbeTaskAggregateLockedUsesRawSamplesForShortWindows(t *testing.T) {
|
||||
now := time.Date(2026, time.April, 21, 12, 0, 0, 0, time.UTC)
|
||||
task := &probeTask{}
|
||||
|
||||
task.addSampleLocked(probeSample{responseMs: 10, timestamp: now.Add(-90 * time.Second)})
|
||||
task.addSampleLocked(probeSample{responseMs: 20, timestamp: now.Add(-30 * time.Second)})
|
||||
task.addSampleLocked(probeSample{responseMs: -1, timestamp: now.Add(-10 * time.Second)})
|
||||
|
||||
agg := task.aggregateLocked(time.Minute, now)
|
||||
require.True(t, agg.hasData())
|
||||
assert.Equal(t, 2, agg.totalCount)
|
||||
assert.Equal(t, 1, agg.successCount)
|
||||
assert.Equal(t, 20.0, agg.result()[0])
|
||||
assert.Equal(t, 20.0, agg.result()[1])
|
||||
assert.Equal(t, 20.0, agg.result()[2])
|
||||
assert.Equal(t, 50.0, agg.result()[3])
|
||||
}
|
||||
|
||||
func TestProbeTaskAggregateLockedUsesMinuteBucketsForLongWindows(t *testing.T) {
|
||||
now := time.Date(2026, time.April, 21, 12, 0, 30, 0, time.UTC)
|
||||
task := &probeTask{}
|
||||
|
||||
task.addSampleLocked(probeSample{responseMs: 10, timestamp: now.Add(-11 * time.Minute)})
|
||||
task.addSampleLocked(probeSample{responseMs: 20, timestamp: now.Add(-9 * time.Minute)})
|
||||
task.addSampleLocked(probeSample{responseMs: 40, timestamp: now.Add(-5 * time.Minute)})
|
||||
task.addSampleLocked(probeSample{responseMs: -1, timestamp: now.Add(-90 * time.Second)})
|
||||
task.addSampleLocked(probeSample{responseMs: 30, timestamp: now.Add(-30 * time.Second)})
|
||||
|
||||
agg := task.aggregateLocked(10*time.Minute, now)
|
||||
require.True(t, agg.hasData())
|
||||
assert.Equal(t, 4, agg.totalCount)
|
||||
assert.Equal(t, 3, agg.successCount)
|
||||
assert.Equal(t, 30.0, agg.result()[0])
|
||||
assert.Equal(t, 20.0, agg.result()[1])
|
||||
assert.Equal(t, 40.0, agg.result()[2])
|
||||
assert.Equal(t, 25.0, agg.result()[3])
|
||||
}
|
||||
|
||||
func TestProbeTaskAddSampleLockedTrimsRawSamplesButKeepsBucketHistory(t *testing.T) {
|
||||
now := time.Date(2026, time.April, 21, 12, 0, 0, 0, time.UTC)
|
||||
task := &probeTask{}
|
||||
|
||||
task.addSampleLocked(probeSample{responseMs: 10, timestamp: now.Add(-10 * time.Minute)})
|
||||
task.addSampleLocked(probeSample{responseMs: 20, timestamp: now})
|
||||
|
||||
require.Len(t, task.samples, 1)
|
||||
assert.Equal(t, 20.0, task.samples[0].responseMs)
|
||||
|
||||
agg := task.aggregateLocked(10*time.Minute, now)
|
||||
require.True(t, agg.hasData())
|
||||
assert.Equal(t, 2, agg.totalCount)
|
||||
assert.Equal(t, 2, agg.successCount)
|
||||
assert.Equal(t, 15.0, agg.result()[0])
|
||||
assert.Equal(t, 10.0, agg.result()[1])
|
||||
assert.Equal(t, 20.0, agg.result()[2])
|
||||
assert.Equal(t, 0.0, agg.result()[3])
|
||||
}
|
||||
|
||||
func TestProbeManagerGetResultsIncludesHourResponseRange(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
task := &probeTask{config: probe.Config{ID: "probe-1"}}
|
||||
task.addSampleLocked(probeSample{responseMs: 10, timestamp: now.Add(-30 * time.Minute)})
|
||||
task.addSampleLocked(probeSample{responseMs: 20, timestamp: now.Add(-9 * time.Minute)})
|
||||
task.addSampleLocked(probeSample{responseMs: 40, timestamp: now.Add(-5 * time.Minute)})
|
||||
task.addSampleLocked(probeSample{responseMs: -1, timestamp: now.Add(-90 * time.Second)})
|
||||
task.addSampleLocked(probeSample{responseMs: 30, timestamp: now.Add(-30 * time.Second)})
|
||||
|
||||
pm := &ProbeManager{probes: map[string]*probeTask{"icmp:example.com": task}}
|
||||
|
||||
results := pm.GetResults(uint16(time.Minute / time.Millisecond))
|
||||
result, ok := results["probe-1"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, result, 5)
|
||||
assert.Equal(t, 30.0, result[0])
|
||||
assert.Equal(t, 25.0, result[1])
|
||||
assert.Equal(t, 10.0, result[2])
|
||||
assert.Equal(t, 40.0, result[3])
|
||||
assert.Equal(t, 20.0, result[4])
|
||||
}
|
||||
|
||||
func TestProbeManagerGetResultsIncludesLossOnlyHourData(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
task := &probeTask{config: probe.Config{ID: "probe-1"}}
|
||||
task.addSampleLocked(probeSample{responseMs: -1, timestamp: now.Add(-30 * time.Second)})
|
||||
task.addSampleLocked(probeSample{responseMs: -1, timestamp: now.Add(-10 * time.Second)})
|
||||
|
||||
pm := &ProbeManager{probes: map[string]*probeTask{"icmp:example.com": task}}
|
||||
|
||||
results := pm.GetResults(uint16(time.Minute / time.Millisecond))
|
||||
result, ok := results["probe-1"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, result, 5)
|
||||
assert.Equal(t, 0.0, result[0])
|
||||
assert.Equal(t, 0.0, result[1])
|
||||
assert.Equal(t, 0.0, result[2])
|
||||
assert.Equal(t, 0.0, result[3])
|
||||
assert.Equal(t, 100.0, result[4])
|
||||
}
|
||||
|
||||
func TestProbeConfigResultKeyUsesSyncedID(t *testing.T) {
|
||||
cfg := probe.Config{ID: "probe-1", Target: "1.1.1.1", Protocol: "icmp", Interval: 10}
|
||||
assert.Equal(t, "probe-1", cfg.ID)
|
||||
}
|
||||
|
||||
func TestProbeManagerSyncProbesSkipsConfigsWithoutStableID(t *testing.T) {
|
||||
validCfg := probe.Config{ID: "probe-1", Target: "ignored", Protocol: "noop", Interval: 10}
|
||||
invalidCfg := probe.Config{Target: "ignored", Protocol: "noop", Interval: 10}
|
||||
|
||||
pm := newProbeManager()
|
||||
pm.SyncProbes([]probe.Config{validCfg, invalidCfg})
|
||||
defer pm.Stop()
|
||||
|
||||
_, validExists := pm.probes[validCfg.ID]
|
||||
_, invalidExists := pm.probes[invalidCfg.ID]
|
||||
assert.True(t, validExists)
|
||||
assert.False(t, invalidExists)
|
||||
}
|
||||
|
||||
func TestProbeManagerSyncProbesStopsRemovedTasksButKeepsExisting(t *testing.T) {
|
||||
keepCfg := probe.Config{ID: "probe-1", Target: "ignored", Protocol: "noop", Interval: 10}
|
||||
removeCfg := probe.Config{ID: "probe-2", Target: "ignored", Protocol: "noop", Interval: 10}
|
||||
|
||||
keptTask := &probeTask{config: keepCfg, cancel: make(chan struct{})}
|
||||
removedTask := &probeTask{config: removeCfg, cancel: make(chan struct{})}
|
||||
pm := &ProbeManager{
|
||||
probes: map[string]*probeTask{
|
||||
keepCfg.ID: keptTask,
|
||||
removeCfg.ID: removedTask,
|
||||
},
|
||||
}
|
||||
|
||||
pm.SyncProbes([]probe.Config{keepCfg})
|
||||
|
||||
assert.Same(t, keptTask, pm.probes[keepCfg.ID])
|
||||
_, exists := pm.probes[removeCfg.ID]
|
||||
assert.False(t, exists)
|
||||
|
||||
select {
|
||||
case <-removedTask.cancel:
|
||||
default:
|
||||
t.Fatal("expected removed probe task to be cancelled")
|
||||
}
|
||||
|
||||
select {
|
||||
case <-keptTask.cancel:
|
||||
t.Fatal("expected existing probe task to remain active")
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func TestProbeManagerSyncProbesRestartsChangedConfig(t *testing.T) {
|
||||
originalCfg := probe.Config{ID: "probe-1", Target: "ignored-a", Protocol: "noop", Interval: 10}
|
||||
updatedCfg := probe.Config{ID: "probe-1", Target: "ignored-b", Protocol: "noop", Interval: 10}
|
||||
originalTask := &probeTask{config: originalCfg, cancel: make(chan struct{})}
|
||||
pm := &ProbeManager{
|
||||
probes: map[string]*probeTask{
|
||||
originalCfg.ID: originalTask,
|
||||
},
|
||||
}
|
||||
|
||||
pm.SyncProbes([]probe.Config{updatedCfg})
|
||||
defer pm.Stop()
|
||||
|
||||
restartedTask := pm.probes[updatedCfg.ID]
|
||||
assert.NotSame(t, originalTask, restartedTask)
|
||||
assert.Equal(t, updatedCfg, restartedTask.config)
|
||||
|
||||
select {
|
||||
case <-originalTask.cancel:
|
||||
default:
|
||||
t.Fatal("expected changed probe task to be cancelled")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProbeManagerApplySyncUpsertRunsImmediatelyAndReturnsResult(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
pm := &ProbeManager{
|
||||
probes: make(map[string]*probeTask),
|
||||
httpClient: server.Client(),
|
||||
}
|
||||
|
||||
resp, err := pm.ApplySync(probe.SyncRequest{
|
||||
Action: probe.SyncActionUpsert,
|
||||
Config: probe.Config{ID: "probe-1", Target: server.URL, Protocol: "http", Interval: 10},
|
||||
RunNow: true,
|
||||
})
|
||||
defer pm.Stop()
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Len(t, resp.Result, 5)
|
||||
assert.GreaterOrEqual(t, resp.Result[0], 0.0)
|
||||
assert.Equal(t, 0.0, resp.Result[4])
|
||||
|
||||
task := pm.probes["probe-1"]
|
||||
require.NotNil(t, task)
|
||||
task.mu.Lock()
|
||||
defer task.mu.Unlock()
|
||||
require.Len(t, task.samples, 1)
|
||||
}
|
||||
|
||||
func TestProbeManagerApplySyncDeleteRemovesTask(t *testing.T) {
|
||||
config := probe.Config{ID: "probe-1", Target: "1.1.1.1", Protocol: "icmp", Interval: 10}
|
||||
task := &probeTask{config: config, cancel: make(chan struct{})}
|
||||
pm := &ProbeManager{
|
||||
probes: map[string]*probeTask{config.ID: task},
|
||||
}
|
||||
|
||||
_, err := pm.ApplySync(probe.SyncRequest{
|
||||
Action: probe.SyncActionDelete,
|
||||
Config: probe.Config{ID: config.ID},
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
_, exists := pm.probes[config.ID]
|
||||
assert.False(t, exists)
|
||||
|
||||
select {
|
||||
case <-task.cancel:
|
||||
default:
|
||||
t.Fatal("expected deleted probe task to be cancelled")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProbeHTTP(t *testing.T) {
|
||||
t.Run("success", func(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
responseMs := probeHTTP(server.Client(), server.URL)
|
||||
assert.GreaterOrEqual(t, responseMs, 0.0)
|
||||
})
|
||||
|
||||
t.Run("server error", func(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, "boom", http.StatusInternalServerError)
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
assert.Equal(t, -1.0, probeHTTP(server.Client(), server.URL))
|
||||
})
|
||||
}
|
||||
|
||||
func TestProbeTCP(t *testing.T) {
|
||||
t.Run("success", func(t *testing.T) {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
defer listener.Close()
|
||||
|
||||
accepted := make(chan struct{})
|
||||
go func() {
|
||||
defer close(accepted)
|
||||
conn, err := listener.Accept()
|
||||
if err == nil {
|
||||
_ = conn.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
port := uint16(listener.Addr().(*net.TCPAddr).Port)
|
||||
responseMs := probeTCP("127.0.0.1", port)
|
||||
assert.GreaterOrEqual(t, responseMs, 0.0)
|
||||
<-accepted
|
||||
})
|
||||
|
||||
t.Run("connection failure", func(t *testing.T) {
|
||||
listener, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
port := uint16(listener.Addr().(*net.TCPAddr).Port)
|
||||
require.NoError(t, listener.Close())
|
||||
|
||||
assert.Equal(t, -1.0, probeTCP("127.0.0.1", port))
|
||||
})
|
||||
}
|
||||
@@ -19,13 +19,20 @@ import (
|
||||
"github.com/shirou/gopsutil/v4/sensors"
|
||||
)
|
||||
|
||||
var errTemperatureFetchTimeout = errors.New("temperature collection timed out")
|
||||
|
||||
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
||||
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
||||
|
||||
type SensorConfig struct {
|
||||
context context.Context
|
||||
sensors map[string]struct{}
|
||||
primarySensor string
|
||||
timeout time.Duration
|
||||
isBlacklist bool
|
||||
hasWildcards bool
|
||||
skipCollection bool
|
||||
firstRun bool
|
||||
}
|
||||
|
||||
func (a *Agent) newSensorConfig() *SensorConfig {
|
||||
@@ -33,25 +40,29 @@ func (a *Agent) newSensorConfig() *SensorConfig {
|
||||
sysSensors, _ := utils.GetEnv("SYS_SENSORS")
|
||||
sensorsEnvVal, sensorsSet := utils.GetEnv("SENSORS")
|
||||
skipCollection := sensorsSet && sensorsEnvVal == ""
|
||||
sensorsTimeout, _ := utils.GetEnv("SENSORS_TIMEOUT")
|
||||
|
||||
return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, skipCollection)
|
||||
return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, sensorsTimeout, skipCollection)
|
||||
}
|
||||
|
||||
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
||||
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
||||
|
||||
var (
|
||||
errTemperatureFetchTimeout = errors.New("temperature collection timed out")
|
||||
temperatureFetchTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
// newSensorConfigWithEnv creates a SensorConfig with the provided environment variables
|
||||
// sensorsSet indicates if the SENSORS environment variable was explicitly set (even to empty string)
|
||||
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal string, skipCollection bool) *SensorConfig {
|
||||
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, sensorsTimeout string, skipCollection bool) *SensorConfig {
|
||||
timeout := 2 * time.Second
|
||||
if sensorsTimeout != "" {
|
||||
if d, err := time.ParseDuration(sensorsTimeout); err == nil {
|
||||
timeout = d
|
||||
} else {
|
||||
slog.Warn("Invalid SENSORS_TIMEOUT", "value", sensorsTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
config := &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: primarySensor,
|
||||
timeout: timeout,
|
||||
skipCollection: skipCollection,
|
||||
firstRun: true,
|
||||
sensors: make(map[string]struct{}),
|
||||
}
|
||||
|
||||
@@ -167,6 +178,14 @@ func (a *Agent) getTempsWithTimeout(getTemps getTempsFn) ([]sensors.TemperatureS
|
||||
err error
|
||||
}
|
||||
|
||||
// Use a longer timeout on the first run to allow for initialization
|
||||
// (e.g. Windows LHM subprocess startup)
|
||||
timeout := a.sensorConfig.timeout
|
||||
if a.sensorConfig.firstRun {
|
||||
a.sensorConfig.firstRun = false
|
||||
timeout = 10 * time.Second
|
||||
}
|
||||
|
||||
resultCh := make(chan result, 1)
|
||||
go func() {
|
||||
temps, err := a.getTempsWithPanicRecovery(getTemps)
|
||||
@@ -176,7 +195,7 @@ func (a *Agent) getTempsWithTimeout(getTemps getTempsFn) ([]sensors.TemperatureS
|
||||
select {
|
||||
case res := <-resultCh:
|
||||
return res.temps, res.err
|
||||
case <-time.After(temperatureFetchTimeout):
|
||||
case <-time.After(timeout):
|
||||
return nil, errTemperatureFetchTimeout
|
||||
}
|
||||
}
|
||||
|
||||
@@ -168,6 +168,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
primarySensor string
|
||||
sysSensors string
|
||||
sensors string
|
||||
sensorsTimeout string
|
||||
skipCollection bool
|
||||
expectedConfig *SensorConfig
|
||||
}{
|
||||
@@ -179,12 +180,37 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{},
|
||||
isBlacklist: false,
|
||||
hasWildcards: false,
|
||||
skipCollection: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Custom timeout",
|
||||
primarySensor: "",
|
||||
sysSensors: "",
|
||||
sensors: "",
|
||||
sensorsTimeout: "5s",
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
timeout: 5 * time.Second,
|
||||
sensors: map[string]struct{}{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Invalid timeout falls back to default",
|
||||
primarySensor: "",
|
||||
sysSensors: "",
|
||||
sensors: "",
|
||||
sensorsTimeout: "notaduration",
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Explicitly set to empty string",
|
||||
primarySensor: "",
|
||||
@@ -194,6 +220,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{},
|
||||
isBlacklist: false,
|
||||
hasWildcards: false,
|
||||
@@ -208,6 +235,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{},
|
||||
isBlacklist: false,
|
||||
hasWildcards: false,
|
||||
@@ -221,6 +249,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{
|
||||
"cpu_temp": {},
|
||||
"gpu_temp": {},
|
||||
@@ -237,6 +266,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{
|
||||
"cpu_temp": {},
|
||||
"gpu_temp": {},
|
||||
@@ -253,6 +283,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{
|
||||
"cpu_*": {},
|
||||
"gpu_temp": {},
|
||||
@@ -269,6 +300,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{
|
||||
"cpu_*": {},
|
||||
"gpu_temp": {},
|
||||
@@ -284,6 +316,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
sensors: "cpu_temp",
|
||||
expectedConfig: &SensorConfig{
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{
|
||||
"cpu_temp": {},
|
||||
},
|
||||
@@ -295,7 +328,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := agent.newSensorConfigWithEnv(tt.primarySensor, tt.sysSensors, tt.sensors, tt.skipCollection)
|
||||
result := agent.newSensorConfigWithEnv(tt.primarySensor, tt.sysSensors, tt.sensors, tt.sensorsTimeout, tt.skipCollection)
|
||||
|
||||
// Check primary sensor
|
||||
assert.Equal(t, tt.expectedConfig.primarySensor, result.primarySensor)
|
||||
@@ -314,6 +347,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
// Check flags
|
||||
assert.Equal(t, tt.expectedConfig.isBlacklist, result.isBlacklist)
|
||||
assert.Equal(t, tt.expectedConfig.hasWildcards, result.hasWildcards)
|
||||
assert.Equal(t, tt.expectedConfig.timeout, result.timeout)
|
||||
|
||||
// Check context
|
||||
if tt.sysSensors != "" {
|
||||
@@ -333,12 +367,14 @@ func TestNewSensorConfig(t *testing.T) {
|
||||
t.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", "test_primary")
|
||||
t.Setenv("BESZEL_AGENT_SYS_SENSORS", "/test/path")
|
||||
t.Setenv("BESZEL_AGENT_SENSORS", "test_sensor1,test_*,test_sensor3")
|
||||
t.Setenv("BESZEL_AGENT_SENSORS_TIMEOUT", "7s")
|
||||
|
||||
agent := &Agent{}
|
||||
result := agent.newSensorConfig()
|
||||
|
||||
// Verify results
|
||||
assert.Equal(t, "test_primary", result.primarySensor)
|
||||
assert.Equal(t, 7*time.Second, result.timeout)
|
||||
assert.NotNil(t, result.sensors)
|
||||
assert.Equal(t, 3, len(result.sensors))
|
||||
assert.True(t, result.hasWildcards)
|
||||
@@ -532,15 +568,10 @@ func TestGetTempsWithTimeout(t *testing.T) {
|
||||
agent := &Agent{
|
||||
sensorConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
timeout: 10 * time.Millisecond,
|
||||
},
|
||||
}
|
||||
|
||||
originalTimeout := temperatureFetchTimeout
|
||||
t.Cleanup(func() {
|
||||
temperatureFetchTimeout = originalTimeout
|
||||
})
|
||||
temperatureFetchTimeout = 10 * time.Millisecond
|
||||
|
||||
t.Run("returns temperatures before timeout", func(t *testing.T) {
|
||||
temps, err := agent.getTempsWithTimeout(func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||
return []sensors.TemperatureStat{{SensorKey: "cpu_temp", Temperature: 42}}, nil
|
||||
@@ -567,15 +598,13 @@ func TestUpdateTemperaturesSkipsOnTimeout(t *testing.T) {
|
||||
systemInfo: system.Info{DashboardTemp: 99},
|
||||
sensorConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
timeout: 10 * time.Millisecond,
|
||||
},
|
||||
}
|
||||
|
||||
originalTimeout := temperatureFetchTimeout
|
||||
t.Cleanup(func() {
|
||||
temperatureFetchTimeout = originalTimeout
|
||||
getSensorTemps = sensors.TemperaturesWithContext
|
||||
})
|
||||
temperatureFetchTimeout = 10 * time.Millisecond
|
||||
getSensorTemps = func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
return nil, nil
|
||||
|
||||
@@ -193,7 +193,7 @@ func (a *Agent) handleSSHRequest(w io.Writer, req *common.HubRequest[cbor.RawMes
|
||||
|
||||
// handleLegacyStats serves the legacy one-shot stats payload for older hubs
|
||||
func (a *Agent) handleLegacyStats(w io.Writer, hubVersion semver.Version) error {
|
||||
stats := a.gatherStats(common.DataRequestOptions{CacheTimeMs: 60_000})
|
||||
stats := a.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||
return a.writeToSession(w, stats, hubVersion)
|
||||
}
|
||||
|
||||
|
||||
@@ -31,6 +31,9 @@ type SmartManager struct {
|
||||
lastScanTime time.Time
|
||||
smartctlPath string
|
||||
excludedDevices map[string]struct{}
|
||||
darwinNvmeOnce sync.Once
|
||||
darwinNvmeCapacity map[string]uint64 // serial → bytes cache, written once via darwinNvmeOnce
|
||||
darwinNvmeProvider func() ([]byte, error) // overridable for testing
|
||||
}
|
||||
|
||||
type scanOutput struct {
|
||||
@@ -1033,6 +1036,52 @@ func parseScsiGigabytesProcessed(value string) int64 {
|
||||
return parsed
|
||||
}
|
||||
|
||||
// lookupDarwinNvmeCapacity returns the capacity in bytes for a given NVMe serial number on Darwin.
|
||||
// It uses system_profiler SPNVMeDataType to get capacity since Apple SSDs don't report user_capacity
|
||||
// via smartctl. Results are cached after the first call via sync.Once.
|
||||
func (sm *SmartManager) lookupDarwinNvmeCapacity(serial string) uint64 {
|
||||
sm.darwinNvmeOnce.Do(func() {
|
||||
sm.darwinNvmeCapacity = make(map[string]uint64)
|
||||
|
||||
provider := sm.darwinNvmeProvider
|
||||
if provider == nil {
|
||||
provider = func() ([]byte, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
return exec.CommandContext(ctx, "system_profiler", "SPNVMeDataType", "-json").Output()
|
||||
}
|
||||
}
|
||||
|
||||
out, err := provider()
|
||||
if err != nil {
|
||||
slog.Debug("system_profiler NVMe lookup failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
var result struct {
|
||||
SPNVMeDataType []struct {
|
||||
Items []struct {
|
||||
DeviceSerial string `json:"device_serial"`
|
||||
SizeInBytes uint64 `json:"size_in_bytes"`
|
||||
} `json:"_items"`
|
||||
} `json:"SPNVMeDataType"`
|
||||
}
|
||||
if err := json.Unmarshal(out, &result); err != nil {
|
||||
slog.Debug("system_profiler NVMe parse failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, controller := range result.SPNVMeDataType {
|
||||
for _, item := range controller.Items {
|
||||
if item.DeviceSerial != "" && item.SizeInBytes > 0 {
|
||||
sm.darwinNvmeCapacity[item.DeviceSerial] = item.SizeInBytes
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
return sm.darwinNvmeCapacity[serial]
|
||||
}
|
||||
|
||||
// parseSmartForNvme parses the output of smartctl --all -j /dev/nvmeX and updates the SmartDataMap
|
||||
// Returns hasValidData and exitStatus
|
||||
func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
||||
@@ -1069,6 +1118,12 @@ func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
||||
smartData.SerialNumber = data.SerialNumber
|
||||
smartData.FirmwareVersion = data.FirmwareVersion
|
||||
smartData.Capacity = data.UserCapacity.Bytes
|
||||
if smartData.Capacity == 0 {
|
||||
smartData.Capacity = data.NVMeTotalCapacity
|
||||
}
|
||||
if smartData.Capacity == 0 && (runtime.GOOS == "darwin" || sm.darwinNvmeProvider != nil) {
|
||||
smartData.Capacity = sm.lookupDarwinNvmeCapacity(data.SerialNumber)
|
||||
}
|
||||
smartData.Temperature = data.NVMeSmartHealthInformationLog.Temperature
|
||||
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
||||
smartData.DiskName = data.Device.Name
|
||||
|
||||
@@ -1199,3 +1199,81 @@ func TestIsNvmeControllerPath(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseSmartForNvmeAppleSSD(t *testing.T) {
|
||||
// Apple SSDs don't report user_capacity via smartctl; capacity should be fetched
|
||||
// from system_profiler via the darwinNvmeProvider fallback.
|
||||
fixturePath := filepath.Join("test-data", "smart", "apple_nvme.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
providerCalls := 0
|
||||
fakeProvider := func() ([]byte, error) {
|
||||
providerCalls++
|
||||
return []byte(`{
|
||||
"SPNVMeDataType": [{
|
||||
"_items": [{
|
||||
"device_serial": "0ba0147940253c15",
|
||||
"size_in_bytes": 251000193024
|
||||
}]
|
||||
}]
|
||||
}`), nil
|
||||
}
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
darwinNvmeProvider: fakeProvider,
|
||||
}
|
||||
|
||||
hasData, _ := sm.parseSmartForNvme(data)
|
||||
require.True(t, hasData)
|
||||
|
||||
deviceData, ok := sm.SmartDataMap["0ba0147940253c15"]
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "APPLE SSD AP0256Q", deviceData.ModelName)
|
||||
assert.Equal(t, uint64(251000193024), deviceData.Capacity)
|
||||
assert.Equal(t, uint8(42), deviceData.Temperature)
|
||||
assert.Equal(t, "PASSED", deviceData.SmartStatus)
|
||||
assert.Equal(t, 1, providerCalls, "system_profiler should be called once")
|
||||
|
||||
// Second parse: provider should NOT be called again (cache hit)
|
||||
_, _ = sm.parseSmartForNvme(data)
|
||||
assert.Equal(t, 1, providerCalls, "system_profiler should not be called again after caching")
|
||||
}
|
||||
|
||||
func TestLookupDarwinNvmeCapacityMultipleDisks(t *testing.T) {
|
||||
fakeProvider := func() ([]byte, error) {
|
||||
return []byte(`{
|
||||
"SPNVMeDataType": [
|
||||
{
|
||||
"_items": [
|
||||
{"device_serial": "serial-disk0", "size_in_bytes": 251000193024},
|
||||
{"device_serial": "serial-disk1", "size_in_bytes": 1000204886016}
|
||||
]
|
||||
},
|
||||
{
|
||||
"_items": [
|
||||
{"device_serial": "serial-disk2", "size_in_bytes": 512110190592}
|
||||
]
|
||||
}
|
||||
]
|
||||
}`), nil
|
||||
}
|
||||
|
||||
sm := &SmartManager{darwinNvmeProvider: fakeProvider}
|
||||
assert.Equal(t, uint64(251000193024), sm.lookupDarwinNvmeCapacity("serial-disk0"))
|
||||
assert.Equal(t, uint64(1000204886016), sm.lookupDarwinNvmeCapacity("serial-disk1"))
|
||||
assert.Equal(t, uint64(512110190592), sm.lookupDarwinNvmeCapacity("serial-disk2"))
|
||||
assert.Equal(t, uint64(0), sm.lookupDarwinNvmeCapacity("unknown-serial"))
|
||||
}
|
||||
|
||||
func TestLookupDarwinNvmeCapacityProviderError(t *testing.T) {
|
||||
fakeProvider := func() ([]byte, error) {
|
||||
return nil, errors.New("system_profiler not found")
|
||||
}
|
||||
|
||||
sm := &SmartManager{darwinNvmeProvider: fakeProvider}
|
||||
assert.Equal(t, uint64(0), sm.lookupDarwinNvmeCapacity("any-serial"))
|
||||
// Cache should be initialized even on error so we don't retry (Once already fired)
|
||||
assert.NotNil(t, sm.darwinNvmeCapacity)
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
"github.com/henrygd/beszel/agent/battery"
|
||||
@@ -23,13 +22,6 @@ import (
|
||||
"github.com/shirou/gopsutil/v4/mem"
|
||||
)
|
||||
|
||||
// prevDisk stores previous per-device disk counters for a given cache interval
|
||||
type prevDisk struct {
|
||||
readBytes uint64
|
||||
writeBytes uint64
|
||||
at time.Time
|
||||
}
|
||||
|
||||
// Sets initial / non-changing values about the host system
|
||||
func (a *Agent) refreshSystemDetails() {
|
||||
a.systemInfo.AgentVersion = beszel.Version
|
||||
@@ -115,6 +107,26 @@ func (a *Agent) refreshSystemDetails() {
|
||||
}
|
||||
}
|
||||
|
||||
// attachSystemDetails returns details only for fresh default-interval responses.
|
||||
func (a *Agent) attachSystemDetails(data *system.CombinedData, cacheTimeMs uint16, includeRequested bool) *system.CombinedData {
|
||||
if cacheTimeMs != defaultDataCacheTimeMs || (!includeRequested && !a.detailsDirty) {
|
||||
return data
|
||||
}
|
||||
|
||||
// copy data to avoid adding details to the original cached struct
|
||||
response := *data
|
||||
response.Details = &a.systemDetails
|
||||
a.detailsDirty = false
|
||||
return &response
|
||||
}
|
||||
|
||||
// updateSystemDetails applies a mutation to the static details payload and marks
|
||||
// it for inclusion on the next fresh default-interval response.
|
||||
func (a *Agent) updateSystemDetails(updateFunc func(details *system.Details)) {
|
||||
updateFunc(&a.systemDetails)
|
||||
a.detailsDirty = true
|
||||
}
|
||||
|
||||
// Returns current info, stats about the host system
|
||||
func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
||||
var systemStats system.Stats
|
||||
|
||||
61
agent/system_test.go
Normal file
61
agent/system_test.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGatherStatsDoesNotAttachDetailsToCachedRequests(t *testing.T) {
|
||||
agent := &Agent{
|
||||
cache: NewSystemDataCache(),
|
||||
systemDetails: system.Details{Hostname: "updated-host", Podman: true},
|
||||
detailsDirty: true,
|
||||
}
|
||||
cached := &system.CombinedData{
|
||||
Info: system.Info{Hostname: "cached-host"},
|
||||
}
|
||||
agent.cache.Set(cached, defaultDataCacheTimeMs)
|
||||
|
||||
response := agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||
|
||||
assert.Same(t, cached, response)
|
||||
assert.Nil(t, response.Details)
|
||||
assert.True(t, agent.detailsDirty)
|
||||
assert.Equal(t, "cached-host", response.Info.Hostname)
|
||||
assert.Nil(t, cached.Details)
|
||||
|
||||
secondResponse := agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||
assert.Same(t, cached, secondResponse)
|
||||
assert.Nil(t, secondResponse.Details)
|
||||
}
|
||||
|
||||
func TestUpdateSystemDetailsMarksDetailsDirty(t *testing.T) {
|
||||
agent := &Agent{}
|
||||
|
||||
agent.updateSystemDetails(func(details *system.Details) {
|
||||
details.Hostname = "updated-host"
|
||||
details.Podman = true
|
||||
})
|
||||
|
||||
assert.True(t, agent.detailsDirty)
|
||||
assert.Equal(t, "updated-host", agent.systemDetails.Hostname)
|
||||
assert.True(t, agent.systemDetails.Podman)
|
||||
|
||||
original := &system.CombinedData{}
|
||||
realTimeResponse := agent.attachSystemDetails(original, 1000, true)
|
||||
assert.Same(t, original, realTimeResponse)
|
||||
assert.Nil(t, realTimeResponse.Details)
|
||||
assert.True(t, agent.detailsDirty)
|
||||
|
||||
response := agent.attachSystemDetails(original, defaultDataCacheTimeMs, false)
|
||||
require.NotNil(t, response.Details)
|
||||
assert.NotSame(t, original, response)
|
||||
assert.Equal(t, "updated-host", response.Details.Hostname)
|
||||
assert.True(t, response.Details.Podman)
|
||||
assert.False(t, agent.detailsDirty)
|
||||
assert.Nil(t, original.Details)
|
||||
}
|
||||
51
agent/test-data/smart/apple_nvme.json
Normal file
51
agent/test-data/smart/apple_nvme.json
Normal file
@@ -0,0 +1,51 @@
|
||||
{
|
||||
"json_format_version": [1, 0],
|
||||
"smartctl": {
|
||||
"version": [7, 4],
|
||||
"argv": ["smartctl", "-aix", "-j", "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1"],
|
||||
"exit_status": 4
|
||||
},
|
||||
"device": {
|
||||
"name": "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1",
|
||||
"info_name": "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1",
|
||||
"type": "nvme",
|
||||
"protocol": "NVMe"
|
||||
},
|
||||
"model_name": "APPLE SSD AP0256Q",
|
||||
"serial_number": "0ba0147940253c15",
|
||||
"firmware_version": "555",
|
||||
"smart_support": {
|
||||
"available": true,
|
||||
"enabled": true
|
||||
},
|
||||
"smart_status": {
|
||||
"passed": true,
|
||||
"nvme": {
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"nvme_smart_health_information_log": {
|
||||
"critical_warning": 0,
|
||||
"temperature": 42,
|
||||
"available_spare": 100,
|
||||
"available_spare_threshold": 99,
|
||||
"percentage_used": 1,
|
||||
"data_units_read": 270189386,
|
||||
"data_units_written": 166753862,
|
||||
"host_reads": 7543766995,
|
||||
"host_writes": 3761621926,
|
||||
"controller_busy_time": 0,
|
||||
"power_cycles": 366,
|
||||
"power_on_hours": 2850,
|
||||
"unsafe_shutdowns": 195,
|
||||
"media_errors": 0,
|
||||
"num_err_log_entries": 0
|
||||
},
|
||||
"temperature": {
|
||||
"current": 42
|
||||
},
|
||||
"power_cycle_count": 366,
|
||||
"power_on_time": {
|
||||
"hours": 2850
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,8 @@
|
||||
// Package utils provides utility functions for the agent.
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
@@ -68,6 +70,9 @@ func ReadStringFileLimited(path string, maxSize int) (string, error) {
|
||||
if err != nil && err != io.EOF {
|
||||
return "", err
|
||||
}
|
||||
if n < 0 {
|
||||
return "", fmt.Errorf("%s returned negative bytes: %d", path, n)
|
||||
}
|
||||
return strings.TrimSpace(string(buf[:n])), nil
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ import "github.com/blang/semver"
|
||||
|
||||
const (
|
||||
// Version is the current version of the application.
|
||||
Version = "0.18.5"
|
||||
Version = "0.18.7"
|
||||
// AppName is the name of the application.
|
||||
AppName = "beszel"
|
||||
)
|
||||
|
||||
13
go.mod
13
go.mod
@@ -5,24 +5,25 @@ go 1.26.1
|
||||
require (
|
||||
github.com/blang/semver v3.5.1+incompatible
|
||||
github.com/coreos/go-systemd/v22 v22.7.0
|
||||
github.com/distatus/battery v0.11.0
|
||||
github.com/ebitengine/purego v0.10.0
|
||||
github.com/fxamacker/cbor/v2 v2.9.0
|
||||
github.com/gliderlabs/ssh v0.3.8
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/lxzan/gws v1.9.1
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.1
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.3
|
||||
github.com/pocketbase/dbx v1.12.0
|
||||
github.com/pocketbase/pocketbase v0.36.7
|
||||
github.com/shirou/gopsutil/v4 v4.26.2
|
||||
github.com/pocketbase/pocketbase v0.36.8
|
||||
github.com/shirou/gopsutil/v4 v4.26.3
|
||||
github.com/spf13/cast v1.10.0
|
||||
github.com/spf13/cobra v1.10.2
|
||||
github.com/spf13/pflag v1.0.10
|
||||
github.com/stretchr/testify v1.11.1
|
||||
golang.org/x/crypto v0.49.0
|
||||
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90
|
||||
golang.org/x/net v0.52.0
|
||||
golang.org/x/sys v0.42.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
howett.net/plist v1.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -56,14 +57,12 @@ require (
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
golang.org/x/image v0.38.0 // indirect
|
||||
golang.org/x/net v0.52.0 // indirect
|
||||
golang.org/x/oauth2 v0.36.0 // indirect
|
||||
golang.org/x/sync v0.20.0 // indirect
|
||||
golang.org/x/term v0.41.0 // indirect
|
||||
golang.org/x/text v0.35.0 // indirect
|
||||
howett.net/plist v1.0.1 // indirect
|
||||
modernc.org/libc v1.70.0 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.11.0 // indirect
|
||||
modernc.org/sqlite v1.46.2 // indirect
|
||||
modernc.org/sqlite v1.48.0 // indirect
|
||||
)
|
||||
|
||||
18
go.sum
18
go.sum
@@ -17,8 +17,6 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
|
||||
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
||||
github.com/distatus/battery v0.11.0 h1:KJk89gz90Iq/wJtbjjM9yUzBXV+ASV/EG2WOOL7N8lc=
|
||||
github.com/distatus/battery v0.11.0/go.mod h1:KmVkE8A8hpIX4T78QRdMktYpEp35QfOL8A8dwZBxq2k=
|
||||
github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8=
|
||||
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
@@ -87,8 +85,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.1 h1:6sx4cJNfNuUtD6ygGlB0dqcCQ+abfsUh+b+6jgujf6A=
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.1/go.mod h1:U7IywBkLpBV7rgn8iLbQ9/LklJG1gm24bFv5cXXsDKs=
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.3 h1:aBX2iw9a7jl5wfHd3bi9LnS5ucoYIy6KcLH9XVF+gig=
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.3/go.mod h1:U7IywBkLpBV7rgn8iLbQ9/LklJG1gm24bFv5cXXsDKs=
|
||||
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
|
||||
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
|
||||
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
|
||||
@@ -98,8 +96,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pocketbase/dbx v1.12.0 h1:/oLErM+A0b4xI0PWTGPqSDVjzix48PqI/bng2l0PzoA=
|
||||
github.com/pocketbase/dbx v1.12.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
||||
github.com/pocketbase/pocketbase v0.36.7 h1:MrViB7BptPYrf2Nt25pJEYBqUdFjuhRKu1p5GTrkvPA=
|
||||
github.com/pocketbase/pocketbase v0.36.7/go.mod h1:qX4HuVjoKXtEg41fSJVM0JLfGWXbBmHxVv/FaE446r4=
|
||||
github.com/pocketbase/pocketbase v0.36.8 h1:gCNqoesZ44saYOD3J7edhi5nDwUWKyQG7boM/kVwz2c=
|
||||
github.com/pocketbase/pocketbase v0.36.8/go.mod h1:OY4WaXbP0WnF/EXoBbboWJK+ZSZ1A85tiA0sjrTKxTA=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
@@ -107,8 +105,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shirou/gopsutil/v4 v4.26.2 h1:X8i6sicvUFih4BmYIGT1m2wwgw2VG9YgrDTi7cIRGUI=
|
||||
github.com/shirou/gopsutil/v4 v4.26.2/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||
github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc=
|
||||
github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
@@ -199,8 +197,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.46.2 h1:gkXQ6R0+AjxFC/fTDaeIVLbNLNrRoOK7YYVz5BKhTcE=
|
||||
modernc.org/sqlite v1.46.2/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
|
||||
modernc.org/sqlite v1.48.0 h1:ElZyLop3Q2mHYk5IFPPXADejZrlHu7APbpB0sF78bq4=
|
||||
modernc.org/sqlite v1.48.0/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
|
||||
@@ -302,21 +302,6 @@ func (am *AlertManager) SendShoutrrrAlert(notificationUrl, title, message, link,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (am *AlertManager) SendTestNotification(e *core.RequestEvent) error {
|
||||
var data struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
err := e.BindBody(&data)
|
||||
if err != nil || data.URL == "" {
|
||||
return e.BadRequestError("URL is required", err)
|
||||
}
|
||||
err = am.SendShoutrrrAlert(data.URL, "Test Alert", "This is a notification from Beszel.", am.hub.Settings().Meta.AppURL, "View Beszel")
|
||||
if err != nil {
|
||||
return e.JSON(200, map[string]string{"err": err.Error()})
|
||||
}
|
||||
return e.JSON(200, map[string]bool{"err": false})
|
||||
}
|
||||
|
||||
// setAlertTriggered updates the "triggered" status of an alert record in the database
|
||||
func (am *AlertManager) setAlertTriggered(alert CachedAlertData, triggered bool) error {
|
||||
alertRecord, err := am.hub.FindRecordById("alerts", alert.Id)
|
||||
|
||||
@@ -3,7 +3,11 @@ package alerts
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
@@ -117,3 +121,72 @@ func DeleteUserAlerts(e *core.RequestEvent) error {
|
||||
|
||||
return e.JSON(http.StatusOK, map[string]any{"success": true, "count": numDeleted})
|
||||
}
|
||||
|
||||
// SendTestNotification handles API request to send a test notification to a specified Shoutrrr URL
|
||||
func (am *AlertManager) SendTestNotification(e *core.RequestEvent) error {
|
||||
var data struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
err := e.BindBody(&data)
|
||||
if err != nil || data.URL == "" {
|
||||
return e.BadRequestError("URL is required", err)
|
||||
}
|
||||
// Only allow admins to send test notifications to internal URLs
|
||||
if !e.Auth.IsSuperuser() && e.Auth.GetString("role") != "admin" {
|
||||
internalURL, err := isInternalURL(data.URL)
|
||||
if err != nil {
|
||||
return e.BadRequestError(err.Error(), nil)
|
||||
}
|
||||
if internalURL {
|
||||
return e.ForbiddenError("Only admins can send to internal destinations", nil)
|
||||
}
|
||||
}
|
||||
err = am.SendShoutrrrAlert(data.URL, "Test Alert", "This is a notification from Beszel.", am.hub.Settings().Meta.AppURL, "View Beszel")
|
||||
if err != nil {
|
||||
return e.JSON(200, map[string]string{"err": err.Error()})
|
||||
}
|
||||
return e.JSON(200, map[string]bool{"err": false})
|
||||
}
|
||||
|
||||
// isInternalURL checks if the given shoutrrr URL points to an internal destination (localhost or private IP)
|
||||
func isInternalURL(rawURL string) (bool, error) {
|
||||
parsedURL, err := url.Parse(rawURL)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
host := parsedURL.Hostname()
|
||||
if host == "" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if strings.EqualFold(host, "localhost") {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
return isInternalIP(ip), nil
|
||||
}
|
||||
|
||||
// Some Shoutrrr URLs use the host position for service identifiers rather than a
|
||||
// network hostname (for example, discord://token@webhookid). Restrict DNS lookups
|
||||
// to names that look like actual hostnames so valid service URLs keep working.
|
||||
if !strings.Contains(host, ".") {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
ips, err := net.LookupIP(host)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if slices.ContainsFunc(ips, isInternalIP) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func isInternalIP(ip net.IP) bool {
|
||||
return ip.IsPrivate() || ip.IsLoopback() || ip.IsUnspecified()
|
||||
}
|
||||
|
||||
501
internal/alerts/alerts_api_test.go
Normal file
501
internal/alerts/alerts_api_test.go
Normal file
@@ -0,0 +1,501 @@
|
||||
//go:build testing
|
||||
|
||||
package alerts_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/alerts"
|
||||
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
||||
func jsonReader(v any) io.Reader {
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bytes.NewReader(data)
|
||||
}
|
||||
|
||||
func TestIsInternalURL(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
url string
|
||||
internal bool
|
||||
}{
|
||||
{name: "loopback ipv4", url: "generic://127.0.0.1", internal: true},
|
||||
{name: "localhost hostname", url: "generic://localhost", internal: true},
|
||||
{name: "localhost hostname", url: "generic+http://localhost/api/v1/postStuff", internal: true},
|
||||
{name: "localhost hostname", url: "generic+http://127.0.0.1:8080/api/v1/postStuff", internal: true},
|
||||
{name: "localhost hostname", url: "generic+https://beszel.dev/api/v1/postStuff", internal: false},
|
||||
{name: "public ipv4", url: "generic://8.8.8.8", internal: false},
|
||||
{name: "token style service url", url: "discord://abc123@123456789", internal: false},
|
||||
{name: "single label service url", url: "slack://token@team/channel", internal: false},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
internal, err := alerts.IsInternalURL(testCase.url)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testCase.internal, internal)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUserAlertsApi(t *testing.T) {
|
||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||
defer hub.Cleanup()
|
||||
|
||||
hub.StartHub()
|
||||
|
||||
user1, _ := beszelTests.CreateUser(hub, "alertstest@example.com", "password")
|
||||
user1Token, _ := user1.NewAuthToken()
|
||||
|
||||
user2, _ := beszelTests.CreateUser(hub, "alertstest2@example.com", "password")
|
||||
user2Token, _ := user2.NewAuthToken()
|
||||
|
||||
system1, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "system1",
|
||||
"users": []string{user1.Id},
|
||||
"host": "127.0.0.1",
|
||||
})
|
||||
|
||||
system2, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "system2",
|
||||
"users": []string{user1.Id, user2.Id},
|
||||
"host": "127.0.0.2",
|
||||
})
|
||||
|
||||
userRecords, _ := hub.CountRecords("users")
|
||||
assert.EqualValues(t, 2, userRecords, "all users should be created")
|
||||
|
||||
systemRecords, _ := hub.CountRecords("systems")
|
||||
assert.EqualValues(t, 2, systemRecords, "all systems should be created")
|
||||
|
||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||
return hub.TestApp
|
||||
}
|
||||
|
||||
scenarios := []beszelTests.ApiScenario{
|
||||
// {
|
||||
// Name: "GET not implemented - returns index",
|
||||
// Method: http.MethodGet,
|
||||
// URL: "/api/beszel/user-alerts",
|
||||
// ExpectedStatus: 200,
|
||||
// ExpectedContent: []string{"<html ", "globalThis.BESZEL"},
|
||||
// TestAppFactory: testAppFactory,
|
||||
// },
|
||||
{
|
||||
Name: "POST no auth",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST no body",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Bad data"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST bad data",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Bad data"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"invalidField": "this should cause validation error",
|
||||
"threshold": "not a number",
|
||||
}),
|
||||
},
|
||||
{
|
||||
Name: "POST malformed JSON",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Bad data"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: strings.NewReader(`{"alertType": "cpu", "threshold": 80, "enabled": true,}`),
|
||||
},
|
||||
{
|
||||
Name: "POST valid alert data multiple systems",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"value": 69,
|
||||
"min": 9,
|
||||
"systems": []string{system1.Id, system2.Id},
|
||||
"overwrite": false,
|
||||
}),
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
// check total alerts
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||
// check alert has correct values
|
||||
matchingAlerts, _ := app.CountRecords("alerts", dbx.HashExp{"name": "CPU", "user": user1.Id, "system": system1.Id, "value": 69, "min": 9})
|
||||
assert.EqualValues(t, 1, matchingAlerts, "should have 1 alert")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "POST valid alert data single system",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "Memory",
|
||||
"systems": []string{system1.Id},
|
||||
"value": 90,
|
||||
"min": 10,
|
||||
}),
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
user1Alerts, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||
assert.EqualValues(t, 3, user1Alerts, "should have 3 alerts")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "Overwrite: false, should not overwrite existing alert",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"value": 45,
|
||||
"min": 5,
|
||||
"systems": []string{system1.Id},
|
||||
"overwrite": false,
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system1.Id,
|
||||
"user": user1.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user1.Id})
|
||||
assert.EqualValues(t, 80, alert.Get("value"), "should have 80 as value")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "Overwrite: true, should overwrite existing alert",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"value": 45,
|
||||
"min": 5,
|
||||
"systems": []string{system2.Id},
|
||||
"overwrite": true,
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system2.Id,
|
||||
"user": user2.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user2.Id})
|
||||
assert.EqualValues(t, 45, alert.Get("value"), "should have 45 as value")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "DELETE no auth",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"systems": []string{system1.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system1.Id,
|
||||
"user": user1.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "DELETE alert",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"systems": []string{system1.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system1.Id,
|
||||
"user": user1.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.Zero(t, alerts, "should have 0 alerts")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "DELETE alert multiple systems",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"count\":2", "\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "Memory",
|
||||
"systems": []string{system1.Id, system2.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
for _, systemId := range []string{system1.Id, system2.Id} {
|
||||
_, err := beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "Memory",
|
||||
"system": systemId,
|
||||
"user": user1.Id,
|
||||
"value": 90,
|
||||
"min": 10,
|
||||
})
|
||||
assert.NoError(t, err, "should create alert")
|
||||
}
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.Zero(t, alerts, "should have 0 alerts")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "User 2 should not be able to delete alert of user 1",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"systems": []string{system2.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
for _, user := range []string{user1.Id, user2.Id} {
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system2.Id,
|
||||
"user": user,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
}
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
||||
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
||||
assert.EqualValues(t, 1, user2AlertCount, "should have 1 alert")
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
||||
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
||||
assert.Zero(t, user2AlertCount, "should have 0 alerts")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
scenario.Test(t)
|
||||
}
|
||||
}
|
||||
func TestSendTestNotification(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
userToken, err := user.NewAuthToken()
|
||||
|
||||
adminUser, err := beszelTests.CreateUserWithRole(hub, "admin@example.com", "password123", "admin")
|
||||
assert.NoError(t, err, "Failed to create admin user")
|
||||
adminUserToken, err := adminUser.NewAuthToken()
|
||||
|
||||
superuser, err := beszelTests.CreateSuperuser(hub, "superuser@example.com", "password123")
|
||||
assert.NoError(t, err, "Failed to create superuser")
|
||||
superuserToken, err := superuser.NewAuthToken()
|
||||
assert.NoError(t, err, "Failed to create superuser auth token")
|
||||
|
||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||
return hub.TestApp
|
||||
}
|
||||
|
||||
scenarios := []beszelTests.ApiScenario{
|
||||
{
|
||||
Name: "POST /test-notification - no auth should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://127.0.0.1",
|
||||
}),
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - with external auth should succeed",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://8.8.8.8",
|
||||
}),
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"err\":"},
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - local url with user auth should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://localhost:8010",
|
||||
}),
|
||||
ExpectedStatus: 403,
|
||||
ExpectedContent: []string{"Only admins"},
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - internal url with user auth should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic+http://192.168.0.5",
|
||||
}),
|
||||
ExpectedStatus: 403,
|
||||
ExpectedContent: []string{"Only admins"},
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - internal url with admin auth should succeed",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": adminUserToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://127.0.0.1",
|
||||
}),
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"err\":"},
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - internal url with superuser auth should succeed",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": superuserToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://127.0.0.1",
|
||||
}),
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"err\":"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
scenario.Test(t)
|
||||
}
|
||||
}
|
||||
@@ -109,6 +109,18 @@ func (am *AlertManager) cancelPendingAlert(alertID string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// CancelPendingStatusAlerts cancels all pending status alert timers for a given system.
|
||||
// This is called when a system is paused to prevent delayed alerts from firing.
|
||||
func (am *AlertManager) CancelPendingStatusAlerts(systemID string) {
|
||||
am.pendingAlerts.Range(func(key, value any) bool {
|
||||
info := value.(*alertInfo)
|
||||
if info.alertData.SystemID == systemID {
|
||||
am.cancelPendingAlert(key.(string))
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// processPendingAlert sends a "down" alert if the pending alert has expired and the system is still down.
|
||||
func (am *AlertManager) processPendingAlert(alertID string) {
|
||||
value, loaded := am.pendingAlerts.LoadAndDelete(alertID)
|
||||
|
||||
@@ -941,3 +941,68 @@ func TestStatusAlertClearedBeforeSend(t *testing.T) {
|
||||
assert.EqualValues(t, 0, alertHistoryCount, "Should have no unresolved alert history records since alert never triggered")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCancelPendingStatusAlertsClearsAllAlertsForSystem(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||
require.NoError(t, err)
|
||||
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||
require.NoError(t, hub.Save(userSettings))
|
||||
|
||||
systemCollection, err := hub.FindCollectionByNameOrId("systems")
|
||||
require.NoError(t, err)
|
||||
|
||||
system1 := core.NewRecord(systemCollection)
|
||||
system1.Set("name", "system-1")
|
||||
system1.Set("status", "up")
|
||||
system1.Set("host", "127.0.0.1")
|
||||
system1.Set("users", []string{user.Id})
|
||||
require.NoError(t, hub.Save(system1))
|
||||
|
||||
system2 := core.NewRecord(systemCollection)
|
||||
system2.Set("name", "system-2")
|
||||
system2.Set("status", "up")
|
||||
system2.Set("host", "127.0.0.2")
|
||||
system2.Set("users", []string{user.Id})
|
||||
require.NoError(t, hub.Save(system2))
|
||||
|
||||
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
|
||||
require.NoError(t, err)
|
||||
|
||||
alert1 := core.NewRecord(alertCollection)
|
||||
alert1.Set("user", user.Id)
|
||||
alert1.Set("system", system1.Id)
|
||||
alert1.Set("name", "Status")
|
||||
alert1.Set("triggered", false)
|
||||
alert1.Set("min", 5)
|
||||
require.NoError(t, hub.Save(alert1))
|
||||
|
||||
alert2 := core.NewRecord(alertCollection)
|
||||
alert2.Set("user", user.Id)
|
||||
alert2.Set("system", system2.Id)
|
||||
alert2.Set("name", "Status")
|
||||
alert2.Set("triggered", false)
|
||||
alert2.Set("min", 5)
|
||||
require.NoError(t, hub.Save(alert2))
|
||||
|
||||
am := alerts.NewTestAlertManagerWithoutWorker(hub)
|
||||
initialEmailCount := hub.TestMailer.TotalSend()
|
||||
|
||||
// Both systems go down
|
||||
require.NoError(t, am.HandleStatusAlerts("down", system1))
|
||||
require.NoError(t, am.HandleStatusAlerts("down", system2))
|
||||
assert.Equal(t, 2, am.GetPendingAlertsCount(), "both systems should have pending alerts")
|
||||
|
||||
// System 1 is paused — cancel its pending alerts
|
||||
am.CancelPendingStatusAlerts(system1.Id)
|
||||
assert.Equal(t, 1, am.GetPendingAlertsCount(), "only system2 alert should remain pending after pausing system1")
|
||||
|
||||
// Expire and process remaining alerts — only system2 should fire
|
||||
am.ForceExpirePendingAlerts()
|
||||
processed, err := am.ProcessPendingAlerts()
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, processed, 1, "only the non-paused system's alert should be processed")
|
||||
assert.Equal(t, initialEmailCount+1, hub.TestMailer.TotalSend(), "only system2 should send a down notification")
|
||||
}
|
||||
|
||||
@@ -3,11 +3,6 @@
|
||||
package alerts_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/synctest"
|
||||
"time"
|
||||
@@ -16,359 +11,9 @@ import (
|
||||
|
||||
"github.com/henrygd/beszel/internal/alerts"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
||||
func jsonReader(v any) io.Reader {
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bytes.NewReader(data)
|
||||
}
|
||||
|
||||
func TestUserAlertsApi(t *testing.T) {
|
||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||
defer hub.Cleanup()
|
||||
|
||||
hub.StartHub()
|
||||
|
||||
user1, _ := beszelTests.CreateUser(hub, "alertstest@example.com", "password")
|
||||
user1Token, _ := user1.NewAuthToken()
|
||||
|
||||
user2, _ := beszelTests.CreateUser(hub, "alertstest2@example.com", "password")
|
||||
user2Token, _ := user2.NewAuthToken()
|
||||
|
||||
system1, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "system1",
|
||||
"users": []string{user1.Id},
|
||||
"host": "127.0.0.1",
|
||||
})
|
||||
|
||||
system2, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "system2",
|
||||
"users": []string{user1.Id, user2.Id},
|
||||
"host": "127.0.0.2",
|
||||
})
|
||||
|
||||
userRecords, _ := hub.CountRecords("users")
|
||||
assert.EqualValues(t, 2, userRecords, "all users should be created")
|
||||
|
||||
systemRecords, _ := hub.CountRecords("systems")
|
||||
assert.EqualValues(t, 2, systemRecords, "all systems should be created")
|
||||
|
||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||
return hub.TestApp
|
||||
}
|
||||
|
||||
scenarios := []beszelTests.ApiScenario{
|
||||
// {
|
||||
// Name: "GET not implemented - returns index",
|
||||
// Method: http.MethodGet,
|
||||
// URL: "/api/beszel/user-alerts",
|
||||
// ExpectedStatus: 200,
|
||||
// ExpectedContent: []string{"<html ", "globalThis.BESZEL"},
|
||||
// TestAppFactory: testAppFactory,
|
||||
// },
|
||||
{
|
||||
Name: "POST no auth",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST no body",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Bad data"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST bad data",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Bad data"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"invalidField": "this should cause validation error",
|
||||
"threshold": "not a number",
|
||||
}),
|
||||
},
|
||||
{
|
||||
Name: "POST malformed JSON",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Bad data"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: strings.NewReader(`{"alertType": "cpu", "threshold": 80, "enabled": true,}`),
|
||||
},
|
||||
{
|
||||
Name: "POST valid alert data multiple systems",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"value": 69,
|
||||
"min": 9,
|
||||
"systems": []string{system1.Id, system2.Id},
|
||||
"overwrite": false,
|
||||
}),
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
// check total alerts
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||
// check alert has correct values
|
||||
matchingAlerts, _ := app.CountRecords("alerts", dbx.HashExp{"name": "CPU", "user": user1.Id, "system": system1.Id, "value": 69, "min": 9})
|
||||
assert.EqualValues(t, 1, matchingAlerts, "should have 1 alert")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "POST valid alert data single system",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "Memory",
|
||||
"systems": []string{system1.Id},
|
||||
"value": 90,
|
||||
"min": 10,
|
||||
}),
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
user1Alerts, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||
assert.EqualValues(t, 3, user1Alerts, "should have 3 alerts")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "Overwrite: false, should not overwrite existing alert",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"value": 45,
|
||||
"min": 5,
|
||||
"systems": []string{system1.Id},
|
||||
"overwrite": false,
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system1.Id,
|
||||
"user": user1.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user1.Id})
|
||||
assert.EqualValues(t, 80, alert.Get("value"), "should have 80 as value")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "Overwrite: true, should overwrite existing alert",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"value": 45,
|
||||
"min": 5,
|
||||
"systems": []string{system2.Id},
|
||||
"overwrite": true,
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system2.Id,
|
||||
"user": user2.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user2.Id})
|
||||
assert.EqualValues(t, 45, alert.Get("value"), "should have 45 as value")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "DELETE no auth",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"systems": []string{system1.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system1.Id,
|
||||
"user": user1.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "DELETE alert",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"systems": []string{system1.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system1.Id,
|
||||
"user": user1.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.Zero(t, alerts, "should have 0 alerts")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "DELETE alert multiple systems",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"count\":2", "\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "Memory",
|
||||
"systems": []string{system1.Id, system2.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
for _, systemId := range []string{system1.Id, system2.Id} {
|
||||
_, err := beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "Memory",
|
||||
"system": systemId,
|
||||
"user": user1.Id,
|
||||
"value": 90,
|
||||
"min": 10,
|
||||
})
|
||||
assert.NoError(t, err, "should create alert")
|
||||
}
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.Zero(t, alerts, "should have 0 alerts")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "User 2 should not be able to delete alert of user 1",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"systems": []string{system2.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
for _, user := range []string{user1.Id, user2.Id} {
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system2.Id,
|
||||
"user": user,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
}
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
||||
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
||||
assert.EqualValues(t, 1, user2AlertCount, "should have 1 alert")
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
||||
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
||||
assert.Zero(t, user2AlertCount, "should have 0 alerts")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
scenario.Test(t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAlertsHistory(t *testing.T) {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
|
||||
@@ -95,3 +95,7 @@ func (am *AlertManager) RestorePendingStatusAlerts() error {
|
||||
func (am *AlertManager) SetAlertTriggered(alert CachedAlertData, triggered bool) error {
|
||||
return am.setAlertTriggered(alert, triggered)
|
||||
}
|
||||
|
||||
func IsInternalURL(rawURL string) (bool, error) {
|
||||
return isInternalURL(rawURL)
|
||||
}
|
||||
|
||||
@@ -195,6 +195,6 @@ func main() {
|
||||
}
|
||||
|
||||
if err := a.Start(serverConfig); err != nil {
|
||||
log.Fatal("Failed to start server: ", err)
|
||||
log.Fatal("Failed to start: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,6 +22,8 @@ const (
|
||||
GetSmartData
|
||||
// Request detailed systemd service info from agent
|
||||
GetSystemdInfo
|
||||
// Sync network probe configuration to agent
|
||||
SyncNetworkProbes
|
||||
// Add new actions here...
|
||||
)
|
||||
|
||||
|
||||
56
internal/entities/probe/probe.go
Normal file
56
internal/entities/probe/probe.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package probe
|
||||
|
||||
type SyncAction uint8
|
||||
|
||||
const (
|
||||
// SyncActionReplace indicates a full sync where the provided configs should replace all existing probes for the system.
|
||||
SyncActionReplace SyncAction = iota
|
||||
// SyncActionUpsert indicates an incremental sync where the provided config should be added or updated.
|
||||
SyncActionUpsert
|
||||
// SyncActionDelete indicates an incremental sync where the provided config should be removed.
|
||||
SyncActionDelete
|
||||
)
|
||||
|
||||
// Config defines a network probe task sent from hub to agent.
|
||||
type Config struct {
|
||||
// ID is the stable network_probes record ID generated by the hub.
|
||||
ID string `cbor:"0,keyasint"`
|
||||
Target string `cbor:"1,keyasint"`
|
||||
Protocol string `cbor:"2,keyasint"` // "icmp", "tcp", or "http"
|
||||
Port uint16 `cbor:"3,keyasint,omitempty"`
|
||||
Interval uint16 `cbor:"4,keyasint"` // seconds
|
||||
}
|
||||
|
||||
// SyncRequest defines an incremental or full probe sync request sent to the agent.
|
||||
type SyncRequest struct {
|
||||
Action SyncAction `cbor:"0,keyasint"`
|
||||
Config Config `cbor:"1,keyasint,omitempty"`
|
||||
Configs []Config `cbor:"2,keyasint,omitempty"`
|
||||
RunNow bool `cbor:"3,keyasint,omitempty"`
|
||||
}
|
||||
|
||||
// SyncResponse returns the immediate result for an upsert when requested.
|
||||
type SyncResponse struct {
|
||||
Result Result `cbor:"0,keyasint,omitempty"`
|
||||
}
|
||||
|
||||
// Result holds aggregated probe results for a single target.
|
||||
//
|
||||
// 0: avg response in ms
|
||||
//
|
||||
// 1: average response over the last hour in ms
|
||||
//
|
||||
// 2: min response over the last hour in ms
|
||||
//
|
||||
// 3: max response over the last hour in ms
|
||||
//
|
||||
// 4: packet loss percentage over the last hour (0-100)
|
||||
type Result []float64
|
||||
|
||||
// Get returns the value at the specified index or 0 if the index is out of range.
|
||||
func (r Result) Get(index int) float64 {
|
||||
if index < len(r) {
|
||||
return r[index]
|
||||
}
|
||||
return 0
|
||||
}
|
||||
@@ -494,7 +494,7 @@ type SmartInfoForNvme struct {
|
||||
FirmwareVersion string `json:"firmware_version"`
|
||||
// NVMePCIVendor NVMePCIVendor `json:"nvme_pci_vendor"`
|
||||
// NVMeIEEEOUIIdentifier uint32 `json:"nvme_ieee_oui_identifier"`
|
||||
// NVMeTotalCapacity uint64 `json:"nvme_total_capacity"`
|
||||
NVMeTotalCapacity uint64 `json:"nvme_total_capacity"`
|
||||
// NVMeUnallocatedCapacity uint64 `json:"nvme_unallocated_capacity"`
|
||||
// NVMeControllerID uint16 `json:"nvme_controller_id"`
|
||||
// NVMeVersion VersionStringInfo `json:"nvme_version"`
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
)
|
||||
|
||||
@@ -48,6 +49,8 @@ type Stats struct {
|
||||
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
|
||||
CpuBreakdown []float64 `json:"cpub,omitempty" cbor:"33,keyasint,omitempty"` // [user, system, iowait, steal, idle]
|
||||
CpuCoresUsage Uint8Slice `json:"cpus,omitempty" cbor:"34,keyasint,omitempty"` // per-core busy usage [CPU0..]
|
||||
DiskIoStats [6]float64 `json:"dios,omitzero" cbor:"35,keyasint,omitzero"` // [read time %, write time %, io utilization %, r_await ms, w_await ms, weighted io %]
|
||||
MaxDiskIoStats [6]float64 `json:"diosm,omitzero" cbor:"-"` // max values for DiskIoStats
|
||||
}
|
||||
|
||||
// Uint8Slice wraps []uint8 to customize JSON encoding while keeping CBOR efficient.
|
||||
@@ -97,6 +100,8 @@ type FsStats struct {
|
||||
DiskWriteBytes uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
|
||||
MaxDiskReadBytes uint64 `json:"rbm,omitempty" cbor:"-"`
|
||||
MaxDiskWriteBytes uint64 `json:"wbm,omitempty" cbor:"-"`
|
||||
DiskIoStats [6]float64 `json:"dios,omitzero" cbor:"8,keyasint,omitzero"` // [read time %, write time %, io utilization %, r_await ms, w_await ms, weighted io %]
|
||||
MaxDiskIoStats [6]float64 `json:"diosm,omitzero" cbor:"-"` // max values for DiskIoStats
|
||||
}
|
||||
|
||||
type NetIoStats struct {
|
||||
@@ -175,4 +180,5 @@ type CombinedData struct {
|
||||
Containers []*container.Stats `json:"container" cbor:"2,keyasint"`
|
||||
SystemdServices []*systemd.Service `json:"systemd,omitempty" cbor:"3,keyasint,omitempty"`
|
||||
Details *Details `cbor:"4,keyasint,omitempty"`
|
||||
Probes map[string]probe.Result `cbor:"5,keyasint,omitempty"`
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package hub
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -13,6 +14,7 @@ import (
|
||||
"github.com/henrygd/beszel/internal/ghupdate"
|
||||
"github.com/henrygd/beszel/internal/hub/config"
|
||||
"github.com/henrygd/beszel/internal/hub/systems"
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/apis"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
@@ -25,6 +27,32 @@ type UpdateInfo struct {
|
||||
Url string `json:"url"`
|
||||
}
|
||||
|
||||
var containerIDPattern = regexp.MustCompile(`^[a-fA-F0-9]{12,64}$`)
|
||||
|
||||
// Middleware to allow only admin role users
|
||||
var requireAdminRole = customAuthMiddleware(func(e *core.RequestEvent) bool {
|
||||
return e.Auth.GetString("role") == "admin"
|
||||
})
|
||||
|
||||
// Middleware to exclude readonly users
|
||||
var excludeReadOnlyRole = customAuthMiddleware(func(e *core.RequestEvent) bool {
|
||||
return e.Auth.GetString("role") != "readonly"
|
||||
})
|
||||
|
||||
// customAuthMiddleware handles boilerplate for custom authentication middlewares. fn should
|
||||
// return true if the request is allowed, false otherwise. e.Auth is guaranteed to be non-nil.
|
||||
func customAuthMiddleware(fn func(*core.RequestEvent) bool) func(*core.RequestEvent) error {
|
||||
return func(e *core.RequestEvent) error {
|
||||
if e.Auth == nil {
|
||||
return e.UnauthorizedError("The request requires valid record authorization token.", nil)
|
||||
}
|
||||
if !fn(e) {
|
||||
return e.ForbiddenError("The authorized record is not allowed to perform this action.", nil)
|
||||
}
|
||||
return e.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// registerMiddlewares registers custom middlewares
|
||||
func (h *Hub) registerMiddlewares(se *core.ServeEvent) {
|
||||
// authorizes request with user matching the provided email
|
||||
@@ -33,7 +61,7 @@ func (h *Hub) registerMiddlewares(se *core.ServeEvent) {
|
||||
return e.Next()
|
||||
}
|
||||
isAuthRefresh := e.Request.URL.Path == "/api/collections/users/auth-refresh" && e.Request.Method == http.MethodPost
|
||||
e.Auth, err = e.App.FindFirstRecordByData("users", "email", email)
|
||||
e.Auth, err = e.App.FindAuthRecordByEmail("users", email)
|
||||
if err != nil || !isAuthRefresh {
|
||||
return e.Next()
|
||||
}
|
||||
@@ -43,13 +71,13 @@ func (h *Hub) registerMiddlewares(se *core.ServeEvent) {
|
||||
return e.Next()
|
||||
}
|
||||
// authenticate with trusted header
|
||||
if autoLogin, _ := GetEnv("AUTO_LOGIN"); autoLogin != "" {
|
||||
if autoLogin, _ := utils.GetEnv("AUTO_LOGIN"); autoLogin != "" {
|
||||
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
||||
return authorizeRequestWithEmail(e, autoLogin)
|
||||
})
|
||||
}
|
||||
// authenticate with trusted header
|
||||
if trustedHeader, _ := GetEnv("TRUSTED_AUTH_HEADER"); trustedHeader != "" {
|
||||
if trustedHeader, _ := utils.GetEnv("TRUSTED_AUTH_HEADER"); trustedHeader != "" {
|
||||
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
||||
return authorizeRequestWithEmail(e, e.Request.Header.Get(trustedHeader))
|
||||
})
|
||||
@@ -77,30 +105,30 @@ func (h *Hub) registerApiRoutes(se *core.ServeEvent) error {
|
||||
apiAuth.GET("/info", h.getInfo)
|
||||
apiAuth.GET("/getkey", h.getInfo) // deprecated - keep for compatibility w/ integrations
|
||||
// check for updates
|
||||
if optIn, _ := GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||
if optIn, _ := utils.GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||
var updateInfo UpdateInfo
|
||||
apiAuth.GET("/update", updateInfo.getUpdate)
|
||||
}
|
||||
// send test notification
|
||||
apiAuth.POST("/test-notification", h.SendTestNotification)
|
||||
// heartbeat status and test
|
||||
apiAuth.GET("/heartbeat-status", h.getHeartbeatStatus)
|
||||
apiAuth.POST("/test-heartbeat", h.testHeartbeat)
|
||||
apiAuth.GET("/heartbeat-status", h.getHeartbeatStatus).BindFunc(requireAdminRole)
|
||||
apiAuth.POST("/test-heartbeat", h.testHeartbeat).BindFunc(requireAdminRole)
|
||||
// get config.yml content
|
||||
apiAuth.GET("/config-yaml", config.GetYamlConfig)
|
||||
apiAuth.GET("/config-yaml", config.GetYamlConfig).BindFunc(requireAdminRole)
|
||||
// handle agent websocket connection
|
||||
apiNoAuth.GET("/agent-connect", h.handleAgentConnect)
|
||||
// get or create universal tokens
|
||||
apiAuth.GET("/universal-token", h.getUniversalToken)
|
||||
apiAuth.GET("/universal-token", h.getUniversalToken).BindFunc(excludeReadOnlyRole)
|
||||
// update / delete user alerts
|
||||
apiAuth.POST("/user-alerts", alerts.UpsertUserAlerts)
|
||||
apiAuth.DELETE("/user-alerts", alerts.DeleteUserAlerts)
|
||||
// refresh SMART devices for a system
|
||||
apiAuth.POST("/smart/refresh", h.refreshSmartData)
|
||||
apiAuth.POST("/smart/refresh", h.refreshSmartData).BindFunc(excludeReadOnlyRole)
|
||||
// get systemd service details
|
||||
apiAuth.GET("/systemd/info", h.getSystemdInfo)
|
||||
// /containers routes
|
||||
if enabled, _ := GetEnv("CONTAINER_DETAILS"); enabled != "false" {
|
||||
if enabled, _ := utils.GetEnv("CONTAINER_DETAILS"); enabled != "false" {
|
||||
// get container logs
|
||||
apiAuth.GET("/containers/logs", h.getContainerLogs)
|
||||
// get container info
|
||||
@@ -120,7 +148,7 @@ func (h *Hub) getInfo(e *core.RequestEvent) error {
|
||||
Key: h.pubKey,
|
||||
Version: beszel.Version,
|
||||
}
|
||||
if optIn, _ := GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||
if optIn, _ := utils.GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||
info.CheckUpdate = true
|
||||
}
|
||||
return e.JSON(http.StatusOK, info)
|
||||
@@ -153,6 +181,10 @@ func (info *UpdateInfo) getUpdate(e *core.RequestEvent) error {
|
||||
|
||||
// GetUniversalToken handles the universal token API endpoint (create, read, delete)
|
||||
func (h *Hub) getUniversalToken(e *core.RequestEvent) error {
|
||||
if e.Auth.IsSuperuser() {
|
||||
return e.ForbiddenError("Superusers cannot use universal tokens", nil)
|
||||
}
|
||||
|
||||
tokenMap := universalTokenMap.GetMap()
|
||||
userID := e.Auth.Id
|
||||
query := e.Request.URL.Query()
|
||||
@@ -246,9 +278,6 @@ func (h *Hub) getUniversalToken(e *core.RequestEvent) error {
|
||||
|
||||
// getHeartbeatStatus returns current heartbeat configuration and whether it's enabled
|
||||
func (h *Hub) getHeartbeatStatus(e *core.RequestEvent) error {
|
||||
if e.Auth.GetString("role") != "admin" {
|
||||
return e.ForbiddenError("Requires admin role", nil)
|
||||
}
|
||||
if h.hb == nil {
|
||||
return e.JSON(http.StatusOK, map[string]any{
|
||||
"enabled": false,
|
||||
@@ -266,9 +295,6 @@ func (h *Hub) getHeartbeatStatus(e *core.RequestEvent) error {
|
||||
|
||||
// testHeartbeat triggers a single heartbeat ping and returns the result
|
||||
func (h *Hub) testHeartbeat(e *core.RequestEvent) error {
|
||||
if e.Auth.GetString("role") != "admin" {
|
||||
return e.ForbiddenError("Requires admin role", nil)
|
||||
}
|
||||
if h.hb == nil {
|
||||
return e.JSON(http.StatusOK, map[string]any{
|
||||
"err": "Heartbeat not configured. Set HEARTBEAT_URL environment variable.",
|
||||
@@ -285,21 +311,18 @@ func (h *Hub) containerRequestHandler(e *core.RequestEvent, fetchFunc func(*syst
|
||||
systemID := e.Request.URL.Query().Get("system")
|
||||
containerID := e.Request.URL.Query().Get("container")
|
||||
|
||||
if systemID == "" || containerID == "" {
|
||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and container parameters are required"})
|
||||
}
|
||||
if !containerIDPattern.MatchString(containerID) {
|
||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "invalid container parameter"})
|
||||
if systemID == "" || containerID == "" || !containerIDPattern.MatchString(containerID) {
|
||||
return e.BadRequestError("Invalid system or container parameter", nil)
|
||||
}
|
||||
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||
return e.NotFoundError("", nil)
|
||||
}
|
||||
|
||||
data, err := fetchFunc(system, containerID)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
||||
return e.InternalServerError("", err)
|
||||
}
|
||||
|
||||
return e.JSON(http.StatusOK, map[string]string{responseKey: data})
|
||||
@@ -325,15 +348,23 @@ func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
||||
serviceName := query.Get("service")
|
||||
|
||||
if systemID == "" || serviceName == "" {
|
||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and service parameters are required"})
|
||||
return e.BadRequestError("Invalid system or service parameter", nil)
|
||||
}
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||
return e.NotFoundError("", nil)
|
||||
}
|
||||
// verify service exists before fetching details
|
||||
_, err = e.App.FindFirstRecordByFilter("systemd_services", "system = {:system} && name = {:name}", dbx.Params{
|
||||
"system": systemID,
|
||||
"name": serviceName,
|
||||
})
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||
return e.NotFoundError("", err)
|
||||
}
|
||||
details, err := system.FetchSystemdInfoFromAgent(serviceName)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
||||
return e.InternalServerError("", err)
|
||||
}
|
||||
e.Response.Header().Set("Cache-Control", "public, max-age=60")
|
||||
return e.JSON(http.StatusOK, map[string]any{"details": details})
|
||||
@@ -344,17 +375,16 @@ func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
||||
func (h *Hub) refreshSmartData(e *core.RequestEvent) error {
|
||||
systemID := e.Request.URL.Query().Get("system")
|
||||
if systemID == "" {
|
||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system parameter is required"})
|
||||
return e.BadRequestError("Invalid system parameter", nil)
|
||||
}
|
||||
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||
return e.NotFoundError("", nil)
|
||||
}
|
||||
|
||||
// Fetch and save SMART devices
|
||||
if err := system.FetchAndSaveSmartDevices(); err != nil {
|
||||
return e.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
||||
return e.InternalServerError("", err)
|
||||
}
|
||||
|
||||
return e.JSON(http.StatusOK, map[string]string{"status": "ok"})
|
||||
|
||||
@@ -3,6 +3,7 @@ package hub_test
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"testing"
|
||||
@@ -25,33 +26,33 @@ func jsonReader(v any) io.Reader {
|
||||
}
|
||||
|
||||
func TestApiRoutesAuthentication(t *testing.T) {
|
||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
hub.StartHub()
|
||||
|
||||
// Create test user and get auth token
|
||||
user, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
||||
require.NoError(t, err, "Failed to create test user")
|
||||
|
||||
adminUser, err := beszelTests.CreateRecord(hub, "users", map[string]any{
|
||||
"email": "admin@example.com",
|
||||
"password": "password123",
|
||||
"role": "admin",
|
||||
})
|
||||
require.NoError(t, err, "Failed to create admin user")
|
||||
adminUserToken, err := adminUser.NewAuthToken()
|
||||
|
||||
// superUser, err := beszelTests.CreateRecord(hub, core.CollectionNameSuperusers, map[string]any{
|
||||
// "email": "superuser@example.com",
|
||||
// "password": "password123",
|
||||
// })
|
||||
// require.NoError(t, err, "Failed to create superuser")
|
||||
|
||||
userToken, err := user.NewAuthToken()
|
||||
require.NoError(t, err, "Failed to create auth token")
|
||||
|
||||
// Create test system for user-alerts endpoints
|
||||
// Create test user and get auth token
|
||||
user2, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
||||
require.NoError(t, err, "Failed to create test user")
|
||||
user2Token, err := user2.NewAuthToken()
|
||||
require.NoError(t, err, "Failed to create user2 auth token")
|
||||
|
||||
adminUser, err := beszelTests.CreateUserWithRole(hub, "admin@example.com", "password123", "admin")
|
||||
require.NoError(t, err, "Failed to create admin user")
|
||||
adminUserToken, err := adminUser.NewAuthToken()
|
||||
|
||||
readOnlyUser, err := beszelTests.CreateUserWithRole(hub, "readonly@example.com", "password123", "readonly")
|
||||
require.NoError(t, err, "Failed to create readonly user")
|
||||
readOnlyUserToken, err := readOnlyUser.NewAuthToken()
|
||||
require.NoError(t, err, "Failed to create readonly user auth token")
|
||||
|
||||
superuser, err := beszelTests.CreateSuperuser(hub, "superuser@example.com", "password123")
|
||||
require.NoError(t, err, "Failed to create superuser")
|
||||
superuserToken, err := superuser.NewAuthToken()
|
||||
require.NoError(t, err, "Failed to create superuser auth token")
|
||||
|
||||
// Create test system
|
||||
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"users": []string{user.Id},
|
||||
@@ -65,31 +66,6 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
|
||||
scenarios := []beszelTests.ApiScenario{
|
||||
// Auth Protected Routes - Should require authentication
|
||||
{
|
||||
Name: "POST /test-notification - no auth should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://127.0.0.1",
|
||||
}),
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - with auth should succeed",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://127.0.0.1",
|
||||
}),
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"sending message"},
|
||||
},
|
||||
{
|
||||
Name: "GET /config-yaml - no auth should fail",
|
||||
Method: http.MethodGet,
|
||||
@@ -106,7 +82,7 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 403,
|
||||
ExpectedContent: []string{"Requires admin"},
|
||||
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -136,7 +112,7 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 403,
|
||||
ExpectedContent: []string{"Requires admin role"},
|
||||
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -158,7 +134,7 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 403,
|
||||
ExpectedContent: []string{"Requires admin role"},
|
||||
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -202,6 +178,74 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
ExpectedContent: []string{"\"permanent\":true", "permanent-token-123"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /universal-token - superuser should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/universal-token",
|
||||
Headers: map[string]string{
|
||||
"Authorization": superuserToken,
|
||||
},
|
||||
ExpectedStatus: 403,
|
||||
ExpectedContent: []string{"Superusers cannot use universal tokens"},
|
||||
TestAppFactory: func(t testing.TB) *pbTests.TestApp {
|
||||
return hub.TestApp
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "GET /universal-token - with readonly auth should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/universal-token",
|
||||
Headers: map[string]string{
|
||||
"Authorization": readOnlyUserToken,
|
||||
},
|
||||
ExpectedStatus: 403,
|
||||
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST /smart/refresh - missing system should fail 400 with user auth",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/smart/refresh",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Invalid", "system", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST /smart/refresh - with readonly auth should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": readOnlyUserToken,
|
||||
},
|
||||
ExpectedStatus: 403,
|
||||
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST /smart/refresh - non-user system should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST /smart/refresh - good user should pass validation",
|
||||
Method: http.MethodPost,
|
||||
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 500,
|
||||
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST /user-alerts - no auth should fail",
|
||||
Method: http.MethodPost,
|
||||
@@ -273,20 +317,59 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
{
|
||||
Name: "GET /containers/logs - no auth should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/containers/logs?system=test-system&container=test-container",
|
||||
URL: "/api/beszel/containers/logs?system=test-system&container=abababababab",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/logs - request for valid non-user system should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/containers/logs?system=%s&container=abababababab", system.Id),
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/info - request for valid non-user system should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/containers/info?system=%s&container=abababababab", system.Id),
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/info - SHARE_ALL_SYSTEMS allows non-member user",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/containers/info?system=%s&container=abababababab", system.Id),
|
||||
ExpectedStatus: 500,
|
||||
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
t.Setenv("SHARE_ALL_SYSTEMS", "")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/logs - with auth but missing system param should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/containers/logs?container=test-container",
|
||||
URL: "/api/beszel/containers/logs?container=abababababab",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"system and container parameters are required"},
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -297,7 +380,7 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"system and container parameters are required"},
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -308,7 +391,7 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"system not found"},
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -319,7 +402,7 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"invalid container parameter"},
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -330,7 +413,7 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"invalid container parameter"},
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -341,9 +424,114 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"invalid container parameter"},
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/logs - good user should pass validation",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/containers/logs?system=" + system.Id + "&container=0123456789ab",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 500,
|
||||
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/info - good user should pass validation",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=0123456789ab",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 500,
|
||||
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
// /systemd routes
|
||||
{
|
||||
Name: "GET /systemd/info - no auth should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - request for valid non-user system should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - with auth but missing system param should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/systemd/info?service=nginx.service",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - with auth but missing service param should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - with auth but invalid system should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/systemd/info?system=invalid-system&service=nginx.service",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - service not in systemd_services collection should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=notregistered.service", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - with auth and existing service record should pass validation",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 500,
|
||||
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||
TestAppFactory: testAppFactory,
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.CreateRecord(app, "systemd_services", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "nginx.service",
|
||||
"state": 0,
|
||||
"sub": 1,
|
||||
})
|
||||
},
|
||||
},
|
||||
|
||||
// Auth Optional Routes - Should work without authentication
|
||||
{
|
||||
@@ -434,13 +622,17 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"systems": []string{system.Id},
|
||||
}),
|
||||
},
|
||||
{
|
||||
Name: "GET /update - shouldn't exist without CHECK_UPDATES env var",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/update",
|
||||
ExpectedStatus: 502,
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
// this works but diff behavior on prod vs dev.
|
||||
// dev returns 502; prod returns 200 with static html page 404
|
||||
// TODO: align dev and prod behavior and re-enable this test
|
||||
// {
|
||||
// Name: "GET /update - shouldn't exist without CHECK_UPDATES env var",
|
||||
// Method: http.MethodGet,
|
||||
// URL: "/api/beszel/update",
|
||||
// NotExpectedContent: []string{"v:", "\"v\":"},
|
||||
// ExpectedStatus: 502,
|
||||
// TestAppFactory: testAppFactory,
|
||||
// },
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
package hub
|
||||
|
||||
import "github.com/pocketbase/pocketbase/core"
|
||||
import (
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
)
|
||||
|
||||
type collectionRules struct {
|
||||
list *string
|
||||
@@ -22,11 +25,11 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
}
|
||||
|
||||
// disable email auth if DISABLE_PASSWORD_AUTH env var is set
|
||||
disablePasswordAuth, _ := GetEnv("DISABLE_PASSWORD_AUTH")
|
||||
disablePasswordAuth, _ := utils.GetEnv("DISABLE_PASSWORD_AUTH")
|
||||
usersCollection.PasswordAuth.Enabled = disablePasswordAuth != "true"
|
||||
usersCollection.PasswordAuth.IdentityFields = []string{"email"}
|
||||
// allow oauth user creation if USER_CREATION is set
|
||||
if userCreation, _ := GetEnv("USER_CREATION"); userCreation == "true" {
|
||||
if userCreation, _ := utils.GetEnv("USER_CREATION"); userCreation == "true" {
|
||||
cr := "@request.context = 'oauth2'"
|
||||
usersCollection.CreateRule = &cr
|
||||
} else {
|
||||
@@ -34,7 +37,7 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
}
|
||||
|
||||
// enable mfaOtp mfa if MFA_OTP env var is set
|
||||
mfaOtp, _ := GetEnv("MFA_OTP")
|
||||
mfaOtp, _ := utils.GetEnv("MFA_OTP")
|
||||
usersCollection.OTP.Length = 6
|
||||
superusersCollection.OTP.Length = 6
|
||||
usersCollection.OTP.Enabled = mfaOtp == "true"
|
||||
@@ -50,7 +53,7 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
|
||||
// When SHARE_ALL_SYSTEMS is enabled, any authenticated user can read
|
||||
// system-scoped data. Write rules continue to block readonly users.
|
||||
shareAllSystems, _ := GetEnv("SHARE_ALL_SYSTEMS")
|
||||
shareAllSystems, _ := utils.GetEnv("SHARE_ALL_SYSTEMS")
|
||||
|
||||
authenticatedRule := "@request.auth.id != \"\""
|
||||
systemsMemberRule := authenticatedRule + " && users.id ?= @request.auth.id"
|
||||
@@ -75,7 +78,7 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := applyCollectionRules(app, []string{"containers", "container_stats", "system_stats", "systemd_services"}, collectionRules{
|
||||
if err := applyCollectionRules(app, []string{"containers", "container_stats", "system_stats", "systemd_services", "network_probe_stats"}, collectionRules{
|
||||
list: &systemScopedReadRule,
|
||||
}); err != nil {
|
||||
return err
|
||||
@@ -89,7 +92,7 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := applyCollectionRules(app, []string{"fingerprints"}, collectionRules{
|
||||
if err := applyCollectionRules(app, []string{"fingerprints", "network_probes"}, collectionRules{
|
||||
list: &systemScopedReadRule,
|
||||
view: &systemScopedReadRule,
|
||||
create: &systemScopedWriteRule,
|
||||
|
||||
@@ -279,9 +279,6 @@ func createFingerprintRecord(app core.App, systemID, token string) error {
|
||||
|
||||
// Returns the current config.yml file as a JSON object
|
||||
func GetYamlConfig(e *core.RequestEvent) error {
|
||||
if e.Auth.GetString("role") != "admin" {
|
||||
return e.ForbiddenError("Requires admin role", nil)
|
||||
}
|
||||
configContent, err := generateYAML(e.App)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -9,13 +9,13 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/henrygd/beszel/internal/alerts"
|
||||
"github.com/henrygd/beszel/internal/hub/config"
|
||||
"github.com/henrygd/beszel/internal/hub/heartbeat"
|
||||
"github.com/henrygd/beszel/internal/hub/systems"
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
"github.com/henrygd/beszel/internal/records"
|
||||
"github.com/henrygd/beszel/internal/users"
|
||||
|
||||
@@ -38,8 +38,6 @@ type Hub struct {
|
||||
appURL string
|
||||
}
|
||||
|
||||
var containerIDPattern = regexp.MustCompile(`^[a-fA-F0-9]{12,64}$`)
|
||||
|
||||
// NewHub creates a new Hub instance with default configuration
|
||||
func NewHub(app core.App) *Hub {
|
||||
hub := &Hub{App: app}
|
||||
@@ -47,7 +45,7 @@ func NewHub(app core.App) *Hub {
|
||||
hub.um = users.NewUserManager(hub)
|
||||
hub.rm = records.NewRecordManager(hub)
|
||||
hub.sm = systems.NewSystemManager(hub)
|
||||
hub.hb = heartbeat.New(app, GetEnv)
|
||||
hub.hb = heartbeat.New(app, utils.GetEnv)
|
||||
if hub.hb != nil {
|
||||
hub.hbStop = make(chan struct{})
|
||||
}
|
||||
@@ -55,15 +53,6 @@ func NewHub(app core.App) *Hub {
|
||||
return hub
|
||||
}
|
||||
|
||||
// GetEnv retrieves an environment variable with a "BESZEL_HUB_" prefix, or falls back to the unprefixed key.
|
||||
func GetEnv(key string) (value string, exists bool) {
|
||||
if value, exists = os.LookupEnv("BESZEL_HUB_" + key); exists {
|
||||
return value, exists
|
||||
}
|
||||
// Fallback to the old unprefixed key
|
||||
return os.LookupEnv(key)
|
||||
}
|
||||
|
||||
// onAfterBootstrapAndMigrations ensures the provided function runs after the database is set up and migrations are applied.
|
||||
// This is a workaround for behavior in PocketBase where onBootstrap runs before migrations, forcing use of onServe for this purpose.
|
||||
// However, PB's tests.TestApp is already bootstrapped, generally doesn't serve, but does handle migrations.
|
||||
@@ -92,6 +81,7 @@ func (h *Hub) StartHub() error {
|
||||
}
|
||||
// register middlewares
|
||||
h.registerMiddlewares(e)
|
||||
// bind events that aren't set up in different
|
||||
// register api routes
|
||||
if err := h.registerApiRoutes(e); err != nil {
|
||||
return err
|
||||
@@ -120,6 +110,8 @@ func (h *Hub) StartHub() error {
|
||||
h.App.OnRecordCreate("users").BindFunc(h.um.InitializeUserRole)
|
||||
h.App.OnRecordCreate("user_settings").BindFunc(h.um.InitializeUserSettings)
|
||||
|
||||
bindNetworkProbesEvents(h)
|
||||
|
||||
pb, ok := h.App.(*pocketbase.PocketBase)
|
||||
if !ok {
|
||||
return errors.New("not a pocketbase app")
|
||||
@@ -134,7 +126,7 @@ func (h *Hub) initialize(app core.App) error {
|
||||
// batch requests (for alerts)
|
||||
settings.Batch.Enabled = true
|
||||
// set URL if APP_URL env is set
|
||||
if appURL, isSet := GetEnv("APP_URL"); isSet {
|
||||
if appURL, isSet := utils.GetEnv("APP_URL"); isSet {
|
||||
h.appURL = appURL
|
||||
settings.Meta.AppURL = appURL
|
||||
}
|
||||
|
||||
120
internal/hub/probes.go
Normal file
120
internal/hub/probes.go
Normal file
@@ -0,0 +1,120 @@
|
||||
package hub
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/hub/systems"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
)
|
||||
|
||||
// generateProbeID creates a stable hash ID for a probe based on its configuration and the system it belongs to.
|
||||
func generateProbeID(systemId string, config probe.Config) string {
|
||||
intervalStr := strconv.FormatUint(uint64(config.Interval), 10)
|
||||
portStr := strconv.FormatUint(uint64(config.Port), 10)
|
||||
return systems.MakeStableHashId(systemId, config.Protocol, config.Target, portStr, intervalStr)
|
||||
}
|
||||
|
||||
// bindNetworkProbesEvents keeps probe records and agent probe state in sync.
|
||||
func bindNetworkProbesEvents(hub *Hub) {
|
||||
// on create, make sure the id is set to a stable hash
|
||||
hub.OnRecordCreate("network_probes").BindFunc(func(e *core.RecordEvent) error {
|
||||
systemID := e.Record.GetString("system")
|
||||
config := probeConfigFromRecord(e.Record)
|
||||
id := generateProbeID(systemID, *config)
|
||||
e.Record.Set("id", id)
|
||||
return e.Next()
|
||||
})
|
||||
|
||||
// sync probe to agent on creation and persist the first result immediately when available
|
||||
hub.OnRecordCreateRequest("network_probes").BindFunc(func(e *core.RecordRequestEvent) error {
|
||||
err := e.Next()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !e.Record.GetBool("enabled") {
|
||||
return nil
|
||||
}
|
||||
result, err := hub.upsertNetworkProbe(e.Record, true)
|
||||
if err != nil {
|
||||
hub.Logger().Warn("failed to sync probe to agent", "system", e.Record.GetString("system"), "probe", e.Record.Id, "err", err)
|
||||
return nil
|
||||
}
|
||||
if result == nil {
|
||||
return nil
|
||||
}
|
||||
setProbeResultFields(e.Record, *result)
|
||||
if err := e.App.SaveNoValidate(e.Record); err != nil {
|
||||
hub.Logger().Warn("failed to save initial probe result", "system", e.Record.GetString("system"), "probe", e.Record.Id, "err", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
hub.OnRecordUpdateRequest("network_probes").BindFunc(func(e *core.RecordRequestEvent) error {
|
||||
err := e.Next()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.Record.GetBool("enabled") {
|
||||
_, err = hub.upsertNetworkProbe(e.Record, false)
|
||||
} else {
|
||||
err = hub.deleteNetworkProbe(e.Record)
|
||||
}
|
||||
if err != nil {
|
||||
hub.Logger().Warn("failed to sync updated probe to agent", "system", e.Record.GetString("system"), "probe", e.Record.Id, "err", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// sync probe to agent on delete
|
||||
hub.OnRecordDeleteRequest("network_probes").BindFunc(func(e *core.RecordRequestEvent) error {
|
||||
err := e.Next()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := hub.deleteNetworkProbe(e.Record); err != nil {
|
||||
hub.Logger().Warn("failed to delete probe on agent", "system", e.Record.GetString("system"), "probe", e.Record.Id, "err", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// probeConfigFromRecord builds a probe config from a network_probes record.
|
||||
func probeConfigFromRecord(record *core.Record) *probe.Config {
|
||||
return &probe.Config{
|
||||
ID: record.Id,
|
||||
Target: record.GetString("target"),
|
||||
Protocol: record.GetString("protocol"),
|
||||
Port: uint16(record.GetInt("port")),
|
||||
Interval: uint16(record.GetInt("interval")),
|
||||
}
|
||||
}
|
||||
|
||||
// setProbeResultFields stores the latest probe result values on the record.
|
||||
func setProbeResultFields(record *core.Record, result probe.Result) {
|
||||
record.Set("res", result.Get(0))
|
||||
record.Set("resAvg1h", result.Get(1))
|
||||
record.Set("resMin1h", result.Get(2))
|
||||
record.Set("resMax1h", result.Get(3))
|
||||
record.Set("loss1h", result.Get(4))
|
||||
}
|
||||
|
||||
// upsertNetworkProbe applies the record's probe config to the target system.
|
||||
func (h *Hub) upsertNetworkProbe(record *core.Record, runNow bool) (*probe.Result, error) {
|
||||
systemID := record.GetString("system")
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return system.UpsertNetworkProbe(*probeConfigFromRecord(record), runNow)
|
||||
}
|
||||
|
||||
// deleteNetworkProbe removes the record's probe from the target system.
|
||||
func (h *Hub) deleteNetworkProbe(record *core.Record) error {
|
||||
systemID := record.GetString("system")
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return system.DeleteNetworkProbe(record.Id)
|
||||
}
|
||||
79
internal/hub/probes_test.go
Normal file
79
internal/hub/probes_test.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package hub
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestGenerateProbeID checks that probe IDs are stable for a given
// (systemID, config) pair and change when the system ID or any config
// field (here: interval) differs. Expected values are pinned hashes.
func TestGenerateProbeID(t *testing.T) {
	tests := []struct {
		name     string
		systemID string
		config   probe.Config
		expected string
	}{
		{
			name:     "HTTP probe on example.com",
			systemID: "sys123",
			config: probe.Config{
				Protocol: "http",
				Target:   "example.com",
				Port:     80,
				Interval: 60,
			},
			expected: "d5f27931",
		},
		{
			name:     "HTTP probe on example.com with different system ID",
			systemID: "sys1234",
			config: probe.Config{
				Protocol: "http",
				Target:   "example.com",
				Port:     80,
				Interval: 60,
			},
			expected: "6f8b17f1",
		},
		{
			name:     "Same probe, different interval",
			systemID: "sys1234",
			config: probe.Config{
				Protocol: "http",
				Target:   "example.com",
				Port:     80,
				Interval: 120,
			},
			// NOTE: 7 hex chars here vs 8 elsewhere — presumably leading-zero
			// trimming in the hash formatting; confirm against generateProbeID.
			expected: "6d4baf8",
		},
		{
			name:     "ICMP probe on 1.1.1.1",
			systemID: "sys456",
			config: probe.Config{
				Protocol: "icmp",
				Target:   "1.1.1.1",
				Port:     0,
				Interval: 10,
			},
			expected: "80b5836b",
		}, {
			name:     "ICMP probe on 1.1.1.1 with different system ID",
			systemID: "sys4567",
			config: probe.Config{
				Protocol: "icmp",
				Target:   "1.1.1.1",
				Port:     0,
				Interval: 10,
			},
			expected: "a6652680",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := generateProbeID(tt.systemID, tt.config)
			assert.Equal(t, tt.expected, got, "generateProbeID() = %v, want %v", got, tt.expected)
		})
	}
}
|
||||
42
internal/hub/server.go
Normal file
42
internal/hub/server.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package hub
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
)
|
||||
|
||||
// PublicAppInfo defines the structure of the public app information that will be injected into the HTML.
// Field names use SCREAMING_SNAKE on purpose: with the default JSON encoding the
// keys serialize as-is (presumably matching what the frontend reads) — do not rename.
type PublicAppInfo struct {
	BASE_PATH           string // subpath the app is served under, always ending in "/"
	HUB_VERSION         string
	HUB_URL             string
	OAUTH_DISABLE_POPUP bool `json:"OAUTH_DISABLE_POPUP,omitempty"`
}
|
||||
|
||||
// modifyIndexHTML injects the public app information into the index.html content
|
||||
func modifyIndexHTML(hub *Hub, html []byte) string {
|
||||
info := getPublicAppInfo(hub)
|
||||
content, err := json.Marshal(info)
|
||||
if err != nil {
|
||||
return string(html)
|
||||
}
|
||||
htmlContent := strings.ReplaceAll(string(html), "./", info.BASE_PATH)
|
||||
return strings.Replace(htmlContent, "\"{info}\"", string(content), 1)
|
||||
}
|
||||
|
||||
func getPublicAppInfo(hub *Hub) PublicAppInfo {
|
||||
parsedURL, _ := url.Parse(hub.appURL)
|
||||
info := PublicAppInfo{
|
||||
BASE_PATH: strings.TrimSuffix(parsedURL.Path, "/") + "/",
|
||||
HUB_VERSION: beszel.Version,
|
||||
HUB_URL: hub.appURL,
|
||||
}
|
||||
if val, _ := utils.GetEnv("OAUTH_DISABLE_POPUP"); val == "true" {
|
||||
info.OAUTH_DISABLE_POPUP = true
|
||||
}
|
||||
return info
|
||||
}
|
||||
@@ -5,14 +5,11 @@ package hub
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/osutils"
|
||||
)
|
||||
@@ -39,7 +36,7 @@ func (rm *responseModifier) RoundTrip(req *http.Request) (*http.Response, error)
|
||||
}
|
||||
resp.Body.Close()
|
||||
// Create a new response with the modified body
|
||||
modifiedBody := rm.modifyHTML(string(body))
|
||||
modifiedBody := modifyIndexHTML(rm.hub, body)
|
||||
resp.Body = io.NopCloser(strings.NewReader(modifiedBody))
|
||||
resp.ContentLength = int64(len(modifiedBody))
|
||||
resp.Header.Set("Content-Length", fmt.Sprintf("%d", len(modifiedBody)))
|
||||
@@ -47,22 +44,8 @@ func (rm *responseModifier) RoundTrip(req *http.Request) (*http.Response, error)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (rm *responseModifier) modifyHTML(html string) string {
|
||||
parsedURL, err := url.Parse(rm.hub.appURL)
|
||||
if err != nil {
|
||||
return html
|
||||
}
|
||||
// fix base paths in html if using subpath
|
||||
basePath := strings.TrimSuffix(parsedURL.Path, "/") + "/"
|
||||
html = strings.ReplaceAll(html, "./", basePath)
|
||||
html = strings.Replace(html, "{{V}}", beszel.Version, 1)
|
||||
html = strings.Replace(html, "{{HUB_URL}}", rm.hub.appURL, 1)
|
||||
return html
|
||||
}
|
||||
|
||||
// startServer sets up the development server for Beszel
|
||||
func (h *Hub) startServer(se *core.ServeEvent) error {
|
||||
slog.Info("starting server", "appURL", h.appURL)
|
||||
proxy := httputil.NewSingleHostReverseProxy(&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "localhost:5173",
|
||||
|
||||
@@ -5,10 +5,9 @@ package hub
|
||||
import (
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
"github.com/henrygd/beszel/internal/site"
|
||||
|
||||
"github.com/pocketbase/pocketbase/apis"
|
||||
@@ -17,22 +16,13 @@ import (
|
||||
|
||||
// startServer sets up the production server for Beszel
|
||||
func (h *Hub) startServer(se *core.ServeEvent) error {
|
||||
// parse app url
|
||||
parsedURL, err := url.Parse(h.appURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// fix base paths in html if using subpath
|
||||
basePath := strings.TrimSuffix(parsedURL.Path, "/") + "/"
|
||||
indexFile, _ := fs.ReadFile(site.DistDirFS, "index.html")
|
||||
html := strings.ReplaceAll(string(indexFile), "./", basePath)
|
||||
html = strings.Replace(html, "{{V}}", beszel.Version, 1)
|
||||
html = strings.Replace(html, "{{HUB_URL}}", h.appURL, 1)
|
||||
html := modifyIndexHTML(h, indexFile)
|
||||
// set up static asset serving
|
||||
staticPaths := [2]string{"/static/", "/assets/"}
|
||||
serveStatic := apis.Static(site.DistDirFS, false)
|
||||
// get CSP configuration
|
||||
csp, cspExists := GetEnv("CSP")
|
||||
csp, cspExists := utils.GetEnv("CSP")
|
||||
// add route
|
||||
se.Router.GET("/{path...}", func(e *core.RequestEvent) error {
|
||||
// serve static assets if path is in staticPaths
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"log/slog"
|
||||
"math/rand"
|
||||
"net"
|
||||
"strings"
|
||||
@@ -14,9 +15,11 @@ import (
|
||||
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/hub/transport"
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
"github.com/henrygd/beszel/internal/hub/ws"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
@@ -28,6 +31,7 @@ import (
|
||||
"github.com/lxzan/gws"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
@@ -145,6 +149,7 @@ func (sys *System) update() error {
|
||||
// update smart interval if it's set on the agent side
|
||||
if data.Details.SmartInterval > 0 {
|
||||
sys.smartInterval = data.Details.SmartInterval
|
||||
sys.manager.hub.Logger().Info("SMART interval updated from agent details", "system", sys.Id, "interval", sys.smartInterval.String())
|
||||
// make sure we reset expiration of lastFetch to remain as long as the new smart interval
|
||||
// to prevent premature expiration leading to new fetch if interval is different.
|
||||
sys.manager.smartFetchMap.UpdateExpiration(sys.Id, sys.smartInterval+time.Minute)
|
||||
@@ -156,11 +161,10 @@ func (sys *System) update() error {
|
||||
if sys.smartInterval <= 0 {
|
||||
sys.smartInterval = time.Hour
|
||||
}
|
||||
lastFetch, _ := sys.manager.smartFetchMap.GetOk(sys.Id)
|
||||
if time.Since(time.UnixMilli(lastFetch-1e4)) >= sys.smartInterval && sys.smartFetching.CompareAndSwap(false, true) {
|
||||
if sys.shouldFetchSmart() && sys.smartFetching.CompareAndSwap(false, true) {
|
||||
sys.manager.hub.Logger().Info("SMART fetch", "system", sys.Id, "interval", sys.smartInterval.String())
|
||||
go func() {
|
||||
defer sys.smartFetching.Store(false)
|
||||
sys.manager.smartFetchMap.Set(sys.Id, time.Now().UnixMilli(), sys.smartInterval+time.Minute)
|
||||
_ = sys.FetchAndSaveSmartDevices()
|
||||
}()
|
||||
}
|
||||
@@ -184,7 +188,7 @@ func (sys *System) handlePaused() {
|
||||
|
||||
// createRecords updates the system record and adds system_stats and container_stats records
|
||||
func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error) {
|
||||
systemRecord, err := sys.getRecord()
|
||||
systemRecord, err := sys.getRecord(sys.manager.hub)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -237,6 +241,12 @@ func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error
|
||||
}
|
||||
}
|
||||
|
||||
if data.Probes != nil {
|
||||
if err := updateNetworkProbesRecords(txApp, data.Probes, sys.Id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// update system record (do this last because it triggers alerts and we need above records to be inserted first)
|
||||
systemRecord.Set("status", up)
|
||||
systemRecord.Set("info", data.Info)
|
||||
@@ -288,7 +298,7 @@ func createSystemdStatsRecords(app core.App, data []*systemd.Service, systemId s
|
||||
for i, service := range data {
|
||||
suffix := fmt.Sprintf("%d", i)
|
||||
valueStrings = append(valueStrings, fmt.Sprintf("({:id%[1]s}, {:system}, {:name%[1]s}, {:state%[1]s}, {:sub%[1]s}, {:cpu%[1]s}, {:cpuPeak%[1]s}, {:memory%[1]s}, {:memPeak%[1]s}, {:updated})", suffix))
|
||||
params["id"+suffix] = makeStableHashId(systemId, service.Name)
|
||||
params["id"+suffix] = MakeStableHashId(systemId, service.Name)
|
||||
params["name"+suffix] = service.Name
|
||||
params["state"+suffix] = service.State
|
||||
params["sub"+suffix] = service.Sub
|
||||
@@ -305,6 +315,88 @@ func createSystemdStatsRecords(app core.App, data []*systemd.Service, systemId s
|
||||
return err
|
||||
}
|
||||
|
||||
func updateNetworkProbesRecords(app core.App, data map[string]probe.Result, systemId string) error {
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
collectionName := "network_probes"
|
||||
|
||||
// If realtime updates are active, we save via PocketBase records to trigger realtime events.
|
||||
// Otherwise we can do a more efficient direct update via SQL
|
||||
realtimeActive := utils.RealtimeActiveForCollection(app, collectionName, func(filterQuery string) bool {
|
||||
slog.Info("Checking realtime subscription filter for network probes", "filterQuery", filterQuery)
|
||||
return !strings.Contains(filterQuery, "system") || strings.Contains(filterQuery, systemId)
|
||||
})
|
||||
|
||||
var db dbx.Builder
|
||||
var nowString string
|
||||
var updateQuery *dbx.Query
|
||||
if !realtimeActive {
|
||||
db = app.DB()
|
||||
nowString = time.Now().UTC().Format(types.DefaultDateLayout)
|
||||
sql := fmt.Sprintf("UPDATE %s SET resAvg={:res}, resMin1h={:resMin1h}, resMax1h={:resMax1h}, resAvg1h={:resAvg1h}, loss1h={:loss1h}, updated={:updated} WHERE id={:id}", collectionName)
|
||||
updateQuery = db.NewQuery(sql)
|
||||
}
|
||||
|
||||
// insert network probe stats records
|
||||
switch realtimeActive {
|
||||
case true:
|
||||
collection, _ := app.FindCachedCollectionByNameOrId("network_probe_stats")
|
||||
record := core.NewRecord(collection)
|
||||
record.Set("system", systemId)
|
||||
record.Set("stats", data)
|
||||
record.Set("type", "1m")
|
||||
err = app.SaveNoValidate(record)
|
||||
default:
|
||||
if dataJSON, marshalErr := json.Marshal(data); marshalErr == nil {
|
||||
sql := "INSERT INTO network_probe_stats (system, stats, type, created) VALUES ({:system}, {:stats}, {:type}, {:created})"
|
||||
insertQuery := db.NewQuery(sql)
|
||||
_, err = insertQuery.Bind(dbx.Params{
|
||||
"system": systemId,
|
||||
"stats": dataJSON,
|
||||
"type": "1m",
|
||||
"created": nowString,
|
||||
}).Execute()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
app.Logger().Error("Failed to update probe stats", "system", systemId, "err", err)
|
||||
}
|
||||
|
||||
// update network_probes records
|
||||
for id, values := range data {
|
||||
switch realtimeActive {
|
||||
case true:
|
||||
var record *core.Record
|
||||
record, err = app.FindRecordById(collectionName, id)
|
||||
if err == nil {
|
||||
record.Set("res", values.Get(0))
|
||||
record.Set("resAvg1h", values.Get(1))
|
||||
record.Set("resMin1h", values.Get(2))
|
||||
record.Set("resMax1h", values.Get(3))
|
||||
record.Set("loss1h", values.Get(4))
|
||||
err = app.SaveNoValidate(record)
|
||||
}
|
||||
default:
|
||||
_, err = updateQuery.Bind(dbx.Params{
|
||||
"id": id,
|
||||
"res": values.Get(0),
|
||||
"resAvg1h": values.Get(1),
|
||||
"resMin1h": values.Get(2),
|
||||
"resMax1h": values.Get(3),
|
||||
"loss1h": values.Get(4),
|
||||
"updated": nowString,
|
||||
}).Execute()
|
||||
}
|
||||
if err != nil {
|
||||
app.Logger().Warn("Failed to update probe", "system", systemId, "probe", id, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createContainerRecords creates container records
|
||||
func createContainerRecords(app core.App, data []*container.Stats, systemId string) error {
|
||||
if len(data) == 0 {
|
||||
@@ -343,8 +435,8 @@ func createContainerRecords(app core.App, data []*container.Stats, systemId stri
|
||||
|
||||
// getRecord retrieves the system record from the database.
|
||||
// If the record is not found, it removes the system from the manager.
|
||||
func (sys *System) getRecord() (*core.Record, error) {
|
||||
record, err := sys.manager.hub.FindRecordById("systems", sys.Id)
|
||||
func (sys *System) getRecord(app core.App) (*core.Record, error) {
|
||||
record, err := app.FindRecordById("systems", sys.Id)
|
||||
if err != nil || record == nil {
|
||||
_ = sys.manager.RemoveSystem(sys.Id)
|
||||
return nil, err
|
||||
@@ -352,6 +444,27 @@ func (sys *System) getRecord() (*core.Record, error) {
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// HasUser checks if the given user is in the system's users list.
|
||||
// Returns true if SHARE_ALL_SYSTEMS is enabled (any authenticated user can access any system).
|
||||
func (sys *System) HasUser(app core.App, user *core.Record) bool {
|
||||
if user == nil {
|
||||
return false
|
||||
}
|
||||
if v, _ := utils.GetEnv("SHARE_ALL_SYSTEMS"); v == "true" {
|
||||
return true
|
||||
}
|
||||
var recordData = struct {
|
||||
Users string
|
||||
}{}
|
||||
err := app.DB().NewQuery("SELECT users FROM systems WHERE id={:id}").
|
||||
Bind(dbx.Params{"id": sys.Id}).
|
||||
One(&recordData)
|
||||
if err != nil || recordData.Users == "" {
|
||||
return false
|
||||
}
|
||||
return strings.Contains(recordData.Users, user.Id)
|
||||
}
|
||||
|
||||
// setDown marks a system as down in the database.
|
||||
// It takes the original error that caused the system to go down and returns any error
|
||||
// encountered during the process of updating the system status.
|
||||
@@ -359,7 +472,7 @@ func (sys *System) setDown(originalError error) error {
|
||||
if sys.Status == down || sys.Status == paused {
|
||||
return nil
|
||||
}
|
||||
record, err := sys.getRecord()
|
||||
record, err := sys.getRecord(sys.manager.hub)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -518,7 +631,7 @@ func (sys *System) FetchSmartDataFromAgent() (map[string]smart.SmartData, error)
|
||||
return result, err
|
||||
}
|
||||
|
||||
func makeStableHashId(strings ...string) string {
|
||||
func MakeStableHashId(strings ...string) string {
|
||||
hash := fnv.New32a()
|
||||
for _, str := range strings {
|
||||
hash.Write([]byte(str))
|
||||
@@ -643,6 +756,7 @@ func (s *System) createSSHClient() error {
|
||||
return err
|
||||
}
|
||||
s.agentVersion, _ = extractAgentVersion(string(s.client.Conn.ServerVersion()))
|
||||
s.manager.resetFailedSmartFetchState(s.Id)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/henrygd/beszel/internal/hub/ws"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/henrygd/beszel/internal/hub/expirymap"
|
||||
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
"github.com/henrygd/beszel"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/store"
|
||||
"golang.org/x/crypto/ssh"
|
||||
@@ -44,7 +46,7 @@ type SystemManager struct {
|
||||
hub hubLike // Hub interface for database and alert operations
|
||||
systems *store.Store[string, *System] // Thread-safe store of active systems
|
||||
sshConfig *ssh.ClientConfig // SSH client configuration for system connections
|
||||
smartFetchMap *expirymap.ExpiryMap[int64] // Stores last SMART fetch time per system ID
|
||||
smartFetchMap *expirymap.ExpiryMap[smartFetchState] // Stores last SMART fetch time/result; TTL is only for cleanup
|
||||
}
|
||||
|
||||
// hubLike defines the interface requirements for the hub dependency.
|
||||
@@ -54,6 +56,7 @@ type hubLike interface {
|
||||
GetSSHKey(dataDir string) (ssh.Signer, error)
|
||||
HandleSystemAlerts(systemRecord *core.Record, data *system.CombinedData) error
|
||||
HandleStatusAlerts(status string, systemRecord *core.Record) error
|
||||
CancelPendingStatusAlerts(systemID string)
|
||||
}
|
||||
|
||||
// NewSystemManager creates a new SystemManager instance with the provided hub.
|
||||
@@ -62,7 +65,7 @@ func NewSystemManager(hub hubLike) *SystemManager {
|
||||
return &SystemManager{
|
||||
systems: store.New(map[string]*System{}),
|
||||
hub: hub,
|
||||
smartFetchMap: expirymap.New[int64](time.Hour),
|
||||
smartFetchMap: expirymap.New[smartFetchState](time.Hour),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -189,6 +192,7 @@ func (sm *SystemManager) onRecordAfterUpdateSuccess(e *core.RecordEvent) error {
|
||||
system.closeSSHConnection()
|
||||
}
|
||||
_ = deactivateAlerts(e.App, e.Record.Id)
|
||||
sm.hub.CancelPendingStatusAlerts(e.Record.Id)
|
||||
return e.Next()
|
||||
case pending:
|
||||
// Resume monitoring, preferring existing WebSocket connection
|
||||
@@ -306,6 +310,7 @@ func (sm *SystemManager) AddWebSocketSystem(systemId string, agentVersion semver
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sm.resetFailedSmartFetchState(systemId)
|
||||
|
||||
system := sm.NewSystem(systemId)
|
||||
system.WsConn = wsConn
|
||||
@@ -314,9 +319,39 @@ func (sm *SystemManager) AddWebSocketSystem(systemId string, agentVersion semver
|
||||
if err := sm.AddRecord(systemRecord, system); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sync network probes to the newly connected agent
|
||||
go func() {
|
||||
configs := sm.GetProbeConfigsForSystem(systemId)
|
||||
if len(configs) > 0 {
|
||||
if err := system.SyncNetworkProbes(configs); err != nil {
|
||||
sm.hub.Logger().Warn("failed to sync probes to agent", "system", systemId, "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// resetFailedSmartFetchState clears only failed SMART cooldown entries so a fresh
|
||||
// agent reconnect retries SMART discovery immediately after configuration changes.
|
||||
func (sm *SystemManager) resetFailedSmartFetchState(systemID string) {
|
||||
state, ok := sm.smartFetchMap.GetOk(systemID)
|
||||
if ok && !state.Successful {
|
||||
sm.smartFetchMap.Remove(systemID)
|
||||
}
|
||||
}
|
||||
|
||||
// GetProbeConfigsForSystem returns all enabled probe configs for a system.
|
||||
func (sm *SystemManager) GetProbeConfigsForSystem(systemID string) []probe.Config {
|
||||
var configs []probe.Config
|
||||
_ = sm.hub.DB().
|
||||
NewQuery("SELECT id, target, protocol, port, interval FROM network_probes WHERE system = {:system} AND enabled = true").
|
||||
Bind(dbx.Params{"system": systemID}).
|
||||
All(&configs)
|
||||
return configs
|
||||
}
|
||||
|
||||
// createSSHClientConfig initializes the SSH client configuration for connecting to an agent's server
|
||||
func (sm *SystemManager) createSSHClientConfig() error {
|
||||
privateKey, err := sm.hub.GetSSHKey("")
|
||||
|
||||
48
internal/hub/systems/system_probes.go
Normal file
48
internal/hub/systems/system_probes.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package systems
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
)
|
||||
|
||||
// SyncNetworkProbes sends probe configurations to the agent.
|
||||
func (sys *System) SyncNetworkProbes(configs []probe.Config) error {
|
||||
_, err := sys.syncNetworkProbes(probe.SyncRequest{Action: probe.SyncActionReplace, Configs: configs})
|
||||
return err
|
||||
}
|
||||
|
||||
// UpsertNetworkProbe sends a single probe configuration change to the agent.
|
||||
func (sys *System) UpsertNetworkProbe(config probe.Config, runNow bool) (*probe.Result, error) {
|
||||
resp, err := sys.syncNetworkProbes(probe.SyncRequest{
|
||||
Action: probe.SyncActionUpsert,
|
||||
Config: config,
|
||||
RunNow: runNow,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(resp.Result) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
result := resp.Result
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// DeleteNetworkProbe removes a single probe task from the agent.
|
||||
func (sys *System) DeleteNetworkProbe(id string) error {
|
||||
_, err := sys.syncNetworkProbes(probe.SyncRequest{
|
||||
Action: probe.SyncActionDelete,
|
||||
Config: probe.Config{ID: id},
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (sys *System) syncNetworkProbes(req probe.SyncRequest) (probe.SyncResponse, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
var result probe.SyncResponse
|
||||
return result, sys.request(ctx, common.SyncNetworkProbes, req, &result)
|
||||
}
|
||||
@@ -4,18 +4,61 @@ import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
)
|
||||
|
||||
// smartFetchState records the outcome of the most recent SMART fetch for a
// system, used to implement the fetch cooldown and retry-on-reconnect logic.
type smartFetchState struct {
	// LastAttempt is the Unix-millisecond timestamp of the last fetch attempt.
	LastAttempt int64
	// Successful is true when the last fetch returned at least one device with no error.
	Successful bool
}
|
||||
|
||||
// FetchAndSaveSmartDevices fetches SMART data from the agent and saves it to the database
|
||||
func (sys *System) FetchAndSaveSmartDevices() error {
|
||||
smartData, err := sys.FetchSmartDataFromAgent()
|
||||
if err != nil || len(smartData) == 0 {
|
||||
if err != nil {
|
||||
sys.recordSmartFetchResult(err, 0)
|
||||
return err
|
||||
}
|
||||
return sys.saveSmartDevices(smartData)
|
||||
err = sys.saveSmartDevices(smartData)
|
||||
sys.recordSmartFetchResult(err, len(smartData))
|
||||
return err
|
||||
}
|
||||
|
||||
// recordSmartFetchResult stores a cooldown entry for the SMART interval and marks
|
||||
// whether the last fetch produced any devices, so failed setup can retry on reconnect.
|
||||
func (sys *System) recordSmartFetchResult(err error, deviceCount int) {
|
||||
if sys.manager == nil {
|
||||
return
|
||||
}
|
||||
interval := sys.smartFetchInterval()
|
||||
success := err == nil && deviceCount > 0
|
||||
if sys.manager.hub != nil {
|
||||
sys.manager.hub.Logger().Info("SMART fetch result", "system", sys.Id, "success", success, "devices", deviceCount, "interval", interval.String(), "err", err)
|
||||
}
|
||||
sys.manager.smartFetchMap.Set(sys.Id, smartFetchState{LastAttempt: time.Now().UnixMilli(), Successful: success}, interval+time.Minute)
|
||||
}
|
||||
|
||||
// shouldFetchSmart returns true when there is no active SMART cooldown entry for this system.
|
||||
func (sys *System) shouldFetchSmart() bool {
|
||||
if sys.manager == nil {
|
||||
return true
|
||||
}
|
||||
state, ok := sys.manager.smartFetchMap.GetOk(sys.Id)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
return !time.UnixMilli(state.LastAttempt).Add(sys.smartFetchInterval()).After(time.Now())
|
||||
}
|
||||
|
||||
// smartFetchInterval returns the agent-provided SMART interval or the default when unset.
|
||||
func (sys *System) smartFetchInterval() time.Duration {
|
||||
if sys.smartInterval > 0 {
|
||||
return sys.smartInterval
|
||||
}
|
||||
return time.Hour
|
||||
}
|
||||
|
||||
// saveSmartDevices saves SMART device data to the smart_devices collection
|
||||
@@ -41,7 +84,7 @@ func (sys *System) saveSmartDevices(smartData map[string]smart.SmartData) error
|
||||
|
||||
func (sys *System) upsertSmartDeviceRecord(collection *core.Collection, deviceKey string, device smart.SmartData) error {
|
||||
hub := sys.manager.hub
|
||||
recordID := makeStableHashId(sys.Id, deviceKey)
|
||||
recordID := MakeStableHashId(sys.Id, deviceKey)
|
||||
|
||||
record, err := hub.FindRecordById(collection, recordID)
|
||||
if err != nil {
|
||||
|
||||
94
internal/hub/systems/system_smart_test.go
Normal file
94
internal/hub/systems/system_smart_test.go
Normal file
@@ -0,0 +1,94 @@
|
||||
//go:build testing
|
||||
|
||||
package systems
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/hub/expirymap"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// TestRecordSmartFetchResult verifies how fetch outcomes are stored in the
// manager's smartFetchMap: an entry is always written, and Successful is set
// only when the error is nil AND at least one device was returned.
func TestRecordSmartFetchResult(t *testing.T) {
	sm := &SystemManager{smartFetchMap: expirymap.New[smartFetchState](time.Hour)}
	t.Cleanup(sm.smartFetchMap.StopCleaner)

	sys := &System{
		Id:            "system-1",
		manager:       sm,
		smartInterval: time.Hour,
	}

	// Successful fetch with devices
	sys.recordSmartFetchResult(nil, 5)
	state, ok := sm.smartFetchMap.GetOk(sys.Id)
	assert.True(t, ok, "expected smart fetch result to be stored")
	assert.True(t, state.Successful, "expected successful fetch state to be recorded")

	// Failed fetch
	sys.recordSmartFetchResult(errors.New("failed"), 0)
	state, ok = sm.smartFetchMap.GetOk(sys.Id)
	assert.True(t, ok, "expected failed smart fetch state to be stored")
	assert.False(t, state.Successful, "expected failed smart fetch state to be marked unsuccessful")

	// Successful fetch but no devices
	sys.recordSmartFetchResult(nil, 0)
	state, ok = sm.smartFetchMap.GetOk(sys.Id)
	assert.True(t, ok, "expected fetch with zero devices to be stored")
	assert.False(t, state.Successful, "expected fetch with zero devices to be marked unsuccessful")
}
|
||||
|
||||
// TestShouldFetchSmart verifies the cooldown gate: a fetch is allowed when no
// entry exists, blocked while a recent entry exists, and allowed again once
// the entry is removed.
func TestShouldFetchSmart(t *testing.T) {
	sm := &SystemManager{smartFetchMap: expirymap.New[smartFetchState](time.Hour)}
	t.Cleanup(sm.smartFetchMap.StopCleaner)

	sys := &System{
		Id:            "system-1",
		manager:       sm,
		smartInterval: time.Hour,
	}

	assert.True(t, sys.shouldFetchSmart(), "expected initial smart fetch to be allowed")

	// a just-recorded (failed) attempt starts the cooldown
	sys.recordSmartFetchResult(errors.New("failed"), 0)
	assert.False(t, sys.shouldFetchSmart(), "expected smart fetch to be blocked while interval entry exists")

	sm.smartFetchMap.Remove(sys.Id)
	assert.True(t, sys.shouldFetchSmart(), "expected smart fetch to be allowed after interval entry is cleared")
}
}
|
||||
|
||||
// TestShouldFetchSmart_IgnoresExtendedTTLWhenFetchIsDue verifies that the
// fetch decision is based on LastAttempt + interval, not on the map entry's
// TTL: an old-enough attempt permits a fetch even if the TTL was extended.
func TestShouldFetchSmart_IgnoresExtendedTTLWhenFetchIsDue(t *testing.T) {
	sm := &SystemManager{smartFetchMap: expirymap.New[smartFetchState](time.Hour)}
	t.Cleanup(sm.smartFetchMap.StopCleaner)

	sys := &System{
		Id:            "system-1",
		manager:       sm,
		smartInterval: time.Hour,
	}

	// last attempt was 2h ago with a 1h interval, so a fetch is overdue
	sm.smartFetchMap.Set(sys.Id, smartFetchState{
		LastAttempt: time.Now().Add(-2 * time.Hour).UnixMilli(),
		Successful:  true,
	}, 10*time.Minute)
	// extend the TTL well beyond the interval; it must not block the fetch
	sm.smartFetchMap.UpdateExpiration(sys.Id, 3*time.Hour)

	assert.True(t, sys.shouldFetchSmart(), "expected fetch time to take precedence over updated TTL")
}
|
||||
|
||||
// TestResetFailedSmartFetchState verifies that a reconnect clears only failed
// SMART fetch entries: failed states are removed (allowing an immediate retry)
// while successful states keep their cooldown.
func TestResetFailedSmartFetchState(t *testing.T) {
	sm := &SystemManager{smartFetchMap: expirymap.New[smartFetchState](time.Hour)}
	t.Cleanup(sm.smartFetchMap.StopCleaner)

	// failed entry: should be removed
	sm.smartFetchMap.Set("system-1", smartFetchState{LastAttempt: time.Now().UnixMilli(), Successful: false}, time.Hour)
	sm.resetFailedSmartFetchState("system-1")
	_, ok := sm.smartFetchMap.GetOk("system-1")
	assert.False(t, ok, "expected failed smart fetch state to be cleared on reconnect")

	// successful entry: should survive
	sm.smartFetchMap.Set("system-1", smartFetchState{LastAttempt: time.Now().UnixMilli(), Successful: true}, time.Hour)
	sm.resetFailedSmartFetchState("system-1")
	_, ok = sm.smartFetchMap.GetOk("system-1")
	assert.True(t, ok, "expected successful smart fetch state to be preserved")
}
|
||||
@@ -14,9 +14,9 @@ func TestGetSystemdServiceId(t *testing.T) {
|
||||
serviceName := "nginx.service"
|
||||
|
||||
// Call multiple times and ensure same result
|
||||
id1 := makeStableHashId(systemId, serviceName)
|
||||
id2 := makeStableHashId(systemId, serviceName)
|
||||
id3 := makeStableHashId(systemId, serviceName)
|
||||
id1 := MakeStableHashId(systemId, serviceName)
|
||||
id2 := MakeStableHashId(systemId, serviceName)
|
||||
id3 := MakeStableHashId(systemId, serviceName)
|
||||
|
||||
assert.Equal(t, id1, id2)
|
||||
assert.Equal(t, id2, id3)
|
||||
@@ -29,10 +29,10 @@ func TestGetSystemdServiceId(t *testing.T) {
|
||||
serviceName1 := "nginx.service"
|
||||
serviceName2 := "apache.service"
|
||||
|
||||
id1 := makeStableHashId(systemId1, serviceName1)
|
||||
id2 := makeStableHashId(systemId2, serviceName1)
|
||||
id3 := makeStableHashId(systemId1, serviceName2)
|
||||
id4 := makeStableHashId(systemId2, serviceName2)
|
||||
id1 := MakeStableHashId(systemId1, serviceName1)
|
||||
id2 := MakeStableHashId(systemId2, serviceName1)
|
||||
id3 := MakeStableHashId(systemId1, serviceName2)
|
||||
id4 := MakeStableHashId(systemId2, serviceName2)
|
||||
|
||||
// All IDs should be different
|
||||
assert.NotEqual(t, id1, id2)
|
||||
@@ -56,14 +56,14 @@ func TestGetSystemdServiceId(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
id := makeStableHashId(tc.systemId, tc.serviceName)
|
||||
id := MakeStableHashId(tc.systemId, tc.serviceName)
|
||||
// FNV-32 produces 8 hex characters
|
||||
assert.Len(t, id, 8, "ID should be 8 characters for systemId='%s', serviceName='%s'", tc.systemId, tc.serviceName)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("hexadecimal output", func(t *testing.T) {
|
||||
id := makeStableHashId("test-system", "test-service")
|
||||
id := MakeStableHashId("test-system", "test-service")
|
||||
assert.NotEmpty(t, id)
|
||||
|
||||
// Should only contain hexadecimal characters
|
||||
|
||||
@@ -421,3 +421,60 @@ func testOld(t *testing.T, hub *tests.TestHub) {
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHasUser(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
sm := hub.GetSystemManager()
|
||||
err = sm.Initialize()
|
||||
require.NoError(t, err)
|
||||
|
||||
user1, err := tests.CreateUser(hub, "user1@test.com", "password123")
|
||||
require.NoError(t, err)
|
||||
user2, err := tests.CreateUser(hub, "user2@test.com", "password123")
|
||||
require.NoError(t, err)
|
||||
|
||||
systemRecord, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "has-user-test",
|
||||
"host": "127.0.0.1",
|
||||
"port": "33914",
|
||||
"users": []string{user1.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
sys, err := sm.GetSystemFromStore(systemRecord.Id)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("user in list returns true", func(t *testing.T) {
|
||||
assert.True(t, sys.HasUser(hub, user1))
|
||||
})
|
||||
|
||||
t.Run("user not in list returns false", func(t *testing.T) {
|
||||
assert.False(t, sys.HasUser(hub, user2))
|
||||
})
|
||||
|
||||
t.Run("unknown user ID returns false", func(t *testing.T) {
|
||||
assert.False(t, sys.HasUser(hub, nil))
|
||||
})
|
||||
|
||||
t.Run("SHARE_ALL_SYSTEMS=true grants access to non-member", func(t *testing.T) {
|
||||
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||
assert.True(t, sys.HasUser(hub, user2))
|
||||
})
|
||||
|
||||
t.Run("BESZEL_HUB_SHARE_ALL_SYSTEMS=true grants access to non-member", func(t *testing.T) {
|
||||
t.Setenv("BESZEL_HUB_SHARE_ALL_SYSTEMS", "true")
|
||||
assert.True(t, sys.HasUser(hub, user2))
|
||||
})
|
||||
|
||||
t.Run("additional user works", func(t *testing.T) {
|
||||
assert.False(t, sys.HasUser(hub, user2))
|
||||
systemRecord.Set("users", []string{user1.Id, user2.Id})
|
||||
err = hub.Save(systemRecord)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, sys.HasUser(hub, user1))
|
||||
assert.True(t, sys.HasUser(hub, user2))
|
||||
})
|
||||
}
|
||||
|
||||
39
internal/hub/utils/utils.go
Normal file
39
internal/hub/utils/utils.go
Normal file
@@ -0,0 +1,39 @@
|
||||
// Package utils provides utility functions for the hub.
|
||||
package utils
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
)
|
||||
|
||||
// GetEnv retrieves an environment variable with a "BESZEL_HUB_" prefix, or falls back to the unprefixed key.
|
||||
func GetEnv(key string) (value string, exists bool) {
|
||||
if value, exists = os.LookupEnv("BESZEL_HUB_" + key); exists {
|
||||
return value, exists
|
||||
}
|
||||
return os.LookupEnv(key)
|
||||
}
|
||||
|
||||
// realtimeActiveForCollection checks if there are active WebSocket subscriptions for the given collection.
|
||||
func RealtimeActiveForCollection(app core.App, collectionName string, validateFn func(filterQuery string) bool) bool {
|
||||
broker := app.SubscriptionsBroker()
|
||||
if broker.TotalClients() == 0 {
|
||||
return false
|
||||
}
|
||||
for _, client := range broker.Clients() {
|
||||
subs := client.Subscriptions(collectionName)
|
||||
if len(subs) > 0 {
|
||||
if validateFn == nil {
|
||||
return true
|
||||
}
|
||||
for k := range subs {
|
||||
filter := subs[k].Query["filter"]
|
||||
if validateFn(filter) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1699,6 +1699,223 @@ func init() {
|
||||
"type": "base",
|
||||
"updateRule": null,
|
||||
"viewRule": null
|
||||
},
|
||||
{
|
||||
"id": "np_probes_001",
|
||||
"listRule": null,
|
||||
"viewRule": null,
|
||||
"createRule": null,
|
||||
"updateRule": null,
|
||||
"deleteRule": null,
|
||||
"name": "network_probes",
|
||||
"type": "base",
|
||||
"fields": [
|
||||
{
|
||||
"autogeneratePattern": "[a-z0-9]{15}",
|
||||
"hidden": false,
|
||||
"id": "text3208210256",
|
||||
"max": 15,
|
||||
"min": 15,
|
||||
"name": "id",
|
||||
"pattern": "^[a-z0-9]+$",
|
||||
"presentable": false,
|
||||
"primaryKey": true,
|
||||
"required": true,
|
||||
"system": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"cascadeDelete": true,
|
||||
"collectionId": "2hz5ncl8tizk5nx",
|
||||
"hidden": false,
|
||||
"id": "np_system",
|
||||
"maxSelect": 1,
|
||||
"minSelect": 0,
|
||||
"name": "system",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "relation"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "np_name",
|
||||
"max": 200,
|
||||
"min": 0,
|
||||
"name": "name",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "np_target",
|
||||
"max": 500,
|
||||
"min": 1,
|
||||
"name": "target",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "np_protocol",
|
||||
"maxSelect": 1,
|
||||
"name": "protocol",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "select",
|
||||
"values": ["icmp", "tcp", "http"]
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "np_port",
|
||||
"max": 65535,
|
||||
"min": 0,
|
||||
"name": "port",
|
||||
"onlyInt": true,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "np_interval",
|
||||
"max": 3600,
|
||||
"min": 1,
|
||||
"name": "interval",
|
||||
"onlyInt": true,
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "np_enabled",
|
||||
"name": "enabled",
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "bool"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "autodate2990389176",
|
||||
"name": "created",
|
||||
"onCreate": true,
|
||||
"onUpdate": false,
|
||||
"presentable": false,
|
||||
"system": false,
|
||||
"type": "autodate"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "autodate3332085495",
|
||||
"name": "updated",
|
||||
"onCreate": true,
|
||||
"onUpdate": true,
|
||||
"presentable": false,
|
||||
"system": false,
|
||||
"type": "autodate"
|
||||
}
|
||||
],
|
||||
"indexes": [
|
||||
"CREATE INDEX ` + "`" + `idx_np_system_enabled` + "`" + ` ON ` + "`" + `network_probes` + "`" + ` (\n ` + "`" + `system` + "`" + `,\n ` + "`" + `enabled` + "`" + `\n)"
|
||||
],
|
||||
"system": false
|
||||
},
|
||||
{
|
||||
"id": "np_stats_001",
|
||||
"listRule": null,
|
||||
"viewRule": null,
|
||||
"createRule": null,
|
||||
"updateRule": null,
|
||||
"deleteRule": null,
|
||||
"name": "network_probe_stats",
|
||||
"type": "base",
|
||||
"fields": [
|
||||
{
|
||||
"autogeneratePattern": "[a-z0-9]{15}",
|
||||
"hidden": false,
|
||||
"id": "text3208210256",
|
||||
"max": 15,
|
||||
"min": 15,
|
||||
"name": "id",
|
||||
"pattern": "^[a-z0-9]+$",
|
||||
"presentable": false,
|
||||
"primaryKey": true,
|
||||
"required": true,
|
||||
"system": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"cascadeDelete": true,
|
||||
"collectionId": "2hz5ncl8tizk5nx",
|
||||
"hidden": false,
|
||||
"id": "nps_system",
|
||||
"maxSelect": 1,
|
||||
"minSelect": 0,
|
||||
"name": "system",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "relation"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "nps_stats",
|
||||
"maxSize": 2000000,
|
||||
"name": "stats",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "json"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "nps_type",
|
||||
"maxSelect": 1,
|
||||
"name": "type",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "select",
|
||||
"values": ["1m", "10m", "20m", "120m", "480m"]
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "autodate2990389176",
|
||||
"name": "created",
|
||||
"onCreate": true,
|
||||
"onUpdate": false,
|
||||
"presentable": false,
|
||||
"system": false,
|
||||
"type": "autodate"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "autodate3332085495",
|
||||
"name": "updated",
|
||||
"onCreate": true,
|
||||
"onUpdate": true,
|
||||
"presentable": false,
|
||||
"system": false,
|
||||
"type": "autodate"
|
||||
}
|
||||
],
|
||||
"indexes": [
|
||||
"CREATE INDEX ` + "`" + `idx_nps_system_type_created` + "`" + ` ON ` + "`" + `network_probe_stats` + "`" + ` (\n ` + "`" + `system` + "`" + `,\n ` + "`" + `type` + "`" + `,\n ` + "`" + `created` + "`" + `\n)"
|
||||
],
|
||||
"system": false
|
||||
}
|
||||
]`
|
||||
|
||||
|
||||
58
internal/records/probe_averaging_test.go
Normal file
58
internal/records/probe_averaging_test.go
Normal file
@@ -0,0 +1,58 @@
|
||||
//go:build testing
|
||||
|
||||
package records_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/records"
|
||||
"github.com/henrygd/beszel/internal/tests"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAverageProbeStats(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
rm := records.NewRecordManager(hub)
|
||||
user, err := tests.CreateUser(hub, "probe-avg@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "probe-avg-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
recordA, err := tests.CreateRecord(hub, "network_probe_stats", map[string]any{
|
||||
"system": system.Id,
|
||||
"type": "1m",
|
||||
"stats": `{"icmp:1.1.1.1":[10,80,8,14,1]}`,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
recordB, err := tests.CreateRecord(hub, "network_probe_stats", map[string]any{
|
||||
"system": system.Id,
|
||||
"type": "1m",
|
||||
"stats": `{"icmp:1.1.1.1":[40,100,9,50,5]}`,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
result := rm.AverageProbeStats(hub.DB(), records.RecordIds{
|
||||
{Id: recordA.Id},
|
||||
{Id: recordB.Id},
|
||||
})
|
||||
|
||||
stats, ok := result["icmp:1.1.1.1"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, stats, 5)
|
||||
assert.Equal(t, 25.0, stats[0])
|
||||
assert.Equal(t, 90.0, stats[1])
|
||||
assert.Equal(t, 8.0, stats[2])
|
||||
assert.Equal(t, 50.0, stats[3])
|
||||
assert.Equal(t, 3.0, stats[4])
|
||||
}
|
||||
@@ -3,13 +3,12 @@ package records
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
@@ -39,16 +38,6 @@ type StatsRecord struct {
|
||||
Stats []byte `db:"stats"`
|
||||
}
|
||||
|
||||
// global variables for reusing allocations
|
||||
var (
|
||||
statsRecord StatsRecord
|
||||
containerStats []container.Stats
|
||||
sumStats system.Stats
|
||||
tempStats system.Stats
|
||||
queryParams = make(dbx.Params, 1)
|
||||
containerSums = make(map[string]*container.Stats)
|
||||
)
|
||||
|
||||
// Create longer records by averaging shorter records
|
||||
func (rm *RecordManager) CreateLongerRecords() {
|
||||
// start := time.Now()
|
||||
@@ -82,7 +71,7 @@ func (rm *RecordManager) CreateLongerRecords() {
|
||||
// wrap the operations in a transaction
|
||||
rm.app.RunInTransaction(func(txApp core.App) error {
|
||||
var err error
|
||||
collections := [2]*core.Collection{}
|
||||
collections := [3]*core.Collection{}
|
||||
collections[0], err = txApp.FindCachedCollectionByNameOrId("system_stats")
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -91,6 +80,10 @@ func (rm *RecordManager) CreateLongerRecords() {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
collections[2], err = txApp.FindCachedCollectionByNameOrId("network_probe_stats")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var systems RecordIds
|
||||
db := txApp.DB()
|
||||
|
||||
@@ -150,8 +143,9 @@ func (rm *RecordManager) CreateLongerRecords() {
|
||||
case "system_stats":
|
||||
longerRecord.Set("stats", rm.AverageSystemStats(db, recordIds))
|
||||
case "container_stats":
|
||||
|
||||
longerRecord.Set("stats", rm.AverageContainerStats(db, recordIds))
|
||||
case "network_probe_stats":
|
||||
longerRecord.Set("stats", rm.AverageProbeStats(db, recordIds))
|
||||
}
|
||||
if err := txApp.SaveNoValidate(longerRecord); err != nil {
|
||||
log.Println("failed to save longer record", "err", err)
|
||||
@@ -163,41 +157,47 @@ func (rm *RecordManager) CreateLongerRecords() {
|
||||
return nil
|
||||
})
|
||||
|
||||
statsRecord.Stats = statsRecord.Stats[:0]
|
||||
|
||||
// log.Println("finished creating longer records", "time (ms)", time.Since(start).Milliseconds())
|
||||
}
|
||||
|
||||
// Calculate the average stats of a list of system_stats records without reflect
|
||||
func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *system.Stats {
|
||||
// Clear/reset global structs for reuse
|
||||
sumStats = system.Stats{}
|
||||
tempStats = system.Stats{}
|
||||
sum := &sumStats
|
||||
stats := &tempStats
|
||||
stats := make([]system.Stats, 0, len(records))
|
||||
var row StatsRecord
|
||||
params := make(dbx.Params, 1)
|
||||
for _, rec := range records {
|
||||
row.Stats = row.Stats[:0]
|
||||
params["id"] = rec.Id
|
||||
db.NewQuery("SELECT stats FROM system_stats WHERE id = {:id}").Bind(params).One(&row)
|
||||
var s system.Stats
|
||||
if err := json.Unmarshal(row.Stats, &s); err != nil {
|
||||
continue
|
||||
}
|
||||
stats = append(stats, s)
|
||||
}
|
||||
result := AverageSystemStatsSlice(stats)
|
||||
return &result
|
||||
}
|
||||
|
||||
// AverageSystemStatsSlice computes the average of a slice of system stats.
|
||||
func AverageSystemStatsSlice(records []system.Stats) system.Stats {
|
||||
var sum system.Stats
|
||||
count := float64(len(records))
|
||||
if count == 0 {
|
||||
return sum
|
||||
}
|
||||
|
||||
// necessary because uint8 is not big enough for the sum
|
||||
batterySum := 0
|
||||
// accumulate per-core usage across records
|
||||
var cpuCoresSums []uint64
|
||||
// accumulate cpu breakdown [user, system, iowait, steal, idle]
|
||||
var cpuBreakdownSums []float64
|
||||
|
||||
count := float64(len(records))
|
||||
tempCount := float64(0)
|
||||
|
||||
// Accumulate totals
|
||||
for _, record := range records {
|
||||
id := record.Id
|
||||
// clear global statsRecord for reuse
|
||||
statsRecord.Stats = statsRecord.Stats[:0]
|
||||
// reset tempStats each iteration to avoid omitzero fields retaining stale values
|
||||
*stats = system.Stats{}
|
||||
|
||||
queryParams["id"] = id
|
||||
db.NewQuery("SELECT stats FROM system_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
|
||||
if err := json.Unmarshal(statsRecord.Stats, stats); err != nil {
|
||||
continue
|
||||
}
|
||||
for i := range records {
|
||||
stats := &records[i]
|
||||
|
||||
sum.Cpu += stats.Cpu
|
||||
// accumulate cpu time breakdowns if present
|
||||
@@ -205,8 +205,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
if len(cpuBreakdownSums) < len(stats.CpuBreakdown) {
|
||||
cpuBreakdownSums = append(cpuBreakdownSums, make([]float64, len(stats.CpuBreakdown)-len(cpuBreakdownSums))...)
|
||||
}
|
||||
for i, v := range stats.CpuBreakdown {
|
||||
cpuBreakdownSums[i] += v
|
||||
for j, v := range stats.CpuBreakdown {
|
||||
cpuBreakdownSums[j] += v
|
||||
}
|
||||
}
|
||||
sum.Mem += stats.Mem
|
||||
@@ -230,6 +230,9 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
sum.Bandwidth[1] += stats.Bandwidth[1]
|
||||
sum.DiskIO[0] += stats.DiskIO[0]
|
||||
sum.DiskIO[1] += stats.DiskIO[1]
|
||||
for i := range stats.DiskIoStats {
|
||||
sum.DiskIoStats[i] += stats.DiskIoStats[i]
|
||||
}
|
||||
batterySum += int(stats.Battery[0])
|
||||
sum.Battery[1] = stats.Battery[1]
|
||||
|
||||
@@ -239,8 +242,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
// extend slices to accommodate core count
|
||||
cpuCoresSums = append(cpuCoresSums, make([]uint64, len(stats.CpuCoresUsage)-len(cpuCoresSums))...)
|
||||
}
|
||||
for i, v := range stats.CpuCoresUsage {
|
||||
cpuCoresSums[i] += uint64(v)
|
||||
for j, v := range stats.CpuCoresUsage {
|
||||
cpuCoresSums[j] += uint64(v)
|
||||
}
|
||||
}
|
||||
// Set peak values
|
||||
@@ -254,6 +257,9 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
sum.MaxBandwidth[1] = max(sum.MaxBandwidth[1], stats.MaxBandwidth[1], stats.Bandwidth[1])
|
||||
sum.MaxDiskIO[0] = max(sum.MaxDiskIO[0], stats.MaxDiskIO[0], stats.DiskIO[0])
|
||||
sum.MaxDiskIO[1] = max(sum.MaxDiskIO[1], stats.MaxDiskIO[1], stats.DiskIO[1])
|
||||
for i := range stats.DiskIoStats {
|
||||
sum.MaxDiskIoStats[i] = max(sum.MaxDiskIoStats[i], stats.MaxDiskIoStats[i], stats.DiskIoStats[i])
|
||||
}
|
||||
|
||||
// Accumulate network interfaces
|
||||
if sum.NetworkInterfaces == nil {
|
||||
@@ -299,6 +305,10 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
fs.DiskWriteBytes += value.DiskWriteBytes
|
||||
fs.MaxDiskReadBytes = max(fs.MaxDiskReadBytes, value.MaxDiskReadBytes, value.DiskReadBytes)
|
||||
fs.MaxDiskWriteBytes = max(fs.MaxDiskWriteBytes, value.MaxDiskWriteBytes, value.DiskWriteBytes)
|
||||
for i := range value.DiskIoStats {
|
||||
fs.DiskIoStats[i] += value.DiskIoStats[i]
|
||||
fs.MaxDiskIoStats[i] = max(fs.MaxDiskIoStats[i], value.MaxDiskIoStats[i], value.DiskIoStats[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -333,8 +343,7 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
}
|
||||
}
|
||||
|
||||
// Compute averages in place
|
||||
if count > 0 {
|
||||
// Compute averages
|
||||
sum.Cpu = twoDecimals(sum.Cpu / count)
|
||||
sum.Mem = twoDecimals(sum.Mem / count)
|
||||
sum.MemUsed = twoDecimals(sum.MemUsed / count)
|
||||
@@ -350,6 +359,9 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
|
||||
sum.DiskIO[0] = sum.DiskIO[0] / uint64(count)
|
||||
sum.DiskIO[1] = sum.DiskIO[1] / uint64(count)
|
||||
for i := range sum.DiskIoStats {
|
||||
sum.DiskIoStats[i] = twoDecimals(sum.DiskIoStats[i] / count)
|
||||
}
|
||||
sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
|
||||
sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
|
||||
sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
|
||||
@@ -388,6 +400,9 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
|
||||
fs.DiskReadBytes = fs.DiskReadBytes / uint64(count)
|
||||
fs.DiskWriteBytes = fs.DiskWriteBytes / uint64(count)
|
||||
for i := range fs.DiskIoStats {
|
||||
fs.DiskIoStats[i] = twoDecimals(fs.DiskIoStats[i] / count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -430,36 +445,39 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
}
|
||||
sum.CpuBreakdown = avg
|
||||
}
|
||||
}
|
||||
|
||||
return sum
|
||||
}
|
||||
|
||||
// Calculate the average stats of a list of container_stats records
|
||||
func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds) []container.Stats {
|
||||
// Clear global map for reuse
|
||||
for k := range containerSums {
|
||||
delete(containerSums, k)
|
||||
}
|
||||
sums := containerSums
|
||||
count := float64(len(records))
|
||||
|
||||
for i := range records {
|
||||
id := records[i].Id
|
||||
// clear global statsRecord for reuse
|
||||
statsRecord.Stats = statsRecord.Stats[:0]
|
||||
// must set to nil (not [:0]) to avoid json.Unmarshal reusing backing array
|
||||
// which causes omitzero fields to inherit stale values from previous iterations
|
||||
containerStats = nil
|
||||
|
||||
queryParams["id"] = id
|
||||
db.NewQuery("SELECT stats FROM container_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
|
||||
|
||||
if err := json.Unmarshal(statsRecord.Stats, &containerStats); err != nil {
|
||||
allStats := make([][]container.Stats, 0, len(records))
|
||||
var row StatsRecord
|
||||
params := make(dbx.Params, 1)
|
||||
for _, rec := range records {
|
||||
row.Stats = row.Stats[:0]
|
||||
params["id"] = rec.Id
|
||||
db.NewQuery("SELECT stats FROM container_stats WHERE id = {:id}").Bind(params).One(&row)
|
||||
var cs []container.Stats
|
||||
if err := json.Unmarshal(row.Stats, &cs); err != nil {
|
||||
return []container.Stats{}
|
||||
}
|
||||
allStats = append(allStats, cs)
|
||||
}
|
||||
return AverageContainerStatsSlice(allStats)
|
||||
}
|
||||
|
||||
// AverageContainerStatsSlice computes the average of container stats across multiple time periods.
|
||||
func AverageContainerStatsSlice(records [][]container.Stats) []container.Stats {
|
||||
if len(records) == 0 {
|
||||
return []container.Stats{}
|
||||
}
|
||||
sums := make(map[string]*container.Stats)
|
||||
count := float64(len(records))
|
||||
|
||||
for _, containerStats := range records {
|
||||
for i := range containerStats {
|
||||
stat := containerStats[i]
|
||||
stat := &containerStats[i]
|
||||
if _, ok := sums[stat.Name]; !ok {
|
||||
sums[stat.Name] = &container.Stats{Name: stat.Name}
|
||||
}
|
||||
@@ -488,131 +506,78 @@ func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds
|
||||
return result
|
||||
}
|
||||
|
||||
// Delete old records
|
||||
func (rm *RecordManager) DeleteOldRecords() {
|
||||
rm.app.RunInTransaction(func(txApp core.App) error {
|
||||
err := deleteOldSystemStats(txApp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = deleteOldContainerRecords(txApp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = deleteOldSystemdServiceRecords(txApp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = deleteOldAlertsHistory(txApp, 200, 250)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = deleteOldQuietHours(txApp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Delete old alerts history records
|
||||
func deleteOldAlertsHistory(app core.App, countToKeep, countBeforeDeletion int) error {
|
||||
db := app.DB()
|
||||
var users []struct {
|
||||
Id string `db:"user"`
|
||||
}
|
||||
err := db.NewQuery("SELECT user, COUNT(*) as count FROM alerts_history GROUP BY user HAVING count > {:countBeforeDeletion}").Bind(dbx.Params{"countBeforeDeletion": countBeforeDeletion}).All(&users)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, user := range users {
|
||||
_, err = db.NewQuery("DELETE FROM alerts_history WHERE user = {:user} AND id NOT IN (SELECT id FROM alerts_history WHERE user = {:user} ORDER BY created DESC LIMIT {:countToKeep})").Bind(dbx.Params{"user": user.Id, "countToKeep": countToKeep}).Execute()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes system_stats records older than what is displayed in the UI
|
||||
func deleteOldSystemStats(app core.App) error {
|
||||
// Collections to process
|
||||
collections := [2]string{"system_stats", "container_stats"}
|
||||
|
||||
// Record types and their retention periods
|
||||
type RecordDeletionData struct {
|
||||
recordType string
|
||||
retention time.Duration
|
||||
}
|
||||
recordData := []RecordDeletionData{
|
||||
{recordType: "1m", retention: time.Hour}, // 1 hour
|
||||
{recordType: "10m", retention: 12 * time.Hour}, // 12 hours
|
||||
{recordType: "20m", retention: 24 * time.Hour}, // 1 day
|
||||
{recordType: "120m", retention: 7 * 24 * time.Hour}, // 7 days
|
||||
{recordType: "480m", retention: 30 * 24 * time.Hour}, // 30 days
|
||||
// AverageProbeStats averages probe stats across multiple records.
|
||||
// For each probe key: avg of average fields, min of mins, and max of maxes.
|
||||
func (rm *RecordManager) AverageProbeStats(db dbx.Builder, records RecordIds) map[string]probe.Result {
|
||||
type probeValues struct {
|
||||
sums probe.Result
|
||||
counts []int
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
query := db.NewQuery("SELECT stats FROM network_probe_stats WHERE id = {:id}")
|
||||
|
||||
for _, collection := range collections {
|
||||
// Build the WHERE clause
|
||||
var conditionParts []string
|
||||
var params dbx.Params = make(map[string]any)
|
||||
for i := range recordData {
|
||||
rd := recordData[i]
|
||||
// Create parameterized condition for this record type
|
||||
dateParam := fmt.Sprintf("date%d", i)
|
||||
conditionParts = append(conditionParts, fmt.Sprintf("(type = '%s' AND created < {:%s})", rd.recordType, dateParam))
|
||||
params[dateParam] = now.Add(-rd.retention)
|
||||
// accumulate sums for each probe key across records
|
||||
sums := make(map[string]*probeValues)
|
||||
var row StatsRecord
|
||||
for _, rec := range records {
|
||||
row.Stats = row.Stats[:0]
|
||||
query.Bind(dbx.Params{"id": rec.Id}).One(&row)
|
||||
var rawStats map[string]probe.Result
|
||||
if err := json.Unmarshal(row.Stats, &rawStats); err != nil {
|
||||
continue
|
||||
}
|
||||
// Combine conditions with OR
|
||||
conditionStr := strings.Join(conditionParts, " OR ")
|
||||
// Construct and execute the full raw query
|
||||
rawQuery := fmt.Sprintf("DELETE FROM %s WHERE %s", collection, conditionStr)
|
||||
if _, err := app.DB().NewQuery(rawQuery).Bind(params).Execute(); err != nil {
|
||||
return fmt.Errorf("failed to delete from %s: %v", collection, err)
|
||||
for key, vals := range rawStats {
|
||||
s, ok := sums[key]
|
||||
if !ok {
|
||||
s = &probeValues{sums: make(probe.Result, len(vals)), counts: make([]int, len(vals))}
|
||||
sums[key] = s
|
||||
}
|
||||
if len(vals) > len(s.sums) {
|
||||
expandedSums := make(probe.Result, len(vals))
|
||||
copy(expandedSums, s.sums)
|
||||
s.sums = expandedSums
|
||||
|
||||
expandedCounts := make([]int, len(vals))
|
||||
copy(expandedCounts, s.counts)
|
||||
s.counts = expandedCounts
|
||||
}
|
||||
for i := range vals {
|
||||
switch i {
|
||||
case 2: // min fields
|
||||
if s.counts[i] == 0 || vals[i] < s.sums[i] {
|
||||
s.sums[i] = vals[i]
|
||||
}
|
||||
case 3: // max fields
|
||||
if s.counts[i] == 0 || vals[i] > s.sums[i] {
|
||||
s.sums[i] = vals[i]
|
||||
}
|
||||
default: // average fields
|
||||
s.sums[i] += vals[i]
|
||||
}
|
||||
s.counts[i]++
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes systemd service records that haven't been updated in the last 20 minutes
|
||||
func deleteOldSystemdServiceRecords(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
twentyMinutesAgo := now.Add(-20 * time.Minute)
|
||||
|
||||
// Delete systemd service records where updated < twentyMinutesAgo
|
||||
_, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete old systemd service records: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes container records that haven't been updated in the last 10 minutes
|
||||
func deleteOldContainerRecords(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
tenMinutesAgo := now.Add(-10 * time.Minute)
|
||||
|
||||
// Delete container records where updated < tenMinutesAgo
|
||||
_, err := app.DB().NewQuery("DELETE FROM containers WHERE updated < {:updated}").Bind(dbx.Params{"updated": tenMinutesAgo.UnixMilli()}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete old container records: %v", err)
|
||||
// compute final averages
|
||||
result := make(map[string]probe.Result, len(sums))
|
||||
for key, s := range sums {
|
||||
if len(s.counts) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes old quiet hours records where end date has passed
|
||||
func deleteOldQuietHours(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
_, err := app.DB().NewQuery("DELETE FROM quiet_hours WHERE type = 'one-time' AND end < {:now}").Bind(dbx.Params{"now": now}).Execute()
|
||||
if err != nil {
|
||||
return err
|
||||
for i := range s.sums {
|
||||
switch i {
|
||||
case 2, 3: // min and max fields should not be averaged
|
||||
continue
|
||||
default:
|
||||
if s.counts[i] > 0 {
|
||||
s.sums[i] = twoDecimals(s.sums[i] / float64(s.counts[i]))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
result[key] = s.sums
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
/* Round float to two decimals */
|
||||
|
||||
820
internal/records/records_averaging_test.go
Normal file
820
internal/records/records_averaging_test.go
Normal file
@@ -0,0 +1,820 @@
|
||||
//go:build testing
|
||||
|
||||
package records_test
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/henrygd/beszel/internal/records"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAverageSystemStatsSlice_Empty(t *testing.T) {
|
||||
result := records.AverageSystemStatsSlice(nil)
|
||||
assert.Equal(t, system.Stats{}, result)
|
||||
|
||||
result = records.AverageSystemStatsSlice([]system.Stats{})
|
||||
assert.Equal(t, system.Stats{}, result)
|
||||
}
|
||||
|
||||
// Averaging a single record must return that record's own value for
// every field, including the fixed-size array fields.
func TestAverageSystemStatsSlice_SingleRecord(t *testing.T) {
	input := []system.Stats{
		{
			Cpu:          45.67,
			Mem:          16.0,
			MemUsed:      8.5,
			MemPct:       53.12,
			MemBuffCache: 2.0,
			Swap:         4.0,
			SwapUsed:     1.0,
			DiskTotal:    500.0,
			DiskUsed:     250.0,
			DiskPct:      50.0,
			DiskReadPs:   100.5,
			DiskWritePs:  200.75,
			NetworkSent:  10.5,
			NetworkRecv:  20.25,
			LoadAvg:      [3]float64{1.5, 2.0, 3.5},
			Bandwidth:    [2]uint64{1000, 2000},
			DiskIO:       [2]uint64{500, 600},
			Battery:      [2]uint8{80, 1},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	assert.Equal(t, 45.67, result.Cpu)
	assert.Equal(t, 16.0, result.Mem)
	assert.Equal(t, 8.5, result.MemUsed)
	assert.Equal(t, 53.12, result.MemPct)
	assert.Equal(t, 2.0, result.MemBuffCache)
	assert.Equal(t, 4.0, result.Swap)
	assert.Equal(t, 1.0, result.SwapUsed)
	assert.Equal(t, 500.0, result.DiskTotal)
	assert.Equal(t, 250.0, result.DiskUsed)
	assert.Equal(t, 50.0, result.DiskPct)
	assert.Equal(t, 100.5, result.DiskReadPs)
	assert.Equal(t, 200.75, result.DiskWritePs)
	assert.Equal(t, 10.5, result.NetworkSent)
	assert.Equal(t, 20.25, result.NetworkRecv)
	assert.Equal(t, [3]float64{1.5, 2.0, 3.5}, result.LoadAvg)
	assert.Equal(t, [2]uint64{1000, 2000}, result.Bandwidth)
	assert.Equal(t, [2]uint64{500, 600}, result.DiskIO)
	assert.Equal(t, uint8(80), result.Battery[0])
	assert.Equal(t, uint8(1), result.Battery[1])
}
|
||||
|
||||
// Two records must average to the arithmetic mean of each numeric field;
// array fields (LoadAvg, Bandwidth, DiskIO, Battery[0]) average element-wise.
func TestAverageSystemStatsSlice_BasicAveraging(t *testing.T) {
	input := []system.Stats{
		{
			Cpu:          20.0,
			Mem:          16.0,
			MemUsed:      6.0,
			MemPct:       37.5,
			MemBuffCache: 1.0,
			MemZfsArc:    0.5,
			Swap:         4.0,
			SwapUsed:     1.0,
			DiskTotal:    500.0,
			DiskUsed:     200.0,
			DiskPct:      40.0,
			DiskReadPs:   100.0,
			DiskWritePs:  200.0,
			NetworkSent:  10.0,
			NetworkRecv:  20.0,
			LoadAvg:      [3]float64{1.0, 2.0, 3.0},
			Bandwidth:    [2]uint64{1000, 2000},
			DiskIO:       [2]uint64{400, 600},
			Battery:      [2]uint8{80, 1},
		},
		{
			Cpu:          40.0,
			Mem:          16.0,
			MemUsed:      10.0,
			MemPct:       62.5,
			MemBuffCache: 3.0,
			MemZfsArc:    1.5,
			Swap:         4.0,
			SwapUsed:     3.0,
			DiskTotal:    500.0,
			DiskUsed:     300.0,
			DiskPct:      60.0,
			DiskReadPs:   200.0,
			DiskWritePs:  400.0,
			NetworkSent:  30.0,
			NetworkRecv:  40.0,
			LoadAvg:      [3]float64{3.0, 4.0, 5.0},
			Bandwidth:    [2]uint64{3000, 4000},
			DiskIO:       [2]uint64{600, 800},
			Battery:      [2]uint8{60, 1},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	// Each expected value is the midpoint of the two inputs above.
	assert.Equal(t, 30.0, result.Cpu)
	assert.Equal(t, 16.0, result.Mem)
	assert.Equal(t, 8.0, result.MemUsed)
	assert.Equal(t, 50.0, result.MemPct)
	assert.Equal(t, 2.0, result.MemBuffCache)
	assert.Equal(t, 1.0, result.MemZfsArc)
	assert.Equal(t, 4.0, result.Swap)
	assert.Equal(t, 2.0, result.SwapUsed)
	assert.Equal(t, 500.0, result.DiskTotal)
	assert.Equal(t, 250.0, result.DiskUsed)
	assert.Equal(t, 50.0, result.DiskPct)
	assert.Equal(t, 150.0, result.DiskReadPs)
	assert.Equal(t, 300.0, result.DiskWritePs)
	assert.Equal(t, 20.0, result.NetworkSent)
	assert.Equal(t, 30.0, result.NetworkRecv)
	assert.Equal(t, [3]float64{2.0, 3.0, 4.0}, result.LoadAvg)
	assert.Equal(t, [2]uint64{2000, 3000}, result.Bandwidth)
	assert.Equal(t, [2]uint64{500, 700}, result.DiskIO)
	assert.Equal(t, uint8(70), result.Battery[0])
	assert.Equal(t, uint8(1), result.Battery[1])
}
|
||||
|
||||
// Max* fields must carry the maximum across all records (not an average),
// while the corresponding non-Max fields are still averaged.
func TestAverageSystemStatsSlice_PeakValues(t *testing.T) {
	input := []system.Stats{
		{
			Cpu:            20.0,
			MaxCpu:         25.0,
			MemUsed:        6.0,
			MaxMem:         7.0,
			NetworkSent:    10.0,
			MaxNetworkSent: 15.0,
			NetworkRecv:    20.0,
			MaxNetworkRecv: 25.0,
			DiskReadPs:     100.0,
			MaxDiskReadPs:  120.0,
			DiskWritePs:    200.0,
			MaxDiskWritePs: 220.0,
			Bandwidth:      [2]uint64{1000, 2000},
			MaxBandwidth:   [2]uint64{1500, 2500},
			DiskIO:         [2]uint64{400, 600},
			MaxDiskIO:      [2]uint64{500, 700},
			DiskIoStats:    [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
			MaxDiskIoStats: [6]float64{15.0, 25.0, 35.0, 6.0, 9.0, 14.0},
		},
		{
			Cpu:            40.0,
			MaxCpu:         50.0,
			MemUsed:        10.0,
			MaxMem:         12.0,
			NetworkSent:    30.0,
			MaxNetworkSent: 35.0,
			NetworkRecv:    40.0,
			MaxNetworkRecv: 45.0,
			DiskReadPs:     200.0,
			MaxDiskReadPs:  210.0,
			DiskWritePs:    400.0,
			MaxDiskWritePs: 410.0,
			Bandwidth:      [2]uint64{3000, 4000},
			MaxBandwidth:   [2]uint64{3500, 4500},
			DiskIO:         [2]uint64{600, 800},
			MaxDiskIO:      [2]uint64{650, 850},
			DiskIoStats:    [6]float64{50.0, 60.0, 70.0, 15.0, 18.0, 22.0},
			MaxDiskIoStats: [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	// All peaks come from the second (larger) record.
	assert.Equal(t, 50.0, result.MaxCpu)
	assert.Equal(t, 12.0, result.MaxMem)
	assert.Equal(t, 35.0, result.MaxNetworkSent)
	assert.Equal(t, 45.0, result.MaxNetworkRecv)
	assert.Equal(t, 210.0, result.MaxDiskReadPs)
	assert.Equal(t, 410.0, result.MaxDiskWritePs)
	assert.Equal(t, [2]uint64{3500, 4500}, result.MaxBandwidth)
	assert.Equal(t, [2]uint64{650, 850}, result.MaxDiskIO)
	// DiskIoStats averages element-wise; MaxDiskIoStats takes element-wise max.
	assert.Equal(t, [6]float64{30.0, 40.0, 50.0, 10.0, 13.0, 17.0}, result.DiskIoStats)
	assert.Equal(t, [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0}, result.MaxDiskIoStats)
}
|
||||
|
||||
// DiskIoStats averages element-wise across three records, while the peak
// considers both stored MaxDiskIoStats and current DiskIoStats values.
func TestAverageSystemStatsSlice_DiskIoStats(t *testing.T) {
	input := []system.Stats{
		{
			Cpu:            10.0,
			DiskIoStats:    [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
			MaxDiskIoStats: [6]float64{12.0, 22.0, 32.0, 6.0, 9.0, 13.0},
		},
		{
			Cpu:            20.0,
			DiskIoStats:    [6]float64{30.0, 40.0, 50.0, 15.0, 18.0, 22.0},
			MaxDiskIoStats: [6]float64{28.0, 38.0, 48.0, 14.0, 17.0, 21.0},
		},
		{
			Cpu:            30.0,
			DiskIoStats:    [6]float64{20.0, 30.0, 40.0, 10.0, 12.0, 16.0},
			MaxDiskIoStats: [6]float64{25.0, 35.0, 45.0, 11.0, 13.0, 17.0},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	// Average: (10+30+20)/3=20, (20+40+30)/3=30, (30+50+40)/3=40, (5+15+10)/3=10, (8+18+12)/3≈12.67, (12+22+16)/3≈16.67
	assert.Equal(t, 20.0, result.DiskIoStats[0])
	assert.Equal(t, 30.0, result.DiskIoStats[1])
	assert.Equal(t, 40.0, result.DiskIoStats[2])
	assert.Equal(t, 10.0, result.DiskIoStats[3])
	assert.Equal(t, 12.67, result.DiskIoStats[4])
	assert.Equal(t, 16.67, result.DiskIoStats[5])
	// Max: current DiskIoStats[0] wins for record 2 (30 > MaxDiskIoStats 28)
	assert.Equal(t, 30.0, result.MaxDiskIoStats[0])
	assert.Equal(t, 40.0, result.MaxDiskIoStats[1])
	assert.Equal(t, 50.0, result.MaxDiskIoStats[2])
	assert.Equal(t, 15.0, result.MaxDiskIoStats[3])
	assert.Equal(t, 18.0, result.MaxDiskIoStats[4])
	assert.Equal(t, 22.0, result.MaxDiskIoStats[5])
}
|
||||
|
||||
// Tests that current DiskIoStats values are considered when computing MaxDiskIoStats.
|
||||
func TestAverageSystemStatsSlice_DiskIoStatsPeakFromCurrentValues(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, DiskIoStats: [6]float64{95.0, 90.0, 85.0, 50.0, 60.0, 80.0}, MaxDiskIoStats: [6]float64{80.0, 80.0, 80.0, 40.0, 50.0, 70.0}},
|
||||
{Cpu: 20.0, DiskIoStats: [6]float64{10.0, 10.0, 10.0, 5.0, 6.0, 8.0}, MaxDiskIoStats: [6]float64{20.0, 20.0, 20.0, 10.0, 12.0, 16.0}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
// Current value from first record (95, 90, 85, 50, 60, 80) beats MaxDiskIoStats in both records
|
||||
assert.Equal(t, 95.0, result.MaxDiskIoStats[0])
|
||||
assert.Equal(t, 90.0, result.MaxDiskIoStats[1])
|
||||
assert.Equal(t, 85.0, result.MaxDiskIoStats[2])
|
||||
assert.Equal(t, 50.0, result.MaxDiskIoStats[3])
|
||||
assert.Equal(t, 60.0, result.MaxDiskIoStats[4])
|
||||
assert.Equal(t, 80.0, result.MaxDiskIoStats[5])
|
||||
}
|
||||
|
||||
// Tests that current values are considered when computing peaks
|
||||
// (i.e., current cpu > MaxCpu should still win).
|
||||
func TestAverageSystemStatsSlice_PeakFromCurrentValues(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 95.0, MaxCpu: 80.0, MemUsed: 15.0, MaxMem: 10.0},
|
||||
{Cpu: 10.0, MaxCpu: 20.0, MemUsed: 5.0, MaxMem: 8.0},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 95.0, result.MaxCpu)
|
||||
assert.Equal(t, 15.0, result.MaxMem)
|
||||
}
|
||||
|
||||
// Tests that records without temperature data are excluded from the temperature average.
|
||||
func TestAverageSystemStatsSlice_Temperatures(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
Temperatures: map[string]float64{"cpu": 60.0, "gpu": 70.0},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
Temperatures: map[string]float64{"cpu": 80.0, "gpu": 90.0},
|
||||
},
|
||||
{
|
||||
// No temperatures - should not affect temp averaging
|
||||
Cpu: 30.0,
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.Temperatures)
|
||||
// Average over 2 records that had temps, not 3
|
||||
assert.Equal(t, 70.0, result.Temperatures["cpu"])
|
||||
assert.Equal(t, 80.0, result.Temperatures["gpu"])
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_NetworkInterfaces(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
NetworkInterfaces: map[string][4]uint64{
|
||||
"eth0": {100, 200, 150, 250},
|
||||
"eth1": {50, 60, 70, 80},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
NetworkInterfaces: map[string][4]uint64{
|
||||
"eth0": {200, 400, 300, 500},
|
||||
"eth1": {150, 160, 170, 180},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.NetworkInterfaces)
|
||||
// [0] and [1] are averaged, [2] and [3] are max
|
||||
assert.Equal(t, [4]uint64{150, 300, 300, 500}, result.NetworkInterfaces["eth0"])
|
||||
assert.Equal(t, [4]uint64{100, 110, 170, 180}, result.NetworkInterfaces["eth1"])
|
||||
}
|
||||
|
||||
// Extra filesystem stats must be merged per mount point: rates and usage
// are averaged, Max* fields take the maximum across records.
func TestAverageSystemStatsSlice_ExtraFs(t *testing.T) {
	input := []system.Stats{
		{
			Cpu: 10.0,
			ExtraFs: map[string]*system.FsStats{
				"/data": {
					DiskTotal:         1000.0,
					DiskUsed:          400.0,
					DiskReadPs:        50.0,
					DiskWritePs:       100.0,
					MaxDiskReadPS:     60.0,
					MaxDiskWritePS:    110.0,
					DiskReadBytes:     5000,
					DiskWriteBytes:    10000,
					MaxDiskReadBytes:  6000,
					MaxDiskWriteBytes: 11000,
					DiskIoStats:       [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
					MaxDiskIoStats:    [6]float64{12.0, 22.0, 32.0, 6.0, 9.0, 13.0},
				},
			},
		},
		{
			Cpu: 20.0,
			ExtraFs: map[string]*system.FsStats{
				"/data": {
					DiskTotal:         1000.0,
					DiskUsed:          600.0,
					DiskReadPs:        150.0,
					DiskWritePs:       200.0,
					MaxDiskReadPS:     160.0,
					MaxDiskWritePS:    210.0,
					DiskReadBytes:     15000,
					DiskWriteBytes:    20000,
					MaxDiskReadBytes:  16000,
					MaxDiskWriteBytes: 21000,
					DiskIoStats:       [6]float64{50.0, 60.0, 70.0, 15.0, 18.0, 22.0},
					MaxDiskIoStats:    [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0},
				},
			},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	require.NotNil(t, result.ExtraFs)
	require.NotNil(t, result.ExtraFs["/data"])
	fs := result.ExtraFs["/data"]
	// Averaged fields.
	assert.Equal(t, 1000.0, fs.DiskTotal)
	assert.Equal(t, 500.0, fs.DiskUsed)
	assert.Equal(t, 100.0, fs.DiskReadPs)
	assert.Equal(t, 150.0, fs.DiskWritePs)
	// Peak fields come from the second (larger) record.
	assert.Equal(t, 160.0, fs.MaxDiskReadPS)
	assert.Equal(t, 210.0, fs.MaxDiskWritePS)
	assert.Equal(t, uint64(10000), fs.DiskReadBytes)
	assert.Equal(t, uint64(15000), fs.DiskWriteBytes)
	assert.Equal(t, uint64(16000), fs.MaxDiskReadBytes)
	assert.Equal(t, uint64(21000), fs.MaxDiskWriteBytes)
	assert.Equal(t, [6]float64{30.0, 40.0, 50.0, 10.0, 13.0, 17.0}, fs.DiskIoStats)
	assert.Equal(t, [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0}, fs.MaxDiskIoStats)
}
|
||||
|
||||
// Tests that ExtraFs DiskIoStats peak considers current values, not just previous peaks.
|
||||
func TestAverageSystemStatsSlice_ExtraFsDiskIoStatsPeakFromCurrentValues(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
ExtraFs: map[string]*system.FsStats{
|
||||
"/data": {
|
||||
DiskIoStats: [6]float64{95.0, 90.0, 85.0, 50.0, 60.0, 80.0}, // exceeds MaxDiskIoStats
|
||||
MaxDiskIoStats: [6]float64{80.0, 80.0, 80.0, 40.0, 50.0, 70.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
ExtraFs: map[string]*system.FsStats{
|
||||
"/data": {
|
||||
DiskIoStats: [6]float64{10.0, 10.0, 10.0, 5.0, 6.0, 8.0},
|
||||
MaxDiskIoStats: [6]float64{20.0, 20.0, 20.0, 10.0, 12.0, 16.0},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
fs := result.ExtraFs["/data"]
|
||||
assert.Equal(t, 95.0, fs.MaxDiskIoStats[0])
|
||||
assert.Equal(t, 90.0, fs.MaxDiskIoStats[1])
|
||||
assert.Equal(t, 85.0, fs.MaxDiskIoStats[2])
|
||||
assert.Equal(t, 50.0, fs.MaxDiskIoStats[3])
|
||||
assert.Equal(t, 60.0, fs.MaxDiskIoStats[4])
|
||||
assert.Equal(t, 80.0, fs.MaxDiskIoStats[5])
|
||||
}
|
||||
|
||||
// Tests that extra FS peak values consider current values, not just previous peaks.
|
||||
func TestAverageSystemStatsSlice_ExtraFsPeaksFromCurrentValues(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
ExtraFs: map[string]*system.FsStats{
|
||||
"/data": {
|
||||
DiskReadPs: 500.0, // exceeds MaxDiskReadPS
|
||||
MaxDiskReadPS: 100.0,
|
||||
DiskReadBytes: 50000,
|
||||
MaxDiskReadBytes: 10000,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
ExtraFs: map[string]*system.FsStats{
|
||||
"/data": {
|
||||
DiskReadPs: 50.0,
|
||||
MaxDiskReadPS: 200.0,
|
||||
DiskReadBytes: 5000,
|
||||
MaxDiskReadBytes: 20000,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
fs := result.ExtraFs["/data"]
|
||||
assert.Equal(t, 500.0, fs.MaxDiskReadPS)
|
||||
assert.Equal(t, uint64(50000), fs.MaxDiskReadBytes)
|
||||
}
|
||||
|
||||
// GPU metrics (temperature, memory, usage, power, per-engine utilization)
// are averaged per GPU key; Name and Count carry through unchanged.
func TestAverageSystemStatsSlice_GPUData(t *testing.T) {
	input := []system.Stats{
		{
			Cpu: 10.0,
			GPUData: map[string]system.GPUData{
				"gpu0": {
					Name:        "RTX 4090",
					Temperature: 60.0,
					MemoryUsed:  4.0,
					MemoryTotal: 24.0,
					Usage:       30.0,
					Power:       200.0,
					Count:       1.0,
					Engines: map[string]float64{
						"3D":    50.0,
						"Video": 10.0,
					},
				},
			},
		},
		{
			Cpu: 20.0,
			GPUData: map[string]system.GPUData{
				"gpu0": {
					Name:        "RTX 4090",
					Temperature: 80.0,
					MemoryUsed:  8.0,
					MemoryTotal: 24.0,
					Usage:       70.0,
					Power:       300.0,
					Count:       1.0,
					Engines: map[string]float64{
						"3D":    90.0,
						"Video": 30.0,
					},
				},
			},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	require.NotNil(t, result.GPUData)
	gpu := result.GPUData["gpu0"]
	assert.Equal(t, "RTX 4090", gpu.Name)
	// Each value is the midpoint of the two input records.
	assert.Equal(t, 70.0, gpu.Temperature)
	assert.Equal(t, 6.0, gpu.MemoryUsed)
	assert.Equal(t, 24.0, gpu.MemoryTotal)
	assert.Equal(t, 50.0, gpu.Usage)
	assert.Equal(t, 250.0, gpu.Power)
	assert.Equal(t, 1.0, gpu.Count)
	require.NotNil(t, gpu.Engines)
	assert.Equal(t, 70.0, gpu.Engines["3D"])
	assert.Equal(t, 20.0, gpu.Engines["Video"])
}
|
||||
|
||||
func TestAverageSystemStatsSlice_MultipleGPUs(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 10.0,
|
||||
GPUData: map[string]system.GPUData{
|
||||
"gpu0": {Name: "GPU A", Usage: 20.0, Temperature: 50.0},
|
||||
"gpu1": {Name: "GPU B", Usage: 60.0, Temperature: 70.0},
|
||||
},
|
||||
},
|
||||
{
|
||||
Cpu: 20.0,
|
||||
GPUData: map[string]system.GPUData{
|
||||
"gpu0": {Name: "GPU A", Usage: 40.0, Temperature: 60.0},
|
||||
"gpu1": {Name: "GPU B", Usage: 80.0, Temperature: 80.0},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.GPUData)
|
||||
assert.Equal(t, 30.0, result.GPUData["gpu0"].Usage)
|
||||
assert.Equal(t, 55.0, result.GPUData["gpu0"].Temperature)
|
||||
assert.Equal(t, 70.0, result.GPUData["gpu1"].Usage)
|
||||
assert.Equal(t, 75.0, result.GPUData["gpu1"].Temperature)
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_CpuCoresUsage(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, CpuCoresUsage: system.Uint8Slice{10, 20, 30, 40}},
|
||||
{Cpu: 20.0, CpuCoresUsage: system.Uint8Slice{30, 40, 50, 60}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.CpuCoresUsage)
|
||||
assert.Equal(t, system.Uint8Slice{20, 30, 40, 50}, result.CpuCoresUsage)
|
||||
}
|
||||
|
||||
// Tests that per-core usage rounds correctly (e.g., 15.5 -> 16 via math.Round).
|
||||
func TestAverageSystemStatsSlice_CpuCoresUsageRounding(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, CpuCoresUsage: system.Uint8Slice{11}},
|
||||
{Cpu: 20.0, CpuCoresUsage: system.Uint8Slice{20}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.CpuCoresUsage)
|
||||
// (11+20)/2 = 15.5, rounds to 16
|
||||
assert.Equal(t, uint8(16), result.CpuCoresUsage[0])
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_CpuBreakdown(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, CpuBreakdown: []float64{5.0, 3.0, 1.0, 0.5, 90.5}},
|
||||
{Cpu: 20.0, CpuBreakdown: []float64{15.0, 7.0, 3.0, 1.5, 73.5}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.CpuBreakdown)
|
||||
assert.Equal(t, []float64{10.0, 5.0, 2.0, 1.0, 82.0}, result.CpuBreakdown)
|
||||
}
|
||||
|
||||
// Tests that Battery[1] (charge state) uses the last record's value.
|
||||
func TestAverageSystemStatsSlice_BatteryLastChargeState(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, Battery: [2]uint8{100, 1}}, // charging
|
||||
{Cpu: 20.0, Battery: [2]uint8{90, 0}}, // not charging
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, uint8(95), result.Battery[0])
|
||||
assert.Equal(t, uint8(0), result.Battery[1]) // last record's charge state
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_ThreeRecordsRounding(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, Mem: 8.0},
|
||||
{Cpu: 20.0, Mem: 8.0},
|
||||
{Cpu: 30.0, Mem: 8.0},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 20.0, result.Cpu)
|
||||
assert.Equal(t, 8.0, result.Mem)
|
||||
}
|
||||
|
||||
// Tests records where some have optional fields and others don't.
// Optional fields present in only one record are still divided by the total
// record count (here 2), halving each value.
func TestAverageSystemStatsSlice_MixedOptionalFields(t *testing.T) {
	input := []system.Stats{
		{
			Cpu:           10.0,
			CpuCoresUsage: system.Uint8Slice{50, 60},
			CpuBreakdown:  []float64{5.0, 3.0, 1.0, 0.5, 90.5},
			GPUData: map[string]system.GPUData{
				"gpu0": {Name: "GPU", Usage: 40.0},
			},
		},
		{
			Cpu: 20.0,
			// No CpuCoresUsage, CpuBreakdown, or GPUData
		},
	}

	result := records.AverageSystemStatsSlice(input)

	assert.Equal(t, 15.0, result.Cpu)
	// CpuCoresUsage: only 1 record had it, so sum/2
	require.NotNil(t, result.CpuCoresUsage)
	assert.Equal(t, uint8(25), result.CpuCoresUsage[0])
	assert.Equal(t, uint8(30), result.CpuCoresUsage[1])
	// CpuBreakdown: only 1 record had it, so sum/2
	require.NotNil(t, result.CpuBreakdown)
	assert.Equal(t, 2.5, result.CpuBreakdown[0])
	// GPUData: only 1 record had it, so sum/2
	require.NotNil(t, result.GPUData)
	assert.Equal(t, 20.0, result.GPUData["gpu0"].Usage)
}
|
||||
|
||||
// Tests with 10 records matching the common real-world case (10 x 1m -> 1 x 10m).
|
||||
func TestAverageSystemStatsSlice_TenRecords(t *testing.T) {
|
||||
input := make([]system.Stats, 10)
|
||||
for i := range input {
|
||||
input[i] = system.Stats{
|
||||
Cpu: float64(i * 10), // 0, 10, 20, ..., 90
|
||||
Mem: 16.0,
|
||||
MemUsed: float64(4 + i), // 4, 5, 6, ..., 13
|
||||
MemPct: float64(25 + i), // 25, 26, ..., 34
|
||||
DiskTotal: 500.0,
|
||||
DiskUsed: 250.0,
|
||||
DiskPct: 50.0,
|
||||
NetworkSent: float64(i),
|
||||
NetworkRecv: float64(i * 2),
|
||||
Bandwidth: [2]uint64{uint64(i * 1000), uint64(i * 2000)},
|
||||
LoadAvg: [3]float64{float64(i), float64(i) * 0.5, float64(i) * 0.25},
|
||||
}
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 45.0, result.Cpu) // avg of 0..90
|
||||
assert.Equal(t, 16.0, result.Mem) // constant
|
||||
assert.Equal(t, 8.5, result.MemUsed) // avg of 4..13
|
||||
assert.Equal(t, 29.5, result.MemPct) // avg of 25..34
|
||||
assert.Equal(t, 500.0, result.DiskTotal)
|
||||
assert.Equal(t, 250.0, result.DiskUsed)
|
||||
assert.Equal(t, 50.0, result.DiskPct)
|
||||
assert.Equal(t, 4.5, result.NetworkSent)
|
||||
assert.Equal(t, 9.0, result.NetworkRecv)
|
||||
assert.Equal(t, [2]uint64{4500, 9000}, result.Bandwidth)
|
||||
}
|
||||
|
||||
// --- Container Stats Tests ---
|
||||
|
||||
func TestAverageContainerStatsSlice_Empty(t *testing.T) {
|
||||
result := records.AverageContainerStatsSlice(nil)
|
||||
assert.Equal(t, []container.Stats{}, result)
|
||||
|
||||
result = records.AverageContainerStatsSlice([][]container.Stats{})
|
||||
assert.Equal(t, []container.Stats{}, result)
|
||||
}
|
||||
|
||||
func TestAverageContainerStatsSlice_SingleRecord(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 5.0, Mem: 128.0, Bandwidth: [2]uint64{1000, 2000}},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
|
||||
require.Len(t, result, 1)
|
||||
assert.Equal(t, "nginx", result[0].Name)
|
||||
assert.Equal(t, 5.0, result[0].Cpu)
|
||||
assert.Equal(t, 128.0, result[0].Mem)
|
||||
assert.Equal(t, [2]uint64{1000, 2000}, result[0].Bandwidth)
|
||||
}
|
||||
|
||||
func TestAverageContainerStatsSlice_BasicAveraging(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 10.0, Mem: 100.0, Bandwidth: [2]uint64{1000, 2000}},
|
||||
{Name: "redis", Cpu: 5.0, Mem: 64.0, Bandwidth: [2]uint64{500, 1000}},
|
||||
},
|
||||
{
|
||||
{Name: "nginx", Cpu: 20.0, Mem: 200.0, Bandwidth: [2]uint64{3000, 4000}},
|
||||
{Name: "redis", Cpu: 15.0, Mem: 128.0, Bandwidth: [2]uint64{1500, 2000}},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||
|
||||
require.Len(t, result, 2)
|
||||
|
||||
assert.Equal(t, "nginx", result[0].Name)
|
||||
assert.Equal(t, 15.0, result[0].Cpu)
|
||||
assert.Equal(t, 150.0, result[0].Mem)
|
||||
assert.Equal(t, [2]uint64{2000, 3000}, result[0].Bandwidth)
|
||||
|
||||
assert.Equal(t, "redis", result[1].Name)
|
||||
assert.Equal(t, 10.0, result[1].Cpu)
|
||||
assert.Equal(t, 96.0, result[1].Mem)
|
||||
assert.Equal(t, [2]uint64{1000, 1500}, result[1].Bandwidth)
|
||||
}
|
||||
|
||||
// Tests containers that appear in some records but not all.
|
||||
func TestAverageContainerStatsSlice_ContainerAppearsInSomeRecords(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 10.0, Mem: 100.0},
|
||||
{Name: "redis", Cpu: 5.0, Mem: 64.0},
|
||||
},
|
||||
{
|
||||
{Name: "nginx", Cpu: 20.0, Mem: 200.0},
|
||||
// redis not present
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||
|
||||
require.Len(t, result, 2)
|
||||
|
||||
assert.Equal(t, "nginx", result[0].Name)
|
||||
assert.Equal(t, 15.0, result[0].Cpu)
|
||||
assert.Equal(t, 150.0, result[0].Mem)
|
||||
|
||||
// redis: sum / count where count = total records (2), not records containing redis
|
||||
assert.Equal(t, "redis", result[1].Name)
|
||||
assert.Equal(t, 2.5, result[1].Cpu)
|
||||
assert.Equal(t, 32.0, result[1].Mem)
|
||||
}
|
||||
|
||||
// Tests backward compatibility with deprecated NetworkSent/NetworkRecv (MB) when Bandwidth is zero.
|
||||
func TestAverageContainerStatsSlice_DeprecatedNetworkFields(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 10.0, Mem: 100.0, NetworkSent: 1.0, NetworkRecv: 2.0}, // 1 MB, 2 MB
|
||||
},
|
||||
{
|
||||
{Name: "nginx", Cpu: 20.0, Mem: 200.0, NetworkSent: 3.0, NetworkRecv: 4.0}, // 3 MB, 4 MB
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
|
||||
require.Len(t, result, 1)
|
||||
assert.Equal(t, "nginx", result[0].Name)
|
||||
// avg sent = (1*1048576 + 3*1048576) / 2 = 2*1048576
|
||||
assert.Equal(t, uint64(2*1048576), result[0].Bandwidth[0])
|
||||
// avg recv = (2*1048576 + 4*1048576) / 2 = 3*1048576
|
||||
assert.Equal(t, uint64(3*1048576), result[0].Bandwidth[1])
|
||||
}
|
||||
|
||||
// Tests that when Bandwidth is set, deprecated NetworkSent/NetworkRecv are ignored.
|
||||
func TestAverageContainerStatsSlice_MixedBandwidthAndDeprecated(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 10.0, Mem: 100.0, Bandwidth: [2]uint64{5000, 6000}, NetworkSent: 99.0, NetworkRecv: 99.0},
|
||||
},
|
||||
{
|
||||
{Name: "nginx", Cpu: 20.0, Mem: 200.0, Bandwidth: [2]uint64{7000, 8000}},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
|
||||
require.Len(t, result, 1)
|
||||
assert.Equal(t, uint64(6000), result[0].Bandwidth[0])
|
||||
assert.Equal(t, uint64(7000), result[0].Bandwidth[1])
|
||||
}
|
||||
|
||||
func TestAverageContainerStatsSlice_ThreeRecords(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{{Name: "app", Cpu: 1.0, Mem: 100.0}},
|
||||
{{Name: "app", Cpu: 2.0, Mem: 200.0}},
|
||||
{{Name: "app", Cpu: 3.0, Mem: 300.0}},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
|
||||
require.Len(t, result, 1)
|
||||
assert.Equal(t, 2.0, result[0].Cpu)
|
||||
assert.Equal(t, 200.0, result[0].Mem)
|
||||
}
|
||||
|
||||
func TestAverageContainerStatsSlice_ManyContainers(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "a", Cpu: 10.0, Mem: 100.0},
|
||||
{Name: "b", Cpu: 20.0, Mem: 200.0},
|
||||
{Name: "c", Cpu: 30.0, Mem: 300.0},
|
||||
{Name: "d", Cpu: 40.0, Mem: 400.0},
|
||||
},
|
||||
{
|
||||
{Name: "a", Cpu: 20.0, Mem: 200.0},
|
||||
{Name: "b", Cpu: 30.0, Mem: 300.0},
|
||||
{Name: "c", Cpu: 40.0, Mem: 400.0},
|
||||
{Name: "d", Cpu: 50.0, Mem: 500.0},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||
|
||||
require.Len(t, result, 4)
|
||||
assert.Equal(t, 15.0, result[0].Cpu)
|
||||
assert.Equal(t, 25.0, result[1].Cpu)
|
||||
assert.Equal(t, 35.0, result[2].Cpu)
|
||||
assert.Equal(t, 45.0, result[3].Cpu)
|
||||
}
|
||||
138
internal/records/records_deletion.go
Normal file
138
internal/records/records_deletion.go
Normal file
@@ -0,0 +1,138 @@
|
||||
package records
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
)
|
||||
|
||||
// Delete old records
|
||||
func (rm *RecordManager) DeleteOldRecords() {
|
||||
rm.app.RunInTransaction(func(txApp core.App) error {
|
||||
err := deleteOldSystemStats(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old system stats", "err", err)
|
||||
}
|
||||
err = deleteOldContainerRecords(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old container records", "err", err)
|
||||
}
|
||||
err = deleteOldSystemdServiceRecords(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old systemd service records", "err", err)
|
||||
}
|
||||
err = deleteOldAlertsHistory(txApp, 200, 250)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old alerts history", "err", err)
|
||||
}
|
||||
err = deleteOldQuietHours(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old quiet hours", "err", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Delete old alerts history records
|
||||
func deleteOldAlertsHistory(app core.App, countToKeep, countBeforeDeletion int) error {
|
||||
db := app.DB()
|
||||
var users []struct {
|
||||
Id string `db:"user"`
|
||||
}
|
||||
err := db.NewQuery("SELECT user, COUNT(*) as count FROM alerts_history GROUP BY user HAVING count > {:countBeforeDeletion}").Bind(dbx.Params{"countBeforeDeletion": countBeforeDeletion}).All(&users)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, user := range users {
|
||||
_, err = db.NewQuery("DELETE FROM alerts_history WHERE user = {:user} AND id NOT IN (SELECT id FROM alerts_history WHERE user = {:user} ORDER BY created DESC LIMIT {:countToKeep})").Bind(dbx.Params{"user": user.Id, "countToKeep": countToKeep}).Execute()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes system_stats records older than what is displayed in the UI
|
||||
func deleteOldSystemStats(app core.App) error {
|
||||
// Collections to process
|
||||
collections := [3]string{"system_stats", "container_stats", "network_probe_stats"}
|
||||
|
||||
// Record types and their retention periods
|
||||
type RecordDeletionData struct {
|
||||
recordType string
|
||||
retention time.Duration
|
||||
}
|
||||
recordData := []RecordDeletionData{
|
||||
{recordType: "1m", retention: time.Hour}, // 1 hour
|
||||
{recordType: "10m", retention: 12 * time.Hour}, // 12 hours
|
||||
{recordType: "20m", retention: 24 * time.Hour}, // 1 day
|
||||
{recordType: "120m", retention: 7 * 24 * time.Hour}, // 7 days
|
||||
{recordType: "480m", retention: 30 * 24 * time.Hour}, // 30 days
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
for _, collection := range collections {
|
||||
// Build the WHERE clause
|
||||
var conditionParts []string
|
||||
var params dbx.Params = make(map[string]any)
|
||||
for i := range recordData {
|
||||
rd := recordData[i]
|
||||
// Create parameterized condition for this record type
|
||||
dateParam := fmt.Sprintf("date%d", i)
|
||||
conditionParts = append(conditionParts, fmt.Sprintf("(type = '%s' AND created < {:%s})", rd.recordType, dateParam))
|
||||
params[dateParam] = now.Add(-rd.retention)
|
||||
}
|
||||
// Combine conditions with OR
|
||||
conditionStr := strings.Join(conditionParts, " OR ")
|
||||
// Construct and execute the full raw query
|
||||
rawQuery := fmt.Sprintf("DELETE FROM %s WHERE %s", collection, conditionStr)
|
||||
if _, err := app.DB().NewQuery(rawQuery).Bind(params).Execute(); err != nil {
|
||||
return fmt.Errorf("failed to delete from %s: %v", collection, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes systemd service records that haven't been updated in the last 20 minutes
|
||||
func deleteOldSystemdServiceRecords(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
twentyMinutesAgo := now.Add(-20 * time.Minute)
|
||||
|
||||
// Delete systemd service records where updated < twentyMinutesAgo
|
||||
_, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete old systemd service records: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes container records that haven't been updated in the last 10 minutes
|
||||
func deleteOldContainerRecords(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
tenMinutesAgo := now.Add(-10 * time.Minute)
|
||||
|
||||
// Delete container records where updated < tenMinutesAgo
|
||||
_, err := app.DB().NewQuery("DELETE FROM containers WHERE updated < {:updated}").Bind(dbx.Params{"updated": tenMinutesAgo.UnixMilli()}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete old container records: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes old quiet hours records where end date has passed
|
||||
func deleteOldQuietHours(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
_, err := app.DB().NewQuery("DELETE FROM quiet_hours WHERE type = 'one-time' AND end < {:now}").Bind(dbx.Params{"now": now}).Execute()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
428
internal/records/records_deletion_test.go
Normal file
428
internal/records/records_deletion_test.go
Normal file
@@ -0,0 +1,428 @@
|
||||
//go:build testing
|
||||
|
||||
package records_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/records"
|
||||
"github.com/henrygd/beszel/internal/tests"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestDeleteOldRecords tests the main DeleteOldRecords function
|
||||
func TestDeleteOldRecords(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
rm := records.NewRecordManager(hub)
|
||||
|
||||
// Create test user for alerts history
|
||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create test system
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
now := time.Now()
|
||||
|
||||
// Create old system_stats records that should be deleted
|
||||
var record *core.Record
|
||||
record, err = tests.CreateRecord(hub, "system_stats", map[string]any{
|
||||
"system": system.Id,
|
||||
"type": "1m",
|
||||
"stats": `{"cpu": 50.0, "mem": 1024}`,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// created is autodate field, so we need to set it manually
|
||||
record.SetRaw("created", now.UTC().Add(-2*time.Hour).Format(types.DefaultDateLayout))
|
||||
err = hub.SaveNoValidate(record)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, record)
|
||||
require.InDelta(t, record.GetDateTime("created").Time().UTC().Unix(), now.UTC().Add(-2*time.Hour).Unix(), 1)
|
||||
require.Equal(t, record.Get("system"), system.Id)
|
||||
require.Equal(t, record.Get("type"), "1m")
|
||||
|
||||
// Create recent system_stats record that should be kept
|
||||
_, err = tests.CreateRecord(hub, "system_stats", map[string]any{
|
||||
"system": system.Id,
|
||||
"type": "1m",
|
||||
"stats": `{"cpu": 30.0, "mem": 512}`,
|
||||
"created": now.Add(-30 * time.Minute), // 30 minutes old, should be kept
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create many alerts history records to trigger deletion
|
||||
for i := range 260 { // More than countBeforeDeletion (250)
|
||||
_, err = tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||
"user": user.Id,
|
||||
"name": "CPU",
|
||||
"value": i + 1,
|
||||
"system": system.Id,
|
||||
"created": now.Add(-time.Duration(i) * time.Minute),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Count records before deletion
|
||||
systemStatsCountBefore, err := hub.CountRecords("system_stats")
|
||||
require.NoError(t, err)
|
||||
alertsCountBefore, err := hub.CountRecords("alerts_history")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Run deletion
|
||||
rm.DeleteOldRecords()
|
||||
|
||||
// Count records after deletion
|
||||
systemStatsCountAfter, err := hub.CountRecords("system_stats")
|
||||
require.NoError(t, err)
|
||||
alertsCountAfter, err := hub.CountRecords("alerts_history")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify old system stats were deleted
|
||||
assert.Less(t, systemStatsCountAfter, systemStatsCountBefore, "Old system stats should be deleted")
|
||||
|
||||
// Verify alerts history was trimmed
|
||||
assert.Less(t, alertsCountAfter, alertsCountBefore, "Excessive alerts history should be deleted")
|
||||
assert.Equal(t, alertsCountAfter, int64(200), "Alerts count should be equal to countToKeep (200)")
|
||||
}
|
||||
|
||||
// TestDeleteOldSystemStats tests the deleteOldSystemStats function
|
||||
func TestDeleteOldSystemStats(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create test system
|
||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
// Test data for different record types and their retention periods
|
||||
testCases := []struct {
|
||||
recordType string
|
||||
retention time.Duration
|
||||
shouldBeKept bool
|
||||
ageFromNow time.Duration
|
||||
description string
|
||||
}{
|
||||
{"1m", time.Hour, true, 30 * time.Minute, "1m record within 1 hour should be kept"},
|
||||
{"1m", time.Hour, false, 2 * time.Hour, "1m record older than 1 hour should be deleted"},
|
||||
{"10m", 12 * time.Hour, true, 6 * time.Hour, "10m record within 12 hours should be kept"},
|
||||
{"10m", 12 * time.Hour, false, 24 * time.Hour, "10m record older than 12 hours should be deleted"},
|
||||
{"20m", 24 * time.Hour, true, 12 * time.Hour, "20m record within 24 hours should be kept"},
|
||||
{"20m", 24 * time.Hour, false, 48 * time.Hour, "20m record older than 24 hours should be deleted"},
|
||||
{"120m", 7 * 24 * time.Hour, true, 3 * 24 * time.Hour, "120m record within 7 days should be kept"},
|
||||
{"120m", 7 * 24 * time.Hour, false, 10 * 24 * time.Hour, "120m record older than 7 days should be deleted"},
|
||||
{"480m", 30 * 24 * time.Hour, true, 15 * 24 * time.Hour, "480m record within 30 days should be kept"},
|
||||
{"480m", 30 * 24 * time.Hour, false, 45 * 24 * time.Hour, "480m record older than 30 days should be deleted"},
|
||||
}
|
||||
|
||||
// Create test records for both system_stats and container_stats
|
||||
collections := []string{"system_stats", "container_stats"}
|
||||
recordIds := make(map[string][]string)
|
||||
|
||||
for _, collection := range collections {
|
||||
recordIds[collection] = make([]string, 0)
|
||||
|
||||
for i, tc := range testCases {
|
||||
recordTime := now.Add(-tc.ageFromNow)
|
||||
|
||||
var stats string
|
||||
if collection == "system_stats" {
|
||||
stats = fmt.Sprintf(`{"cpu": %d.0, "mem": %d}`, i*10, i*100)
|
||||
} else {
|
||||
stats = fmt.Sprintf(`[{"name": "container%d", "cpu": %d.0, "mem": %d}]`, i, i*5, i*50)
|
||||
}
|
||||
|
||||
record, err := tests.CreateRecord(hub, collection, map[string]any{
|
||||
"system": system.Id,
|
||||
"type": tc.recordType,
|
||||
"stats": stats,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||
err = hub.SaveNoValidate(record)
|
||||
require.NoError(t, err)
|
||||
recordIds[collection] = append(recordIds[collection], record.Id)
|
||||
}
|
||||
}
|
||||
|
||||
// Run deletion
|
||||
err = records.DeleteOldSystemStats(hub)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify results
|
||||
for _, collection := range collections {
|
||||
for i, tc := range testCases {
|
||||
recordId := recordIds[collection][i]
|
||||
|
||||
// Try to find the record
|
||||
_, err := hub.FindRecordById(collection, recordId)
|
||||
|
||||
if tc.shouldBeKept {
|
||||
assert.NoError(t, err, "Record should exist: %s", tc.description)
|
||||
} else {
|
||||
assert.Error(t, err, "Record should be deleted: %s", tc.description)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteOldAlertsHistory tests the deleteOldAlertsHistory function
|
||||
func TestDeleteOldAlertsHistory(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create test users
|
||||
user1, err := tests.CreateUser(hub, "user1@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
user2, err := tests.CreateUser(hub, "user2@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user1.Id, user2.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
now := time.Now().UTC()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
user *core.Record
|
||||
alertCount int
|
||||
countToKeep int
|
||||
countBeforeDeletion int
|
||||
expectedAfterDeletion int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "User with few alerts (below threshold)",
|
||||
user: user1,
|
||||
alertCount: 100,
|
||||
countToKeep: 50,
|
||||
countBeforeDeletion: 150,
|
||||
expectedAfterDeletion: 100, // No deletion because below threshold
|
||||
description: "User with alerts below countBeforeDeletion should not have any deleted",
|
||||
},
|
||||
{
|
||||
name: "User with many alerts (above threshold)",
|
||||
user: user2,
|
||||
alertCount: 300,
|
||||
countToKeep: 100,
|
||||
countBeforeDeletion: 200,
|
||||
expectedAfterDeletion: 100, // Should be trimmed to countToKeep
|
||||
description: "User with alerts above countBeforeDeletion should be trimmed to countToKeep",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create alerts for this user
|
||||
for i := 0; i < tc.alertCount; i++ {
|
||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||
"user": tc.user.Id,
|
||||
"name": "CPU",
|
||||
"value": i + 1,
|
||||
"system": system.Id,
|
||||
"created": now.Add(-time.Duration(i) * time.Minute),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Count before deletion
|
||||
countBefore, err := hub.CountRecords("alerts_history",
|
||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(tc.alertCount), countBefore, "Initial count should match")
|
||||
|
||||
// Run deletion
|
||||
err = records.DeleteOldAlertsHistory(hub, tc.countToKeep, tc.countBeforeDeletion)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Count after deletion
|
||||
countAfter, err := hub.CountRecords("alerts_history",
|
||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, int64(tc.expectedAfterDeletion), countAfter, tc.description)
|
||||
|
||||
// If deletion occurred, verify the most recent records were kept
|
||||
if tc.expectedAfterDeletion < tc.alertCount {
|
||||
records, err := hub.FindRecordsByFilter("alerts_history",
|
||||
"user = {:user}",
|
||||
"-created", // Order by created DESC
|
||||
tc.countToKeep,
|
||||
0,
|
||||
map[string]any{"user": tc.user.Id})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, records, tc.expectedAfterDeletion, "Should have exactly countToKeep records")
|
||||
|
||||
// Verify records are in descending order by created time
|
||||
for i := 1; i < len(records); i++ {
|
||||
prev := records[i-1].GetDateTime("created").Time()
|
||||
curr := records[i].GetDateTime("created").Time()
|
||||
assert.True(t, prev.After(curr) || prev.Equal(curr),
|
||||
"Records should be ordered by created time (newest first)")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteOldAlertsHistoryEdgeCases tests edge cases for alerts history deletion
|
||||
func TestDeleteOldAlertsHistoryEdgeCases(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
t.Run("No users with excessive alerts", func(t *testing.T) {
|
||||
// Create user with few alerts
|
||||
user, err := tests.CreateUser(hub, "few@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
|
||||
// Create only 5 alerts (well below threshold)
|
||||
for i := range 5 {
|
||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||
"user": user.Id,
|
||||
"name": "CPU",
|
||||
"value": i + 1,
|
||||
"system": system.Id,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Should not error and should not delete anything
|
||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||
require.NoError(t, err)
|
||||
|
||||
count, err := hub.CountRecords("alerts_history")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(5), count, "All alerts should remain")
|
||||
})
|
||||
|
||||
t.Run("Empty alerts_history table", func(t *testing.T) {
|
||||
// Clear any existing alerts
|
||||
_, err := hub.DB().NewQuery("DELETE FROM alerts_history").Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should not error with empty table
|
||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
// TestDeleteOldSystemdServiceRecords tests systemd service cleanup via DeleteOldRecords
|
||||
func TestDeleteOldSystemdServiceRecords(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
rm := records.NewRecordManager(hub)
|
||||
|
||||
// Create test user and system
|
||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
// Create old systemd service records that should be deleted (older than 20 minutes)
|
||||
oldRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "nginx.service",
|
||||
"state": 0, // Active
|
||||
"sub": 1, // Running
|
||||
"cpu": 5.0,
|
||||
"cpuPeak": 10.0,
|
||||
"memory": 1024000,
|
||||
"memPeak": 2048000,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Set updated time to 25 minutes ago (should be deleted)
|
||||
oldRecord.SetRaw("updated", now.Add(-25*time.Minute).UnixMilli())
|
||||
err = hub.SaveNoValidate(oldRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create recent systemd service record that should be kept (within 20 minutes)
|
||||
recentRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "apache.service",
|
||||
"state": 1, // Inactive
|
||||
"sub": 0, // Dead
|
||||
"cpu": 2.0,
|
||||
"cpuPeak": 3.0,
|
||||
"memory": 512000,
|
||||
"memPeak": 1024000,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Set updated time to 10 minutes ago (should be kept)
|
||||
recentRecord.SetRaw("updated", now.Add(-10*time.Minute).UnixMilli())
|
||||
err = hub.SaveNoValidate(recentRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Count records before deletion
|
||||
countBefore, err := hub.CountRecords("systemd_services")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(2), countBefore, "Should have 2 systemd service records initially")
|
||||
|
||||
// Run deletion via RecordManager
|
||||
rm.DeleteOldRecords()
|
||||
|
||||
// Count records after deletion
|
||||
countAfter, err := hub.CountRecords("systemd_services")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(1), countAfter, "Should have 1 systemd service record after deletion")
|
||||
|
||||
// Verify the correct record was kept
|
||||
remainingRecords, err := hub.FindRecordsByFilter("systemd_services", "", "", 10, 0, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, remainingRecords, 1, "Should have exactly 1 record remaining")
|
||||
assert.Equal(t, "apache.service", remainingRecords[0].Get("name"), "The recent record should be kept")
|
||||
}
|
||||
@@ -3,430 +3,15 @@
|
||||
package records_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/records"
|
||||
"github.com/henrygd/beszel/internal/tests"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestDeleteOldRecords tests the main DeleteOldRecords function
|
||||
func TestDeleteOldRecords(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
rm := records.NewRecordManager(hub)
|
||||
|
||||
// Create test user for alerts history
|
||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create test system
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
now := time.Now()
|
||||
|
||||
// Create old system_stats records that should be deleted
|
||||
var record *core.Record
|
||||
record, err = tests.CreateRecord(hub, "system_stats", map[string]any{
|
||||
"system": system.Id,
|
||||
"type": "1m",
|
||||
"stats": `{"cpu": 50.0, "mem": 1024}`,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// created is autodate field, so we need to set it manually
|
||||
record.SetRaw("created", now.UTC().Add(-2*time.Hour).Format(types.DefaultDateLayout))
|
||||
err = hub.SaveNoValidate(record)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, record)
|
||||
require.InDelta(t, record.GetDateTime("created").Time().UTC().Unix(), now.UTC().Add(-2*time.Hour).Unix(), 1)
|
||||
require.Equal(t, record.Get("system"), system.Id)
|
||||
require.Equal(t, record.Get("type"), "1m")
|
||||
|
||||
// Create recent system_stats record that should be kept
|
||||
_, err = tests.CreateRecord(hub, "system_stats", map[string]any{
|
||||
"system": system.Id,
|
||||
"type": "1m",
|
||||
"stats": `{"cpu": 30.0, "mem": 512}`,
|
||||
"created": now.Add(-30 * time.Minute), // 30 minutes old, should be kept
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create many alerts history records to trigger deletion
|
||||
for i := range 260 { // More than countBeforeDeletion (250)
|
||||
_, err = tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||
"user": user.Id,
|
||||
"name": "CPU",
|
||||
"value": i + 1,
|
||||
"system": system.Id,
|
||||
"created": now.Add(-time.Duration(i) * time.Minute),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Count records before deletion
|
||||
systemStatsCountBefore, err := hub.CountRecords("system_stats")
|
||||
require.NoError(t, err)
|
||||
alertsCountBefore, err := hub.CountRecords("alerts_history")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Run deletion
|
||||
rm.DeleteOldRecords()
|
||||
|
||||
// Count records after deletion
|
||||
systemStatsCountAfter, err := hub.CountRecords("system_stats")
|
||||
require.NoError(t, err)
|
||||
alertsCountAfter, err := hub.CountRecords("alerts_history")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify old system stats were deleted
|
||||
assert.Less(t, systemStatsCountAfter, systemStatsCountBefore, "Old system stats should be deleted")
|
||||
|
||||
// Verify alerts history was trimmed
|
||||
assert.Less(t, alertsCountAfter, alertsCountBefore, "Excessive alerts history should be deleted")
|
||||
assert.Equal(t, alertsCountAfter, int64(200), "Alerts count should be equal to countToKeep (200)")
|
||||
}
|
||||
|
||||
// TestDeleteOldSystemStats tests the deleteOldSystemStats function
|
||||
func TestDeleteOldSystemStats(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create test system
|
||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
// Test data for different record types and their retention periods
|
||||
testCases := []struct {
|
||||
recordType string
|
||||
retention time.Duration
|
||||
shouldBeKept bool
|
||||
ageFromNow time.Duration
|
||||
description string
|
||||
}{
|
||||
{"1m", time.Hour, true, 30 * time.Minute, "1m record within 1 hour should be kept"},
|
||||
{"1m", time.Hour, false, 2 * time.Hour, "1m record older than 1 hour should be deleted"},
|
||||
{"10m", 12 * time.Hour, true, 6 * time.Hour, "10m record within 12 hours should be kept"},
|
||||
{"10m", 12 * time.Hour, false, 24 * time.Hour, "10m record older than 12 hours should be deleted"},
|
||||
{"20m", 24 * time.Hour, true, 12 * time.Hour, "20m record within 24 hours should be kept"},
|
||||
{"20m", 24 * time.Hour, false, 48 * time.Hour, "20m record older than 24 hours should be deleted"},
|
||||
{"120m", 7 * 24 * time.Hour, true, 3 * 24 * time.Hour, "120m record within 7 days should be kept"},
|
||||
{"120m", 7 * 24 * time.Hour, false, 10 * 24 * time.Hour, "120m record older than 7 days should be deleted"},
|
||||
{"480m", 30 * 24 * time.Hour, true, 15 * 24 * time.Hour, "480m record within 30 days should be kept"},
|
||||
{"480m", 30 * 24 * time.Hour, false, 45 * 24 * time.Hour, "480m record older than 30 days should be deleted"},
|
||||
}
|
||||
|
||||
// Create test records for both system_stats and container_stats
|
||||
collections := []string{"system_stats", "container_stats"}
|
||||
recordIds := make(map[string][]string)
|
||||
|
||||
for _, collection := range collections {
|
||||
recordIds[collection] = make([]string, 0)
|
||||
|
||||
for i, tc := range testCases {
|
||||
recordTime := now.Add(-tc.ageFromNow)
|
||||
|
||||
var stats string
|
||||
if collection == "system_stats" {
|
||||
stats = fmt.Sprintf(`{"cpu": %d.0, "mem": %d}`, i*10, i*100)
|
||||
} else {
|
||||
stats = fmt.Sprintf(`[{"name": "container%d", "cpu": %d.0, "mem": %d}]`, i, i*5, i*50)
|
||||
}
|
||||
|
||||
record, err := tests.CreateRecord(hub, collection, map[string]any{
|
||||
"system": system.Id,
|
||||
"type": tc.recordType,
|
||||
"stats": stats,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||
err = hub.SaveNoValidate(record)
|
||||
require.NoError(t, err)
|
||||
recordIds[collection] = append(recordIds[collection], record.Id)
|
||||
}
|
||||
}
|
||||
|
||||
// Run deletion
|
||||
err = records.DeleteOldSystemStats(hub)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify results
|
||||
for _, collection := range collections {
|
||||
for i, tc := range testCases {
|
||||
recordId := recordIds[collection][i]
|
||||
|
||||
// Try to find the record
|
||||
_, err := hub.FindRecordById(collection, recordId)
|
||||
|
||||
if tc.shouldBeKept {
|
||||
assert.NoError(t, err, "Record should exist: %s", tc.description)
|
||||
} else {
|
||||
assert.Error(t, err, "Record should be deleted: %s", tc.description)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteOldAlertsHistory tests the deleteOldAlertsHistory function
|
||||
func TestDeleteOldAlertsHistory(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create test users
|
||||
user1, err := tests.CreateUser(hub, "user1@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
user2, err := tests.CreateUser(hub, "user2@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user1.Id, user2.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
now := time.Now().UTC()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
user *core.Record
|
||||
alertCount int
|
||||
countToKeep int
|
||||
countBeforeDeletion int
|
||||
expectedAfterDeletion int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "User with few alerts (below threshold)",
|
||||
user: user1,
|
||||
alertCount: 100,
|
||||
countToKeep: 50,
|
||||
countBeforeDeletion: 150,
|
||||
expectedAfterDeletion: 100, // No deletion because below threshold
|
||||
description: "User with alerts below countBeforeDeletion should not have any deleted",
|
||||
},
|
||||
{
|
||||
name: "User with many alerts (above threshold)",
|
||||
user: user2,
|
||||
alertCount: 300,
|
||||
countToKeep: 100,
|
||||
countBeforeDeletion: 200,
|
||||
expectedAfterDeletion: 100, // Should be trimmed to countToKeep
|
||||
description: "User with alerts above countBeforeDeletion should be trimmed to countToKeep",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create alerts for this user
|
||||
for i := 0; i < tc.alertCount; i++ {
|
||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||
"user": tc.user.Id,
|
||||
"name": "CPU",
|
||||
"value": i + 1,
|
||||
"system": system.Id,
|
||||
"created": now.Add(-time.Duration(i) * time.Minute),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Count before deletion
|
||||
countBefore, err := hub.CountRecords("alerts_history",
|
||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(tc.alertCount), countBefore, "Initial count should match")
|
||||
|
||||
// Run deletion
|
||||
err = records.DeleteOldAlertsHistory(hub, tc.countToKeep, tc.countBeforeDeletion)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Count after deletion
|
||||
countAfter, err := hub.CountRecords("alerts_history",
|
||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, int64(tc.expectedAfterDeletion), countAfter, tc.description)
|
||||
|
||||
// If deletion occurred, verify the most recent records were kept
|
||||
if tc.expectedAfterDeletion < tc.alertCount {
|
||||
records, err := hub.FindRecordsByFilter("alerts_history",
|
||||
"user = {:user}",
|
||||
"-created", // Order by created DESC
|
||||
tc.countToKeep,
|
||||
0,
|
||||
map[string]any{"user": tc.user.Id})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, records, tc.expectedAfterDeletion, "Should have exactly countToKeep records")
|
||||
|
||||
// Verify records are in descending order by created time
|
||||
for i := 1; i < len(records); i++ {
|
||||
prev := records[i-1].GetDateTime("created").Time()
|
||||
curr := records[i].GetDateTime("created").Time()
|
||||
assert.True(t, prev.After(curr) || prev.Equal(curr),
|
||||
"Records should be ordered by created time (newest first)")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteOldAlertsHistoryEdgeCases tests edge cases for alerts history deletion
|
||||
func TestDeleteOldAlertsHistoryEdgeCases(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
t.Run("No users with excessive alerts", func(t *testing.T) {
|
||||
// Create user with few alerts
|
||||
user, err := tests.CreateUser(hub, "few@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
|
||||
// Create only 5 alerts (well below threshold)
|
||||
for i := range 5 {
|
||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||
"user": user.Id,
|
||||
"name": "CPU",
|
||||
"value": i + 1,
|
||||
"system": system.Id,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Should not error and should not delete anything
|
||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||
require.NoError(t, err)
|
||||
|
||||
count, err := hub.CountRecords("alerts_history")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(5), count, "All alerts should remain")
|
||||
})
|
||||
|
||||
t.Run("Empty alerts_history table", func(t *testing.T) {
|
||||
// Clear any existing alerts
|
||||
_, err := hub.DB().NewQuery("DELETE FROM alerts_history").Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should not error with empty table
|
||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
// TestDeleteOldSystemdServiceRecords tests systemd service cleanup via DeleteOldRecords
|
||||
func TestDeleteOldSystemdServiceRecords(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
rm := records.NewRecordManager(hub)
|
||||
|
||||
// Create test user and system
|
||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
// Create old systemd service records that should be deleted (older than 20 minutes)
|
||||
oldRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "nginx.service",
|
||||
"state": 0, // Active
|
||||
"sub": 1, // Running
|
||||
"cpu": 5.0,
|
||||
"cpuPeak": 10.0,
|
||||
"memory": 1024000,
|
||||
"memPeak": 2048000,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Set updated time to 25 minutes ago (should be deleted)
|
||||
oldRecord.SetRaw("updated", now.Add(-25*time.Minute).UnixMilli())
|
||||
err = hub.SaveNoValidate(oldRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create recent systemd service record that should be kept (within 20 minutes)
|
||||
recentRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "apache.service",
|
||||
"state": 1, // Inactive
|
||||
"sub": 0, // Dead
|
||||
"cpu": 2.0,
|
||||
"cpuPeak": 3.0,
|
||||
"memory": 512000,
|
||||
"memPeak": 1024000,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Set updated time to 10 minutes ago (should be kept)
|
||||
recentRecord.SetRaw("updated", now.Add(-10*time.Minute).UnixMilli())
|
||||
err = hub.SaveNoValidate(recentRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Count records before deletion
|
||||
countBefore, err := hub.CountRecords("systemd_services")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(2), countBefore, "Should have 2 systemd service records initially")
|
||||
|
||||
// Run deletion via RecordManager
|
||||
rm.DeleteOldRecords()
|
||||
|
||||
// Count records after deletion
|
||||
countAfter, err := hub.CountRecords("systemd_services")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(1), countAfter, "Should have 1 systemd service record after deletion")
|
||||
|
||||
// Verify the correct record was kept
|
||||
remainingRecords, err := hub.FindRecordsByFilter("systemd_services", "", "", 10, 0, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, remainingRecords, 1, "Should have exactly 1 record remaining")
|
||||
assert.Equal(t, "apache.service", remainingRecords[0].Get("name"), "The recent record should be kept")
|
||||
}
|
||||
|
||||
// TestRecordManagerCreation tests RecordManager creation
|
||||
func TestRecordManagerCreation(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"recharts": "^2.15.4",
|
||||
"shiki": "^3.13.0",
|
||||
"tailwind-merge": "^3.3.1",
|
||||
"valibot": "^0.42.1",
|
||||
"valibot": "^1.3.1",
|
||||
},
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "2.2.4",
|
||||
@@ -927,7 +927,7 @@
|
||||
|
||||
"util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="],
|
||||
|
||||
"valibot": ["valibot@0.42.1", "", { "peerDependencies": { "typescript": ">=5" } }, "sha512-3keXV29Ar5b//Hqi4MbSdV7lfVp6zuYLZuA9V1PvQUsXqogr+u5lvLPLk3A4f74VUXDnf/JfWMN6sB+koJ/FFw=="],
|
||||
"valibot": ["valibot@1.3.1", "", { "peerDependencies": { "typescript": ">=5" }, "optionalPeers": ["typescript"] }, "sha512-sfdRir/QFM0JaF22hqTroPc5xy4DimuGQVKFrzF1YfGwaS1nJot3Y8VqMdLO2Lg27fMzat2yD3pY5PbAYO39Gg=="],
|
||||
|
||||
"vfile": ["vfile@6.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile-message": "^4.0.0" } }, "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q=="],
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
<meta charset="UTF-8" />
|
||||
<link rel="manifest" href="./static/manifest.json" crossorigin="use-credentials" />
|
||||
<link rel="icon" type="image/svg+xml" href="./static/icon.svg" />
|
||||
<link rel="apple-touch-icon" href="./static/icon.png" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0,maximum-scale=1.0, user-scalable=no, viewport-fit=cover" />
|
||||
<meta name="robots" content="noindex, nofollow" />
|
||||
<title>Beszel</title>
|
||||
@@ -21,11 +22,7 @@
|
||||
})();
|
||||
</script>
|
||||
<script>
|
||||
globalThis.BESZEL = {
|
||||
BASE_PATH: "%BASE_URL%",
|
||||
HUB_VERSION: "{{V}}",
|
||||
HUB_URL: "{{HUB_URL}}"
|
||||
}
|
||||
globalThis.BESZEL = "{info}"
|
||||
</script>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
407
internal/site/package-lock.json
generated
407
internal/site/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "beszel",
|
||||
"version": "0.18.3",
|
||||
"version": "0.18.7",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "beszel",
|
||||
"version": "0.18.3",
|
||||
"version": "0.18.7",
|
||||
"dependencies": {
|
||||
"@henrygd/queue": "^1.0.7",
|
||||
"@henrygd/semaphore": "^0.0.2",
|
||||
@@ -44,7 +44,7 @@
|
||||
"recharts": "^2.15.4",
|
||||
"shiki": "^3.13.0",
|
||||
"tailwind-merge": "^3.3.1",
|
||||
"valibot": "^0.42.1"
|
||||
"valibot": "^1.3.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "2.2.4",
|
||||
@@ -986,29 +986,6 @@
|
||||
"integrity": "sha512-N3W7MKwTRmAxOjeG0NAT18oe2Xn3KdjkpMR6crbkF1UDamMGPjyigqEsefiv+qTaxibtc1a+zXCVzb9YXANVqw==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@isaacs/balanced-match": {
|
||||
"version": "4.0.1",
|
||||
"resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz",
|
||||
"integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": "20 || >=22"
|
||||
}
|
||||
},
|
||||
"node_modules/@isaacs/brace-expansion": {
|
||||
"version": "5.0.0",
|
||||
"resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.0.tgz",
|
||||
"integrity": "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@isaacs/balanced-match": "^4.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": "20 || >=22"
|
||||
}
|
||||
},
|
||||
"node_modules/@isaacs/cliui": {
|
||||
"version": "8.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
|
||||
@@ -1243,9 +1220,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@lingui/cli/node_modules/picomatch": {
|
||||
"version": "2.3.1",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
|
||||
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
|
||||
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
@@ -2408,9 +2385,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@rollup/rollup-android-arm-eabi": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.48.1.tgz",
|
||||
"integrity": "sha512-rGmb8qoG/zdmKoYELCBwu7vt+9HxZ7Koos3pD0+sH5fR3u3Wb/jGcpnqxcnWsPEKDUyzeLSqksN8LJtgXjqBYw==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.1.tgz",
|
||||
"integrity": "sha512-d6FinEBLdIiK+1uACUttJKfgZREXrF0Qc2SmLII7W2AD8FfiZ9Wjd+rD/iRuf5s5dWrr1GgwXCvPqOuDquOowA==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
@@ -2422,9 +2399,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-android-arm64": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.48.1.tgz",
|
||||
"integrity": "sha512-4e9WtTxrk3gu1DFE+imNJr4WsL13nWbD/Y6wQcyku5qadlKHY3OQ3LJ/INrrjngv2BJIHnIzbqMk1GTAC2P8yQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.1.tgz",
|
||||
"integrity": "sha512-YjG/EwIDvvYI1YvYbHvDz/BYHtkY4ygUIXHnTdLhG+hKIQFBiosfWiACWortsKPKU/+dUwQQCKQM3qrDe8c9BA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2436,9 +2413,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-darwin-arm64": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.48.1.tgz",
|
||||
"integrity": "sha512-+XjmyChHfc4TSs6WUQGmVf7Hkg8ferMAE2aNYYWjiLzAS/T62uOsdfnqv+GHRjq7rKRnYh4mwWb4Hz7h/alp8A==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.1.tgz",
|
||||
"integrity": "sha512-mjCpF7GmkRtSJwon+Rq1N8+pI+8l7w5g9Z3vWj4T7abguC4Czwi3Yu/pFaLvA3TTeMVjnu3ctigusqWUfjZzvw==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2450,9 +2427,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-darwin-x64": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.48.1.tgz",
|
||||
"integrity": "sha512-upGEY7Ftw8M6BAJyGwnwMw91rSqXTcOKZnnveKrVWsMTF8/k5mleKSuh7D4v4IV1pLxKAk3Tbs0Lo9qYmii5mQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.1.tgz",
|
||||
"integrity": "sha512-haZ7hJ1JT4e9hqkoT9R/19XW2QKqjfJVv+i5AGg57S+nLk9lQnJ1F/eZloRO3o9Scy9CM3wQ9l+dkXtcBgN5Ew==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -2464,9 +2441,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-freebsd-arm64": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.48.1.tgz",
|
||||
"integrity": "sha512-P9ViWakdoynYFUOZhqq97vBrhuvRLAbN/p2tAVJvhLb8SvN7rbBnJQcBu8e/rQts42pXGLVhfsAP0k9KXWa3nQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.1.tgz",
|
||||
"integrity": "sha512-czw90wpQq3ZsAVBlinZjAYTKduOjTywlG7fEeWKUA7oCmpA8xdTkxZZlwNJKWqILlq0wehoZcJYfBvOyhPTQ6w==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2478,9 +2455,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-freebsd-x64": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.48.1.tgz",
|
||||
"integrity": "sha512-VLKIwIpnBya5/saccM8JshpbxfyJt0Dsli0PjXozHwbSVaHTvWXJH1bbCwPXxnMzU4zVEfgD1HpW3VQHomi2AQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.1.tgz",
|
||||
"integrity": "sha512-KVB2rqsxTHuBtfOeySEyzEOB7ltlB/ux38iu2rBQzkjbwRVlkhAGIEDiiYnO2kFOkJp+Z7pUXKyrRRFuFUKt+g==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -2492,9 +2469,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.48.1.tgz",
|
||||
"integrity": "sha512-3zEuZsXfKaw8n/yF7t8N6NNdhyFw3s8xJTqjbTDXlipwrEHo4GtIKcMJr5Ed29leLpB9AugtAQpAHW0jvtKKaQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.1.tgz",
|
||||
"integrity": "sha512-L+34Qqil+v5uC0zEubW7uByo78WOCIrBvci69E7sFASRl0X7b/MB6Cqd1lky/CtcSVTydWa2WZwFuWexjS5o6g==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
@@ -2506,9 +2483,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.48.1.tgz",
|
||||
"integrity": "sha512-leo9tOIlKrcBmmEypzunV/2w946JeLbTdDlwEZ7OnnsUyelZ72NMnT4B2vsikSgwQifjnJUbdXzuW4ToN1wV+Q==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.1.tgz",
|
||||
"integrity": "sha512-n83O8rt4v34hgFzlkb1ycniJh7IR5RCIqt6mz1VRJD6pmhRi0CXdmfnLu9dIUS6buzh60IvACM842Ffb3xd6Gg==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
@@ -2520,9 +2497,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-arm64-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-Vy/WS4z4jEyvnJm+CnPfExIv5sSKqZrUr98h03hpAMbE2aI0aD2wvK6GiSe8Gx2wGp3eD81cYDpLLBqNb2ydwQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-Nql7sTeAzhTAja3QXeAI48+/+GjBJ+QmAH13snn0AJSNL50JsDqotyudHyMbO2RbJkskbMbFJfIJKWA6R1LCJQ==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2534,9 +2511,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-arm64-musl": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.48.1.tgz",
|
||||
"integrity": "sha512-x5Kzn7XTwIssU9UYqWDB9VpLpfHYuXw5c6bJr4Mzv9kIv242vmJHbI5PJJEnmBYitUIfoMCODDhR7KoZLot2VQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.1.tgz",
|
||||
"integrity": "sha512-+pUymDhd0ys9GcKZPPWlFiZ67sTWV5UU6zOJat02M1+PiuSGDziyRuI/pPue3hoUwm2uGfxdL+trT6Z9rxnlMA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2547,10 +2524,24 @@
|
||||
"linux"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-loongarch64-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-yzCaBbwkkWt/EcgJOKDUdUpMHjhiZT/eDktOPWvSRpqrVE04p0Nd6EGV4/g7MARXXeOqstflqsKuXVM3H9wOIQ==",
|
||||
"node_modules/@rollup/rollup-linux-loong64-gnu": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-VSvgvQeIcsEvY4bKDHEDWcpW4Yw7BtlKG1GUT4FzBUlEKQK0rWHYBqQt6Fm2taXS+1bXvJT6kICu5ZwqKCnvlQ==",
|
||||
"cpu": [
|
||||
"loong64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-loong64-musl": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.1.tgz",
|
||||
"integrity": "sha512-4LqhUomJqwe641gsPp6xLfhqWMbQV04KtPp7/dIp0nzPxAkNY1AbwL5W0MQpcalLYk07vaW9Kp1PBhdpZYYcEw==",
|
||||
"cpu": [
|
||||
"loong64"
|
||||
],
|
||||
@@ -2562,9 +2553,23 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-ppc64-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-UK0WzWUjMAJccHIeOpPhPcKBqax7QFg47hwZTp6kiMhQHeOYJeaMwzeRZe1q5IiTKsaLnHu9s6toSYVUlZ2QtQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-tLQQ9aPvkBxOc/EUT6j3pyeMD6Hb8QF2BTBnCQWP/uu1lhc9AIrIjKnLYMEroIz/JvtGYgI9dF3AxHZNaEH0rw==",
|
||||
"cpu": [
|
||||
"ppc64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-ppc64-musl": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.1.tgz",
|
||||
"integrity": "sha512-RMxFhJwc9fSXP6PqmAz4cbv3kAyvD1etJFjTx4ONqFP9DkTkXsAMU4v3Vyc5BgzC+anz7nS/9tp4obsKfqkDHg==",
|
||||
"cpu": [
|
||||
"ppc64"
|
||||
],
|
||||
@@ -2576,9 +2581,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-3NADEIlt+aCdCbWVZ7D3tBjBX1lHpXxcvrLt/kdXTiBrOds8APTdtk2yRL2GgmnSVeX4YS1JIf0imFujg78vpw==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-QKgFl+Yc1eEk6MmOBfRHYF6lTxiiiV3/z/BRrbSiW2I7AFTXoBFvdMEyglohPj//2mZS4hDOqeB0H1ACh3sBbg==",
|
||||
"cpu": [
|
||||
"riscv64"
|
||||
],
|
||||
@@ -2590,9 +2595,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-riscv64-musl": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.48.1.tgz",
|
||||
"integrity": "sha512-euuwm/QTXAMOcyiFCcrx0/S2jGvFlKJ2Iro8rsmYL53dlblp3LkUQVFzEidHhvIPPvcIsxDhl2wkBE+I6YVGzA==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.1.tgz",
|
||||
"integrity": "sha512-RAjXjP/8c6ZtzatZcA1RaQr6O1TRhzC+adn8YZDnChliZHviqIjmvFwHcxi4JKPSDAt6Uhf/7vqcBzQJy0PDJg==",
|
||||
"cpu": [
|
||||
"riscv64"
|
||||
],
|
||||
@@ -2604,9 +2609,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-s390x-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-w8mULUjmPdWLJgmTYJx/W6Qhln1a+yqvgwmGXcQl2vFBkWsKGUBRbtLRuKJUln8Uaimf07zgJNxOhHOvjSQmBQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-wcuocpaOlaL1COBYiA89O6yfjlp3RwKDeTIA0hM7OpmhR1Bjo9j31G1uQVpDlTvwxGn2nQs65fBFL5UFd76FcQ==",
|
||||
"cpu": [
|
||||
"s390x"
|
||||
],
|
||||
@@ -2618,9 +2623,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-x64-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-90taWXCWxTbClWuMZD0DKYohY1EovA+W5iytpE89oUPmT5O1HFdf8cuuVIylE6vCbrGdIGv85lVRzTcpTRZ+kA==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-77PpsFQUCOiZR9+LQEFg9GClyfkNXj1MP6wRnzYs0EeWbPcHs02AXu4xuUbM1zhwn3wqaizle3AEYg5aeoohhg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -2632,9 +2637,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-x64-musl": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.48.1.tgz",
|
||||
"integrity": "sha512-2Gu29SkFh1FfTRuN1GR1afMuND2GKzlORQUP3mNMJbqdndOg7gNsa81JnORctazHRokiDzQ5+MLE5XYmZW5VWg==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.1.tgz",
|
||||
"integrity": "sha512-5cIATbk5vynAjqqmyBjlciMJl1+R/CwX9oLk/EyiFXDWd95KpHdrOJT//rnUl4cUcskrd0jCCw3wpZnhIHdD9w==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -2645,10 +2650,38 @@
|
||||
"linux"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-openbsd-x64": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.1.tgz",
|
||||
"integrity": "sha512-cl0w09WsCi17mcmWqqglez9Gk8isgeWvoUZ3WiJFYSR3zjBQc2J5/ihSjpl+VLjPqjQ/1hJRcqBfLjssREQILw==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"openbsd"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-openharmony-arm64": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.1.tgz",
|
||||
"integrity": "sha512-4Cv23ZrONRbNtbZa37mLSueXUCtN7MXccChtKpUnQNgF010rjrjfHx3QxkS2PI7LqGT5xXyYs1a7LbzAwT0iCA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"openharmony"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-win32-arm64-msvc": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.48.1.tgz",
|
||||
"integrity": "sha512-6kQFR1WuAO50bxkIlAVeIYsz3RUx+xymwhTo9j94dJ+kmHe9ly7muH23sdfWduD0BA8pD9/yhonUvAjxGh34jQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.1.tgz",
|
||||
"integrity": "sha512-i1okWYkA4FJICtr7KpYzFpRTHgy5jdDbZiWfvny21iIKky5YExiDXP+zbXzm3dUcFpkEeYNHgQ5fuG236JPq0g==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2660,9 +2693,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-win32-ia32-msvc": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.48.1.tgz",
|
||||
"integrity": "sha512-RUyZZ/mga88lMI3RlXFs4WQ7n3VyU07sPXmMG7/C1NOi8qisUg57Y7LRarqoGoAiopmGmChUhSwfpvQ3H5iGSQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.1.tgz",
|
||||
"integrity": "sha512-u09m3CuwLzShA0EYKMNiFgcjjzwqtUMLmuCJLeZWjjOYA3IT2Di09KaxGBTP9xVztWyIWjVdsB2E9goMjZvTQg==",
|
||||
"cpu": [
|
||||
"ia32"
|
||||
],
|
||||
@@ -2673,10 +2706,24 @@
|
||||
"win32"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-win32-x64-gnu": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-k+600V9Zl1CM7eZxJgMyTUzmrmhB/0XZnF4pRypKAlAgxmedUA+1v9R+XOFv56W4SlHEzfeMtzujLJD22Uz5zg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"win32"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-win32-x64-msvc": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.48.1.tgz",
|
||||
"integrity": "sha512-8a/caCUN4vkTChxkaIJcMtwIVcBhi4X2PQRoT+yCK3qRYaZ7cURrmJFL5Ux9H9RaMIXj9RuihckdmkBX3zZsgg==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.1.tgz",
|
||||
"integrity": "sha512-lWMnixq/QzxyhTV6NjQJ4SFo1J6PvOX8vUx5Wb4bBPsEb+8xZ89Bz6kOXpfXj9ak9AHTQVQzlgzBEc1SyM27xQ==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -3235,6 +3282,66 @@
|
||||
"node": ">=14.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": {
|
||||
"version": "1.4.5",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"@emnapi/wasi-threads": "1.0.4",
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": {
|
||||
"version": "1.4.5",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": {
|
||||
"version": "1.0.4",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": {
|
||||
"version": "0.2.12",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"@emnapi/core": "^1.4.3",
|
||||
"@emnapi/runtime": "^1.4.3",
|
||||
"@tybys/wasm-util": "^0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": {
|
||||
"version": "0.10.0",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": {
|
||||
"version": "2.8.0",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "0BSD",
|
||||
"optional": true
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
|
||||
"version": "4.1.12",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.12.tgz",
|
||||
@@ -3589,9 +3696,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/anymatch/node_modules/picomatch": {
|
||||
"version": "2.3.1",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
|
||||
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
|
||||
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
@@ -3620,6 +3727,16 @@
|
||||
"node": ">=10"
|
||||
}
|
||||
},
|
||||
"node_modules/balanced-match": {
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz",
|
||||
"integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": "18 || 20 || >=22"
|
||||
}
|
||||
},
|
||||
"node_modules/base64-js": {
|
||||
"version": "1.5.1",
|
||||
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
|
||||
@@ -3666,6 +3783,19 @@
|
||||
"readable-stream": "^3.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/brace-expansion": {
|
||||
"version": "5.0.5",
|
||||
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz",
|
||||
"integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"balanced-match": "^4.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": "18 || 20 || >=22"
|
||||
}
|
||||
},
|
||||
"node_modules/braces": {
|
||||
"version": "3.0.3",
|
||||
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
|
||||
@@ -5072,9 +5202,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/lodash": {
|
||||
"version": "4.17.23",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
|
||||
"integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
|
||||
"version": "4.18.1",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.18.1.tgz",
|
||||
"integrity": "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/lodash.sortby": {
|
||||
@@ -5267,9 +5397,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/micromatch/node_modules/picomatch": {
|
||||
"version": "2.3.1",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
|
||||
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
|
||||
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
@@ -5290,16 +5420,16 @@
|
||||
}
|
||||
},
|
||||
"node_modules/minimatch": {
|
||||
"version": "10.1.1",
|
||||
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz",
|
||||
"integrity": "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==",
|
||||
"version": "10.2.5",
|
||||
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.5.tgz",
|
||||
"integrity": "sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg==",
|
||||
"dev": true,
|
||||
"license": "BlueOak-1.0.0",
|
||||
"dependencies": {
|
||||
"@isaacs/brace-expansion": "^5.0.0"
|
||||
"brace-expansion": "^5.0.5"
|
||||
},
|
||||
"engines": {
|
||||
"node": "20 || >=22"
|
||||
"node": "18 || 20 || >=22"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/isaacs"
|
||||
@@ -5575,9 +5705,9 @@
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/picomatch": {
|
||||
"version": "4.0.3",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
|
||||
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz",
|
||||
"integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
@@ -5956,9 +6086,9 @@
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/rollup": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.48.1.tgz",
|
||||
"integrity": "sha512-jVG20NvbhTYDkGAty2/Yh7HK6/q3DGSRH4o8ALKGArmMuaauM9kLfoMZ+WliPwA5+JHr2lTn3g557FxBV87ifg==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.1.tgz",
|
||||
"integrity": "sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -5972,26 +6102,31 @@
|
||||
"npm": ">=8.0.0"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@rollup/rollup-android-arm-eabi": "4.48.1",
|
||||
"@rollup/rollup-android-arm64": "4.48.1",
|
||||
"@rollup/rollup-darwin-arm64": "4.48.1",
|
||||
"@rollup/rollup-darwin-x64": "4.48.1",
|
||||
"@rollup/rollup-freebsd-arm64": "4.48.1",
|
||||
"@rollup/rollup-freebsd-x64": "4.48.1",
|
||||
"@rollup/rollup-linux-arm-gnueabihf": "4.48.1",
|
||||
"@rollup/rollup-linux-arm-musleabihf": "4.48.1",
|
||||
"@rollup/rollup-linux-arm64-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-arm64-musl": "4.48.1",
|
||||
"@rollup/rollup-linux-loongarch64-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-ppc64-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-riscv64-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-riscv64-musl": "4.48.1",
|
||||
"@rollup/rollup-linux-s390x-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-x64-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-x64-musl": "4.48.1",
|
||||
"@rollup/rollup-win32-arm64-msvc": "4.48.1",
|
||||
"@rollup/rollup-win32-ia32-msvc": "4.48.1",
|
||||
"@rollup/rollup-win32-x64-msvc": "4.48.1",
|
||||
"@rollup/rollup-android-arm-eabi": "4.60.1",
|
||||
"@rollup/rollup-android-arm64": "4.60.1",
|
||||
"@rollup/rollup-darwin-arm64": "4.60.1",
|
||||
"@rollup/rollup-darwin-x64": "4.60.1",
|
||||
"@rollup/rollup-freebsd-arm64": "4.60.1",
|
||||
"@rollup/rollup-freebsd-x64": "4.60.1",
|
||||
"@rollup/rollup-linux-arm-gnueabihf": "4.60.1",
|
||||
"@rollup/rollup-linux-arm-musleabihf": "4.60.1",
|
||||
"@rollup/rollup-linux-arm64-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-arm64-musl": "4.60.1",
|
||||
"@rollup/rollup-linux-loong64-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-loong64-musl": "4.60.1",
|
||||
"@rollup/rollup-linux-ppc64-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-ppc64-musl": "4.60.1",
|
||||
"@rollup/rollup-linux-riscv64-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-riscv64-musl": "4.60.1",
|
||||
"@rollup/rollup-linux-s390x-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-x64-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-x64-musl": "4.60.1",
|
||||
"@rollup/rollup-openbsd-x64": "4.60.1",
|
||||
"@rollup/rollup-openharmony-arm64": "4.60.1",
|
||||
"@rollup/rollup-win32-arm64-msvc": "4.60.1",
|
||||
"@rollup/rollup-win32-ia32-msvc": "4.60.1",
|
||||
"@rollup/rollup-win32-x64-gnu": "4.60.1",
|
||||
"@rollup/rollup-win32-x64-msvc": "4.60.1",
|
||||
"fsevents": "~2.3.2"
|
||||
}
|
||||
},
|
||||
@@ -6290,9 +6425,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/tar": {
|
||||
"version": "7.5.7",
|
||||
"resolved": "https://registry.npmjs.org/tar/-/tar-7.5.7.tgz",
|
||||
"integrity": "sha512-fov56fJiRuThVFXD6o6/Q354S7pnWMJIVlDBYijsTNx6jKSE4pvrDTs6lUnmGvNyfJwFQQwWy3owKz1ucIhveQ==",
|
||||
"version": "7.5.13",
|
||||
"resolved": "https://registry.npmjs.org/tar/-/tar-7.5.13.tgz",
|
||||
"integrity": "sha512-tOG/7GyXpFevhXVh8jOPJrmtRpOTsYqUIkVdVooZYJS/z8WhfQUX8RJILmeuJNinGAMSu1veBr4asSHFt5/hng==",
|
||||
"dev": true,
|
||||
"license": "BlueOak-1.0.0",
|
||||
"dependencies": {
|
||||
@@ -6559,9 +6694,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/valibot": {
|
||||
"version": "0.42.1",
|
||||
"resolved": "https://registry.npmjs.org/valibot/-/valibot-0.42.1.tgz",
|
||||
"integrity": "sha512-3keXV29Ar5b//Hqi4MbSdV7lfVp6zuYLZuA9V1PvQUsXqogr+u5lvLPLk3A4f74VUXDnf/JfWMN6sB+koJ/FFw==",
|
||||
"version": "1.3.1",
|
||||
"resolved": "https://registry.npmjs.org/valibot/-/valibot-1.3.1.tgz",
|
||||
"integrity": "sha512-sfdRir/QFM0JaF22hqTroPc5xy4DimuGQVKFrzF1YfGwaS1nJot3Y8VqMdLO2Lg27fMzat2yD3pY5PbAYO39Gg==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"typescript": ">=5"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "beszel",
|
||||
"private": true,
|
||||
"version": "0.18.5",
|
||||
"version": "0.18.7",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "vite --host",
|
||||
@@ -52,7 +52,7 @@
|
||||
"recharts": "^2.15.4",
|
||||
"shiki": "^3.13.0",
|
||||
"tailwind-merge": "^3.3.1",
|
||||
"valibot": "^0.42.1"
|
||||
"valibot": "^1.3.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "2.2.4",
|
||||
|
||||
@@ -20,7 +20,7 @@ export default memo(function AlertsButton({ system }: { system: SystemRecord })
|
||||
<SheetTrigger asChild>
|
||||
<Button variant="ghost" size="icon" aria-label={t`Alerts`} data-nolink onClick={() => setOpened(true)}>
|
||||
<BellIcon
|
||||
className={cn("h-[1.2em] w-[1.2em] pointer-events-none", {
|
||||
className={cn("size-[1.2em] pointer-events-none", {
|
||||
"fill-primary": hasSystemAlert,
|
||||
})}
|
||||
/>
|
||||
|
||||
@@ -2,11 +2,13 @@ import { t } from "@lingui/core/macro"
|
||||
import { Plural, Trans } from "@lingui/react/macro"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { getPagePath } from "@nanostores/router"
|
||||
import { GlobeIcon, ServerIcon } from "lucide-react"
|
||||
import { ChevronDownIcon, GlobeIcon, ServerIcon } from "lucide-react"
|
||||
import { lazy, memo, Suspense, useMemo, useState } from "react"
|
||||
import { $router, Link } from "@/components/router"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Checkbox } from "@/components/ui/checkbox"
|
||||
import { DialogDescription, DialogHeader, DialogTitle } from "@/components/ui/dialog"
|
||||
import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { Switch } from "@/components/ui/switch"
|
||||
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"
|
||||
@@ -64,11 +66,57 @@ const deleteAlerts = debounce(async ({ name, systems }: { name: string; systems:
|
||||
|
||||
export const AlertDialogContent = memo(function AlertDialogContent({ system }: { system: SystemRecord }) {
|
||||
const alerts = useStore($alerts)
|
||||
const systems = useStore($systems)
|
||||
const [overwriteExisting, setOverwriteExisting] = useState<boolean | "indeterminate">(false)
|
||||
const [currentTab, setCurrentTab] = useState("system")
|
||||
// copyKey is used to force remount AlertContent components with
|
||||
// new alert data after copying alerts from another system
|
||||
const [copyKey, setCopyKey] = useState(0)
|
||||
|
||||
const systemAlerts = alerts[system.id] ?? new Map()
|
||||
|
||||
// Systems that have at least one alert configured (excluding the current system)
|
||||
const systemsWithAlerts = useMemo(
|
||||
() => systems.filter((s) => s.id !== system.id && alerts[s.id]?.size),
|
||||
[systems, alerts, system.id]
|
||||
)
|
||||
|
||||
async function copyAlertsFromSystem(sourceSystemId: string) {
|
||||
const sourceAlerts = $alerts.get()[sourceSystemId]
|
||||
if (!sourceAlerts?.size) return
|
||||
try {
|
||||
const currentTargetAlerts = $alerts.get()[system.id] ?? new Map()
|
||||
// Alert names present on target but absent from source should be deleted
|
||||
const namesToDelete = Array.from(currentTargetAlerts.keys()).filter((name) => !sourceAlerts.has(name))
|
||||
await Promise.all([
|
||||
...Array.from(sourceAlerts.values()).map(({ name, value, min }) =>
|
||||
pb.send<{ success: boolean }>(endpoint, {
|
||||
method: "POST",
|
||||
body: { name, value, min, systems: [system.id], overwrite: true },
|
||||
requestKey: name,
|
||||
})
|
||||
),
|
||||
...namesToDelete.map((name) =>
|
||||
pb.send<{ success: boolean }>(endpoint, {
|
||||
method: "DELETE",
|
||||
body: { name, systems: [system.id] },
|
||||
requestKey: name,
|
||||
})
|
||||
),
|
||||
])
|
||||
// Optimistically update the store so components re-mount with correct data
|
||||
// before the realtime subscription event arrives.
|
||||
const newSystemAlerts = new Map<string, AlertRecord>()
|
||||
for (const alert of sourceAlerts.values()) {
|
||||
newSystemAlerts.set(alert.name, { ...alert, system: system.id, triggered: false })
|
||||
}
|
||||
$alerts.setKey(system.id, newSystemAlerts)
|
||||
setCopyKey((k) => k + 1)
|
||||
} catch (error) {
|
||||
failedUpdateToast(error)
|
||||
}
|
||||
}
|
||||
|
||||
// We need to keep a copy of alerts when we switch to global tab. If we always compare to
|
||||
// current alerts, it will only be updated when first checked, then won't be updated because
|
||||
// after that it exists.
|
||||
@@ -93,7 +141,8 @@ export const AlertDialogContent = memo(function AlertDialogContent({ system }: {
|
||||
</DialogDescription>
|
||||
</DialogHeader>
|
||||
<Tabs defaultValue="system" onValueChange={setCurrentTab}>
|
||||
<TabsList className="mb-1 -mt-0.5">
|
||||
<div className="flex items-center justify-between mb-1 -mt-0.5">
|
||||
<TabsList>
|
||||
<TabsTrigger value="system">
|
||||
<ServerIcon className="me-2 h-3.5 w-3.5" />
|
||||
<span className="truncate max-w-60">{system.name}</span>
|
||||
@@ -103,8 +152,26 @@ export const AlertDialogContent = memo(function AlertDialogContent({ system }: {
|
||||
<Trans>All Systems</Trans>
|
||||
</TabsTrigger>
|
||||
</TabsList>
|
||||
{systemsWithAlerts.length > 0 && currentTab === "system" && (
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button variant="ghost" size="sm" className="text-muted-foreground text-xs gap-1.5">
|
||||
<Trans context="Copy alerts from another system">Copy from</Trans>
|
||||
<ChevronDownIcon className="h-3.5 w-3.5" />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align="end" className="max-h-100 overflow-auto">
|
||||
{systemsWithAlerts.map((s) => (
|
||||
<DropdownMenuItem key={s.id} className="min-w-44" onSelect={() => copyAlertsFromSystem(s.id)}>
|
||||
{s.name}
|
||||
</DropdownMenuItem>
|
||||
))}
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
)}
|
||||
</div>
|
||||
<TabsContent value="system">
|
||||
<div className="grid gap-3">
|
||||
<div key={copyKey} className="grid gap-3">
|
||||
{alertKeys.map((name) => (
|
||||
<AlertContent
|
||||
key={name}
|
||||
|
||||
@@ -41,8 +41,8 @@ export default function AreaChartDefault({
|
||||
hideYAxis = false,
|
||||
filter,
|
||||
truncate = false,
|
||||
}: // logRender = false,
|
||||
{
|
||||
chartProps,
|
||||
}: {
|
||||
chartData: ChartData
|
||||
// biome-ignore lint/suspicious/noExplicitAny: accepts different data source types (systemStats or containerData)
|
||||
customData?: any[]
|
||||
@@ -62,13 +62,13 @@ export default function AreaChartDefault({
|
||||
hideYAxis?: boolean
|
||||
filter?: string
|
||||
truncate?: boolean
|
||||
// logRender?: boolean
|
||||
chartProps?: Omit<React.ComponentProps<typeof AreaChart>, "data" | "margin">
|
||||
}) {
|
||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||
const { isIntersecting, ref } = useIntersectionObserver({ freeze: false })
|
||||
const sourceData = customData ?? chartData.systemStats
|
||||
// Only update the rendered data while the chart is visible
|
||||
const [displayData, setDisplayData] = useState(sourceData)
|
||||
const [displayMaxToggled, setDisplayMaxToggled] = useState(maxToggled)
|
||||
|
||||
// Reduce chart redraws by only updating while visible or when chart time changes
|
||||
useEffect(() => {
|
||||
@@ -78,7 +78,10 @@ export default function AreaChartDefault({
|
||||
if (shouldUpdate) {
|
||||
setDisplayData(sourceData)
|
||||
}
|
||||
}, [displayData, isIntersecting, sourceData])
|
||||
if (isIntersecting && maxToggled !== displayMaxToggled) {
|
||||
setDisplayMaxToggled(maxToggled)
|
||||
}
|
||||
}, [displayData, displayMaxToggled, isIntersecting, maxToggled, sourceData])
|
||||
|
||||
// Use a stable key derived from data point identities and visual properties
|
||||
const areasKey = dataPoints?.map((d) => `${d.label}:${d.opacity}`).join("\0")
|
||||
@@ -106,14 +109,14 @@ export default function AreaChartDefault({
|
||||
/>
|
||||
)
|
||||
})
|
||||
}, [areasKey, maxToggled])
|
||||
}, [areasKey, displayMaxToggled])
|
||||
|
||||
return useMemo(() => {
|
||||
if (displayData.length === 0) {
|
||||
return null
|
||||
}
|
||||
// if (logRender) {
|
||||
// console.log("Rendered at", new Date(), "for", dataPoints?.at(0)?.label)
|
||||
// console.log("Rendered", dataPoints?.map((d) => d.label).join(", "), new Date())
|
||||
// }
|
||||
return (
|
||||
<ChartContainer
|
||||
@@ -128,6 +131,7 @@ export default function AreaChartDefault({
|
||||
accessibilityLayer
|
||||
data={displayData}
|
||||
margin={hideYAxis ? { ...chartMargin, left: 5 } : chartMargin}
|
||||
{...chartProps}
|
||||
>
|
||||
<CartesianGrid vertical={false} />
|
||||
{!hideYAxis && (
|
||||
@@ -142,7 +146,7 @@ export default function AreaChartDefault({
|
||||
axisLine={false}
|
||||
/>
|
||||
)}
|
||||
{xAxis(chartData)}
|
||||
{xAxis(chartData.chartTime, displayData.at(-1)?.created as number)}
|
||||
<ChartTooltip
|
||||
animationEasing="ease-out"
|
||||
animationDuration={150}
|
||||
@@ -163,5 +167,5 @@ export default function AreaChartDefault({
|
||||
</AreaChart>
|
||||
</ChartContainer>
|
||||
)
|
||||
}, [displayData, yAxisWidth, showTotal, filter])
|
||||
}, [displayData, yAxisWidth, filter, Areas])
|
||||
}
|
||||
|
||||
@@ -40,8 +40,9 @@ export default function LineChartDefault({
|
||||
hideYAxis = false,
|
||||
filter,
|
||||
truncate = false,
|
||||
}: // logRender = false,
|
||||
{
|
||||
chartProps,
|
||||
connectNulls,
|
||||
}: {
|
||||
chartData: ChartData
|
||||
// biome-ignore lint/suspicious/noExplicitAny: accepts different data source types (systemStats or containerData)
|
||||
customData?: any[]
|
||||
@@ -61,13 +62,14 @@ export default function LineChartDefault({
|
||||
hideYAxis?: boolean
|
||||
filter?: string
|
||||
truncate?: boolean
|
||||
// logRender?: boolean
|
||||
chartProps?: Omit<React.ComponentProps<typeof LineChart>, "data" | "margin">
|
||||
connectNulls?: boolean
|
||||
}) {
|
||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||
const { isIntersecting, ref } = useIntersectionObserver({ freeze: false })
|
||||
const sourceData = customData ?? chartData.systemStats
|
||||
// Only update the rendered data while the chart is visible
|
||||
const [displayData, setDisplayData] = useState(sourceData)
|
||||
const [displayMaxToggled, setDisplayMaxToggled] = useState(maxToggled)
|
||||
|
||||
// Reduce chart redraws by only updating while visible or when chart time changes
|
||||
useEffect(() => {
|
||||
@@ -77,7 +79,10 @@ export default function LineChartDefault({
|
||||
if (shouldUpdate) {
|
||||
setDisplayData(sourceData)
|
||||
}
|
||||
}, [displayData, isIntersecting, sourceData])
|
||||
if (isIntersecting && maxToggled !== displayMaxToggled) {
|
||||
setDisplayMaxToggled(maxToggled)
|
||||
}
|
||||
}, [displayData, displayMaxToggled, isIntersecting, maxToggled, sourceData])
|
||||
|
||||
// Use a stable key derived from data point identities and visual properties
|
||||
const linesKey = dataPoints?.map((d) => `${d.label}:${d.strokeOpacity ?? ""}`).join("\0")
|
||||
@@ -101,18 +106,19 @@ export default function LineChartDefault({
|
||||
isAnimationActive={false}
|
||||
// stackId={dataPoint.stackId}
|
||||
order={dataPoint.order || i}
|
||||
// activeDot={dataPoint.activeDot ?? true}
|
||||
activeDot={dataPoint.activeDot ?? true}
|
||||
connectNulls={connectNulls}
|
||||
/>
|
||||
)
|
||||
})
|
||||
}, [linesKey, maxToggled])
|
||||
}, [linesKey, displayMaxToggled])
|
||||
|
||||
return useMemo(() => {
|
||||
if (displayData.length === 0) {
|
||||
return null
|
||||
}
|
||||
// if (logRender) {
|
||||
// console.log("Rendered at", new Date(), "for", dataPoints?.at(0)?.label)
|
||||
// console.log("Rendered", dataPoints?.map((d) => d.label).join(", "), new Date())
|
||||
// }
|
||||
return (
|
||||
<ChartContainer
|
||||
@@ -127,6 +133,7 @@ export default function LineChartDefault({
|
||||
accessibilityLayer
|
||||
data={displayData}
|
||||
margin={hideYAxis ? { ...chartMargin, left: 5 } : chartMargin}
|
||||
{...chartProps}
|
||||
>
|
||||
<CartesianGrid vertical={false} />
|
||||
{!hideYAxis && (
|
||||
@@ -141,7 +148,7 @@ export default function LineChartDefault({
|
||||
axisLine={false}
|
||||
/>
|
||||
)}
|
||||
{xAxis(chartData)}
|
||||
{xAxis(chartData.chartTime, displayData.at(-1)?.created as number)}
|
||||
<ChartTooltip
|
||||
animationEasing="ease-out"
|
||||
animationDuration={150}
|
||||
@@ -162,5 +169,5 @@ export default function LineChartDefault({
|
||||
</LineChart>
|
||||
</ChartContainer>
|
||||
)
|
||||
}, [displayData, yAxisWidth, showTotal, filter, chartData.chartTime])
|
||||
}, [displayData, yAxisWidth, filter, Lines])
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ import { Label } from "@/components/ui/label"
|
||||
import { pb } from "@/lib/api"
|
||||
import { $authenticated } from "@/lib/stores"
|
||||
import { cn } from "@/lib/utils"
|
||||
import { $router, Link, prependBasePath } from "../router"
|
||||
import { $router, Link, basePath, prependBasePath } from "../router"
|
||||
import { toast } from "../ui/use-toast"
|
||||
import { OtpInputForm } from "./otp-forms"
|
||||
|
||||
@@ -37,8 +37,7 @@ const RegisterSchema = v.looseObject({
|
||||
passwordConfirm: passwordSchema,
|
||||
})
|
||||
|
||||
export const showLoginFaliedToast = (description?: string) => {
|
||||
description ||= t`Please check your credentials and try again`
|
||||
export const showLoginFaliedToast = (description = t`Please check your credentials and try again`) => {
|
||||
toast({
|
||||
title: t`Login attempt failed`,
|
||||
description,
|
||||
@@ -130,10 +129,6 @@ export function UserAuthForm({
|
||||
[isFirstRun]
|
||||
)
|
||||
|
||||
if (!authMethods) {
|
||||
return null
|
||||
}
|
||||
|
||||
const authProviders = authMethods.oauth2.providers ?? []
|
||||
const oauthEnabled = authMethods.oauth2.enabled && authProviders.length > 0
|
||||
const passwordEnabled = authMethods.password.enabled
|
||||
@@ -142,6 +137,12 @@ export function UserAuthForm({
|
||||
|
||||
function loginWithOauth(provider: AuthProviderInfo, forcePopup = false) {
|
||||
setIsOauthLoading(true)
|
||||
|
||||
if (globalThis.BESZEL.OAUTH_DISABLE_POPUP) {
|
||||
redirectToOauthProvider(provider)
|
||||
return
|
||||
}
|
||||
|
||||
const oAuthOpts: OAuth2AuthConfig = {
|
||||
provider: provider.name,
|
||||
}
|
||||
@@ -150,10 +151,7 @@ export function UserAuthForm({
|
||||
const authWindow = window.open()
|
||||
if (!authWindow) {
|
||||
setIsOauthLoading(false)
|
||||
toast({
|
||||
title: t`Error`,
|
||||
description: t`Please enable pop-ups for this site`,
|
||||
})
|
||||
showLoginFaliedToast(t`Please enable pop-ups for this site`)
|
||||
return
|
||||
}
|
||||
oAuthOpts.urlCallback = (url) => {
|
||||
@@ -171,16 +169,57 @@ export function UserAuthForm({
|
||||
})
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
// auto login if password disabled and only one auth provider
|
||||
if (!passwordEnabled && authProviders.length === 1 && !sessionStorage.getItem("lo")) {
|
||||
// Add a small timeout to ensure browser is ready to handle popups
|
||||
setTimeout(() => {
|
||||
loginWithOauth(authProviders[0], true)
|
||||
}, 300)
|
||||
/**
|
||||
* Redirects the user to the OAuth provider's authentication page in the same window.
|
||||
* Requires the app's base URL to be registered as a redirect URI with the OAuth provider.
|
||||
*/
|
||||
function redirectToOauthProvider(provider: AuthProviderInfo) {
|
||||
const url = new URL(provider.authURL)
|
||||
// url.searchParams.set("redirect_uri", `${window.location.origin}${basePath}`)
|
||||
sessionStorage.setItem("provider", JSON.stringify(provider))
|
||||
window.location.href = url.toString()
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
// handle redirect-based OAuth callback if we have a code
|
||||
const params = new URLSearchParams(window.location.search)
|
||||
const code = params.get("code")
|
||||
if (code) {
|
||||
const state = params.get("state")
|
||||
const provider: AuthProviderInfo = JSON.parse(sessionStorage.getItem("provider") ?? "{}")
|
||||
if (!state || provider.state !== state) {
|
||||
showLoginFaliedToast()
|
||||
} else {
|
||||
setIsOauthLoading(true)
|
||||
window.history.replaceState({}, "", window.location.pathname)
|
||||
pb.collection("users")
|
||||
.authWithOAuth2Code(provider.name, code, provider.codeVerifier, `${window.location.origin}${basePath}`)
|
||||
.then(() => $authenticated.set(pb.authStore.isValid))
|
||||
.catch((e: unknown) => showLoginFaliedToast((e as Error).message))
|
||||
.finally(() => setIsOauthLoading(false))
|
||||
}
|
||||
}
|
||||
|
||||
// auto login if password disabled and only one auth provider
|
||||
if (!code && !passwordEnabled && authProviders.length === 1 && !sessionStorage.getItem("lo")) {
|
||||
// Add a small timeout to ensure browser is ready to handle popups
|
||||
setTimeout(() => loginWithOauth(authProviders[0], false), 300)
|
||||
return
|
||||
}
|
||||
|
||||
// refresh auth if not in above states (required for trusted auth header)
|
||||
pb.collection("users")
|
||||
.authRefresh()
|
||||
.then((res) => {
|
||||
pb.authStore.save(res.token, res.record)
|
||||
$authenticated.set(!!pb.authStore.isValid)
|
||||
})
|
||||
}, [])
|
||||
|
||||
if (!authMethods) {
|
||||
return null
|
||||
}
|
||||
|
||||
if (otpId && mfaId) {
|
||||
return <OtpInputForm otpId={otpId} mfaId={mfaId} />
|
||||
}
|
||||
@@ -248,7 +287,7 @@ export function UserAuthForm({
|
||||
)}
|
||||
<div className="sr-only">
|
||||
{/* honeypot */}
|
||||
<label htmlFor="website"></label>
|
||||
<label htmlFor="website">Website</label>
|
||||
<input
|
||||
id="website"
|
||||
type="text"
|
||||
|
||||
@@ -1,28 +1,39 @@
|
||||
import { t } from "@lingui/core/macro"
|
||||
import { MoonStarIcon, SunIcon } from "lucide-react"
|
||||
import { MoonStarIcon, SunIcon, SunMoonIcon } from "lucide-react"
|
||||
import { useTheme } from "@/components/theme-provider"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Tooltip, TooltipContent, TooltipTrigger } from "./ui/tooltip"
|
||||
import { Trans } from "@lingui/react/macro"
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
const themes = ["light", "dark", "system"] as const
|
||||
const icons = [SunIcon, MoonStarIcon, SunMoonIcon] as const
|
||||
|
||||
export function ModeToggle() {
|
||||
const { theme, setTheme } = useTheme()
|
||||
|
||||
const currentIndex = themes.indexOf(theme)
|
||||
const Icon = icons[currentIndex]
|
||||
|
||||
return (
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<Button
|
||||
variant={"ghost"}
|
||||
size="icon"
|
||||
aria-label={t`Toggle theme`}
|
||||
onClick={() => setTheme(theme === "dark" ? "light" : "dark")}
|
||||
aria-label={t`Switch theme`}
|
||||
onClick={() => setTheme(themes[(currentIndex + 1) % themes.length])}
|
||||
>
|
||||
<SunIcon className="h-[1.2rem] w-[1.2rem] transition-all -rotate-90 dark:opacity-0 dark:rotate-0" />
|
||||
<MoonStarIcon className="absolute h-[1.2rem] w-[1.2rem] transition-all opacity-0 -rotate-90 dark:opacity-100 dark:rotate-0" />
|
||||
<Icon
|
||||
className={cn(
|
||||
"animate-in fade-in spin-in-[-30deg] duration-200",
|
||||
currentIndex === 2 ? "size-[1.35rem]" : "size-[1.2rem]"
|
||||
)}
|
||||
/>
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
<Trans>Toggle theme</Trans>
|
||||
<Trans>Switch theme</Trans>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
)
|
||||
|
||||
@@ -8,6 +8,7 @@ import {
|
||||
LogOutIcon,
|
||||
LogsIcon,
|
||||
MenuIcon,
|
||||
NetworkIcon,
|
||||
PlusIcon,
|
||||
SearchIcon,
|
||||
ServerIcon,
|
||||
@@ -63,7 +64,7 @@ export default function Navbar() {
|
||||
className="p-2 ps-0 me-3 group"
|
||||
onMouseEnter={runOnce(() => import("@/components/routes/home"))}
|
||||
>
|
||||
<Logo className="h-[1.1rem] md:h-5 fill-foreground" />
|
||||
<Logo className="h-[1.2rem] md:h-5 fill-foreground" />
|
||||
</Link>
|
||||
<Button
|
||||
variant="outline"
|
||||
@@ -109,6 +110,10 @@ export default function Navbar() {
|
||||
<HardDriveIcon className="h-4 w-4 me-2.5" strokeWidth={1.5} />
|
||||
<span>S.M.A.R.T.</span>
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem onClick={() => navigate(getPagePath($router, "probes"))} className="flex items-center">
|
||||
<NetworkIcon className="h-4 w-4 me-2.5" strokeWidth={1.5} />
|
||||
<Trans>Network Probes</Trans>
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
onClick={() => navigate(getPagePath($router, "settings", { name: "general" }))}
|
||||
className="flex items-center"
|
||||
@@ -125,6 +130,7 @@ export default function Navbar() {
|
||||
<DropdownMenuSubContent>{AdminLinks}</DropdownMenuSubContent>
|
||||
</DropdownMenuSub>
|
||||
)}
|
||||
{!isReadOnlyUser() && (
|
||||
<DropdownMenuItem
|
||||
className="flex items-center"
|
||||
onSelect={() => {
|
||||
@@ -134,6 +140,7 @@ export default function Navbar() {
|
||||
<PlusIcon className="h-4 w-4 me-2.5" />
|
||||
<Trans>Add {{ foo: systemTranslation }}</Trans>
|
||||
</DropdownMenuItem>
|
||||
)}
|
||||
</DropdownMenuGroup>
|
||||
<DropdownMenuSeparator />
|
||||
<DropdownMenuGroup>
|
||||
@@ -178,6 +185,21 @@ export default function Navbar() {
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>S.M.A.R.T.</TooltipContent>
|
||||
</Tooltip>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<Link
|
||||
href={getPagePath($router, "probes")}
|
||||
className={cn("hidden md:grid", buttonVariants({ variant: "ghost", size: "icon" }))}
|
||||
aria-label="Network Probes"
|
||||
onMouseEnter={() => import("@/components/routes/probes")}
|
||||
>
|
||||
<NetworkIcon className="h-[1.2rem] w-[1.2rem]" strokeWidth={1.5} />
|
||||
</Link>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
<Trans>Network Probes</Trans>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
<LangToggle />
|
||||
<ModeToggle />
|
||||
<Tooltip>
|
||||
@@ -217,10 +239,12 @@ export default function Navbar() {
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
{!isReadOnlyUser() && (
|
||||
<Button variant="outline" className="flex gap-1 ms-2" onClick={() => setAddSystemDialogOpen(true)}>
|
||||
<PlusIcon className="h-4 w-4 -ms-1" />
|
||||
<Trans>Add {{ foo: systemTranslation }}</Trans>
|
||||
</Button>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -0,0 +1,238 @@
|
||||
import type { CellContext, Column, ColumnDef } from "@tanstack/react-table"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { cn, decimalString, hourWithSeconds } from "@/lib/utils"
|
||||
import {
|
||||
GlobeIcon,
|
||||
TimerIcon,
|
||||
WifiOffIcon,
|
||||
Trash2Icon,
|
||||
ArrowLeftRightIcon,
|
||||
MoreHorizontalIcon,
|
||||
ServerIcon,
|
||||
ClockIcon,
|
||||
NetworkIcon,
|
||||
RefreshCwIcon,
|
||||
} from "lucide-react"
|
||||
import { t } from "@lingui/core/macro"
|
||||
import type { NetworkProbeRecord } from "@/types"
|
||||
import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu"
|
||||
import { Trans } from "@lingui/react/macro"
|
||||
import { pb } from "@/lib/api"
|
||||
import { toast } from "../ui/use-toast"
|
||||
import { $allSystemsById } from "@/lib/stores"
|
||||
import { useStore } from "@nanostores/react"
|
||||
|
||||
const protocolColors: Record<string, string> = {
|
||||
icmp: "bg-blue-500/15 text-blue-400",
|
||||
tcp: "bg-purple-500/15 text-purple-400",
|
||||
http: "bg-green-500/15 text-green-400",
|
||||
}
|
||||
|
||||
async function deleteProbe(id: string) {
|
||||
try {
|
||||
await pb.collection("network_probes").delete(id)
|
||||
} catch (err: unknown) {
|
||||
toast({ variant: "destructive", title: t`Error`, description: (err as Error)?.message })
|
||||
}
|
||||
}
|
||||
|
||||
export function getProbeColumns(longestName = 0, longestTarget = 0): ColumnDef<NetworkProbeRecord>[] {
|
||||
return [
|
||||
{
|
||||
id: "name",
|
||||
sortingFn: (a, b) => (a.original.name || a.original.target).localeCompare(b.original.name || b.original.target),
|
||||
accessorFn: (record) => record.name || record.target,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Name`} Icon={NetworkIcon} />,
|
||||
cell: ({ getValue }) => (
|
||||
<div className="ms-1.5 max-w-40 block truncate tabular-nums" style={{ width: `${longestName / 1.05}ch` }}>
|
||||
{getValue() as string}
|
||||
</div>
|
||||
),
|
||||
},
|
||||
{
|
||||
id: "system",
|
||||
accessorFn: (record) => record.system,
|
||||
sortingFn: (a, b) => {
|
||||
const allSystems = $allSystemsById.get()
|
||||
const systemNameA = allSystems[a.original.system]?.name ?? ""
|
||||
const systemNameB = allSystems[b.original.system]?.name ?? ""
|
||||
return systemNameA.localeCompare(systemNameB)
|
||||
},
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`System`} Icon={ServerIcon} />,
|
||||
cell: ({ getValue }) => {
|
||||
const allSystems = useStore($allSystemsById)
|
||||
return <span className="ms-1.5 xl:w-20 block truncate">{allSystems[getValue() as string]?.name ?? ""}</span>
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "target",
|
||||
sortingFn: (a, b) => a.original.target.localeCompare(b.original.target),
|
||||
accessorFn: (record) => record.target,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Target`} Icon={GlobeIcon} />,
|
||||
cell: ({ getValue }) => (
|
||||
<div className="ms-1.5 tabular-nums block truncate max-w-44" style={{ width: `${longestTarget / 1.05}ch` }}>
|
||||
{getValue() as string}
|
||||
</div>
|
||||
),
|
||||
},
|
||||
{
|
||||
id: "protocol",
|
||||
accessorFn: (record) => record.protocol,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Protocol`} Icon={ArrowLeftRightIcon} />,
|
||||
cell: ({ getValue }) => {
|
||||
const protocol = getValue() as string
|
||||
return (
|
||||
<span className={cn("ms-1.5 px-2 py-0.5 rounded text-xs font-medium uppercase", protocolColors[protocol])}>
|
||||
{protocol}
|
||||
</span>
|
||||
)
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "interval",
|
||||
accessorFn: (record) => record.interval,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Interval`} Icon={RefreshCwIcon} />,
|
||||
cell: ({ getValue }) => <span className="ms-1.5 tabular-nums">{getValue() as number}s</span>,
|
||||
},
|
||||
{
|
||||
id: "res",
|
||||
accessorFn: (record) => record.res,
|
||||
invertSorting: true,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Response`} Icon={TimerIcon} />,
|
||||
cell: responseTimeCell,
|
||||
},
|
||||
{
|
||||
id: "res1h",
|
||||
accessorFn: (record) => record.resAvg1h,
|
||||
invertSorting: true,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Avg 1h`} Icon={TimerIcon} />,
|
||||
cell: responseTimeCell,
|
||||
},
|
||||
{
|
||||
id: "max1h",
|
||||
accessorFn: (record) => record.resMax1h,
|
||||
invertSorting: true,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Max 1h`} Icon={TimerIcon} />,
|
||||
cell: responseTimeCell,
|
||||
},
|
||||
{
|
||||
id: "min1h",
|
||||
accessorFn: (record) => record.resMin1h,
|
||||
invertSorting: true,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Min 1h`} Icon={TimerIcon} />,
|
||||
cell: responseTimeCell,
|
||||
},
|
||||
{
|
||||
id: "loss",
|
||||
accessorFn: (record) => record.loss1h,
|
||||
invertSorting: true,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Loss 1h`} Icon={WifiOffIcon} />,
|
||||
cell: ({ row }) => {
|
||||
const { loss1h, res } = row.original
|
||||
if (loss1h === undefined || (!res && !loss1h)) {
|
||||
return <span className="ms-1.5 text-muted-foreground">-</span>
|
||||
}
|
||||
let color = "bg-green-500"
|
||||
if (loss1h) {
|
||||
color = loss1h > 20 ? "bg-red-500" : "bg-yellow-500"
|
||||
}
|
||||
return (
|
||||
<span className="ms-1.5 tabular-nums flex gap-2 items-center">
|
||||
<span className={cn("shrink-0 size-2 rounded-full", color)} />
|
||||
{loss1h}%
|
||||
</span>
|
||||
)
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "updated",
|
||||
invertSorting: true,
|
||||
accessorFn: (record) => record.updated,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Updated`} Icon={ClockIcon} />,
|
||||
cell: ({ getValue }) => {
|
||||
const timestamp = getValue() as number
|
||||
return <span className="ms-1.5 tabular-nums">{hourWithSeconds(new Date(timestamp).toISOString())}</span>
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "actions",
|
||||
enableSorting: false,
|
||||
enableHiding: false,
|
||||
header: () => null,
|
||||
size: 40,
|
||||
cell: ({ row }) => (
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button
|
||||
variant="ghost"
|
||||
size="icon"
|
||||
className="size-10"
|
||||
onClick={(event) => event.stopPropagation()}
|
||||
onMouseDown={(event) => event.stopPropagation()}
|
||||
>
|
||||
<span className="sr-only">
|
||||
<Trans>Open menu</Trans>
|
||||
</span>
|
||||
<MoreHorizontalIcon className="w-5" />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align="end" onClick={(event) => event.stopPropagation()}>
|
||||
<DropdownMenuItem
|
||||
onClick={(event) => {
|
||||
event.stopPropagation()
|
||||
deleteProbe(row.original.id)
|
||||
}}
|
||||
>
|
||||
<Trash2Icon className="me-2.5 size-4" />
|
||||
<Trans>Delete</Trans>
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
),
|
||||
},
|
||||
]
|
||||
}
|
||||
function responseTimeCell(cell: CellContext<NetworkProbeRecord, unknown>) {
|
||||
const val = cell.getValue() as number | undefined
|
||||
if (!val) {
|
||||
return <span className="ms-1.5 text-muted-foreground">-</span>
|
||||
}
|
||||
let color = "bg-green-500"
|
||||
if (val > 200) {
|
||||
color = "bg-yellow-500"
|
||||
}
|
||||
if (val > 2000) {
|
||||
color = "bg-red-500"
|
||||
}
|
||||
return (
|
||||
<span className="ms-1.5 tabular-nums flex gap-2 items-center">
|
||||
<span className={cn("shrink-0 size-2 rounded-full", color)} />
|
||||
{decimalString(val, val < 100 ? 2 : 1).toLocaleString()}ms
|
||||
</span>
|
||||
)
|
||||
}
|
||||
|
||||
function HeaderButton({
|
||||
column,
|
||||
name,
|
||||
Icon,
|
||||
}: {
|
||||
column: Column<NetworkProbeRecord>
|
||||
name: string
|
||||
Icon: React.ElementType
|
||||
}) {
|
||||
const isSorted = column.getIsSorted()
|
||||
return (
|
||||
<Button
|
||||
className={cn(
|
||||
"h-9 px-3 flex items-center gap-2 duration-50",
|
||||
isSorted && "bg-accent/70 light:bg-accent text-accent-foreground/90"
|
||||
)}
|
||||
variant="ghost"
|
||||
onClick={() => column.toggleSorting(column.getIsSorted() === "asc")}
|
||||
>
|
||||
{Icon && <Icon className="size-4" />}
|
||||
{name}
|
||||
</Button>
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,354 @@
|
||||
import { t } from "@lingui/core/macro"
|
||||
import { Trans } from "@lingui/react/macro"
|
||||
import {
|
||||
type ColumnFiltersState,
|
||||
type ColumnDef,
|
||||
flexRender,
|
||||
getCoreRowModel,
|
||||
getFilteredRowModel,
|
||||
getSortedRowModel,
|
||||
type Row,
|
||||
type RowSelectionState,
|
||||
type SortingState,
|
||||
type Table as TableType,
|
||||
useReactTable,
|
||||
type VisibilityState,
|
||||
} from "@tanstack/react-table"
|
||||
import { useVirtualizer, type VirtualItem } from "@tanstack/react-virtual"
|
||||
import {
|
||||
AlertDialog,
|
||||
AlertDialogAction,
|
||||
AlertDialogCancel,
|
||||
AlertDialogContent,
|
||||
AlertDialogDescription,
|
||||
AlertDialogFooter,
|
||||
AlertDialogHeader,
|
||||
AlertDialogTitle,
|
||||
AlertDialogTrigger,
|
||||
} from "@/components/ui/alert-dialog"
|
||||
import { Button, buttonVariants } from "@/components/ui/button"
|
||||
import { memo, useMemo, useRef, useState } from "react"
|
||||
import { getProbeColumns } from "@/components/network-probes-table/network-probes-columns"
|
||||
import { Card, CardHeader, CardTitle } from "@/components/ui/card"
|
||||
import { Checkbox } from "@/components/ui/checkbox"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table"
|
||||
import { useToast } from "@/components/ui/use-toast"
|
||||
import { isReadOnlyUser } from "@/lib/api"
|
||||
import { pb } from "@/lib/api"
|
||||
import { $allSystemsById } from "@/lib/stores"
|
||||
import { cn, getVisualStringWidth, useBrowserStorage } from "@/lib/utils"
|
||||
import { Trash2Icon } from "lucide-react"
|
||||
import type { NetworkProbeRecord } from "@/types"
|
||||
import { AddProbeDialog } from "./probe-dialog"
|
||||
|
||||
/**
 * Card containing the network probes data table.
 *
 * When `systemId` is set the table is scoped to that system (the "system"
 * column is hidden and new probes default to it); without it the table shows
 * probes across all systems. Read-only users get no selection column, no
 * actions column, and no add/delete controls.
 */
export default function NetworkProbesTableNew({
	systemId,
	probes,
}: {
	systemId?: string
	probes: NetworkProbeRecord[]
}) {
	// Sort state persists per-view (scoped vs. global) in sessionStorage.
	const [sorting, setSorting] = useBrowserStorage<SortingState>(
		`sort-np-${systemId ? 1 : 0}`,
		[{ id: systemId ? "name" : "system", desc: false }],
		sessionStorage
	)
	const [columnFilters, setColumnFilters] = useState<ColumnFiltersState>([])
	const [columnVisibility, setColumnVisibility] = useState<VisibilityState>({})
	const [rowSelection, setRowSelection] = useState<RowSelectionState>({})
	const [globalFilter, setGlobalFilter] = useState("")
	const [deleteOpen, setDeleteOpen] = useState(false)
	const { toast } = useToast()
	const canManageProbes = !isReadOnlyUser()

	// Widest name/target (visual width) across all probes, used by
	// getProbeColumns to size those columns. Recomputed only when probes change.
	const { longestName, longestTarget } = useMemo(() => {
		let longestName = 0
		let longestTarget = 0
		for (const p of probes) {
			longestName = Math.max(longestName, getVisualStringWidth(p.name || p.target))
			longestTarget = Math.max(longestTarget, getVisualStringWidth(p.target))
		}
		return { longestName, longestTarget }
	}, [probes])

	// Filter columns based on whether systemId is provided
	const columns = useMemo(() => {
		let columns = getProbeColumns(longestName, longestTarget)
		columns = systemId ? columns.filter((col) => col.id !== "system") : columns
		columns = canManageProbes ? columns : columns.filter((col) => col.id !== "actions")
		// Read-only users get no row-selection checkbox column.
		if (!canManageProbes) {
			return columns
		}

		// Leading checkbox column used for bulk selection/deletion.
		const selectionColumn: ColumnDef<NetworkProbeRecord> = {
			id: "select",
			header: ({ table }) => (
				<Checkbox
					className="ms-2"
					checked={table.getIsAllRowsSelected() || (table.getIsSomeRowsSelected() && "indeterminate")}
					onCheckedChange={(value) => table.toggleAllRowsSelected(!!value)}
					aria-label={t`Select all`}
				/>
			),
			cell: ({ row }) => (
				<Checkbox
					checked={row.getIsSelected()}
					onCheckedChange={(value) => row.toggleSelected(!!value)}
					aria-label={t`Select row`}
				/>
			),
			enableSorting: false,
			enableHiding: false,
			size: 44,
		}

		return [selectionColumn, ...columns]
	}, [systemId, longestName, longestTarget, canManageProbes])

	// Deletes all selected probes, flushing a PocketBase batch every 20
	// requests (plus one final partial flush). Row keys are record ids because
	// of getRowId below.
	const handleBulkDelete = async () => {
		setDeleteOpen(false)
		const selectedIds = Object.keys(rowSelection)
		if (!selectedIds.length) {
			return
		}

		try {
			let batch = pb.createBatch()
			let inBatch = 0
			for (const id of selectedIds) {
				batch.collection("network_probes").delete(id)
				inBatch++
				if (inBatch >= 20) {
					await batch.send()
					batch = pb.createBatch()
					inBatch = 0
				}
			}
			if (inBatch) {
				await batch.send()
			}
			table.resetRowSelection()
		} catch (err: unknown) {
			toast({
				variant: "destructive",
				title: t`Error`,
				description: (err as Error)?.message || t`Failed to delete probes.`,
			})
		}
	}

	const table = useReactTable({
		data: probes,
		columns,
		// Use the record id as the row id so rowSelection keys are record ids.
		getRowId: (row) => row.id,
		getCoreRowModel: getCoreRowModel(),
		getSortedRowModel: getSortedRowModel(),
		getFilteredRowModel: getFilteredRowModel(),
		onSortingChange: setSorting,
		onColumnFiltersChange: setColumnFilters,
		onColumnVisibilityChange: setColumnVisibility,
		onRowSelectionChange: setRowSelection,
		defaultColumn: {
			sortUndefined: "last",
			size: 900,
			minSize: 0,
		},
		state: {
			sorting,
			columnFilters,
			columnVisibility,
			rowSelection,
			globalFilter,
		},
		onGlobalFilterChange: setGlobalFilter,
		// Matches when every space-separated search term appears in the
		// concatenation of name, target, protocol, and the system's name.
		globalFilterFn: (row, _columnId, filterValue) => {
			const probe = row.original
			const systemName = $allSystemsById.get()[probe.system]?.name ?? ""
			const searchString = `${probe.name}${probe.target}${probe.protocol}${systemName}`.toLocaleLowerCase()
			return (filterValue as string)
				.toLowerCase()
				.split(" ")
				.every((term) => searchString.includes(term))
		},
	})

	const rows = table.getRowModel().rows
	const visibleColumns = table.getVisibleLeafColumns()

	return (
		<Card className="@container w-full px-3 py-5 sm:py-6 sm:px-6">
			<CardHeader className="p-0 mb-3 sm:mb-4">
				<div className="grid md:flex gap-x-5 gap-y-3 w-full items-end">
					<div className="px-2 sm:px-1">
						<CardTitle className="mb-2">
							<Trans>Network Probes</Trans>
						</CardTitle>
						<div className="text-sm text-muted-foreground flex items-center flex-wrap">
							<Trans>Response time monitoring from agents.</Trans>
						</div>
					</div>
					<div className="md:ms-auto flex items-center gap-2">
						{/* Bulk delete button + confirmation; pinned to the bottom of the
						    viewport on small screens, inline on md and up. */}
						{canManageProbes && table.getFilteredSelectedRowModel().rows.length > 0 && (
							<div className="fixed bottom-0 left-0 w-full p-4 grid grid-cols-1 items-center gap-4 z-50 backdrop-blur-md shrink-0 md:static md:p-0 md:w-auto md:gap-3">
								<AlertDialog open={deleteOpen} onOpenChange={setDeleteOpen}>
									<AlertDialogTrigger asChild>
										<Button variant="destructive" className="h-9 shrink-0">
											<Trash2Icon className="size-4 shrink-0" />
											<span className="ms-1">
												<Trans>Delete</Trans>
											</span>
										</Button>
									</AlertDialogTrigger>
									<AlertDialogContent>
										<AlertDialogHeader>
											<AlertDialogTitle>
												<Trans>Are you sure?</Trans>
											</AlertDialogTitle>
											<AlertDialogDescription>
												<Trans>This will permanently delete all selected records from the database.</Trans>
											</AlertDialogDescription>
										</AlertDialogHeader>
										<AlertDialogFooter>
											<AlertDialogCancel>
												<Trans>Cancel</Trans>
											</AlertDialogCancel>
											<AlertDialogAction
												className={cn(buttonVariants({ variant: "destructive" }))}
												onClick={handleBulkDelete}
											>
												<Trans>Continue</Trans>
											</AlertDialogAction>
										</AlertDialogFooter>
									</AlertDialogContent>
								</AlertDialog>
							</div>
						)}
						{probes.length > 0 && (
							<Input
								placeholder={t`Filter...`}
								value={globalFilter}
								onChange={(e) => setGlobalFilter(e.target.value)}
								className="ms-auto px-4 w-full max-w-full md:w-50"
							/>
						)}
						{canManageProbes ? <AddProbeDialog systemId={systemId} /> : null}
					</div>
				</div>
			</CardHeader>
			<div className="rounded-md">
				<NetworkProbesTable table={table} rows={rows} colLength={visibleColumns.length} rowSelection={rowSelection} />
			</div>
		</Card>
	)
}
|
||||
|
||||
/**
 * Virtualized table body for network probes.
 *
 * Memoized so it only re-renders when the row model changes; `rowSelection`
 * is accepted (but unused) purely so selection changes bust the memo and
 * update row highlight state.
 */
const NetworkProbesTable = memo(function NetworkProbeTable({
	table,
	rows,
	colLength,
	rowSelection: _rowSelection,
}: {
	table: TableType<NetworkProbeRecord>
	rows: Row<NetworkProbeRecord>[]
	colLength: number
	rowSelection: RowSelectionState
}) {
	// The virtualizer will need a reference to the scrollable container element
	const scrollRef = useRef<HTMLDivElement>(null)

	const virtualizer = useVirtualizer<HTMLDivElement, HTMLTableRowElement>({
		count: rows.length,
		estimateSize: () => 54,
		getScrollElement: () => scrollRef.current,
		overscan: 5,
	})
	const virtualRows = virtualizer.getVirtualItems()

	// FIX: the original expression `virtualRows[0]?.start ?? 0 - scrollMargin`
	// parsed as `start ?? (0 - scrollMargin)` because `-` binds tighter than
	// `??`, so the scroll margin was never subtracted when rows existed.
	const paddingTop = Math.max(0, (virtualRows[0]?.start ?? 0) - virtualizer.options.scrollMargin)
	const paddingBottom = Math.max(0, virtualizer.getTotalSize() - (virtualRows[virtualRows.length - 1]?.end ?? 0))

	return (
		<div
			className={cn(
				"h-min max-h-[calc(100dvh-17rem)] max-w-full relative overflow-auto border rounded-md",
				// don't set min height if there are less than 2 rows, do set if we need to display the empty state
				(!rows.length || rows.length > 2) && "min-h-50"
			)}
			ref={scrollRef}
		>
			{/* add header height to table size */}
			<div style={{ height: `${virtualizer.getTotalSize() + 48}px`, paddingTop, paddingBottom }}>
				<table className="text-sm w-full h-full text-nowrap">
					<NetworkProbeTableHead table={table} />
					<TableBody>
						{rows.length ? (
							virtualRows.map((virtualRow) => {
								const row = rows[virtualRow.index]
								return (
									<NetworkProbeTableRow
										key={row.id}
										row={row}
										virtualRow={virtualRow}
										isSelected={row.getIsSelected()}
									/>
								)
							})
						) : (
							<TableRow>
								<TableCell colSpan={colLength} className="h-37 text-center pointer-events-none">
									<Trans>No results.</Trans>
								</TableCell>
							</TableRow>
						)}
					</TableBody>
				</table>
			</div>
		</div>
	)
})
|
||||
|
||||
function NetworkProbeTableHead({ table }: { table: TableType<NetworkProbeRecord> }) {
|
||||
return (
|
||||
<TableHeader className="sticky top-0 z-50 w-full border-b-2">
|
||||
{table.getHeaderGroups().map((headerGroup) => (
|
||||
<tr key={headerGroup.id}>
|
||||
{headerGroup.headers.map((header) => {
|
||||
return (
|
||||
<TableHead className="px-2" key={header.id}>
|
||||
{header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())}
|
||||
</TableHead>
|
||||
)
|
||||
})}
|
||||
</tr>
|
||||
))}
|
||||
</TableHeader>
|
||||
)
|
||||
}
|
||||
|
||||
const NetworkProbeTableRow = memo(function NetworkProbeTableRow({
|
||||
row,
|
||||
virtualRow,
|
||||
isSelected,
|
||||
}: {
|
||||
row: Row<NetworkProbeRecord>
|
||||
virtualRow: VirtualItem
|
||||
isSelected: boolean
|
||||
}) {
|
||||
return (
|
||||
<TableRow data-state={isSelected && "selected"} className="transition-opacity">
|
||||
{row.getVisibleCells().map((cell) => (
|
||||
<TableCell
|
||||
key={cell.id}
|
||||
className="py-0"
|
||||
style={{
|
||||
width: `${cell.column.getSize()}px`,
|
||||
height: virtualRow.size,
|
||||
}}
|
||||
>
|
||||
{flexRender(cell.column.columnDef.cell, cell.getContext())}
|
||||
</TableCell>
|
||||
))}
|
||||
</TableRow>
|
||||
)
|
||||
})
|
||||
@@ -0,0 +1,413 @@
|
||||
import { useState } from "react"
|
||||
import { Trans, useLingui } from "@lingui/react/macro"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { pb } from "@/lib/api"
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogDescription,
|
||||
DialogFooter,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
} from "@/components/ui/dialog"
|
||||
import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu"
|
||||
import { Sheet, SheetContent, SheetDescription, SheetFooter, SheetHeader, SheetTitle } from "@/components/ui/sheet"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { Label } from "@/components/ui/label"
|
||||
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
|
||||
import { Textarea } from "@/components/ui/textarea"
|
||||
import { ChevronDownIcon, ListIcon } from "lucide-react"
|
||||
import { useToast } from "@/components/ui/use-toast"
|
||||
import { $systems } from "@/lib/stores"
|
||||
import * as v from "valibot"
|
||||
|
||||
// Protocols supported by network probes.
type ProbeProtocol = "icmp" | "tcp" | "http"

// Raw form values collected before validation/normalization
// (interval is still a string at this point; it comes from an input field).
type ProbeValues = {
	system: string
	target: string
	protocol: ProbeProtocol
	port: number
	interval: string
	name?: string
}

// Valibot schema for the probe record payload. `interval` arrives as a string
// and is coerced to a number constrained to [1, 3600] seconds.
// `name` is optional here because it is assigned after parsing (see
// buildProbePayload).
const Schema = v.object({
	system: v.string(),
	target: v.string(),
	protocol: v.picklist(["icmp", "tcp", "http"]),
	port: v.number(),
	interval: v.pipe(v.string(), v.toNumber(), v.minValue(1), v.maxValue(3600)),
	enabled: v.boolean(),
	name: v.optional(v.string()),
})
|
||||
|
||||
function buildProbePayload(values: ProbeValues) {
|
||||
const normalizedPort = (values.protocol === "tcp" || values.protocol === "http") && !values.port ? 443 : values.port
|
||||
const payload = v.parse(Schema, {
|
||||
system: values.system,
|
||||
target: values.target,
|
||||
protocol: values.protocol,
|
||||
port: normalizedPort,
|
||||
interval: values.interval,
|
||||
enabled: true,
|
||||
})
|
||||
const trimmedName = values.name?.trim()
|
||||
const targetName = values.target.replace(/^https?:\/\//i, "")
|
||||
if (trimmedName) {
|
||||
payload.name = trimmedName
|
||||
} else if (targetName !== values.target) {
|
||||
payload.name = targetName
|
||||
}
|
||||
return payload
|
||||
}
|
||||
|
||||
function parseBulkProbeLine(line: string, lineNumber: number, system: string) {
|
||||
const [rawTarget = "", rawProtocol = "", rawPort = "", rawInterval = "", ...rawName] = line.split(",")
|
||||
const target = rawTarget.trim()
|
||||
if (!target) {
|
||||
throw new Error(`Line ${lineNumber}: target is required`)
|
||||
}
|
||||
|
||||
const inferredProtocol: ProbeProtocol = /^https?:\/\//i.test(target) ? "http" : "icmp"
|
||||
const protocolValue = rawProtocol.trim().toLowerCase() || inferredProtocol
|
||||
if (protocolValue !== "icmp" && protocolValue !== "tcp" && protocolValue !== "http") {
|
||||
throw new Error(`Line ${lineNumber}: protocol must be icmp, tcp, or http`)
|
||||
}
|
||||
|
||||
const portValue = rawPort.trim()
|
||||
if (protocolValue === "tcp") {
|
||||
const port = portValue ? Number(portValue) : 443
|
||||
if (!Number.isInteger(port) || port < 1 || port > 65535) {
|
||||
throw new Error(`Line ${lineNumber}: TCP entries require a port between 1 and 65535`)
|
||||
}
|
||||
return buildProbePayload({
|
||||
system,
|
||||
target,
|
||||
protocol: "tcp",
|
||||
port,
|
||||
interval: rawInterval.trim() || "30",
|
||||
name: rawName.join(",").trim() || undefined,
|
||||
})
|
||||
}
|
||||
|
||||
return buildProbePayload({
|
||||
system,
|
||||
target,
|
||||
protocol: protocolValue,
|
||||
port: 0,
|
||||
interval: rawInterval.trim() || "30",
|
||||
name: rawName.join(",").trim() || undefined,
|
||||
})
|
||||
}
|
||||
|
||||
export function AddProbeDialog({ systemId }: { systemId?: string }) {
|
||||
const [open, setOpen] = useState(false)
|
||||
const [bulkOpen, setBulkOpen] = useState(false)
|
||||
const [protocol, setProtocol] = useState<string>("icmp")
|
||||
const [target, setTarget] = useState("")
|
||||
const [port, setPort] = useState("")
|
||||
const [probeInterval, setProbeInterval] = useState("30")
|
||||
const [name, setName] = useState("")
|
||||
const [loading, setLoading] = useState(false)
|
||||
const [bulkInput, setBulkInput] = useState("")
|
||||
const [bulkLoading, setBulkLoading] = useState(false)
|
||||
const [selectedSystemId, setSelectedSystemId] = useState("")
|
||||
const [bulkSelectedSystemId, setBulkSelectedSystemId] = useState("")
|
||||
const systems = useStore($systems)
|
||||
const { toast } = useToast()
|
||||
const { t } = useLingui()
|
||||
const targetName = target.replace(/^https?:\/\//, "")
|
||||
|
||||
const resetForm = () => {
|
||||
setProtocol("icmp")
|
||||
setTarget("")
|
||||
setPort("")
|
||||
setProbeInterval("30")
|
||||
setName("")
|
||||
setSelectedSystemId("")
|
||||
}
|
||||
|
||||
const resetBulkForm = () => {
|
||||
setBulkInput("")
|
||||
setBulkSelectedSystemId("")
|
||||
}
|
||||
|
||||
const openBulkAdd = () => {
|
||||
if (!systemId && selectedSystemId) {
|
||||
setBulkSelectedSystemId(selectedSystemId)
|
||||
}
|
||||
setOpen(false)
|
||||
setBulkOpen(true)
|
||||
}
|
||||
|
||||
const openAdd = () => {
|
||||
setBulkOpen(false)
|
||||
setOpen(true)
|
||||
}
|
||||
|
||||
async function handleSubmit(e: React.FormEvent) {
|
||||
e.preventDefault()
|
||||
setLoading(true)
|
||||
|
||||
try {
|
||||
const payload = buildProbePayload({
|
||||
system: systemId ?? selectedSystemId,
|
||||
target,
|
||||
protocol: protocol as ProbeProtocol,
|
||||
port: protocol === "tcp" ? Number(port) : 0,
|
||||
interval: probeInterval,
|
||||
name,
|
||||
})
|
||||
await pb.collection("network_probes").create(payload)
|
||||
resetForm()
|
||||
setOpen(false)
|
||||
} catch (err: unknown) {
|
||||
toast({ variant: "destructive", title: t`Error`, description: (err as Error)?.message })
|
||||
} finally {
|
||||
setLoading(false)
|
||||
}
|
||||
}
|
||||
|
||||
async function handleBulkSubmit(e: React.FormEvent) {
|
||||
e.preventDefault()
|
||||
setBulkLoading(true)
|
||||
let closedForSubmit = false
|
||||
|
||||
try {
|
||||
const system = systemId ?? bulkSelectedSystemId
|
||||
const rawLines = bulkInput.split(/\r?\n/).filter((line) => line.trim())
|
||||
if (!rawLines.length) {
|
||||
throw new Error("Enter at least one probe.")
|
||||
}
|
||||
|
||||
const payloads = rawLines.map((line, index) => parseBulkProbeLine(line, index + 1, system))
|
||||
setBulkOpen(false)
|
||||
closedForSubmit = true
|
||||
let batch = pb.createBatch()
|
||||
let inBatch = 0
|
||||
for (const payload of payloads) {
|
||||
batch.collection("network_probes").create(payload)
|
||||
inBatch++
|
||||
if (inBatch > 20) {
|
||||
await batch.send()
|
||||
batch = pb.createBatch()
|
||||
inBatch = 0
|
||||
}
|
||||
}
|
||||
if (inBatch) {
|
||||
await batch.send()
|
||||
}
|
||||
|
||||
resetBulkForm()
|
||||
toast({ title: t`Probes created`, description: `${payloads.length} probe(s) added.` })
|
||||
} catch (err: unknown) {
|
||||
if (closedForSubmit) {
|
||||
setBulkOpen(true)
|
||||
}
|
||||
toast({ variant: "destructive", title: t`Error`, description: (err as Error)?.message })
|
||||
} finally {
|
||||
setBulkLoading(false)
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
<div className="flex gap-0 rounded-lg">
|
||||
<Button variant="outline" onClick={openAdd} className="rounded-e-none grow">
|
||||
{/* <PlusIcon className="size-4 me-1" /> */}
|
||||
<Trans>Add {{ foo: t`Probe` }}</Trans>
|
||||
</Button>
|
||||
<div className="w-px h-full bg-muted"></div>
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button variant="outline" className="px-2 rounded-s-none border-s-0" aria-label={t`More probe actions`}>
|
||||
<ChevronDownIcon className="size-4" />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align="end">
|
||||
<DropdownMenuItem onClick={openBulkAdd}>
|
||||
<ListIcon className="size-4 me-2" />
|
||||
<Trans>Bulk Add</Trans>
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
</div>
|
||||
<Dialog open={open} onOpenChange={setOpen}>
|
||||
<DialogContent className="max-w-md">
|
||||
<DialogHeader>
|
||||
<DialogTitle>
|
||||
<Trans>Add {{ foo: t`Network Probe` }}</Trans>
|
||||
</DialogTitle>
|
||||
<DialogDescription>
|
||||
<Trans>Configure response monitoring from this agent.</Trans>
|
||||
</DialogDescription>
|
||||
</DialogHeader>
|
||||
<form onSubmit={handleSubmit} className="grid gap-4 tabular-nums">
|
||||
{!systemId && (
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>System</Trans>
|
||||
</Label>
|
||||
<Select value={selectedSystemId} onValueChange={setSelectedSystemId} required>
|
||||
<SelectTrigger>
|
||||
<SelectValue placeholder={t`Select a system`} />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{systems.map((sys) => (
|
||||
<SelectItem key={sys.id} value={sys.id}>
|
||||
{sys.name}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
)}
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>Target</Trans>
|
||||
</Label>
|
||||
<Input
|
||||
value={target}
|
||||
onChange={(e) => setTarget(e.target.value)}
|
||||
placeholder={protocol === "http" ? "https://example.com" : "1.1.1.1"}
|
||||
required
|
||||
/>
|
||||
</div>
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>Protocol</Trans>
|
||||
</Label>
|
||||
|
||||
<Select value={protocol} onValueChange={setProtocol}>
|
||||
<SelectTrigger>
|
||||
<SelectValue />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
<SelectItem value="icmp">ICMP</SelectItem>
|
||||
<SelectItem value="tcp">TCP</SelectItem>
|
||||
<SelectItem value="http">HTTP</SelectItem>
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
{protocol === "tcp" && (
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>Port</Trans>
|
||||
</Label>
|
||||
<Input
|
||||
type="number"
|
||||
value={port}
|
||||
onChange={(e) => setPort(e.target.value)}
|
||||
placeholder="443"
|
||||
min={1}
|
||||
max={65535}
|
||||
required
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>Interval (seconds)</Trans>
|
||||
</Label>
|
||||
<Input
|
||||
type="number"
|
||||
value={probeInterval}
|
||||
onChange={(e) => setProbeInterval(e.target.value)}
|
||||
min={1}
|
||||
max={3600}
|
||||
required
|
||||
/>
|
||||
</div>
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>Name (optional)</Trans>
|
||||
</Label>
|
||||
<Input
|
||||
value={name}
|
||||
onChange={(e) => setName(e.target.value)}
|
||||
placeholder={targetName || t`e.g. Cloudflare DNS`}
|
||||
/>
|
||||
</div>
|
||||
<DialogFooter>
|
||||
<Button type="submit" disabled={loading || (!systemId && !selectedSystemId)}>
|
||||
{loading ? <Trans>Creating...</Trans> : <Trans>Add {{ foo: t`Probe` }}</Trans>}
|
||||
</Button>
|
||||
</DialogFooter>
|
||||
</form>
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
|
||||
<Sheet open={bulkOpen} onOpenChange={setBulkOpen}>
|
||||
<SheetContent className="w-full sm:max-w-xl gap-0">
|
||||
<SheetHeader className="border-b">
|
||||
<SheetTitle>
|
||||
<Trans>Bulk Add {{ foo: t`Network Probes` }}</Trans>
|
||||
</SheetTitle>
|
||||
<SheetDescription>
|
||||
<Trans>
|
||||
Paste one probe per line. See{" "}
|
||||
<a href={"#bulk-add-probes-docs"} className="underline underline-offset-2">
|
||||
the documentation
|
||||
</a>
|
||||
.
|
||||
</Trans>
|
||||
</SheetDescription>
|
||||
</SheetHeader>
|
||||
<form onSubmit={handleBulkSubmit} className="flex h-full flex-col overflow-hidden">
|
||||
<div className="flex-1 space-y-4 overflow-auto p-4">
|
||||
{!systemId && (
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>System</Trans>
|
||||
</Label>
|
||||
<Select value={bulkSelectedSystemId} onValueChange={setBulkSelectedSystemId} required>
|
||||
<SelectTrigger>
|
||||
<SelectValue placeholder={t`Select a system`} />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{systems.map((sys) => (
|
||||
<SelectItem key={sys.id} value={sys.id}>
|
||||
{sys.name}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
)}
|
||||
<div className="grid gap-2">
|
||||
<Label htmlFor="bulk-probes">
|
||||
<Trans>Entries</Trans>
|
||||
</Label>
|
||||
<Textarea
|
||||
id="bulk-probes"
|
||||
value={bulkInput}
|
||||
onChange={(e) => setBulkInput(e.target.value)}
|
||||
onKeyDown={(e) => {
|
||||
if (e.key === "Enter" && (e.ctrlKey || e.metaKey)) {
|
||||
e.preventDefault()
|
||||
handleBulkSubmit(e)
|
||||
}
|
||||
}}
|
||||
className="h-120 font-mono text-sm bg-muted/40"
|
||||
style={{ maxHeight: `calc(100vh - 20rem)` }}
|
||||
placeholder={["1.1.1.1", "example.com,tcp", "https://example.com,http,,60,Homepage"].join("\n")}
|
||||
required
|
||||
/>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
target[,protocol[,port[,interval[,name]]]] • TCP and HTTP default to port 443.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<SheetFooter className="border-t">
|
||||
<Button type="submit" disabled={bulkLoading || (!systemId && !bulkSelectedSystemId)}>
|
||||
<Trans>Add {{ foo: t`Network Probes` }}</Trans>
|
||||
</Button>
|
||||
</SheetFooter>
|
||||
</form>
|
||||
</SheetContent>
|
||||
</Sheet>
|
||||
</>
|
||||
)
|
||||
}
|
||||
@@ -4,6 +4,7 @@ const routes = {
|
||||
home: "/",
|
||||
containers: "/containers",
|
||||
smart: "/smart",
|
||||
probes: "/probes",
|
||||
system: `/system/:id`,
|
||||
settings: `/settings/:name?`,
|
||||
forgot_password: `/forgot-password`,
|
||||
|
||||
25
internal/site/src/components/routes/probes.tsx
Normal file
25
internal/site/src/components/routes/probes.tsx
Normal file
@@ -0,0 +1,25 @@
|
||||
import { useLingui } from "@lingui/react/macro"
|
||||
import { memo, useEffect } from "react"
|
||||
import NetworkProbesTableNew from "@/components/network-probes-table/network-probes-table"
|
||||
import { ActiveAlerts } from "@/components/active-alerts"
|
||||
import { FooterRepoLink } from "@/components/footer-repo-link"
|
||||
import { useNetworkProbesData } from "@/lib/use-network-probes"
|
||||
|
||||
/** Standalone /probes route: active alerts plus the all-systems probe table. */
export default memo(() => {
	const { t } = useLingui()
	const { probes } = useNetworkProbesData({})

	// Keep the document title translated; re-runs when the active locale swaps t.
	useEffect(() => {
		const pageTitle = t`Network Probes`
		document.title = `${pageTitle} / Beszel`
	}, [t])

	return (
		<>
			<div className="grid gap-4">
				<ActiveAlerts />
				<NetworkProbesTableNew probes={probes} />
			</div>
			<FooterRepoLink />
		</>
	)
})
|
||||
@@ -15,6 +15,7 @@ import { isAdmin, pb } from "@/lib/api"
|
||||
import type { UserSettings } from "@/types"
|
||||
import { saveSettings } from "./layout"
|
||||
import { QuietHours } from "./quiet-hours"
|
||||
import type { ClientResponseError } from "pocketbase"
|
||||
|
||||
interface ShoutrrrUrlCardProps {
|
||||
url: string
|
||||
@@ -59,10 +60,10 @@ const SettingsNotificationsPage = ({ userSettings }: { userSettings: UserSetting
|
||||
try {
|
||||
const parsedData = v.parse(NotificationSchema, { emails, webhooks })
|
||||
await saveSettings(parsedData)
|
||||
} catch (e: any) {
|
||||
} catch (e: unknown) {
|
||||
toast({
|
||||
title: t`Failed to save settings`,
|
||||
description: e.message,
|
||||
description: (e as Error).message,
|
||||
variant: "destructive",
|
||||
})
|
||||
}
|
||||
@@ -136,12 +137,7 @@ const SettingsNotificationsPage = ({ userSettings }: { userSettings: UserSetting
|
||||
</Trans>
|
||||
</p>
|
||||
</div>
|
||||
<Button
|
||||
type="button"
|
||||
variant="outline"
|
||||
className="h-10 shrink-0"
|
||||
onClick={addWebhook}
|
||||
>
|
||||
<Button type="button" variant="outline" className="h-10 shrink-0" onClick={addWebhook}>
|
||||
<PlusIcon className="size-4" />
|
||||
<span className="ms-1">
|
||||
<Trans>Add URL</Trans>
|
||||
@@ -180,11 +176,20 @@ const SettingsNotificationsPage = ({ userSettings }: { userSettings: UserSetting
|
||||
)
|
||||
}
|
||||
|
||||
function showTestNotificationError(msg: string) {
|
||||
toast({
|
||||
title: t`Error`,
|
||||
description: msg ?? t`Failed to send test notification`,
|
||||
variant: "destructive",
|
||||
})
|
||||
}
|
||||
|
||||
const ShoutrrrUrlCard = ({ url, onUrlChange, onRemove }: ShoutrrrUrlCardProps) => {
|
||||
const [isLoading, setIsLoading] = useState(false)
|
||||
|
||||
const sendTestNotification = async () => {
|
||||
setIsLoading(true)
|
||||
try {
|
||||
const res = await pb.send("/api/beszel/test-notification", { method: "POST", body: { url } })
|
||||
if ("err" in res && !res.err) {
|
||||
toast({
|
||||
@@ -192,14 +197,14 @@ const ShoutrrrUrlCard = ({ url, onUrlChange, onRemove }: ShoutrrrUrlCardProps) =
|
||||
description: t`Check your notification service`,
|
||||
})
|
||||
} else {
|
||||
toast({
|
||||
title: t`Error`,
|
||||
description: res.err ?? t`Failed to send test notification`,
|
||||
variant: "destructive",
|
||||
})
|
||||
showTestNotificationError(res.err)
|
||||
}
|
||||
} catch (e: unknown) {
|
||||
showTestNotificationError((e as ClientResponseError).data?.message)
|
||||
} finally {
|
||||
setIsLoading(false)
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<Card className="bg-table-header p-2 md:p-3">
|
||||
|
||||
@@ -1,21 +1,19 @@
|
||||
import { memo, useState } from "react"
|
||||
import { Trans } from "@lingui/react/macro"
|
||||
import { compareSemVer, parseSemVer } from "@/lib/utils"
|
||||
|
||||
import type { GPUData } from "@/types"
|
||||
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"
|
||||
import InfoBar from "./system/info-bar"
|
||||
import { useSystemData } from "./system/use-system-data"
|
||||
import { CpuChart, ContainerCpuChart } from "./system/charts/cpu-charts"
|
||||
import { MemoryChart, ContainerMemoryChart, SwapChart } from "./system/charts/memory-charts"
|
||||
import { DiskCharts } from "./system/charts/disk-charts"
|
||||
import { RootDiskCharts, ExtraFsCharts } from "./system/charts/disk-charts"
|
||||
import { BandwidthChart, ContainerNetworkChart } from "./system/charts/network-charts"
|
||||
import { TemperatureChart, BatteryChart } from "./system/charts/sensor-charts"
|
||||
import { GpuPowerChart, GpuDetailCharts } from "./system/charts/gpu-charts"
|
||||
import { ExtraFsCharts } from "./system/charts/extra-fs-charts"
|
||||
import { LazyContainersTable, LazySmartTable, LazySystemdTable } from "./system/lazy-tables"
|
||||
import { LazyContainersTable, LazySmartTable, LazySystemdTable, LazyNetworkProbesTable } from "./system/lazy-tables"
|
||||
import { LoadAverageChart } from "./system/charts/load-average-chart"
|
||||
import { ContainerIcon, CpuIcon, HardDriveIcon, TerminalSquareIcon } from "lucide-react"
|
||||
import { ContainerIcon, CpuIcon, HardDriveIcon, NetworkIcon, TerminalSquareIcon } from "lucide-react"
|
||||
import { GpuIcon } from "../ui/icons"
|
||||
import SystemdTable from "../systemd-table/systemd-table"
|
||||
import ContainersTable from "../containers-table/containers-table"
|
||||
@@ -24,6 +22,8 @@ const SEMVER_0_14_0 = parseSemVer("0.14.0")
|
||||
const SEMVER_0_15_0 = parseSemVer("0.15.0")
|
||||
|
||||
export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
const systemData = useSystemData(id)
|
||||
|
||||
const {
|
||||
system,
|
||||
systemStats,
|
||||
@@ -48,7 +48,7 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
hasGpuData,
|
||||
hasGpuEnginesData,
|
||||
hasGpuPowerData,
|
||||
} = useSystemData(id)
|
||||
} = systemData
|
||||
|
||||
// extra margin to add to bottom of page, specifically for temperature chart,
|
||||
// where the tooltip can go past the bottom of the page if lots of sensors
|
||||
@@ -65,7 +65,7 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
const hasGpu = hasGpuData || hasGpuPowerData
|
||||
|
||||
// keep tabsRef in sync for keyboard navigation
|
||||
const tabs = ["core", "disk"]
|
||||
const tabs = ["core", "network", "disk"]
|
||||
if (hasGpu) tabs.push("gpu")
|
||||
if (hasContainers) tabs.push("containers")
|
||||
if (hasSystemd) tabs.push("services")
|
||||
@@ -103,7 +103,7 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
/>
|
||||
)}
|
||||
|
||||
<DiskCharts {...coreProps} systemStats={systemStats} />
|
||||
<RootDiskCharts systemData={systemData} />
|
||||
|
||||
<BandwidthChart {...coreProps} systemStats={systemStats} />
|
||||
|
||||
@@ -138,13 +138,15 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
/>
|
||||
)}
|
||||
|
||||
<ExtraFsCharts {...coreProps} systemStats={systemStats} />
|
||||
<ExtraFsCharts systemData={systemData} />
|
||||
|
||||
{maybeHasSmartData && <LazySmartTable systemId={system.id} />}
|
||||
|
||||
{hasContainersTable && <LazyContainersTable systemId={system.id} />}
|
||||
|
||||
{hasSystemd && <LazySystemdTable systemId={system.id} />}
|
||||
|
||||
<LazyNetworkProbesTable systemId={system.id} systemData={systemData} />
|
||||
</>
|
||||
)
|
||||
}
|
||||
@@ -157,6 +159,10 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
<CpuIcon className="size-3.5" />
|
||||
<Trans context="Core system metrics">Core</Trans>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="network" className="w-full flex items-center gap-1.5">
|
||||
<NetworkIcon className="size-3.5" />
|
||||
<Trans>Network</Trans>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="disk" className="w-full flex items-center gap-1.5">
|
||||
<HardDriveIcon className="size-3.5" />
|
||||
<Trans>Disk</Trans>
|
||||
@@ -184,22 +190,33 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
<TabsContent value="core" forceMount className={activeTab === "core" ? "contents" : "hidden"}>
|
||||
<div className="grid xl:grid-cols-2 gap-4">
|
||||
<CpuChart {...coreProps} />
|
||||
<MemoryChart {...coreProps} />
|
||||
<LoadAverageChart chartData={chartData} grid={grid} dataEmpty={dataEmpty} />
|
||||
<BandwidthChart {...coreProps} systemStats={systemStats} />
|
||||
<TemperatureChart {...coreProps} setPageBottomExtraMargin={setPageBottomExtraMargin} />
|
||||
<MemoryChart {...coreProps} />
|
||||
<SwapChart chartData={chartData} grid={grid} dataEmpty={dataEmpty} systemStats={systemStats} />
|
||||
<TemperatureChart {...coreProps} setPageBottomExtraMargin={setPageBottomExtraMargin} />
|
||||
<BatteryChart {...coreProps} />
|
||||
{pageBottomExtraMargin > 0 && <div style={{ marginBottom: pageBottomExtraMargin }}></div>}
|
||||
</div>
|
||||
</TabsContent>
|
||||
|
||||
<TabsContent value="network" forceMount className={activeTab === "network" ? "contents" : "hidden"}>
|
||||
{mountedTabs.has("network") && (
|
||||
<>
|
||||
<div className="grid xl:grid-cols-2 gap-4">
|
||||
<BandwidthChart {...coreProps} systemStats={systemStats} />
|
||||
</div>
|
||||
<LazyNetworkProbesTable systemId={system.id} systemData={systemData} />
|
||||
</>
|
||||
)}
|
||||
</TabsContent>
|
||||
|
||||
<TabsContent value="disk" forceMount className={activeTab === "disk" ? "contents" : "hidden"}>
|
||||
{mountedTabs.has("disk") && (
|
||||
<>
|
||||
<div className="grid xl:grid-cols-2 gap-4">
|
||||
<DiskCharts {...coreProps} systemStats={systemStats} />
|
||||
<RootDiskCharts systemData={systemData} />
|
||||
</div>
|
||||
<ExtraFsCharts {...coreProps} systemStats={systemStats} />
|
||||
<ExtraFsCharts systemData={systemData} />
|
||||
{maybeHasSmartData && <LazySmartTable systemId={system.id} />}
|
||||
</>
|
||||
)}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import { timeTicks } from "d3-time"
|
||||
import { getPbTimestamp, pb } from "@/lib/api"
|
||||
import { chartTimeData } from "@/lib/utils"
|
||||
import type { ChartData, ChartTimes, ContainerStatsRecord, SystemStatsRecord } from "@/types"
|
||||
import type { ChartData, ChartTimes, ContainerStatsRecord, NetworkProbeStatsRecord, SystemStatsRecord } from "@/types"
|
||||
|
||||
type ChartTimeData = {
|
||||
time: number
|
||||
@@ -17,27 +16,6 @@ export const cache = new Map<
|
||||
ChartTimeData | SystemStatsRecord[] | ContainerStatsRecord[] | ChartData["containerData"]
|
||||
>()
|
||||
|
||||
// create ticks and domain for charts
|
||||
export function getTimeData(chartTime: ChartTimes, lastCreated: number) {
|
||||
const cached = cache.get("td") as ChartTimeData | undefined
|
||||
if (cached && cached.chartTime === chartTime) {
|
||||
if (!lastCreated || cached.time >= lastCreated) {
|
||||
return cached.data
|
||||
}
|
||||
}
|
||||
|
||||
// const buffer = chartTime === "1m" ? 400 : 20_000
|
||||
const now = new Date(Date.now())
|
||||
const startTime = chartTimeData[chartTime].getOffset(now)
|
||||
const ticks = timeTicks(startTime, now, chartTimeData[chartTime].ticks ?? 12).map((date) => date.getTime())
|
||||
const data = {
|
||||
ticks,
|
||||
domain: [chartTimeData[chartTime].getOffset(now).getTime(), now.getTime()],
|
||||
}
|
||||
cache.set("td", { time: now.getTime(), data, chartTime })
|
||||
return data
|
||||
}
|
||||
|
||||
/** Append new records onto prev with gap detection. Converts string `created` values to ms timestamps in place.
|
||||
* Pass `maxLen` to cap the result length in one copy instead of slicing again after the call. */
|
||||
export function appendData<T extends { created: string | number | null }>(
|
||||
@@ -66,12 +44,12 @@ export function appendData<T extends { created: string | number | null }>(
|
||||
return result
|
||||
}
|
||||
|
||||
export async function getStats<T extends SystemStatsRecord | ContainerStatsRecord>(
|
||||
export async function getStats<T extends SystemStatsRecord | ContainerStatsRecord | NetworkProbeStatsRecord>(
|
||||
collection: string,
|
||||
systemId: string,
|
||||
chartTime: ChartTimes
|
||||
chartTime: ChartTimes,
|
||||
cachedStats?: { created: string | number | null }[]
|
||||
): Promise<T[]> {
|
||||
const cachedStats = cache.get(`${systemId}_${chartTime}_${collection}`) as T[] | undefined
|
||||
const lastCached = cachedStats?.at(-1)?.created as number
|
||||
return await pb.collection<T>(collection).getFullList({
|
||||
filter: pb.filter("system={:id} && created > {:created} && type={:type}", {
|
||||
|
||||
@@ -1,39 +1,124 @@
|
||||
import { t } from "@lingui/core/macro"
|
||||
import AreaChartDefault from "@/components/charts/area-chart"
|
||||
import { $userSettings } from "@/lib/stores"
|
||||
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||
import type { SystemStatsRecord } from "@/types"
|
||||
import { ChartCard, SelectAvgMax } from "../chart-card"
|
||||
import { Unit } from "@/lib/enums"
|
||||
import { pinnedAxisDomain } from "@/components/ui/chart"
|
||||
import DiskIoSheet from "../disk-io-sheet"
|
||||
import type { SystemData } from "../use-system-data"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { $userSettings } from "@/lib/stores"
|
||||
|
||||
export function DiskCharts({
|
||||
chartData,
|
||||
grid,
|
||||
dataEmpty,
|
||||
showMax,
|
||||
isLongerChart,
|
||||
maxValues,
|
||||
}: {
|
||||
chartData: ChartData
|
||||
grid: boolean
|
||||
dataEmpty: boolean
|
||||
showMax: boolean
|
||||
isLongerChart: boolean
|
||||
maxValues: boolean
|
||||
systemStats: SystemStatsRecord[]
|
||||
}) {
|
||||
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||
const userSettings = $userSettings.get()
|
||||
// Helpers for indexed dios/diosm access
|
||||
const dios =
|
||||
(i: number) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.dios?.[i] ?? 0
|
||||
const diosMax =
|
||||
(i: number) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.diosm?.[i] ?? 0
|
||||
const extraDios =
|
||||
(name: string, i: number) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.dios?.[i] ?? 0
|
||||
const extraDiosMax =
|
||||
(name: string, i: number) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.diosm?.[i] ?? 0
|
||||
|
||||
export const diskDataFns = {
|
||||
// usage
|
||||
usage: ({ stats }: SystemStatsRecord) => stats?.du ?? 0,
|
||||
extraUsage:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.du ?? 0,
|
||||
// throughput
|
||||
read: ({ stats }: SystemStatsRecord) => stats?.dio?.[0] ?? (stats?.dr ?? 0) * 1024 * 1024,
|
||||
readMax: ({ stats }: SystemStatsRecord) => stats?.diom?.[0] ?? (stats?.drm ?? 0) * 1024 * 1024,
|
||||
write: ({ stats }: SystemStatsRecord) => stats?.dio?.[1] ?? (stats?.dw ?? 0) * 1024 * 1024,
|
||||
writeMax: ({ stats }: SystemStatsRecord) => stats?.diom?.[1] ?? (stats?.dwm ?? 0) * 1024 * 1024,
|
||||
// extra fs throughput
|
||||
extraRead:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.rb ?? (stats?.efs?.[name]?.r ?? 0) * 1024 * 1024,
|
||||
extraReadMax:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.rbm ?? (stats?.efs?.[name]?.rm ?? 0) * 1024 * 1024,
|
||||
extraWrite:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.wb ?? (stats?.efs?.[name]?.w ?? 0) * 1024 * 1024,
|
||||
extraWriteMax:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.wbm ?? (stats?.efs?.[name]?.wm ?? 0) * 1024 * 1024,
|
||||
// read/write time
|
||||
readTime: dios(0),
|
||||
readTimeMax: diosMax(0),
|
||||
extraReadTime: (name: string) => extraDios(name, 0),
|
||||
extraReadTimeMax: (name: string) => extraDiosMax(name, 0),
|
||||
writeTime: dios(1),
|
||||
writeTimeMax: diosMax(1),
|
||||
extraWriteTime: (name: string) => extraDios(name, 1),
|
||||
extraWriteTimeMax: (name: string) => extraDiosMax(name, 1),
|
||||
// utilization (IoTime-based, 0-100%)
|
||||
util: dios(2),
|
||||
utilMax: diosMax(2),
|
||||
extraUtil: (name: string) => extraDios(name, 2),
|
||||
extraUtilMax: (name: string) => extraDiosMax(name, 2),
|
||||
// r_await / w_await: average service time per read/write operation (ms)
|
||||
rAwait: dios(3),
|
||||
rAwaitMax: diosMax(3),
|
||||
extraRAwait: (name: string) => extraDios(name, 3),
|
||||
extraRAwaitMax: (name: string) => extraDiosMax(name, 3),
|
||||
wAwait: dios(4),
|
||||
wAwaitMax: diosMax(4),
|
||||
extraWAwait: (name: string) => extraDios(name, 4),
|
||||
extraWAwaitMax: (name: string) => extraDiosMax(name, 4),
|
||||
// average queue depth: stored as queue_depth * 100 in Go, divided here
|
||||
weightedIO: ({ stats }: SystemStatsRecord) => (stats?.dios?.[5] ?? 0) / 100,
|
||||
weightedIOMax: ({ stats }: SystemStatsRecord) => (stats?.diosm?.[5] ?? 0) / 100,
|
||||
extraWeightedIO:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
(stats?.efs?.[name]?.dios?.[5] ?? 0) / 100,
|
||||
extraWeightedIOMax:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
(stats?.efs?.[name]?.diosm?.[5] ?? 0) / 100,
|
||||
}
|
||||
|
||||
export function RootDiskCharts({ systemData }: { systemData: SystemData }) {
|
||||
return (
|
||||
<>
|
||||
<DiskUsageChart systemData={systemData} />
|
||||
<DiskIOChart systemData={systemData} />
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
export function DiskUsageChart({ systemData, extraFsName }: { systemData: SystemData; extraFsName?: string }) {
|
||||
const { chartData, grid, dataEmpty } = systemData
|
||||
|
||||
let diskSize = chartData.systemStats?.at(-1)?.stats.d ?? NaN
|
||||
if (extraFsName) {
|
||||
diskSize = chartData.systemStats?.at(-1)?.stats.efs?.[extraFsName]?.d ?? NaN
|
||||
}
|
||||
// round to nearest GB
|
||||
if (diskSize >= 100) {
|
||||
diskSize = Math.round(diskSize)
|
||||
}
|
||||
|
||||
const title = extraFsName ? `${extraFsName} ${t`Usage`}` : t`Disk Usage`
|
||||
const description = extraFsName ? t`Disk usage of ${extraFsName}` : t`Usage of root partition`
|
||||
|
||||
return (
|
||||
<>
|
||||
<ChartCard empty={dataEmpty} grid={grid} title={t`Disk Usage`} description={t`Usage of root partition`}>
|
||||
<ChartCard empty={dataEmpty} grid={grid} title={title} description={description}>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
domain={[0, diskSize]}
|
||||
@@ -50,42 +135,62 @@ export function DiskCharts({
|
||||
label: t`Disk Usage`,
|
||||
color: 4,
|
||||
opacity: 0.4,
|
||||
dataKey: ({ stats }) => stats?.du,
|
||||
dataKey: extraFsName ? diskDataFns.extraUsage(extraFsName) : diskDataFns.usage,
|
||||
},
|
||||
]}
|
||||
></AreaChartDefault>
|
||||
</ChartCard>
|
||||
)
|
||||
}
|
||||
|
||||
<ChartCard
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={t`Disk I/O`}
|
||||
description={t`Throughput of root filesystem`}
|
||||
cornerEl={maxValSelect}
|
||||
>
|
||||
export function DiskIOChart({ systemData, extraFsName }: { systemData: SystemData; extraFsName?: string }) {
|
||||
const { chartData, grid, dataEmpty, showMax, isLongerChart, maxValues } = systemData
|
||||
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||
const userSettings = useStore($userSettings)
|
||||
|
||||
if (!chartData.systemStats?.length) {
|
||||
return null
|
||||
}
|
||||
|
||||
const title = extraFsName ? `${extraFsName} I/O` : t`Disk I/O`
|
||||
const description = extraFsName ? t`Throughput of ${extraFsName}` : t`Throughput of root filesystem`
|
||||
|
||||
const hasMoreIOMetrics = chartData.systemStats?.some((record) => record.stats?.dios?.at(0))
|
||||
|
||||
let CornerEl = maxValSelect
|
||||
if (hasMoreIOMetrics) {
|
||||
CornerEl = (
|
||||
<div className="flex gap-2">
|
||||
{maxValSelect}
|
||||
<DiskIoSheet systemData={systemData} extraFsName={extraFsName} title={title} description={description} />
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
let readFn = showMax ? diskDataFns.readMax : diskDataFns.read
|
||||
let writeFn = showMax ? diskDataFns.writeMax : diskDataFns.write
|
||||
if (extraFsName) {
|
||||
readFn = showMax ? diskDataFns.extraReadMax(extraFsName) : diskDataFns.extraRead(extraFsName)
|
||||
writeFn = showMax ? diskDataFns.extraWriteMax(extraFsName) : diskDataFns.extraWrite(extraFsName)
|
||||
}
|
||||
|
||||
return (
|
||||
<ChartCard empty={dataEmpty} grid={grid} title={title} description={description} cornerEl={CornerEl}>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
maxToggled={showMax}
|
||||
// domain={pinnedAxisDomain(true)}
|
||||
showTotal={true}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t({ message: "Write", comment: "Disk write" }),
|
||||
dataKey: ({ stats }: SystemStatsRecord) => {
|
||||
if (showMax) {
|
||||
return stats?.dio?.[1] ?? (stats?.dwm ?? 0) * 1024 * 1024
|
||||
}
|
||||
return stats?.dio?.[1] ?? (stats?.dw ?? 0) * 1024 * 1024
|
||||
},
|
||||
dataKey: writeFn,
|
||||
color: 3,
|
||||
opacity: 0.3,
|
||||
},
|
||||
{
|
||||
label: t({ message: "Read", comment: "Disk read" }),
|
||||
dataKey: ({ stats }: SystemStatsRecord) => {
|
||||
if (showMax) {
|
||||
return stats?.diom?.[0] ?? (stats?.drm ?? 0) * 1024 * 1024
|
||||
}
|
||||
return stats?.dio?.[0] ?? (stats?.dr ?? 0) * 1024 * 1024
|
||||
},
|
||||
dataKey: readFn,
|
||||
color: 1,
|
||||
opacity: 0.3,
|
||||
},
|
||||
@@ -98,9 +203,81 @@ export function DiskCharts({
|
||||
const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
|
||||
return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
|
||||
}}
|
||||
showTotal={true}
|
||||
/>
|
||||
</ChartCard>
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
export function DiskUtilizationChart({ systemData, extraFsName }: { systemData: SystemData; extraFsName?: string }) {
|
||||
const { chartData, grid, dataEmpty, showMax, isLongerChart, maxValues } = systemData
|
||||
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||
|
||||
if (!chartData.systemStats?.length) {
|
||||
return null
|
||||
}
|
||||
|
||||
let utilFn = showMax ? diskDataFns.utilMax : diskDataFns.util
|
||||
if (extraFsName) {
|
||||
utilFn = showMax ? diskDataFns.extraUtilMax(extraFsName) : diskDataFns.extraUtil(extraFsName)
|
||||
}
|
||||
return (
|
||||
<ChartCard
|
||||
cornerEl={maxValSelect}
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={t({
|
||||
message: `I/O Utilization`,
|
||||
context: "Percent of time the disk is busy with I/O",
|
||||
})}
|
||||
description={t`Percent of time the disk is busy with I/O`}
|
||||
// legend={true}
|
||||
className="min-h-auto"
|
||||
>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
domain={pinnedAxisDomain()}
|
||||
tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
|
||||
contentFormatter={({ value }) => `${decimalString(value)}%`}
|
||||
maxToggled={showMax}
|
||||
chartProps={{ syncId: "io" }}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t({ message: "Utilization", context: "Disk I/O utilization" }),
|
||||
dataKey: utilFn,
|
||||
color: 1,
|
||||
opacity: 0.4,
|
||||
},
|
||||
]}
|
||||
/>
|
||||
</ChartCard>
|
||||
)
|
||||
}
|
||||
|
||||
export function ExtraFsCharts({ systemData }: { systemData: SystemData }) {
|
||||
const { systemStats } = systemData.chartData
|
||||
|
||||
const extraFs = systemStats?.at(-1)?.stats.efs
|
||||
|
||||
if (!extraFs || Object.keys(extraFs).length === 0) {
|
||||
return null
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="grid xl:grid-cols-2 gap-4">
|
||||
{Object.keys(extraFs).map((extraFsName) => {
|
||||
let diskSize = systemStats.at(-1)?.stats.efs?.[extraFsName].d ?? NaN
|
||||
// round to nearest GB
|
||||
if (diskSize >= 100) {
|
||||
diskSize = Math.round(diskSize)
|
||||
}
|
||||
return (
|
||||
<div key={extraFsName} className="contents">
|
||||
<DiskUsageChart systemData={systemData} extraFsName={extraFsName} />
|
||||
|
||||
<DiskIOChart systemData={systemData} extraFsName={extraFsName} />
|
||||
</div>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1,120 +0,0 @@
|
||||
import { t } from "@lingui/core/macro"
|
||||
import AreaChartDefault from "@/components/charts/area-chart"
|
||||
import { $userSettings } from "@/lib/stores"
|
||||
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||
import { ChartCard, SelectAvgMax } from "../chart-card"
|
||||
import { Unit } from "@/lib/enums"
|
||||
|
||||
export function ExtraFsCharts({
|
||||
chartData,
|
||||
grid,
|
||||
dataEmpty,
|
||||
showMax,
|
||||
isLongerChart,
|
||||
maxValues,
|
||||
systemStats,
|
||||
}: {
|
||||
chartData: ChartData
|
||||
grid: boolean
|
||||
dataEmpty: boolean
|
||||
showMax: boolean
|
||||
isLongerChart: boolean
|
||||
maxValues: boolean
|
||||
systemStats: SystemStatsRecord[]
|
||||
}) {
|
||||
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||
const userSettings = $userSettings.get()
|
||||
const extraFs = systemStats.at(-1)?.stats.efs
|
||||
if (!extraFs || Object.keys(extraFs).length === 0) {
|
||||
return null
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="grid xl:grid-cols-2 gap-4">
|
||||
{Object.keys(extraFs).map((extraFsName) => {
|
||||
let diskSize = systemStats.at(-1)?.stats.efs?.[extraFsName].d ?? NaN
|
||||
// round to nearest GB
|
||||
if (diskSize >= 100) {
|
||||
diskSize = Math.round(diskSize)
|
||||
}
|
||||
return (
|
||||
<div key={extraFsName} className="contents">
|
||||
<ChartCard
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={`${extraFsName} ${t`Usage`}`}
|
||||
description={t`Disk usage of ${extraFsName}`}
|
||||
>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
domain={[0, diskSize]}
|
||||
tickFormatter={(val) => {
|
||||
const { value, unit } = formatBytes(val * 1024, false, Unit.Bytes, true)
|
||||
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||
}}
|
||||
contentFormatter={({ value }) => {
|
||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
||||
return `${decimalString(convertedValue)} ${unit}`
|
||||
}}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t`Disk Usage`,
|
||||
color: 4,
|
||||
opacity: 0.4,
|
||||
dataKey: ({ stats }) => stats?.efs?.[extraFsName]?.du,
|
||||
},
|
||||
]}
|
||||
></AreaChartDefault>
|
||||
</ChartCard>
|
||||
<ChartCard
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={`${extraFsName} I/O`}
|
||||
description={t`Throughput of ${extraFsName}`}
|
||||
cornerEl={maxValSelect}
|
||||
>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
showTotal={true}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t`Write`,
|
||||
dataKey: ({ stats }) => {
|
||||
if (showMax) {
|
||||
return stats?.efs?.[extraFsName]?.wbm || (stats?.efs?.[extraFsName]?.wm ?? 0) * 1024 * 1024
|
||||
}
|
||||
return stats?.efs?.[extraFsName]?.wb || (stats?.efs?.[extraFsName]?.w ?? 0) * 1024 * 1024
|
||||
},
|
||||
color: 3,
|
||||
opacity: 0.3,
|
||||
},
|
||||
{
|
||||
label: t`Read`,
|
||||
dataKey: ({ stats }) => {
|
||||
if (showMax) {
|
||||
return stats?.efs?.[extraFsName]?.rbm ?? (stats?.efs?.[extraFsName]?.rm ?? 0) * 1024 * 1024
|
||||
}
|
||||
return stats?.efs?.[extraFsName]?.rb ?? (stats?.efs?.[extraFsName]?.r ?? 0) * 1024 * 1024
|
||||
},
|
||||
color: 1,
|
||||
opacity: 0.3,
|
||||
},
|
||||
]}
|
||||
maxToggled={showMax}
|
||||
tickFormatter={(val) => {
|
||||
const { value, unit } = formatBytes(val, true, userSettings.unitDisk, false)
|
||||
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||
}}
|
||||
contentFormatter={({ value }) => {
|
||||
const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
|
||||
return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
|
||||
}}
|
||||
/>
|
||||
</ChartCard>
|
||||
</div>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,152 @@
|
||||
import LineChartDefault from "@/components/charts/line-chart"
|
||||
import type { DataPoint } from "@/components/charts/line-chart"
|
||||
import { toFixedFloat, decimalString } from "@/lib/utils"
|
||||
import { useLingui } from "@lingui/react/macro"
|
||||
import { ChartCard, FilterBar } from "../chart-card"
|
||||
import type { ChartData, NetworkProbeRecord, NetworkProbeStatsRecord } from "@/types"
|
||||
import { useMemo } from "react"
|
||||
import { atom } from "nanostores"
|
||||
import { useStore } from "@nanostores/react"
|
||||
|
||||
const $filter = atom("")
|
||||
|
||||
type ProbeChartProps = {
|
||||
probeStats: NetworkProbeStatsRecord[]
|
||||
grid?: boolean
|
||||
probes: NetworkProbeRecord[]
|
||||
chartData: ChartData
|
||||
empty: boolean
|
||||
}
|
||||
|
||||
type ProbeChartBaseProps = ProbeChartProps & {
|
||||
valueIndex: number
|
||||
title: string
|
||||
description: string
|
||||
tickFormatter: (value: number) => string
|
||||
contentFormatter: ({ value }: { value: number | string }) => string | number
|
||||
domain?: [number | "auto", number | "auto"]
|
||||
}
|
||||
|
||||
function ProbeChart({
|
||||
probeStats,
|
||||
grid,
|
||||
probes,
|
||||
chartData,
|
||||
empty,
|
||||
valueIndex,
|
||||
title,
|
||||
description,
|
||||
tickFormatter,
|
||||
contentFormatter,
|
||||
domain,
|
||||
}: ProbeChartBaseProps) {
|
||||
const filter = useStore($filter)
|
||||
|
||||
const { dataPoints, visibleKeys } = useMemo(() => {
|
||||
const sortedProbes = [...probes].sort((a, b) => b.resAvg1h - a.resAvg1h)
|
||||
const count = sortedProbes.length
|
||||
const points: DataPoint<NetworkProbeStatsRecord>[] = []
|
||||
const visibleIDs: string[] = []
|
||||
const filterTerms = filter
|
||||
? filter
|
||||
.toLowerCase()
|
||||
.split(" ")
|
||||
.filter((term) => term.length > 0)
|
||||
: []
|
||||
for (let i = 0; i < count; i++) {
|
||||
const p = sortedProbes[i]
|
||||
const label = p.name || p.target
|
||||
const filtered = filterTerms.length > 0 && !filterTerms.some((term) => label.toLowerCase().includes(term))
|
||||
if (filtered) {
|
||||
continue
|
||||
}
|
||||
visibleIDs.push(p.id)
|
||||
points.push({
|
||||
order: i,
|
||||
label,
|
||||
dataKey: (record: NetworkProbeStatsRecord) => record.stats?.[p.id]?.[valueIndex] ?? "-",
|
||||
color: count <= 5 ? i + 1 : `hsl(${(i * 360) / count}, var(--chart-saturation), var(--chart-lightness))`,
|
||||
})
|
||||
}
|
||||
return { dataPoints: points, visibleKeys: visibleIDs }
|
||||
}, [probes, filter, valueIndex])
|
||||
|
||||
const filteredProbeStats = useMemo(() => {
|
||||
if (!visibleKeys.length) return probeStats
|
||||
return probeStats.filter((record) => visibleKeys.some((id) => record.stats?.[id] != null))
|
||||
}, [probeStats, visibleKeys])
|
||||
|
||||
const legend = dataPoints.length < 10
|
||||
|
||||
return (
|
||||
<ChartCard
|
||||
legend={legend}
|
||||
cornerEl={<FilterBar store={$filter} />}
|
||||
empty={empty}
|
||||
title={title}
|
||||
description={description}
|
||||
grid={grid}
|
||||
>
|
||||
<LineChartDefault
|
||||
chartData={chartData}
|
||||
customData={filteredProbeStats}
|
||||
dataPoints={dataPoints}
|
||||
domain={domain ?? ["auto", "auto"]}
|
||||
connectNulls
|
||||
tickFormatter={tickFormatter}
|
||||
contentFormatter={contentFormatter}
|
||||
legend={legend}
|
||||
filter={filter}
|
||||
/>
|
||||
</ChartCard>
|
||||
)
|
||||
}
|
||||
|
||||
export function ResponseChart({ probeStats, grid, probes, chartData, empty }: ProbeChartProps) {
|
||||
const { t } = useLingui()
|
||||
|
||||
return (
|
||||
<ProbeChart
|
||||
probeStats={probeStats}
|
||||
grid={grid}
|
||||
probes={probes}
|
||||
chartData={chartData}
|
||||
empty={empty}
|
||||
valueIndex={0}
|
||||
title={t`Response`}
|
||||
description={t`Average response time (ms)`}
|
||||
tickFormatter={(value) => `${toFixedFloat(value, value >= 10 ? 0 : 1)} ms`}
|
||||
contentFormatter={({ value }) => {
|
||||
if (typeof value !== "number") {
|
||||
return value
|
||||
}
|
||||
return `${decimalString(value, 2)} ms`
|
||||
}}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
export function LossChart({ probeStats, grid, probes, chartData, empty }: ProbeChartProps) {
|
||||
const { t } = useLingui()
|
||||
|
||||
return (
|
||||
<ProbeChart
|
||||
probeStats={probeStats}
|
||||
grid={grid}
|
||||
probes={probes}
|
||||
chartData={chartData}
|
||||
empty={empty}
|
||||
valueIndex={4}
|
||||
title={t`Loss`}
|
||||
description={t`Packet loss (%)`}
|
||||
domain={[0, 100]}
|
||||
tickFormatter={(value) => `${toFixedFloat(value, value >= 10 ? 0 : 1)}%`}
|
||||
contentFormatter={({ value }) => {
|
||||
if (typeof value !== "number") {
|
||||
return value
|
||||
}
|
||||
return `${decimalString(value, 2)}%`
|
||||
}}
|
||||
/>
|
||||
)
|
||||
}
|
||||
265
internal/site/src/components/routes/system/disk-io-sheet.tsx
Normal file
265
internal/site/src/components/routes/system/disk-io-sheet.tsx
Normal file
@@ -0,0 +1,265 @@
|
||||
import { t } from "@lingui/core/macro"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { MoreHorizontalIcon } from "lucide-react"
|
||||
import { memo, useRef, useState } from "react"
|
||||
import AreaChartDefault from "@/components/charts/area-chart"
|
||||
import ChartTimeSelect from "@/components/charts/chart-time-select"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Sheet, SheetContent, SheetTrigger } from "@/components/ui/sheet"
|
||||
import { DialogTitle } from "@/components/ui/dialog"
|
||||
import { $userSettings } from "@/lib/stores"
|
||||
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||
import { ChartCard, SelectAvgMax } from "@/components/routes/system/chart-card"
|
||||
import type { SystemData } from "@/components/routes/system/use-system-data"
|
||||
import { diskDataFns, DiskUtilizationChart } from "./charts/disk-charts"
|
||||
import { pinnedAxisDomain } from "@/components/ui/chart"
|
||||
|
||||
export default memo(function DiskIOSheet({
|
||||
systemData,
|
||||
extraFsName,
|
||||
title,
|
||||
description,
|
||||
}: {
|
||||
systemData: SystemData
|
||||
extraFsName?: string
|
||||
title: string
|
||||
description: string
|
||||
}) {
|
||||
const { chartData, grid, dataEmpty, showMax, maxValues, isLongerChart } = systemData
|
||||
const userSettings = useStore($userSettings)
|
||||
|
||||
const [sheetOpen, setSheetOpen] = useState(false)
|
||||
|
||||
const hasOpened = useRef(false)
|
||||
|
||||
if (sheetOpen && !hasOpened.current) {
|
||||
hasOpened.current = true
|
||||
}
|
||||
|
||||
// throughput functions, with extra fs variants if needed
|
||||
let readFn = showMax ? diskDataFns.readMax : diskDataFns.read
|
||||
let writeFn = showMax ? diskDataFns.writeMax : diskDataFns.write
|
||||
if (extraFsName) {
|
||||
readFn = showMax ? diskDataFns.extraReadMax(extraFsName) : diskDataFns.extraRead(extraFsName)
|
||||
writeFn = showMax ? diskDataFns.extraWriteMax(extraFsName) : diskDataFns.extraWrite(extraFsName)
|
||||
}
|
||||
|
||||
// read and write time functions, with extra fs variants if needed
|
||||
let readTimeFn = showMax ? diskDataFns.readTimeMax : diskDataFns.readTime
|
||||
let writeTimeFn = showMax ? diskDataFns.writeTimeMax : diskDataFns.writeTime
|
||||
if (extraFsName) {
|
||||
readTimeFn = showMax ? diskDataFns.extraReadTimeMax(extraFsName) : diskDataFns.extraReadTime(extraFsName)
|
||||
writeTimeFn = showMax ? diskDataFns.extraWriteTimeMax(extraFsName) : diskDataFns.extraWriteTime(extraFsName)
|
||||
}
|
||||
|
||||
// I/O await functions, with extra fs variants if needed
|
||||
let rAwaitFn = showMax ? diskDataFns.rAwaitMax : diskDataFns.rAwait
|
||||
let wAwaitFn = showMax ? diskDataFns.wAwaitMax : diskDataFns.wAwait
|
||||
if (extraFsName) {
|
||||
rAwaitFn = showMax ? diskDataFns.extraRAwaitMax(extraFsName) : diskDataFns.extraRAwait(extraFsName)
|
||||
wAwaitFn = showMax ? diskDataFns.extraWAwaitMax(extraFsName) : diskDataFns.extraWAwait(extraFsName)
|
||||
}
|
||||
|
||||
// weighted I/O function, with extra fs variant if needed
|
||||
let weightedIOFn = showMax ? diskDataFns.weightedIOMax : diskDataFns.weightedIO
|
||||
if (extraFsName) {
|
||||
weightedIOFn = showMax ? diskDataFns.extraWeightedIOMax(extraFsName) : diskDataFns.extraWeightedIO(extraFsName)
|
||||
}
|
||||
|
||||
// check for availability of I/O metrics
|
||||
let hasUtilization = false
|
||||
let hasAwait = false
|
||||
let hasWeightedIO = false
|
||||
for (const record of chartData.systemStats ?? []) {
|
||||
const dios = record.stats?.dios
|
||||
if ((dios?.at(2) ?? 0) > 0) hasUtilization = true
|
||||
if ((dios?.at(3) ?? 0) > 0) hasAwait = true
|
||||
if ((dios?.at(5) ?? 0) > 0) hasWeightedIO = true
|
||||
if (hasUtilization && hasAwait && hasWeightedIO) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||
|
||||
const chartProps = { syncId: "io" }
|
||||
|
||||
const queueDepthTranslation = t({ message: "Queue Depth", context: "Disk I/O average queue depth" })
|
||||
|
||||
return (
|
||||
<Sheet open={sheetOpen} onOpenChange={setSheetOpen}>
|
||||
<DialogTitle className="sr-only">{title}</DialogTitle>
|
||||
<SheetTrigger asChild>
|
||||
<Button
|
||||
title={t`View more`}
|
||||
variant="outline"
|
||||
size="icon"
|
||||
className="shrink-0 max-sm:absolute max-sm:top-0 max-sm:end-0"
|
||||
>
|
||||
<MoreHorizontalIcon />
|
||||
</Button>
|
||||
</SheetTrigger>
|
||||
{hasOpened.current && (
|
||||
<SheetContent aria-describedby={undefined} className="overflow-auto w-200 !max-w-full p-4 sm:p-6">
|
||||
<ChartTimeSelect className="w-[calc(100%-2em)] bg-card" agentVersion={chartData.agentVersion} />
|
||||
|
||||
<ChartCard
|
||||
className="min-h-auto"
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={title}
|
||||
description={description}
|
||||
cornerEl={maxValSelect}
|
||||
// legend={true}
|
||||
>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
maxToggled={showMax}
|
||||
chartProps={chartProps}
|
||||
showTotal={true}
|
||||
domain={pinnedAxisDomain()}
|
||||
itemSorter={(a, b) => a.order - b.order}
|
||||
reverseStackOrder={true}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t`Write`,
|
||||
dataKey: writeFn,
|
||||
color: 3,
|
||||
opacity: 0.4,
|
||||
stackId: 0,
|
||||
order: 0,
|
||||
},
|
||||
{
|
||||
label: t`Read`,
|
||||
dataKey: readFn,
|
||||
color: 1,
|
||||
opacity: 0.4,
|
||||
stackId: 0,
|
||||
order: 1,
|
||||
},
|
||||
]}
|
||||
tickFormatter={(val) => {
|
||||
const { value, unit } = formatBytes(val, true, userSettings.unitDisk, false)
|
||||
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||
}}
|
||||
contentFormatter={({ value }) => {
|
||||
const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
|
||||
return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
|
||||
}}
|
||||
/>
|
||||
</ChartCard>
|
||||
|
||||
{hasUtilization && <DiskUtilizationChart systemData={systemData} extraFsName={extraFsName} />}
|
||||
|
||||
<ChartCard
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={t({ message: "I/O Time", context: "Disk I/O total time spent on read/write" })}
|
||||
description={t({
|
||||
message: "Total time spent on read/write (can exceed 100%)",
|
||||
context: "Disk I/O",
|
||||
})}
|
||||
className="min-h-auto"
|
||||
cornerEl={maxValSelect}
|
||||
>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
domain={pinnedAxisDomain()}
|
||||
tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
|
||||
contentFormatter={({ value }) => `${decimalString(value)}%`}
|
||||
maxToggled={showMax}
|
||||
chartProps={chartProps}
|
||||
showTotal={true}
|
||||
itemSorter={(a, b) => a.order - b.order}
|
||||
reverseStackOrder={true}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t`Write`,
|
||||
dataKey: writeTimeFn,
|
||||
color: 3,
|
||||
opacity: 0.4,
|
||||
stackId: 0,
|
||||
order: 0,
|
||||
},
|
||||
{
|
||||
label: t`Read`,
|
||||
dataKey: readTimeFn,
|
||||
color: 1,
|
||||
opacity: 0.4,
|
||||
stackId: 0,
|
||||
order: 1,
|
||||
},
|
||||
]}
|
||||
/>
|
||||
</ChartCard>
|
||||
|
||||
{hasWeightedIO && (
|
||||
<ChartCard
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={queueDepthTranslation}
|
||||
description={t`Average number of I/O operations waiting to be serviced`}
|
||||
className="min-h-auto"
|
||||
cornerEl={maxValSelect}
|
||||
>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
domain={pinnedAxisDomain()}
|
||||
tickFormatter={(val) => `${toFixedFloat(val, 2)}`}
|
||||
contentFormatter={({ value }) => decimalString(value, value < 10 ? 3 : 2)}
|
||||
maxToggled={showMax}
|
||||
chartProps={chartProps}
|
||||
dataPoints={[
|
||||
{
|
||||
label: queueDepthTranslation,
|
||||
dataKey: weightedIOFn,
|
||||
color: 1,
|
||||
opacity: 0.4,
|
||||
},
|
||||
]}
|
||||
/>
|
||||
</ChartCard>
|
||||
)}
|
||||
|
||||
{hasAwait && (
|
||||
<ChartCard
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={t({ message: "I/O Await", context: "Disk I/O average operation time (iostat await)" })}
|
||||
description={t({
|
||||
message: "Average queue to completion time per operation",
|
||||
context: "Disk I/O average operation time (iostat await)",
|
||||
})}
|
||||
className="min-h-auto"
|
||||
cornerEl={maxValSelect}
|
||||
// legend={true}
|
||||
>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
domain={pinnedAxisDomain()}
|
||||
tickFormatter={(val) => `${toFixedFloat(val, 2)} ms`}
|
||||
contentFormatter={({ value }) => `${decimalString(value)} ms`}
|
||||
maxToggled={showMax}
|
||||
chartProps={chartProps}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t`Write`,
|
||||
dataKey: wAwaitFn,
|
||||
color: 3,
|
||||
opacity: 0.3,
|
||||
},
|
||||
{
|
||||
label: t`Read`,
|
||||
dataKey: rAwaitFn,
|
||||
color: 1,
|
||||
opacity: 0.3,
|
||||
},
|
||||
]}
|
||||
/>
|
||||
</ChartCard>
|
||||
)}
|
||||
</SheetContent>
|
||||
)}
|
||||
</Sheet>
|
||||
)
|
||||
})
|
||||
@@ -1,6 +1,11 @@
|
||||
import { lazy } from "react"
|
||||
import { useIntersectionObserver } from "@/lib/use-intersection-observer"
|
||||
import { cn } from "@/lib/utils"
|
||||
import { ResponseChart, LossChart } from "./charts/probes-charts"
|
||||
import type { SystemData } from "./use-system-data"
|
||||
import { $chartTime } from "@/lib/stores"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { useNetworkProbesData } from "@/lib/use-network-probes"
|
||||
|
||||
const ContainersTable = lazy(() => import("../../containers-table/containers-table"))
|
||||
|
||||
@@ -34,3 +39,46 @@ export function LazySystemdTable({ systemId }: { systemId: string }) {
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
const NetworkProbesTable = lazy(() => import("@/components/network-probes-table/network-probes-table"))
|
||||
|
||||
export function LazyNetworkProbesTable({ systemId, systemData }: { systemId: string; systemData: SystemData }) {
|
||||
const { isIntersecting, ref } = useIntersectionObserver()
|
||||
|
||||
return (
|
||||
<div ref={ref} className={cn(isIntersecting && "contents")}>
|
||||
{isIntersecting && <ProbesTable systemId={systemId} systemData={systemData} />}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
function ProbesTable({ systemId, systemData }: { systemId: string; systemData: SystemData }) {
|
||||
const { grid, chartData } = systemData ?? {}
|
||||
const chartTime = useStore($chartTime)
|
||||
|
||||
const { probes, probeStats } = useNetworkProbesData({ systemId, loadStats: !!chartData, chartTime })
|
||||
|
||||
return (
|
||||
<>
|
||||
<NetworkProbesTable systemId={systemId} probes={probes} />
|
||||
{!!chartData && !!probes.length && (
|
||||
<div className="grid xl:grid-cols-2 gap-4">
|
||||
<ResponseChart
|
||||
probeStats={probeStats}
|
||||
grid={grid}
|
||||
probes={probes}
|
||||
chartData={chartData}
|
||||
empty={!probeStats.length}
|
||||
/>
|
||||
<LossChart
|
||||
probeStats={probeStats}
|
||||
grid={grid}
|
||||
probes={probes}
|
||||
chartData={chartData}
|
||||
empty={!probeStats.length}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -36,7 +36,7 @@ import { Input } from "@/components/ui/input"
|
||||
import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table"
|
||||
import { Badge } from "@/components/ui/badge"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { pb } from "@/lib/api"
|
||||
import { isReadOnlyUser, pb } from "@/lib/api"
|
||||
import type { SmartDeviceRecord, SmartAttribute } from "@/types"
|
||||
import {
|
||||
formatBytes,
|
||||
@@ -492,7 +492,7 @@ export default function DisksTable({ systemId }: { systemId?: string }) {
|
||||
const tableColumns = useMemo(() => {
|
||||
const columns = createColumns(longestName, longestModel, longestDevice)
|
||||
const baseColumns = systemId ? columns.filter((col) => col.id !== "system") : columns
|
||||
return [...baseColumns, actionColumn]
|
||||
return isReadOnlyUser() ? baseColumns : [...baseColumns, actionColumn]
|
||||
}, [systemId, actionColumn, longestName, longestModel, longestDevice])
|
||||
|
||||
const table = useReactTable({
|
||||
|
||||
@@ -26,7 +26,9 @@ import type {
|
||||
SystemStatsRecord,
|
||||
} from "@/types"
|
||||
import { $router, navigate } from "../../router"
|
||||
import { appendData, cache, getStats, getTimeData, makeContainerData, makeContainerPoint } from "./chart-data"
|
||||
import { appendData, cache, getStats, makeContainerData, makeContainerPoint } from "./chart-data"
|
||||
|
||||
export type SystemData = ReturnType<typeof useSystemData>
|
||||
|
||||
export function useSystemData(id: string) {
|
||||
const direction = useStore($direction)
|
||||
@@ -149,16 +151,11 @@ export function useSystemData(id: string) {
|
||||
const agentVersion = useMemo(() => parseSemVer(system?.info?.v), [system?.info?.v])
|
||||
|
||||
const chartData: ChartData = useMemo(() => {
|
||||
const lastCreated = Math.max(
|
||||
(systemStats.at(-1)?.created as number) ?? 0,
|
||||
(containerData.at(-1)?.created as number) ?? 0
|
||||
)
|
||||
return {
|
||||
systemStats,
|
||||
containerData,
|
||||
chartTime,
|
||||
orientation: direction === "rtl" ? "right" : "left",
|
||||
...getTimeData(chartTime, lastCreated),
|
||||
agentVersion,
|
||||
}
|
||||
}, [systemStats, containerData, direction])
|
||||
@@ -190,7 +187,7 @@ export function useSystemData(id: string) {
|
||||
|
||||
// Skip the fetch if the latest cached point is recent enough that no new point is expected yet
|
||||
const lastCreated = cachedSystemStats.at(-1)?.created as number | undefined
|
||||
if (lastCreated && Date.now() - lastCreated < expectedInterval) {
|
||||
if (lastCreated && Date.now() - lastCreated < expectedInterval * 0.9) {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
@@ -198,8 +195,8 @@ export function useSystemData(id: string) {
|
||||
}
|
||||
|
||||
Promise.allSettled([
|
||||
getStats<SystemStatsRecord>("system_stats", systemId, chartTime),
|
||||
getStats<ContainerStatsRecord>("container_stats", systemId, chartTime),
|
||||
getStats<SystemStatsRecord>("system_stats", systemId, chartTime, cachedSystemStats),
|
||||
getStats<ContainerStatsRecord>("container_stats", systemId, chartTime, cachedContainerData),
|
||||
]).then(([systemStats, containerStats]) => {
|
||||
// If another request has been made since this one, ignore the results
|
||||
if (requestId !== statsRequestId.current) {
|
||||
|
||||
@@ -18,7 +18,7 @@ import { listenKeys } from "nanostores"
|
||||
import { memo, type ReactNode, useEffect, useMemo, useRef, useState } from "react"
|
||||
import { getStatusColor, systemdTableCols } from "@/components/systemd-table/systemd-table-columns"
|
||||
import { Alert, AlertDescription, AlertTitle } from "@/components/ui/alert"
|
||||
import { Card, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"
|
||||
import { Card, CardHeader, CardTitle } from "@/components/ui/card"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { Sheet, SheetContent, SheetHeader, SheetTitle } from "@/components/ui/sheet"
|
||||
import { TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table"
|
||||
@@ -161,13 +161,13 @@ export default function SystemdTable({ systemId }: { systemId?: string }) {
|
||||
<CardTitle className="mb-2">
|
||||
<Trans>Systemd Services</Trans>
|
||||
</CardTitle>
|
||||
<CardDescription className="flex items-center">
|
||||
<div className="text-sm text-muted-foreground flex items-center flex-wrap">
|
||||
<Trans>Total: {data.length}</Trans>
|
||||
<Separator orientation="vertical" className="h-4 mx-2 bg-primary/40" />
|
||||
<Trans>Failed: {statusTotals[ServiceStatus.Failed]}</Trans>
|
||||
<Separator orientation="vertical" className="h-4 mx-2 bg-primary/40" />
|
||||
<Trans>Updated every 10 minutes.</Trans>
|
||||
</CardDescription>
|
||||
</div>
|
||||
</div>
|
||||
<Input
|
||||
placeholder={t`Filter...`}
|
||||
|
||||
@@ -110,20 +110,23 @@ export function SystemsTableColumns(viewMode: "table" | "grid"): ColumnDef<Syste
|
||||
|
||||
// match filter value against name or translated status
|
||||
return (row, _, newFilterInput) => {
|
||||
const { name, status } = row.original
|
||||
const sys = row.original
|
||||
if (sys.host.includes(newFilterInput) || sys.info.v?.includes(newFilterInput)) {
|
||||
return true
|
||||
}
|
||||
if (newFilterInput !== filterInput) {
|
||||
filterInput = newFilterInput
|
||||
filterInputLower = newFilterInput.toLowerCase()
|
||||
}
|
||||
let nameLower = nameCache.get(name)
|
||||
let nameLower = nameCache.get(sys.name)
|
||||
if (nameLower === undefined) {
|
||||
nameLower = name.toLowerCase()
|
||||
nameCache.set(name, nameLower)
|
||||
nameLower = sys.name.toLowerCase()
|
||||
nameCache.set(sys.name, nameLower)
|
||||
}
|
||||
if (nameLower.includes(filterInputLower)) {
|
||||
return true
|
||||
}
|
||||
const statusLower = statusTranslations[status as keyof typeof statusTranslations]
|
||||
const statusLower = statusTranslations[sys.status as keyof typeof statusTranslations]
|
||||
return statusLower?.includes(filterInputLower) || false
|
||||
}
|
||||
})(),
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user