mirror of
https://github.com/henrygd/beszel.git
synced 2026-04-18 02:41:50 +02:00
Compare commits
73 Commits
be70840609
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a71617e058 | ||
|
|
e5507fa106 | ||
|
|
a024c3cfd0 | ||
|
|
07466804e7 | ||
|
|
981c788d6f | ||
|
|
f5576759de | ||
|
|
be0b708064 | ||
|
|
ab3a3de46c | ||
|
|
1556e53926 | ||
|
|
e3ade3aeb8 | ||
|
|
b013f06956 | ||
|
|
3793b27958 | ||
|
|
5b02158228 | ||
|
|
0ae8c42ae0 | ||
|
|
ea80f3c5a2 | ||
|
|
c3dffff5e4 | ||
|
|
06fdd0e7a8 | ||
|
|
6e3fd90834 | ||
|
|
5ab82183fa | ||
|
|
a68e02ca84 | ||
|
|
0f2e16c63c | ||
|
|
c4009f2b43 | ||
|
|
ef0c1420d1 | ||
|
|
eb9a8e1ef9 | ||
|
|
6b5e6ffa9a | ||
|
|
d656036d3b | ||
|
|
80b73c7faf | ||
|
|
afe9eb7a70 | ||
|
|
7f565a3086 | ||
|
|
77862d4cb1 | ||
|
|
e158a9001b | ||
|
|
f670e868e4 | ||
|
|
0fff699bf6 | ||
|
|
ba10da1b9f | ||
|
|
7f4f14b505 | ||
|
|
2fda4ff264 | ||
|
|
20b0b40ec8 | ||
|
|
d548a012b4 | ||
|
|
ce5d1217dd | ||
|
|
cef09d7cb1 | ||
|
|
f6440acb43 | ||
|
|
5463a38f0f | ||
|
|
80135fdad3 | ||
|
|
5db4eb4346 | ||
|
|
f6c5e2928a | ||
|
|
6a207c33fa | ||
|
|
9f19afccde | ||
|
|
f25f2469e3 | ||
|
|
5bd43ed461 | ||
|
|
afdc3f7779 | ||
|
|
a227c77526 | ||
|
|
8202d746af | ||
|
|
9840b99327 | ||
|
|
f7b5a505e8 | ||
|
|
3cb32ac046 | ||
|
|
e610d9bfc8 | ||
|
|
b53fdbe0ef | ||
|
|
c7261b56f1 | ||
|
|
3f4c3d51b6 | ||
|
|
ad21cab457 | ||
|
|
f04684b30a | ||
|
|
4d4e4fba9b | ||
|
|
62587919f4 | ||
|
|
35528332fd | ||
|
|
e3e453140e | ||
|
|
7a64da9f65 | ||
|
|
8e71c8ad97 | ||
|
|
97f3b8c61f | ||
|
|
0b0b5d16d7 | ||
|
|
b2fd50211e | ||
|
|
c159eaacd1 | ||
|
|
441bdd2ec5 | ||
|
|
ff36138229 |
@@ -19,6 +19,8 @@ import (
|
|||||||
gossh "golang.org/x/crypto/ssh"
|
gossh "golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const defaultDataCacheTimeMs uint16 = 60_000
|
||||||
|
|
||||||
type Agent struct {
|
type Agent struct {
|
||||||
sync.Mutex // Used to lock agent while collecting data
|
sync.Mutex // Used to lock agent while collecting data
|
||||||
debug bool // true if LOG_LEVEL is set to debug
|
debug bool // true if LOG_LEVEL is set to debug
|
||||||
@@ -36,6 +38,7 @@ type Agent struct {
|
|||||||
sensorConfig *SensorConfig // Sensors config
|
sensorConfig *SensorConfig // Sensors config
|
||||||
systemInfo system.Info // Host system info (dynamic)
|
systemInfo system.Info // Host system info (dynamic)
|
||||||
systemDetails system.Details // Host system details (static, once-per-connection)
|
systemDetails system.Details // Host system details (static, once-per-connection)
|
||||||
|
detailsDirty bool // Whether system details have changed and need to be resent
|
||||||
gpuManager *GPUManager // Manages GPU data
|
gpuManager *GPUManager // Manages GPU data
|
||||||
cache *systemDataCache // Cache for system stats based on cache time
|
cache *systemDataCache // Cache for system stats based on cache time
|
||||||
connectionManager *ConnectionManager // Channel to signal connection events
|
connectionManager *ConnectionManager // Channel to signal connection events
|
||||||
@@ -97,7 +100,7 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
slog.Debug(beszel.Version)
|
slog.Debug(beszel.Version)
|
||||||
|
|
||||||
// initialize docker manager
|
// initialize docker manager
|
||||||
agent.dockerManager = newDockerManager()
|
agent.dockerManager = newDockerManager(agent)
|
||||||
|
|
||||||
// initialize system info
|
// initialize system info
|
||||||
agent.refreshSystemDetails()
|
agent.refreshSystemDetails()
|
||||||
@@ -142,7 +145,7 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
|
|
||||||
// if debugging, print stats
|
// if debugging, print stats
|
||||||
if agent.debug {
|
if agent.debug {
|
||||||
slog.Debug("Stats", "data", agent.gatherStats(common.DataRequestOptions{CacheTimeMs: 60_000, IncludeDetails: true}))
|
slog.Debug("Stats", "data", agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs, IncludeDetails: true}))
|
||||||
}
|
}
|
||||||
|
|
||||||
return agent, nil
|
return agent, nil
|
||||||
@@ -164,11 +167,6 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
|
|||||||
Info: a.systemInfo,
|
Info: a.systemInfo,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Include static system details only when requested
|
|
||||||
if options.IncludeDetails {
|
|
||||||
data.Details = &a.systemDetails
|
|
||||||
}
|
|
||||||
|
|
||||||
// slog.Info("System data", "data", data, "cacheTimeMs", cacheTimeMs)
|
// slog.Info("System data", "data", data, "cacheTimeMs", cacheTimeMs)
|
||||||
|
|
||||||
if a.dockerManager != nil {
|
if a.dockerManager != nil {
|
||||||
@@ -181,7 +179,7 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
|
|||||||
}
|
}
|
||||||
|
|
||||||
// skip updating systemd services if cache time is not the default 60sec interval
|
// skip updating systemd services if cache time is not the default 60sec interval
|
||||||
if a.systemdManager != nil && cacheTimeMs == 60_000 {
|
if a.systemdManager != nil && cacheTimeMs == defaultDataCacheTimeMs {
|
||||||
totalCount := uint16(a.systemdManager.getServiceStatsCount())
|
totalCount := uint16(a.systemdManager.getServiceStatsCount())
|
||||||
if totalCount > 0 {
|
if totalCount > 0 {
|
||||||
numFailed := a.systemdManager.getFailedServiceCount()
|
numFailed := a.systemdManager.getFailedServiceCount()
|
||||||
@@ -212,7 +210,8 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
|
|||||||
slog.Debug("Extra FS", "data", data.Stats.ExtraFs)
|
slog.Debug("Extra FS", "data", data.Stats.ExtraFs)
|
||||||
|
|
||||||
a.cache.Set(data, cacheTimeMs)
|
a.cache.Set(data, cacheTimeMs)
|
||||||
return data
|
|
||||||
|
return a.attachSystemDetails(data, cacheTimeMs, options.IncludeDetails)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Start initializes and starts the agent with optional WebSocket connection
|
// Start initializes and starts the agent with optional WebSocket connection
|
||||||
|
|||||||
@@ -1,84 +1,11 @@
|
|||||||
//go:build !freebsd
|
// Package battery provides functions to check if the system has a battery and return the charge state and percentage.
|
||||||
|
|
||||||
// Package battery provides functions to check if the system has a battery and to get the battery stats.
|
|
||||||
package battery
|
package battery
|
||||||
|
|
||||||
import (
|
const (
|
||||||
"errors"
|
stateUnknown uint8 = iota
|
||||||
"log/slog"
|
stateEmpty
|
||||||
"math"
|
stateFull
|
||||||
|
stateCharging
|
||||||
"github.com/distatus/battery"
|
stateDischarging
|
||||||
|
stateIdle
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
systemHasBattery = false
|
|
||||||
haveCheckedBattery = false
|
|
||||||
)
|
|
||||||
|
|
||||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
|
||||||
func HasReadableBattery() bool {
|
|
||||||
if haveCheckedBattery {
|
|
||||||
return systemHasBattery
|
|
||||||
}
|
|
||||||
haveCheckedBattery = true
|
|
||||||
batteries, err := battery.GetAll()
|
|
||||||
for _, bat := range batteries {
|
|
||||||
if bat != nil && (bat.Full > 0 || bat.Design > 0) {
|
|
||||||
systemHasBattery = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !systemHasBattery {
|
|
||||||
slog.Debug("No battery found", "err", err)
|
|
||||||
}
|
|
||||||
return systemHasBattery
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBatteryStats returns the current battery percent and charge state
|
|
||||||
// percent = (current charge of all batteries) / (sum of designed/full capacity of all batteries)
|
|
||||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
|
||||||
if !HasReadableBattery() {
|
|
||||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
|
||||||
}
|
|
||||||
batteries, err := battery.GetAll()
|
|
||||||
// we'll handle errors later by skipping batteries with errors, rather
|
|
||||||
// than skipping everything because of the presence of some errors.
|
|
||||||
if len(batteries) == 0 {
|
|
||||||
return batteryPercent, batteryState, errors.New("no batteries")
|
|
||||||
}
|
|
||||||
|
|
||||||
totalCapacity := float64(0)
|
|
||||||
totalCharge := float64(0)
|
|
||||||
errs, partialErrs := err.(battery.Errors)
|
|
||||||
|
|
||||||
batteryState = math.MaxUint8
|
|
||||||
|
|
||||||
for i, bat := range batteries {
|
|
||||||
if partialErrs && errs[i] != nil {
|
|
||||||
// if there were some errors, like missing data, skip it
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if bat == nil || bat.Full == 0 {
|
|
||||||
// skip batteries with no capacity. Charge is unlikely to ever be zero, but
|
|
||||||
// we can't guarantee that, so don't skip based on charge.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
totalCapacity += bat.Full
|
|
||||||
totalCharge += min(bat.Current, bat.Full)
|
|
||||||
if bat.State.Raw >= 0 {
|
|
||||||
batteryState = uint8(bat.State.Raw)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
|
||||||
// for macs there's sometimes a ghost battery with 0 capacity
|
|
||||||
// https://github.com/distatus/battery/issues/34
|
|
||||||
// Instead of skipping over those batteries, we'll check for total 0 capacity
|
|
||||||
// and return an error. This also prevents a divide by zero.
|
|
||||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
|
||||||
}
|
|
||||||
|
|
||||||
batteryPercent = uint8(totalCharge / totalCapacity * 100)
|
|
||||||
return batteryPercent, batteryState, nil
|
|
||||||
}
|
|
||||||
|
|||||||
96
agent/battery/battery_darwin.go
Normal file
96
agent/battery/battery_darwin.go
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
//go:build darwin
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
"os/exec"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"howett.net/plist"
|
||||||
|
)
|
||||||
|
|
||||||
|
type macBattery struct {
|
||||||
|
CurrentCapacity int `plist:"CurrentCapacity"`
|
||||||
|
MaxCapacity int `plist:"MaxCapacity"`
|
||||||
|
FullyCharged bool `plist:"FullyCharged"`
|
||||||
|
IsCharging bool `plist:"IsCharging"`
|
||||||
|
ExternalConnected bool `plist:"ExternalConnected"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func readMacBatteries() ([]macBattery, error) {
|
||||||
|
out, err := exec.Command("ioreg", "-n", "AppleSmartBattery", "-r", "-a").Output()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(out) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
var batteries []macBattery
|
||||||
|
if _, err := plist.Unmarshal(out, &batteries); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return batteries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||||
|
var HasReadableBattery = sync.OnceValue(func() bool {
|
||||||
|
systemHasBattery := false
|
||||||
|
batteries, err := readMacBatteries()
|
||||||
|
slog.Debug("Batteries", "batteries", batteries, "err", err)
|
||||||
|
for _, bat := range batteries {
|
||||||
|
if bat.MaxCapacity > 0 {
|
||||||
|
systemHasBattery = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return systemHasBattery
|
||||||
|
})
|
||||||
|
|
||||||
|
// GetBatteryStats returns the current battery percent and charge state.
|
||||||
|
// Uses CurrentCapacity/MaxCapacity to match the value macOS displays.
|
||||||
|
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||||
|
if !HasReadableBattery() {
|
||||||
|
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
batteries, err := readMacBatteries()
|
||||||
|
if len(batteries) == 0 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no batteries")
|
||||||
|
}
|
||||||
|
|
||||||
|
totalCapacity := 0
|
||||||
|
totalCharge := 0
|
||||||
|
batteryState = math.MaxUint8
|
||||||
|
|
||||||
|
for _, bat := range batteries {
|
||||||
|
if bat.MaxCapacity == 0 {
|
||||||
|
// skip ghost batteries with 0 capacity
|
||||||
|
// https://github.com/distatus/battery/issues/34
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalCapacity += bat.MaxCapacity
|
||||||
|
totalCharge += min(bat.CurrentCapacity, bat.MaxCapacity)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case !bat.ExternalConnected:
|
||||||
|
batteryState = stateDischarging
|
||||||
|
case bat.IsCharging:
|
||||||
|
batteryState = stateCharging
|
||||||
|
case bat.CurrentCapacity == 0:
|
||||||
|
batteryState = stateEmpty
|
||||||
|
case !bat.FullyCharged:
|
||||||
|
batteryState = stateIdle
|
||||||
|
default:
|
||||||
|
batteryState = stateFull
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryPercent = uint8(float64(totalCharge) / float64(totalCapacity) * 100)
|
||||||
|
return batteryPercent, batteryState, nil
|
||||||
|
}
|
||||||
120
agent/battery/battery_linux.go
Normal file
120
agent/battery/battery_linux.go
Normal file
@@ -0,0 +1,120 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// getBatteryPaths returns the paths of all batteries in /sys/class/power_supply
|
||||||
|
var getBatteryPaths func() ([]string, error)
|
||||||
|
|
||||||
|
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||||
|
var HasReadableBattery func() bool
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
resetBatteryState("/sys/class/power_supply")
|
||||||
|
}
|
||||||
|
|
||||||
|
// resetBatteryState resets the sync.Once functions to a fresh state.
|
||||||
|
// Tests call this after swapping sysfsPowerSupply so the new path is picked up.
|
||||||
|
func resetBatteryState(sysfsPowerSupplyPath string) {
|
||||||
|
getBatteryPaths = sync.OnceValues(func() ([]string, error) {
|
||||||
|
entries, err := os.ReadDir(sysfsPowerSupplyPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var paths []string
|
||||||
|
for _, e := range entries {
|
||||||
|
path := filepath.Join(sysfsPowerSupplyPath, e.Name())
|
||||||
|
if utils.ReadStringFile(filepath.Join(path, "type")) == "Battery" {
|
||||||
|
paths = append(paths, path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return paths, nil
|
||||||
|
})
|
||||||
|
HasReadableBattery = sync.OnceValue(func() bool {
|
||||||
|
systemHasBattery := false
|
||||||
|
paths, err := getBatteryPaths()
|
||||||
|
for _, path := range paths {
|
||||||
|
if _, ok := utils.ReadStringFileOK(filepath.Join(path, "capacity")); ok {
|
||||||
|
systemHasBattery = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !systemHasBattery {
|
||||||
|
slog.Debug("No battery found", "err", err)
|
||||||
|
}
|
||||||
|
return systemHasBattery
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseSysfsState(status string) uint8 {
|
||||||
|
switch status {
|
||||||
|
case "Empty":
|
||||||
|
return stateEmpty
|
||||||
|
case "Full":
|
||||||
|
return stateFull
|
||||||
|
case "Charging":
|
||||||
|
return stateCharging
|
||||||
|
case "Discharging":
|
||||||
|
return stateDischarging
|
||||||
|
case "Not charging":
|
||||||
|
return stateIdle
|
||||||
|
default:
|
||||||
|
return stateUnknown
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBatteryStats returns the current battery percent and charge state.
|
||||||
|
// Reads /sys/class/power_supply/*/capacity directly so the kernel-reported
|
||||||
|
// value is used, which is always 0-100 and matches what the OS displays.
|
||||||
|
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||||
|
if !HasReadableBattery() {
|
||||||
|
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
paths, err := getBatteryPaths()
|
||||||
|
if err != nil {
|
||||||
|
return batteryPercent, batteryState, err
|
||||||
|
}
|
||||||
|
if len(paths) == 0 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no batteries")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryState = math.MaxUint8
|
||||||
|
totalPercent := 0
|
||||||
|
count := 0
|
||||||
|
|
||||||
|
for _, path := range paths {
|
||||||
|
capStr, ok := utils.ReadStringFileOK(filepath.Join(path, "capacity"))
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
cap, parseErr := strconv.Atoi(capStr)
|
||||||
|
if parseErr != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalPercent += cap
|
||||||
|
count++
|
||||||
|
|
||||||
|
state := parseSysfsState(utils.ReadStringFile(filepath.Join(path, "status")))
|
||||||
|
if state != stateUnknown {
|
||||||
|
batteryState = state
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if count == 0 || batteryState == math.MaxUint8 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryPercent = uint8(totalPercent / count)
|
||||||
|
return batteryPercent, batteryState, nil
|
||||||
|
}
|
||||||
201
agent/battery/battery_linux_test.go
Normal file
201
agent/battery/battery_linux_test.go
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
//go:build testing && linux
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// setupFakeSysfs creates a temporary sysfs-like tree under t.TempDir(),
|
||||||
|
// swaps sysfsPowerSupply, resets the sync.Once caches, and restores
|
||||||
|
// everything on cleanup. Returns a helper to create battery directories.
|
||||||
|
func setupFakeSysfs(t *testing.T) (tmpDir string, addBattery func(name, capacity, status string)) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
tmp := t.TempDir()
|
||||||
|
resetBatteryState(tmp)
|
||||||
|
|
||||||
|
write := func(path, content string) {
|
||||||
|
t.Helper()
|
||||||
|
dir := filepath.Dir(path)
|
||||||
|
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(path, []byte(content), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
addBattery = func(name, capacity, status string) {
|
||||||
|
t.Helper()
|
||||||
|
batDir := filepath.Join(tmp, name)
|
||||||
|
write(filepath.Join(batDir, "type"), "Battery")
|
||||||
|
write(filepath.Join(batDir, "capacity"), capacity)
|
||||||
|
write(filepath.Join(batDir, "status"), status)
|
||||||
|
}
|
||||||
|
|
||||||
|
return tmp, addBattery
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseSysfsState(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
input string
|
||||||
|
want uint8
|
||||||
|
}{
|
||||||
|
{"Empty", stateEmpty},
|
||||||
|
{"Full", stateFull},
|
||||||
|
{"Charging", stateCharging},
|
||||||
|
{"Discharging", stateDischarging},
|
||||||
|
{"Not charging", stateIdle},
|
||||||
|
{"", stateUnknown},
|
||||||
|
{"SomethingElse", stateUnknown},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
assert.Equal(t, tt.want, parseSysfsState(tt.input), "parseSysfsState(%q)", tt.input)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_SingleBattery(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "72", "Discharging")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(72), pct)
|
||||||
|
assert.Equal(t, stateDischarging, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_MultipleBatteries(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "80", "Charging")
|
||||||
|
addBattery("BAT1", "40", "Charging")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
// average of 80 and 40 = 60
|
||||||
|
assert.EqualValues(t, 60, pct)
|
||||||
|
assert.Equal(t, stateCharging, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_FullBattery(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "100", "Full")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(100), pct)
|
||||||
|
assert.Equal(t, stateFull, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_EmptyBattery(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "0", "Empty")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(0), pct)
|
||||||
|
assert.Equal(t, stateEmpty, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_NotCharging(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "80", "Not charging")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(80), pct)
|
||||||
|
assert.Equal(t, stateIdle, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_NoBatteries(t *testing.T) {
|
||||||
|
setupFakeSysfs(t) // empty directory, no batteries
|
||||||
|
|
||||||
|
_, _, err := GetBatteryStats()
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_NonBatterySupplyIgnored(t *testing.T) {
|
||||||
|
tmp, addBattery := setupFakeSysfs(t)
|
||||||
|
|
||||||
|
// Add a real battery
|
||||||
|
addBattery("BAT0", "55", "Charging")
|
||||||
|
|
||||||
|
// Add an AC adapter (type != Battery) - should be ignored
|
||||||
|
acDir := filepath.Join(tmp, "AC0")
|
||||||
|
if err := os.MkdirAll(acDir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(acDir, "type"), []byte("Mains"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(55), pct)
|
||||||
|
assert.Equal(t, stateCharging, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_InvalidCapacitySkipped(t *testing.T) {
|
||||||
|
tmp, addBattery := setupFakeSysfs(t)
|
||||||
|
|
||||||
|
// One battery with valid capacity
|
||||||
|
addBattery("BAT0", "90", "Discharging")
|
||||||
|
|
||||||
|
// Another with invalid capacity text
|
||||||
|
badDir := filepath.Join(tmp, "BAT1")
|
||||||
|
if err := os.MkdirAll(badDir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(badDir, "type"), []byte("Battery"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(badDir, "capacity"), []byte("not-a-number"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(badDir, "status"), []byte("Discharging"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pct, _, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
// Only BAT0 counted
|
||||||
|
assert.Equal(t, uint8(90), pct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_UnknownStatusOnly(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "50", "SomethingWeird")
|
||||||
|
|
||||||
|
_, _, err := GetBatteryStats()
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasReadableBattery_True(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "50", "Charging")
|
||||||
|
|
||||||
|
assert.True(t, HasReadableBattery())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasReadableBattery_False(t *testing.T) {
|
||||||
|
setupFakeSysfs(t) // no batteries
|
||||||
|
|
||||||
|
assert.False(t, HasReadableBattery())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasReadableBattery_NoCapacityFile(t *testing.T) {
|
||||||
|
tmp, _ := setupFakeSysfs(t)
|
||||||
|
|
||||||
|
// Battery dir with type file but no capacity file
|
||||||
|
batDir := filepath.Join(tmp, "BAT0")
|
||||||
|
err := os.MkdirAll(batDir, 0o755)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
err = os.WriteFile(filepath.Join(batDir, "type"), []byte("Battery"), 0o644)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.False(t, HasReadableBattery())
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build freebsd
|
//go:build !darwin && !linux && !windows
|
||||||
|
|
||||||
package battery
|
package battery
|
||||||
|
|
||||||
298
agent/battery/battery_windows.go
Normal file
298
agent/battery/battery_windows.go
Normal file
@@ -0,0 +1,298 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
|
// Most of the Windows battery code is based on
|
||||||
|
// distatus/battery by Karol 'Kenji Takahashi' Woźniak
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
type batteryQueryInformation struct {
|
||||||
|
BatteryTag uint32
|
||||||
|
InformationLevel int32
|
||||||
|
AtRate int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type batteryInformation struct {
|
||||||
|
Capabilities uint32
|
||||||
|
Technology uint8
|
||||||
|
Reserved [3]uint8
|
||||||
|
Chemistry [4]uint8
|
||||||
|
DesignedCapacity uint32
|
||||||
|
FullChargedCapacity uint32
|
||||||
|
DefaultAlert1 uint32
|
||||||
|
DefaultAlert2 uint32
|
||||||
|
CriticalBias uint32
|
||||||
|
CycleCount uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type batteryWaitStatus struct {
|
||||||
|
BatteryTag uint32
|
||||||
|
Timeout uint32
|
||||||
|
PowerState uint32
|
||||||
|
LowCapacity uint32
|
||||||
|
HighCapacity uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type batteryStatus struct {
|
||||||
|
PowerState uint32
|
||||||
|
Capacity uint32
|
||||||
|
Voltage uint32
|
||||||
|
Rate int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type winGUID struct {
|
||||||
|
Data1 uint32
|
||||||
|
Data2 uint16
|
||||||
|
Data3 uint16
|
||||||
|
Data4 [8]byte
|
||||||
|
}
|
||||||
|
|
||||||
|
type spDeviceInterfaceData struct {
|
||||||
|
cbSize uint32
|
||||||
|
InterfaceClassGuid winGUID
|
||||||
|
Flags uint32
|
||||||
|
Reserved uint
|
||||||
|
}
|
||||||
|
|
||||||
|
var guidDeviceBattery = winGUID{
|
||||||
|
0x72631e54,
|
||||||
|
0x78A4,
|
||||||
|
0x11d0,
|
||||||
|
[8]byte{0xbc, 0xf7, 0x00, 0xaa, 0x00, 0xb7, 0xb3, 0x2a},
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
setupapi = &windows.LazyDLL{Name: "setupapi.dll", System: true}
|
||||||
|
setupDiGetClassDevsW = setupapi.NewProc("SetupDiGetClassDevsW")
|
||||||
|
setupDiEnumDeviceInterfaces = setupapi.NewProc("SetupDiEnumDeviceInterfaces")
|
||||||
|
setupDiGetDeviceInterfaceDetailW = setupapi.NewProc("SetupDiGetDeviceInterfaceDetailW")
|
||||||
|
setupDiDestroyDeviceInfoList = setupapi.NewProc("SetupDiDestroyDeviceInfoList")
|
||||||
|
)
|
||||||
|
|
||||||
|
// winBatteryGet reads one battery by index. Returns (fullCapacity, currentCapacity, state, error).
|
||||||
|
// Returns error == errNotFound when there are no more batteries.
|
||||||
|
var errNotFound = errors.New("no more batteries")
|
||||||
|
|
||||||
|
func setupDiSetup(proc *windows.LazyProc, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, error) {
|
||||||
|
_ = nargs
|
||||||
|
r1, _, errno := syscall.SyscallN(proc.Addr(), a1, a2, a3, a4, a5, a6)
|
||||||
|
if windows.Handle(r1) == windows.InvalidHandle {
|
||||||
|
if errno != 0 {
|
||||||
|
return 0, error(errno)
|
||||||
|
}
|
||||||
|
return 0, syscall.EINVAL
|
||||||
|
}
|
||||||
|
return r1, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupDiCall(proc *windows.LazyProc, nargs, a1, a2, a3, a4, a5, a6 uintptr) syscall.Errno {
|
||||||
|
_ = nargs
|
||||||
|
r1, _, errno := syscall.SyscallN(proc.Addr(), a1, a2, a3, a4, a5, a6)
|
||||||
|
if r1 == 0 {
|
||||||
|
if errno != 0 {
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return syscall.EINVAL
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func readWinBatteryState(powerState uint32) uint8 {
|
||||||
|
switch {
|
||||||
|
case powerState&0x00000004 != 0:
|
||||||
|
return stateCharging
|
||||||
|
case powerState&0x00000008 != 0:
|
||||||
|
return stateEmpty
|
||||||
|
case powerState&0x00000002 != 0:
|
||||||
|
return stateDischarging
|
||||||
|
case powerState&0x00000001 != 0:
|
||||||
|
return stateFull
|
||||||
|
default:
|
||||||
|
return stateUnknown
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func winBatteryGet(idx int) (full, current uint32, state uint8, err error) {
|
||||||
|
hdev, err := setupDiSetup(
|
||||||
|
setupDiGetClassDevsW,
|
||||||
|
4,
|
||||||
|
uintptr(unsafe.Pointer(&guidDeviceBattery)),
|
||||||
|
0, 0,
|
||||||
|
2|16, // DIGCF_PRESENT|DIGCF_DEVICEINTERFACE
|
||||||
|
0, 0,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, stateUnknown, err
|
||||||
|
}
|
||||||
|
defer syscall.SyscallN(setupDiDestroyDeviceInfoList.Addr(), hdev)
|
||||||
|
|
||||||
|
var did spDeviceInterfaceData
|
||||||
|
did.cbSize = uint32(unsafe.Sizeof(did))
|
||||||
|
errno := setupDiCall(
|
||||||
|
setupDiEnumDeviceInterfaces,
|
||||||
|
5,
|
||||||
|
hdev, 0,
|
||||||
|
uintptr(unsafe.Pointer(&guidDeviceBattery)),
|
||||||
|
uintptr(idx),
|
||||||
|
uintptr(unsafe.Pointer(&did)),
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if errno == 259 { // ERROR_NO_MORE_ITEMS
|
||||||
|
return 0, 0, stateUnknown, errNotFound
|
||||||
|
}
|
||||||
|
if errno != 0 {
|
||||||
|
return 0, 0, stateUnknown, errno
|
||||||
|
}
|
||||||
|
|
||||||
|
var cbRequired uint32
|
||||||
|
errno = setupDiCall(
|
||||||
|
setupDiGetDeviceInterfaceDetailW,
|
||||||
|
6,
|
||||||
|
hdev,
|
||||||
|
uintptr(unsafe.Pointer(&did)),
|
||||||
|
0, 0,
|
||||||
|
uintptr(unsafe.Pointer(&cbRequired)),
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if errno != 0 && errno != 122 { // ERROR_INSUFFICIENT_BUFFER
|
||||||
|
return 0, 0, stateUnknown, errno
|
||||||
|
}
|
||||||
|
didd := make([]uint16, cbRequired/2)
|
||||||
|
cbSize := (*uint32)(unsafe.Pointer(&didd[0]))
|
||||||
|
if unsafe.Sizeof(uint(0)) == 8 {
|
||||||
|
*cbSize = 8
|
||||||
|
} else {
|
||||||
|
*cbSize = 6
|
||||||
|
}
|
||||||
|
errno = setupDiCall(
|
||||||
|
setupDiGetDeviceInterfaceDetailW,
|
||||||
|
6,
|
||||||
|
hdev,
|
||||||
|
uintptr(unsafe.Pointer(&did)),
|
||||||
|
uintptr(unsafe.Pointer(&didd[0])),
|
||||||
|
uintptr(cbRequired),
|
||||||
|
uintptr(unsafe.Pointer(&cbRequired)),
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if errno != 0 {
|
||||||
|
return 0, 0, stateUnknown, errno
|
||||||
|
}
|
||||||
|
devicePath := &didd[2:][0]
|
||||||
|
|
||||||
|
handle, err := windows.CreateFile(
|
||||||
|
devicePath,
|
||||||
|
windows.GENERIC_READ|windows.GENERIC_WRITE,
|
||||||
|
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE,
|
||||||
|
nil,
|
||||||
|
windows.OPEN_EXISTING,
|
||||||
|
windows.FILE_ATTRIBUTE_NORMAL,
|
||||||
|
0,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, stateUnknown, err
|
||||||
|
}
|
||||||
|
defer windows.CloseHandle(handle)
|
||||||
|
|
||||||
|
var dwOut uint32
|
||||||
|
var dwWait uint32
|
||||||
|
var bqi batteryQueryInformation
|
||||||
|
err = windows.DeviceIoControl(
|
||||||
|
handle,
|
||||||
|
2703424, // IOCTL_BATTERY_QUERY_TAG
|
||||||
|
(*byte)(unsafe.Pointer(&dwWait)),
|
||||||
|
uint32(unsafe.Sizeof(dwWait)),
|
||||||
|
(*byte)(unsafe.Pointer(&bqi.BatteryTag)),
|
||||||
|
uint32(unsafe.Sizeof(bqi.BatteryTag)),
|
||||||
|
&dwOut, nil,
|
||||||
|
)
|
||||||
|
if err != nil || bqi.BatteryTag == 0 {
|
||||||
|
return 0, 0, stateUnknown, errors.New("battery tag not returned")
|
||||||
|
}
|
||||||
|
|
||||||
|
var bi batteryInformation
|
||||||
|
if err = windows.DeviceIoControl(
|
||||||
|
handle,
|
||||||
|
2703428, // IOCTL_BATTERY_QUERY_INFORMATION
|
||||||
|
(*byte)(unsafe.Pointer(&bqi)),
|
||||||
|
uint32(unsafe.Sizeof(bqi)),
|
||||||
|
(*byte)(unsafe.Pointer(&bi)),
|
||||||
|
uint32(unsafe.Sizeof(bi)),
|
||||||
|
&dwOut, nil,
|
||||||
|
); err != nil {
|
||||||
|
return 0, 0, stateUnknown, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bws := batteryWaitStatus{BatteryTag: bqi.BatteryTag}
|
||||||
|
var bs batteryStatus
|
||||||
|
if err = windows.DeviceIoControl(
|
||||||
|
handle,
|
||||||
|
2703436, // IOCTL_BATTERY_QUERY_STATUS
|
||||||
|
(*byte)(unsafe.Pointer(&bws)),
|
||||||
|
uint32(unsafe.Sizeof(bws)),
|
||||||
|
(*byte)(unsafe.Pointer(&bs)),
|
||||||
|
uint32(unsafe.Sizeof(bs)),
|
||||||
|
&dwOut, nil,
|
||||||
|
); err != nil {
|
||||||
|
return 0, 0, stateUnknown, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if bs.Capacity == 0xffffffff { // BATTERY_UNKNOWN_CAPACITY
|
||||||
|
return 0, 0, stateUnknown, errors.New("battery capacity unknown")
|
||||||
|
}
|
||||||
|
|
||||||
|
return bi.FullChargedCapacity, bs.Capacity, readWinBatteryState(bs.PowerState), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||||
|
var HasReadableBattery = sync.OnceValue(func() bool {
|
||||||
|
systemHasBattery := false
|
||||||
|
full, _, _, err := winBatteryGet(0)
|
||||||
|
if err == nil && full > 0 {
|
||||||
|
systemHasBattery = true
|
||||||
|
}
|
||||||
|
if !systemHasBattery {
|
||||||
|
slog.Debug("No battery found", "err", err)
|
||||||
|
}
|
||||||
|
return systemHasBattery
|
||||||
|
})
|
||||||
|
|
||||||
|
// GetBatteryStats returns the current battery percent and charge state.
|
||||||
|
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||||
|
if !HasReadableBattery() {
|
||||||
|
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
|
||||||
|
totalFull := uint32(0)
|
||||||
|
totalCurrent := uint32(0)
|
||||||
|
batteryState = math.MaxUint8
|
||||||
|
|
||||||
|
for i := 0; ; i++ {
|
||||||
|
full, current, state, bErr := winBatteryGet(i)
|
||||||
|
if errors.Is(bErr, errNotFound) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if bErr != nil || full == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalFull += full
|
||||||
|
totalCurrent += min(current, full)
|
||||||
|
batteryState = state
|
||||||
|
}
|
||||||
|
|
||||||
|
if totalFull == 0 || batteryState == math.MaxUint8 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryPercent = uint8(float64(totalCurrent) / float64(totalFull) * 100)
|
||||||
|
return batteryPercent, batteryState, nil
|
||||||
|
}
|
||||||
@@ -20,6 +20,7 @@ import (
|
|||||||
"github.com/fxamacker/cbor/v2"
|
"github.com/fxamacker/cbor/v2"
|
||||||
"github.com/lxzan/gws"
|
"github.com/lxzan/gws"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
|
"golang.org/x/net/proxy"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -104,6 +105,11 @@ func (client *WebSocketClient) getOptions() *gws.ClientOption {
|
|||||||
}
|
}
|
||||||
client.hubURL.Path = path.Join(client.hubURL.Path, "api/beszel/agent-connect")
|
client.hubURL.Path = path.Join(client.hubURL.Path, "api/beszel/agent-connect")
|
||||||
|
|
||||||
|
// make sure BESZEL_AGENT_ALL_PROXY works (GWS only checks ALL_PROXY)
|
||||||
|
if val := os.Getenv("BESZEL_AGENT_ALL_PROXY"); val != "" {
|
||||||
|
os.Setenv("ALL_PROXY", val)
|
||||||
|
}
|
||||||
|
|
||||||
client.options = &gws.ClientOption{
|
client.options = &gws.ClientOption{
|
||||||
Addr: client.hubURL.String(),
|
Addr: client.hubURL.String(),
|
||||||
TlsConfig: &tls.Config{InsecureSkipVerify: true},
|
TlsConfig: &tls.Config{InsecureSkipVerify: true},
|
||||||
@@ -112,6 +118,9 @@ func (client *WebSocketClient) getOptions() *gws.ClientOption {
|
|||||||
"X-Token": []string{client.token},
|
"X-Token": []string{client.token},
|
||||||
"X-Beszel": []string{beszel.Version},
|
"X-Beszel": []string{beszel.Version},
|
||||||
},
|
},
|
||||||
|
NewDialer: func() (gws.Dialer, error) {
|
||||||
|
return proxy.FromEnvironment(), nil
|
||||||
|
},
|
||||||
}
|
}
|
||||||
return client.options
|
return client.options
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,11 +4,15 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/agent/health"
|
"github.com/henrygd/beszel/agent/health"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -111,11 +115,34 @@ func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
|||||||
_ = health.Update()
|
_ = health.Update()
|
||||||
case <-sigCtx.Done():
|
case <-sigCtx.Done():
|
||||||
slog.Info("Shutting down", "cause", context.Cause(sigCtx))
|
slog.Info("Shutting down", "cause", context.Cause(sigCtx))
|
||||||
|
return c.stop()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// stop does not stop the connection manager itself, just any active connections. The manager will attempt to reconnect after stopping, so this should only be called immediately before shutting down the entire agent.
|
||||||
|
//
|
||||||
|
// If we need or want to expose a graceful Stop method in the future, do something like this to actually stop the manager:
|
||||||
|
//
|
||||||
|
// func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
||||||
|
// ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
// c.cancel = cancel
|
||||||
|
//
|
||||||
|
// for {
|
||||||
|
// select {
|
||||||
|
// case <-ctx.Done():
|
||||||
|
// return c.stop()
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// func (c *ConnectionManager) Stop() {
|
||||||
|
// c.cancel()
|
||||||
|
// }
|
||||||
|
func (c *ConnectionManager) stop() error {
|
||||||
_ = c.agent.StopServer()
|
_ = c.agent.StopServer()
|
||||||
c.closeWebSocket()
|
c.closeWebSocket()
|
||||||
return health.CleanUp()
|
return health.CleanUp()
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleEvent processes connection events and updates the connection state accordingly.
|
// handleEvent processes connection events and updates the connection state accordingly.
|
||||||
@@ -185,10 +212,17 @@ func (c *ConnectionManager) connect() {
|
|||||||
|
|
||||||
// Try WebSocket first, if it fails, start SSH server
|
// Try WebSocket first, if it fails, start SSH server
|
||||||
err := c.startWebSocketConnection()
|
err := c.startWebSocketConnection()
|
||||||
if err != nil && c.State == Disconnected {
|
if err != nil {
|
||||||
|
if shouldExitOnErr(err) {
|
||||||
|
time.Sleep(2 * time.Second) // prevent tight restart loop
|
||||||
|
_ = c.stop()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
if c.State == Disconnected {
|
||||||
c.startSSHServer()
|
c.startSSHServer()
|
||||||
c.startWsTicker()
|
c.startWsTicker()
|
||||||
}
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// startWebSocketConnection attempts to establish a WebSocket connection to the hub.
|
// startWebSocketConnection attempts to establish a WebSocket connection to the hub.
|
||||||
@@ -224,3 +258,14 @@ func (c *ConnectionManager) closeWebSocket() {
|
|||||||
c.wsClient.Close()
|
c.wsClient.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// shouldExitOnErr checks if the error is a DNS resolution failure and if the
|
||||||
|
// EXIT_ON_DNS_ERROR env var is set. https://github.com/henrygd/beszel/issues/1924.
|
||||||
|
func shouldExitOnErr(err error) bool {
|
||||||
|
if val, _ := utils.GetEnv("EXIT_ON_DNS_ERROR"); val == "true" {
|
||||||
|
if opErr, ok := errors.AsType[*net.OpError](err); ok {
|
||||||
|
return strings.Contains(opErr.Err.Error(), "lookup")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ package agent
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
@@ -298,3 +299,65 @@ func TestConnectionManager_ConnectFlow(t *testing.T) {
|
|||||||
cm.connect()
|
cm.connect()
|
||||||
}, "Connect should not panic without WebSocket client")
|
}, "Connect should not panic without WebSocket client")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestShouldExitOnErr(t *testing.T) {
|
||||||
|
createDialErr := func(msg string) error {
|
||||||
|
return &net.OpError{
|
||||||
|
Op: "dial",
|
||||||
|
Net: "tcp",
|
||||||
|
Err: errors.New(msg),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
err error
|
||||||
|
envValue string
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "no env var",
|
||||||
|
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||||
|
envValue: "",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "env var false",
|
||||||
|
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||||
|
envValue: "false",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "env var true, matching error",
|
||||||
|
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||||
|
envValue: "true",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "env var true, matching error with extra context",
|
||||||
|
err: createDialErr("lookup beszel.server.lan on [::1]:53: read udp [::1]:44557->[::1]:53: read: connection refused"),
|
||||||
|
envValue: "true",
|
||||||
|
expected: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "env var true, non-matching error",
|
||||||
|
err: errors.New("connection refused"),
|
||||||
|
envValue: "true",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "env var true, dial but not lookup",
|
||||||
|
err: createDialErr("connection timeout"),
|
||||||
|
envValue: "true",
|
||||||
|
expected: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
t.Setenv("EXIT_ON_DNS_ERROR", tt.envValue)
|
||||||
|
result := shouldExitOnErr(tt.err)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
111
agent/disk.go
111
agent/disk.go
@@ -1,6 +1,7 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -33,6 +34,34 @@ type diskDiscovery struct {
|
|||||||
ctx fsRegistrationContext
|
ctx fsRegistrationContext
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// prevDisk stores previous per-device disk counters for a given cache interval
|
||||||
|
type prevDisk struct {
|
||||||
|
readBytes uint64
|
||||||
|
writeBytes uint64
|
||||||
|
readTime uint64 // cumulative ms spent on reads (from ReadTime)
|
||||||
|
writeTime uint64 // cumulative ms spent on writes (from WriteTime)
|
||||||
|
ioTime uint64 // cumulative ms spent doing I/O (from IoTime)
|
||||||
|
weightedIO uint64 // cumulative weighted ms (queue-depth × ms, from WeightedIO)
|
||||||
|
readCount uint64 // cumulative read operation count
|
||||||
|
writeCount uint64 // cumulative write operation count
|
||||||
|
at time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// prevDiskFromCounter creates a prevDisk snapshot from a disk.IOCountersStat at time t.
|
||||||
|
func prevDiskFromCounter(d disk.IOCountersStat, t time.Time) prevDisk {
|
||||||
|
return prevDisk{
|
||||||
|
readBytes: d.ReadBytes,
|
||||||
|
writeBytes: d.WriteBytes,
|
||||||
|
readTime: d.ReadTime,
|
||||||
|
writeTime: d.WriteTime,
|
||||||
|
ioTime: d.IoTime,
|
||||||
|
weightedIO: d.WeightedIO,
|
||||||
|
readCount: d.ReadCount,
|
||||||
|
writeCount: d.WriteCount,
|
||||||
|
at: t,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
||||||
// Returns the device/filesystem part and the custom name part
|
// Returns the device/filesystem part and the custom name part
|
||||||
func parseFilesystemEntry(entry string) (device, customName string) {
|
func parseFilesystemEntry(entry string) (device, customName string) {
|
||||||
@@ -238,9 +267,11 @@ func (d *diskDiscovery) addConfiguredExtraFilesystems(extraFilesystems string) {
|
|||||||
|
|
||||||
// addPartitionExtraFs registers partitions mounted under /extra-filesystems so
|
// addPartitionExtraFs registers partitions mounted under /extra-filesystems so
|
||||||
// their display names can come from the folder name while their I/O keys still
|
// their display names can come from the folder name while their I/O keys still
|
||||||
// prefer the underlying partition device.
|
// prefer the underlying partition device. Only direct children are matched to
|
||||||
|
// avoid registering nested virtual mounts (e.g. /proc, /sys) that are returned by
|
||||||
|
// disk.Partitions(true) when the host root is bind-mounted in /extra-filesystems.
|
||||||
func (d *diskDiscovery) addPartitionExtraFs(p disk.PartitionStat) {
|
func (d *diskDiscovery) addPartitionExtraFs(p disk.PartitionStat) {
|
||||||
if !strings.HasPrefix(p.Mountpoint, d.ctx.efPath) {
|
if filepath.Dir(p.Mountpoint) != d.ctx.efPath {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
device, customName := extraFilesystemPartitionInfo(p)
|
device, customName := extraFilesystemPartitionInfo(p)
|
||||||
@@ -273,7 +304,7 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
hasRoot := false
|
hasRoot := false
|
||||||
isWindows := runtime.GOOS == "windows"
|
isWindows := runtime.GOOS == "windows"
|
||||||
|
|
||||||
partitions, err := disk.Partitions(false)
|
partitions, err := disk.PartitionsWithContext(context.Background(), true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error("Error getting disk partitions", "err", err)
|
slog.Error("Error getting disk partitions", "err", err)
|
||||||
}
|
}
|
||||||
@@ -578,16 +609,29 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
|||||||
prev, hasPrev := a.diskPrev[cacheTimeMs][name]
|
prev, hasPrev := a.diskPrev[cacheTimeMs][name]
|
||||||
if !hasPrev {
|
if !hasPrev {
|
||||||
// Seed from agent-level fsStats if present, else seed from current
|
// Seed from agent-level fsStats if present, else seed from current
|
||||||
prev = prevDisk{readBytes: stats.TotalRead, writeBytes: stats.TotalWrite, at: stats.Time}
|
prev = prevDisk{
|
||||||
|
readBytes: stats.TotalRead,
|
||||||
|
writeBytes: stats.TotalWrite,
|
||||||
|
readTime: d.ReadTime,
|
||||||
|
writeTime: d.WriteTime,
|
||||||
|
ioTime: d.IoTime,
|
||||||
|
weightedIO: d.WeightedIO,
|
||||||
|
readCount: d.ReadCount,
|
||||||
|
writeCount: d.WriteCount,
|
||||||
|
at: stats.Time,
|
||||||
|
}
|
||||||
if prev.at.IsZero() {
|
if prev.at.IsZero() {
|
||||||
prev = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
prev = prevDiskFromCounter(d, now)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
msElapsed := uint64(now.Sub(prev.at).Milliseconds())
|
msElapsed := uint64(now.Sub(prev.at).Milliseconds())
|
||||||
|
|
||||||
|
// Update per-interval snapshot
|
||||||
|
a.diskPrev[cacheTimeMs][name] = prevDiskFromCounter(d, now)
|
||||||
|
|
||||||
|
// Avoid division by zero or clock issues
|
||||||
if msElapsed < 100 {
|
if msElapsed < 100 {
|
||||||
// Avoid division by zero or clock issues; update snapshot and continue
|
|
||||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -599,15 +643,38 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
|||||||
// validate values
|
// validate values
|
||||||
if readMbPerSecond > 50_000 || writeMbPerSecond > 50_000 {
|
if readMbPerSecond > 50_000 || writeMbPerSecond > 50_000 {
|
||||||
slog.Warn("Invalid disk I/O. Resetting.", "name", d.Name, "read", readMbPerSecond, "write", writeMbPerSecond)
|
slog.Warn("Invalid disk I/O. Resetting.", "name", d.Name, "read", readMbPerSecond, "write", writeMbPerSecond)
|
||||||
// Reset interval snapshot and seed from current
|
|
||||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
|
||||||
// also refresh agent baseline to avoid future negatives
|
// also refresh agent baseline to avoid future negatives
|
||||||
a.initializeDiskIoStats(ioCounters)
|
a.initializeDiskIoStats(ioCounters)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update per-interval snapshot
|
// These properties are calculated differently on different platforms,
|
||||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
// but generally represent cumulative time spent doing reads/writes on the device.
|
||||||
|
// This can surpass 100% if there are multiple concurrent I/O operations.
|
||||||
|
// Linux kernel docs:
|
||||||
|
// This is the total number of milliseconds spent by all reads (as
|
||||||
|
// measured from __make_request() to end_that_request_last()).
|
||||||
|
// https://www.kernel.org/doc/Documentation/iostats.txt (fields 4, 8)
|
||||||
|
diskReadTime := utils.TwoDecimals(float64(d.ReadTime-prev.readTime) / float64(msElapsed) * 100)
|
||||||
|
diskWriteTime := utils.TwoDecimals(float64(d.WriteTime-prev.writeTime) / float64(msElapsed) * 100)
|
||||||
|
|
||||||
|
// I/O utilization %: fraction of wall time the device had any I/O in progress (0-100).
|
||||||
|
diskIoUtilPct := utils.TwoDecimals(float64(d.IoTime-prev.ioTime) / float64(msElapsed) * 100)
|
||||||
|
|
||||||
|
// Weighted I/O: queue-depth weighted I/O time, normalized to interval (can exceed 100%).
|
||||||
|
// Linux kernel field 11: incremented by iops_in_progress × ms_since_last_update.
|
||||||
|
// Used to display queue depth. Multipled by 100 to increase accuracy of digit truncation (divided by 100 in UI).
|
||||||
|
diskWeightedIO := utils.TwoDecimals(float64(d.WeightedIO-prev.weightedIO) / float64(msElapsed) * 100)
|
||||||
|
|
||||||
|
// r_await / w_await: average time per read/write operation in milliseconds.
|
||||||
|
// Equivalent to r_await and w_await in iostat.
|
||||||
|
var rAwait, wAwait float64
|
||||||
|
if deltaReadCount := d.ReadCount - prev.readCount; deltaReadCount > 0 {
|
||||||
|
rAwait = utils.TwoDecimals(float64(d.ReadTime-prev.readTime) / float64(deltaReadCount))
|
||||||
|
}
|
||||||
|
if deltaWriteCount := d.WriteCount - prev.writeCount; deltaWriteCount > 0 {
|
||||||
|
wAwait = utils.TwoDecimals(float64(d.WriteTime-prev.writeTime) / float64(deltaWriteCount))
|
||||||
|
}
|
||||||
|
|
||||||
// Update global fsStats baseline for cross-interval correctness
|
// Update global fsStats baseline for cross-interval correctness
|
||||||
stats.Time = now
|
stats.Time = now
|
||||||
@@ -617,20 +684,40 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
|||||||
stats.DiskWritePs = writeMbPerSecond
|
stats.DiskWritePs = writeMbPerSecond
|
||||||
stats.DiskReadBytes = diskIORead
|
stats.DiskReadBytes = diskIORead
|
||||||
stats.DiskWriteBytes = diskIOWrite
|
stats.DiskWriteBytes = diskIOWrite
|
||||||
|
stats.DiskIoStats[0] = diskReadTime
|
||||||
|
stats.DiskIoStats[1] = diskWriteTime
|
||||||
|
stats.DiskIoStats[2] = diskIoUtilPct
|
||||||
|
stats.DiskIoStats[3] = rAwait
|
||||||
|
stats.DiskIoStats[4] = wAwait
|
||||||
|
stats.DiskIoStats[5] = diskWeightedIO
|
||||||
|
|
||||||
if stats.Root {
|
if stats.Root {
|
||||||
systemStats.DiskReadPs = stats.DiskReadPs
|
systemStats.DiskReadPs = stats.DiskReadPs
|
||||||
systemStats.DiskWritePs = stats.DiskWritePs
|
systemStats.DiskWritePs = stats.DiskWritePs
|
||||||
systemStats.DiskIO[0] = diskIORead
|
systemStats.DiskIO[0] = diskIORead
|
||||||
systemStats.DiskIO[1] = diskIOWrite
|
systemStats.DiskIO[1] = diskIOWrite
|
||||||
|
systemStats.DiskIoStats[0] = diskReadTime
|
||||||
|
systemStats.DiskIoStats[1] = diskWriteTime
|
||||||
|
systemStats.DiskIoStats[2] = diskIoUtilPct
|
||||||
|
systemStats.DiskIoStats[3] = rAwait
|
||||||
|
systemStats.DiskIoStats[4] = wAwait
|
||||||
|
systemStats.DiskIoStats[5] = diskWeightedIO
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// getRootMountPoint returns the appropriate root mount point for the system
|
// getRootMountPoint returns the appropriate root mount point for the system.
|
||||||
|
// On Windows it returns the system drive (e.g. "C:").
|
||||||
// For immutable systems like Fedora Silverblue, it returns /sysroot instead of /
|
// For immutable systems like Fedora Silverblue, it returns /sysroot instead of /
|
||||||
func (a *Agent) getRootMountPoint() string {
|
func (a *Agent) getRootMountPoint() string {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
if sd := os.Getenv("SystemDrive"); sd != "" {
|
||||||
|
return sd
|
||||||
|
}
|
||||||
|
return "C:"
|
||||||
|
}
|
||||||
|
|
||||||
// 1. Check if /etc/os-release contains indicators of an immutable system
|
// 1. Check if /etc/os-release contains indicators of an immutable system
|
||||||
if osReleaseContent, err := os.ReadFile("/etc/os-release"); err == nil {
|
if osReleaseContent, err := os.ReadFile("/etc/os-release"); err == nil {
|
||||||
content := string(osReleaseContent)
|
content := string(osReleaseContent)
|
||||||
|
|||||||
@@ -530,6 +530,87 @@ func TestAddExtraFilesystemFolders(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAddPartitionExtraFs(t *testing.T) {
|
||||||
|
makeDiscovery := func(agent *Agent) diskDiscovery {
|
||||||
|
return diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
efPath: "/extra-filesystems",
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nvme0n1p1": {Name: "nvme0n1p1"},
|
||||||
|
"nvme1n1": {Name: "nvme1n1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("registers direct child of extra-filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
d.addPartitionExtraFs(disk.PartitionStat{
|
||||||
|
Device: "/dev/nvme0n1p1",
|
||||||
|
Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root",
|
||||||
|
})
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["nvme0n1p1"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/extra-filesystems/nvme0n1p1__caddy1-root", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "caddy1-root", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("skips nested mount under extra-filesystem bind mount", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
// These simulate the virtual mounts that appear when host / is bind-mounted
|
||||||
|
// with disk.Partitions(all=true) — e.g. /proc, /sys, /dev visible under the mount.
|
||||||
|
for _, nested := range []string{
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/proc",
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/sys",
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/dev",
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/run",
|
||||||
|
} {
|
||||||
|
d.addPartitionExtraFs(disk.PartitionStat{Device: "tmpfs", Mountpoint: nested})
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("registers both direct children, skips their nested mounts", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
partitions := []disk.PartitionStat{
|
||||||
|
{Device: "/dev/nvme0n1p1", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root"},
|
||||||
|
{Device: "/dev/nvme1n1", Mountpoint: "/extra-filesystems/nvme1n1__caddy1-docker"},
|
||||||
|
{Device: "proc", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/proc"},
|
||||||
|
{Device: "sysfs", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/sys"},
|
||||||
|
{Device: "overlay", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/var/lib/docker"},
|
||||||
|
}
|
||||||
|
for _, p := range partitions {
|
||||||
|
d.addPartitionExtraFs(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Len(t, agent.fsStats, 2)
|
||||||
|
assert.Equal(t, "caddy1-root", agent.fsStats["nvme0n1p1"].Name)
|
||||||
|
assert.Equal(t, "caddy1-docker", agent.fsStats["nvme1n1"].Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("skips partition not under extra-filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
d.addPartitionExtraFs(disk.PartitionStat{
|
||||||
|
Device: "/dev/nvme0n1p1",
|
||||||
|
Mountpoint: "/",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestFindIoDevice(t *testing.T) {
|
func TestFindIoDevice(t *testing.T) {
|
||||||
t.Run("matches by device name", func(t *testing.T) {
|
t.Run("matches by device name", func(t *testing.T) {
|
||||||
ioCounters := map[string]disk.IOCountersStat{
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
|||||||
154
agent/docker.go
154
agent/docker.go
@@ -25,6 +25,7 @@ import (
|
|||||||
"github.com/henrygd/beszel/agent/deltatracker"
|
"github.com/henrygd/beszel/agent/deltatracker"
|
||||||
"github.com/henrygd/beszel/agent/utils"
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/container"
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/blang/semver"
|
"github.com/blang/semver"
|
||||||
)
|
)
|
||||||
@@ -52,6 +53,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type dockerManager struct {
|
type dockerManager struct {
|
||||||
|
agent *Agent // Used to propagate system detail changes back to the agent
|
||||||
client *http.Client // Client to query Docker API
|
client *http.Client // Client to query Docker API
|
||||||
wg sync.WaitGroup // WaitGroup to wait for all goroutines to finish
|
wg sync.WaitGroup // WaitGroup to wait for all goroutines to finish
|
||||||
sem chan struct{} // Semaphore to limit concurrent container requests
|
sem chan struct{} // Semaphore to limit concurrent container requests
|
||||||
@@ -60,6 +62,7 @@ type dockerManager struct {
|
|||||||
containerStatsMap map[string]*container.Stats // Keeps track of container stats
|
containerStatsMap map[string]*container.Stats // Keeps track of container stats
|
||||||
validIds map[string]struct{} // Map of valid container ids, used to prune invalid containers from containerStatsMap
|
validIds map[string]struct{} // Map of valid container ids, used to prune invalid containers from containerStatsMap
|
||||||
goodDockerVersion bool // Whether docker version is at least 25.0.0 (one-shot works correctly)
|
goodDockerVersion bool // Whether docker version is at least 25.0.0 (one-shot works correctly)
|
||||||
|
dockerVersionChecked bool // Whether a version probe has completed successfully
|
||||||
isWindows bool // Whether the Docker Engine API is running on Windows
|
isWindows bool // Whether the Docker Engine API is running on Windows
|
||||||
buf *bytes.Buffer // Buffer to store and read response bodies
|
buf *bytes.Buffer // Buffer to store and read response bodies
|
||||||
decoder *json.Decoder // Reusable JSON decoder that reads from buf
|
decoder *json.Decoder // Reusable JSON decoder that reads from buf
|
||||||
@@ -77,7 +80,7 @@ type dockerManager struct {
|
|||||||
// cacheTimeMs -> DeltaTracker for network bytes sent/received
|
// cacheTimeMs -> DeltaTracker for network bytes sent/received
|
||||||
networkSentTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
networkSentTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||||
networkRecvTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
networkRecvTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||||
retrySleep func(time.Duration)
|
lastNetworkReadTime map[uint16]map[string]time.Time // cacheTimeMs -> containerId -> last network read time
|
||||||
}
|
}
|
||||||
|
|
||||||
// userAgentRoundTripper is a custom http.RoundTripper that adds a User-Agent header to all requests
|
// userAgentRoundTripper is a custom http.RoundTripper that adds a User-Agent header to all requests
|
||||||
@@ -86,6 +89,14 @@ type userAgentRoundTripper struct {
|
|||||||
userAgent string
|
userAgent string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// dockerVersionResponse contains the /version fields used for engine checks.
|
||||||
|
type dockerVersionResponse struct {
|
||||||
|
Version string `json:"Version"`
|
||||||
|
Components []struct {
|
||||||
|
Name string `json:"Name"`
|
||||||
|
} `json:"Components"`
|
||||||
|
}
|
||||||
|
|
||||||
// RoundTrip implements the http.RoundTripper interface
|
// RoundTrip implements the http.RoundTripper interface
|
||||||
func (u *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
func (u *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
req.Header.Set("User-Agent", u.userAgent)
|
req.Header.Set("User-Agent", u.userAgent)
|
||||||
@@ -133,7 +144,14 @@ func (dm *dockerManager) getDockerStats(cacheTimeMs uint16) ([]*container.Stats,
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
dm.isWindows = strings.Contains(resp.Header.Get("Server"), "windows")
|
// Detect Podman and Windows from Server header
|
||||||
|
serverHeader := resp.Header.Get("Server")
|
||||||
|
if !dm.usingPodman && detectPodmanFromHeader(serverHeader) {
|
||||||
|
dm.setIsPodman()
|
||||||
|
}
|
||||||
|
dm.isWindows = strings.Contains(serverHeader, "windows")
|
||||||
|
|
||||||
|
dm.ensureDockerVersionChecked()
|
||||||
|
|
||||||
containersLength := len(dm.apiContainerList)
|
containersLength := len(dm.apiContainerList)
|
||||||
|
|
||||||
@@ -285,7 +303,7 @@ func (dm *dockerManager) cycleNetworkDeltasForCacheTime(cacheTimeMs uint16) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// calculateNetworkStats calculates network sent/receive deltas using DeltaTracker
|
// calculateNetworkStats calculates network sent/receive deltas using DeltaTracker
|
||||||
func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats *container.ApiStats, stats *container.Stats, initialized bool, name string, cacheTimeMs uint16) (uint64, uint64) {
|
func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats *container.ApiStats, name string, cacheTimeMs uint16) (uint64, uint64) {
|
||||||
var total_sent, total_recv uint64
|
var total_sent, total_recv uint64
|
||||||
for _, v := range apiStats.Networks {
|
for _, v := range apiStats.Networks {
|
||||||
total_sent += v.TxBytes
|
total_sent += v.TxBytes
|
||||||
@@ -304,10 +322,11 @@ func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats
|
|||||||
sent_delta_raw := sentTracker.Delta(ctr.IdShort)
|
sent_delta_raw := sentTracker.Delta(ctr.IdShort)
|
||||||
recv_delta_raw := recvTracker.Delta(ctr.IdShort)
|
recv_delta_raw := recvTracker.Delta(ctr.IdShort)
|
||||||
|
|
||||||
// Calculate bytes per second independently for Tx and Rx if we have previous data
|
// Calculate bytes per second using per-cache-time read time to avoid
|
||||||
|
// interference between different cache intervals (e.g. 1000ms vs 60000ms)
|
||||||
var sent_delta, recv_delta uint64
|
var sent_delta, recv_delta uint64
|
||||||
if initialized {
|
if prevReadTime, ok := dm.lastNetworkReadTime[cacheTimeMs][ctr.IdShort]; ok {
|
||||||
millisecondsElapsed := uint64(time.Since(stats.PrevReadTime).Milliseconds())
|
millisecondsElapsed := uint64(time.Since(prevReadTime).Milliseconds())
|
||||||
if millisecondsElapsed > 0 {
|
if millisecondsElapsed > 0 {
|
||||||
if sent_delta_raw > 0 {
|
if sent_delta_raw > 0 {
|
||||||
sent_delta = sent_delta_raw * 1000 / millisecondsElapsed
|
sent_delta = sent_delta_raw * 1000 / millisecondsElapsed
|
||||||
@@ -542,7 +561,13 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeM
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Calculate network stats using DeltaTracker
|
// Calculate network stats using DeltaTracker
|
||||||
sent_delta, recv_delta := dm.calculateNetworkStats(ctr, res, stats, initialized, name, cacheTimeMs)
|
sent_delta, recv_delta := dm.calculateNetworkStats(ctr, res, name, cacheTimeMs)
|
||||||
|
|
||||||
|
// Store per-cache-time network read time for next rate calculation
|
||||||
|
if dm.lastNetworkReadTime[cacheTimeMs] == nil {
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs] = make(map[string]time.Time)
|
||||||
|
}
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs][ctr.IdShort] = time.Now()
|
||||||
|
|
||||||
// Store current network values for legacy compatibility
|
// Store current network values for legacy compatibility
|
||||||
var total_sent, total_recv uint64
|
var total_sent, total_recv uint64
|
||||||
@@ -574,10 +599,13 @@ func (dm *dockerManager) deleteContainerStatsSync(id string) {
|
|||||||
for ct := range dm.lastCpuReadTime {
|
for ct := range dm.lastCpuReadTime {
|
||||||
delete(dm.lastCpuReadTime[ct], id)
|
delete(dm.lastCpuReadTime[ct], id)
|
||||||
}
|
}
|
||||||
|
for ct := range dm.lastNetworkReadTime {
|
||||||
|
delete(dm.lastNetworkReadTime[ct], id)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates a new http client for Docker or Podman API
|
// Creates a new http client for Docker or Podman API
|
||||||
func newDockerManager() *dockerManager {
|
func newDockerManager(agent *Agent) *dockerManager {
|
||||||
dockerHost, exists := utils.GetEnv("DOCKER_HOST")
|
dockerHost, exists := utils.GetEnv("DOCKER_HOST")
|
||||||
if exists {
|
if exists {
|
||||||
// return nil if set to empty string
|
// return nil if set to empty string
|
||||||
@@ -643,6 +671,7 @@ func newDockerManager() *dockerManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
manager := &dockerManager{
|
manager := &dockerManager{
|
||||||
|
agent: agent,
|
||||||
client: &http.Client{
|
client: &http.Client{
|
||||||
Timeout: timeout,
|
Timeout: timeout,
|
||||||
Transport: userAgentTransport,
|
Transport: userAgentTransport,
|
||||||
@@ -659,51 +688,55 @@ func newDockerManager() *dockerManager {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
retrySleep: time.Sleep,
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
// If using podman, return client
|
// Best-effort startup probe. If the engine is not ready yet, getDockerStats will
|
||||||
if strings.Contains(dockerHost, "podman") {
|
// retry after the first successful /containers/json request.
|
||||||
manager.usingPodman = true
|
_, _ = manager.checkDockerVersion()
|
||||||
manager.goodDockerVersion = true
|
|
||||||
return manager
|
|
||||||
}
|
|
||||||
|
|
||||||
// run version check in goroutine to avoid blocking (server may not be ready and requires retries)
|
|
||||||
go manager.checkDockerVersion()
|
|
||||||
|
|
||||||
// give version check a chance to complete before returning
|
|
||||||
time.Sleep(50 * time.Millisecond)
|
|
||||||
|
|
||||||
return manager
|
return manager
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkDockerVersion checks Docker version and sets goodDockerVersion if at least 25.0.0.
|
// checkDockerVersion checks Docker version and sets goodDockerVersion if at least 25.0.0.
|
||||||
// Versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch.
|
// Versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch.
|
||||||
func (dm *dockerManager) checkDockerVersion() {
|
func (dm *dockerManager) checkDockerVersion() (bool, error) {
|
||||||
var err error
|
resp, err := dm.client.Get("http://localhost/version")
|
||||||
var resp *http.Response
|
if err != nil {
|
||||||
var versionInfo struct {
|
return false, err
|
||||||
Version string `json:"Version"`
|
|
||||||
}
|
}
|
||||||
const versionMaxTries = 2
|
if resp.StatusCode != http.StatusOK {
|
||||||
for i := 1; i <= versionMaxTries; i++ {
|
status := resp.Status
|
||||||
resp, err = dm.client.Get("http://localhost/version")
|
|
||||||
if err == nil && resp.StatusCode == http.StatusOK {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if resp != nil {
|
|
||||||
resp.Body.Close()
|
resp.Body.Close()
|
||||||
|
return false, fmt.Errorf("docker version request failed: %s", status)
|
||||||
}
|
}
|
||||||
if i < versionMaxTries {
|
|
||||||
slog.Debug("Failed to get Docker version; retrying", "attempt", i, "err", err, "response", resp)
|
var versionInfo dockerVersionResponse
|
||||||
dm.retrySleep(5 * time.Second)
|
serverHeader := resp.Header.Get("Server")
|
||||||
|
if err := dm.decode(resp, &versionInfo); err != nil {
|
||||||
|
return false, err
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if err != nil || resp.StatusCode != http.StatusOK {
|
dm.applyDockerVersionInfo(serverHeader, &versionInfo)
|
||||||
|
dm.dockerVersionChecked = true
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensureDockerVersionChecked retries the version probe after a successful
|
||||||
|
// container list request.
|
||||||
|
func (dm *dockerManager) ensureDockerVersionChecked() {
|
||||||
|
if dm.dockerVersionChecked {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := dm.decode(resp, &versionInfo); err != nil {
|
if _, err := dm.checkDockerVersion(); err != nil {
|
||||||
|
slog.Debug("Failed to get Docker version", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// applyDockerVersionInfo updates version-dependent behavior from engine metadata.
|
||||||
|
func (dm *dockerManager) applyDockerVersionInfo(serverHeader string, versionInfo *dockerVersionResponse) {
|
||||||
|
if detectPodmanEngine(serverHeader, versionInfo) {
|
||||||
|
dm.setIsPodman()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// if version > 24, one-shot works correctly and we can limit concurrent operations
|
// if version > 24, one-shot works correctly and we can limit concurrent operations
|
||||||
@@ -929,3 +962,46 @@ func (dm *dockerManager) GetHostInfo() (info container.HostInfo, err error) {
|
|||||||
func (dm *dockerManager) IsPodman() bool {
|
func (dm *dockerManager) IsPodman() bool {
|
||||||
return dm.usingPodman
|
return dm.usingPodman
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// setIsPodman sets the manager to Podman mode and updates system details accordingly.
|
||||||
|
func (dm *dockerManager) setIsPodman() {
|
||||||
|
if dm.usingPodman {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
dm.usingPodman = true
|
||||||
|
dm.goodDockerVersion = true
|
||||||
|
dm.dockerVersionChecked = true
|
||||||
|
// keep system details updated - this may be detected late if server isn't ready when
|
||||||
|
// agent starts, so make sure we notify the hub if this happens later.
|
||||||
|
if dm.agent != nil {
|
||||||
|
dm.agent.updateSystemDetails(func(details *system.Details) {
|
||||||
|
details.Podman = true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectPodmanFromHeader identifies Podman from the Docker API server header.
|
||||||
|
func detectPodmanFromHeader(server string) bool {
|
||||||
|
return strings.HasPrefix(server, "Libpod")
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectPodmanFromVersion identifies Podman from the version payload.
|
||||||
|
func detectPodmanFromVersion(versionInfo *dockerVersionResponse) bool {
|
||||||
|
if versionInfo == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, component := range versionInfo.Components {
|
||||||
|
if strings.HasPrefix(component.Name, "Podman") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectPodmanEngine checks both header and version metadata for Podman.
|
||||||
|
func detectPodmanEngine(serverHeader string, versionInfo *dockerVersionResponse) bool {
|
||||||
|
if detectPodmanFromHeader(serverHeader) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return detectPodmanFromVersion(versionInfo)
|
||||||
|
}
|
||||||
|
|||||||
@@ -408,6 +408,7 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
cacheTimeMs := uint16(30000)
|
cacheTimeMs := uint16(30000)
|
||||||
@@ -423,6 +424,11 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
dm.networkSentTrackers[cacheTimeMs] = sentTracker
|
dm.networkSentTrackers[cacheTimeMs] = sentTracker
|
||||||
dm.networkRecvTrackers[cacheTimeMs] = recvTracker
|
dm.networkRecvTrackers[cacheTimeMs] = recvTracker
|
||||||
|
|
||||||
|
// Set per-cache-time network read time (1 second ago)
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs] = map[string]time.Time{
|
||||||
|
"container1": time.Now().Add(-time.Second),
|
||||||
|
}
|
||||||
|
|
||||||
ctr := &container.ApiInfo{
|
ctr := &container.ApiInfo{
|
||||||
IdShort: "container1",
|
IdShort: "container1",
|
||||||
}
|
}
|
||||||
@@ -433,12 +439,8 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
stats := &container.Stats{
|
|
||||||
PrevReadTime: time.Now().Add(-time.Second), // 1 second ago
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test with initialized container
|
// Test with initialized container
|
||||||
sent, recv := dm.calculateNetworkStats(ctr, apiStats, stats, true, "test-container", cacheTimeMs)
|
sent, recv := dm.calculateNetworkStats(ctr, apiStats, "test-container", cacheTimeMs)
|
||||||
|
|
||||||
// Should return calculated byte rates per second
|
// Should return calculated byte rates per second
|
||||||
assert.GreaterOrEqual(t, sent, uint64(0))
|
assert.GreaterOrEqual(t, sent, uint64(0))
|
||||||
@@ -446,12 +448,76 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
|
|
||||||
// Cycle and test one-direction change (Tx only) is reflected independently
|
// Cycle and test one-direction change (Tx only) is reflected independently
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs]["container1"] = time.Now().Add(-time.Second)
|
||||||
apiStats.Networks["eth0"] = container.NetworkStats{TxBytes: 2500, RxBytes: 1800} // +500 Tx only
|
apiStats.Networks["eth0"] = container.NetworkStats{TxBytes: 2500, RxBytes: 1800} // +500 Tx only
|
||||||
sent, recv = dm.calculateNetworkStats(ctr, apiStats, stats, true, "test-container", cacheTimeMs)
|
sent, recv = dm.calculateNetworkStats(ctr, apiStats, "test-container", cacheTimeMs)
|
||||||
assert.Greater(t, sent, uint64(0))
|
assert.Greater(t, sent, uint64(0))
|
||||||
assert.Equal(t, uint64(0), recv)
|
assert.Equal(t, uint64(0), recv)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestNetworkStatsCacheTimeIsolation verifies that frequent collections at one cache time
|
||||||
|
// (e.g. 1000ms) don't cause inflated rates at another cache time (e.g. 60000ms).
|
||||||
|
// This was a bug where PrevReadTime was shared, so the 60000ms tracker would see a
|
||||||
|
// large byte delta divided by a tiny elapsed time (set by the 1000ms path).
|
||||||
|
func TestNetworkStatsCacheTimeIsolation(t *testing.T) {
|
||||||
|
dm := &dockerManager{
|
||||||
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
}
|
||||||
|
|
||||||
|
ctr := &container.ApiInfo{IdShort: "container1"}
|
||||||
|
fastCache := uint16(1000)
|
||||||
|
slowCache := uint16(60000)
|
||||||
|
|
||||||
|
// Baseline for both cache times at T=0 with 100 bytes total
|
||||||
|
baseline := &container.ApiStats{
|
||||||
|
Networks: map[string]container.NetworkStats{
|
||||||
|
"eth0": {TxBytes: 100, RxBytes: 100},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
dm.calculateNetworkStats(ctr, baseline, "test", fastCache)
|
||||||
|
dm.calculateNetworkStats(ctr, baseline, "test", slowCache)
|
||||||
|
|
||||||
|
// Record read times and cycle both
|
||||||
|
now := time.Now()
|
||||||
|
dm.lastNetworkReadTime[fastCache] = map[string]time.Time{"container1": now}
|
||||||
|
dm.lastNetworkReadTime[slowCache] = map[string]time.Time{"container1": now}
|
||||||
|
dm.cycleNetworkDeltasForCacheTime(fastCache)
|
||||||
|
dm.cycleNetworkDeltasForCacheTime(slowCache)
|
||||||
|
|
||||||
|
// Simulate many fast (1000ms) collections over ~5 seconds, each adding 10 bytes
|
||||||
|
totalBytes := uint64(100)
|
||||||
|
for i := 0; i < 5; i++ {
|
||||||
|
totalBytes += 10
|
||||||
|
stats := &container.ApiStats{
|
||||||
|
Networks: map[string]container.NetworkStats{
|
||||||
|
"eth0": {TxBytes: totalBytes, RxBytes: totalBytes},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// Set fast cache read time to 1 second ago
|
||||||
|
dm.lastNetworkReadTime[fastCache]["container1"] = time.Now().Add(-time.Second)
|
||||||
|
sent, _ := dm.calculateNetworkStats(ctr, stats, "test", fastCache)
|
||||||
|
// Fast cache should see ~10 bytes/sec per interval
|
||||||
|
assert.LessOrEqual(t, sent, uint64(100), "fast cache rate should be reasonable")
|
||||||
|
dm.cycleNetworkDeltasForCacheTime(fastCache)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now do slow cache collection — total delta is 50 bytes over ~5 seconds
|
||||||
|
// Set slow cache read time to 5 seconds ago (the actual elapsed time)
|
||||||
|
dm.lastNetworkReadTime[slowCache]["container1"] = time.Now().Add(-5 * time.Second)
|
||||||
|
finalStats := &container.ApiStats{
|
||||||
|
Networks: map[string]container.NetworkStats{
|
||||||
|
"eth0": {TxBytes: totalBytes, RxBytes: totalBytes},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
sent, _ := dm.calculateNetworkStats(ctr, finalStats, "test", slowCache)
|
||||||
|
|
||||||
|
// Slow cache rate should be ~10 bytes/sec (50 bytes / 5 seconds), NOT 100x inflated
|
||||||
|
assert.LessOrEqual(t, sent, uint64(100), "slow cache rate should NOT be inflated by fast cache collections")
|
||||||
|
assert.GreaterOrEqual(t, sent, uint64(1), "slow cache should still report some traffic")
|
||||||
|
}
|
||||||
|
|
||||||
func TestDockerManagerCreation(t *testing.T) {
|
func TestDockerManagerCreation(t *testing.T) {
|
||||||
// Test that dockerManager can be created without panicking
|
// Test that dockerManager can be created without panicking
|
||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
@@ -460,6 +526,7 @@ func TestDockerManagerCreation(t *testing.T) {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.NotNil(t, dm)
|
assert.NotNil(t, dm)
|
||||||
@@ -467,63 +534,58 @@ func TestDockerManagerCreation(t *testing.T) {
|
|||||||
assert.NotNil(t, dm.lastCpuSystem)
|
assert.NotNil(t, dm.lastCpuSystem)
|
||||||
assert.NotNil(t, dm.networkSentTrackers)
|
assert.NotNil(t, dm.networkSentTrackers)
|
||||||
assert.NotNil(t, dm.networkRecvTrackers)
|
assert.NotNil(t, dm.networkRecvTrackers)
|
||||||
|
assert.NotNil(t, dm.lastNetworkReadTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCheckDockerVersion(t *testing.T) {
|
func TestCheckDockerVersion(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
responses []struct {
|
|
||||||
statusCode int
|
statusCode int
|
||||||
body string
|
body string
|
||||||
}
|
server string
|
||||||
|
expectSuccess bool
|
||||||
expectedGood bool
|
expectedGood bool
|
||||||
expectedRequests int
|
expectedPodman bool
|
||||||
|
expectError bool
|
||||||
|
expectedRequest string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "200 with good version on first try",
|
name: "good docker version",
|
||||||
responses: []struct {
|
statusCode: http.StatusOK,
|
||||||
statusCode int
|
body: `{"Version":"25.0.1"}`,
|
||||||
body string
|
expectSuccess: true,
|
||||||
}{
|
|
||||||
{http.StatusOK, `{"Version":"25.0.1"}`},
|
|
||||||
},
|
|
||||||
expectedGood: true,
|
expectedGood: true,
|
||||||
expectedRequests: 1,
|
expectedPodman: false,
|
||||||
|
expectedRequest: "/version",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "200 with old version on first try",
|
name: "old docker version",
|
||||||
responses: []struct {
|
statusCode: http.StatusOK,
|
||||||
statusCode int
|
body: `{"Version":"24.0.7"}`,
|
||||||
body string
|
expectSuccess: true,
|
||||||
}{
|
|
||||||
{http.StatusOK, `{"Version":"24.0.7"}`},
|
|
||||||
},
|
|
||||||
expectedGood: false,
|
expectedGood: false,
|
||||||
expectedRequests: 1,
|
expectedPodman: false,
|
||||||
|
expectedRequest: "/version",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "non-200 then 200 with good version",
|
name: "podman from server header",
|
||||||
responses: []struct {
|
statusCode: http.StatusOK,
|
||||||
statusCode int
|
body: `{"Version":"5.5.0"}`,
|
||||||
body string
|
server: "Libpod/5.5.0",
|
||||||
}{
|
expectSuccess: true,
|
||||||
{http.StatusServiceUnavailable, `"not ready"`},
|
|
||||||
{http.StatusOK, `{"Version":"25.1.0"}`},
|
|
||||||
},
|
|
||||||
expectedGood: true,
|
expectedGood: true,
|
||||||
expectedRequests: 2,
|
expectedPodman: true,
|
||||||
|
expectedRequest: "/version",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "non-200 on all retries",
|
name: "non-200 response",
|
||||||
responses: []struct {
|
statusCode: http.StatusServiceUnavailable,
|
||||||
statusCode int
|
body: `"not ready"`,
|
||||||
body string
|
expectSuccess: false,
|
||||||
}{
|
|
||||||
{http.StatusInternalServerError, `"error"`},
|
|
||||||
{http.StatusUnauthorized, `"error"`},
|
|
||||||
},
|
|
||||||
expectedGood: false,
|
expectedGood: false,
|
||||||
expectedRequests: 2,
|
expectedPodman: false,
|
||||||
|
expectError: true,
|
||||||
|
expectedRequest: "/version",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -531,13 +593,13 @@ func TestCheckDockerVersion(t *testing.T) {
|
|||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
requestCount := 0
|
requestCount := 0
|
||||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
idx := requestCount
|
|
||||||
requestCount++
|
requestCount++
|
||||||
if idx >= len(tt.responses) {
|
assert.Equal(t, tt.expectedRequest, r.URL.EscapedPath())
|
||||||
idx = len(tt.responses) - 1
|
if tt.server != "" {
|
||||||
|
w.Header().Set("Server", tt.server)
|
||||||
}
|
}
|
||||||
w.WriteHeader(tt.responses[idx].statusCode)
|
w.WriteHeader(tt.statusCode)
|
||||||
fmt.Fprint(w, tt.responses[idx].body)
|
fmt.Fprint(w, tt.body)
|
||||||
}))
|
}))
|
||||||
defer server.Close()
|
defer server.Close()
|
||||||
|
|
||||||
@@ -549,17 +611,24 @@ func TestCheckDockerVersion(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
retrySleep: func(time.Duration) {},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dm.checkDockerVersion()
|
success, err := dm.checkDockerVersion()
|
||||||
|
|
||||||
|
assert.Equal(t, tt.expectSuccess, success)
|
||||||
|
assert.Equal(t, tt.expectSuccess, dm.dockerVersionChecked)
|
||||||
assert.Equal(t, tt.expectedGood, dm.goodDockerVersion)
|
assert.Equal(t, tt.expectedGood, dm.goodDockerVersion)
|
||||||
assert.Equal(t, tt.expectedRequests, requestCount)
|
assert.Equal(t, tt.expectedPodman, dm.usingPodman)
|
||||||
|
assert.Equal(t, 1, requestCount)
|
||||||
|
if tt.expectError {
|
||||||
|
require.Error(t, err)
|
||||||
|
} else {
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
t.Run("request error on all retries", func(t *testing.T) {
|
t.Run("request error", func(t *testing.T) {
|
||||||
requestCount := 0
|
requestCount := 0
|
||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
client: &http.Client{
|
client: &http.Client{
|
||||||
@@ -570,16 +639,171 @@ func TestCheckDockerVersion(t *testing.T) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
retrySleep: func(time.Duration) {},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dm.checkDockerVersion()
|
success, err := dm.checkDockerVersion()
|
||||||
|
|
||||||
|
assert.False(t, success)
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.False(t, dm.dockerVersionChecked)
|
||||||
assert.False(t, dm.goodDockerVersion)
|
assert.False(t, dm.goodDockerVersion)
|
||||||
assert.Equal(t, 2, requestCount)
|
assert.False(t, dm.usingPodman)
|
||||||
|
assert.Equal(t, 1, requestCount)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// newDockerManagerForVersionTest creates a dockerManager wired to a test server.
|
||||||
|
func newDockerManagerForVersionTest(server *httptest.Server) *dockerManager {
|
||||||
|
return &dockerManager{
|
||||||
|
client: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
DialContext: func(_ context.Context, network, _ string) (net.Conn, error) {
|
||||||
|
return net.Dial(network, server.Listener.Addr().String())
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
|
lastCpuContainer: make(map[uint16]map[string]uint64),
|
||||||
|
lastCpuSystem: make(map[uint16]map[string]uint64),
|
||||||
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetDockerStatsChecksDockerVersionAfterContainerList(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
containerServer string
|
||||||
|
versionServer string
|
||||||
|
versionBody string
|
||||||
|
expectedGood bool
|
||||||
|
expectedPodman bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "200 with good version on first try",
|
||||||
|
versionBody: `{"Version":"25.0.1"}`,
|
||||||
|
expectedGood: true,
|
||||||
|
expectedPodman: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "200 with old version on first try",
|
||||||
|
versionBody: `{"Version":"24.0.7"}`,
|
||||||
|
expectedGood: false,
|
||||||
|
expectedPodman: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "podman detected from server header",
|
||||||
|
containerServer: "Libpod/5.5.0",
|
||||||
|
expectedGood: true,
|
||||||
|
expectedPodman: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
requestCounts := map[string]int{}
|
||||||
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
requestCounts[r.URL.EscapedPath()]++
|
||||||
|
switch r.URL.EscapedPath() {
|
||||||
|
case "/containers/json":
|
||||||
|
if tt.containerServer != "" {
|
||||||
|
w.Header().Set("Server", tt.containerServer)
|
||||||
|
}
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
fmt.Fprint(w, `[]`)
|
||||||
|
case "/version":
|
||||||
|
if tt.versionServer != "" {
|
||||||
|
w.Header().Set("Server", tt.versionServer)
|
||||||
|
}
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
fmt.Fprint(w, tt.versionBody)
|
||||||
|
default:
|
||||||
|
t.Fatalf("unexpected path: %s", r.URL.EscapedPath())
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
defer server.Close()
|
||||||
|
|
||||||
|
dm := newDockerManagerForVersionTest(server)
|
||||||
|
|
||||||
|
stats, err := dm.getDockerStats(defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Empty(t, stats)
|
||||||
|
assert.True(t, dm.dockerVersionChecked)
|
||||||
|
assert.Equal(t, tt.expectedGood, dm.goodDockerVersion)
|
||||||
|
assert.Equal(t, tt.expectedPodman, dm.usingPodman)
|
||||||
|
assert.Equal(t, 1, requestCounts["/containers/json"])
|
||||||
|
if tt.expectedPodman {
|
||||||
|
assert.Equal(t, 0, requestCounts["/version"])
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, 1, requestCounts["/version"])
|
||||||
|
}
|
||||||
|
|
||||||
|
stats, err = dm.getDockerStats(defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Empty(t, stats)
|
||||||
|
assert.Equal(t, tt.expectedGood, dm.goodDockerVersion)
|
||||||
|
assert.Equal(t, tt.expectedPodman, dm.usingPodman)
|
||||||
|
assert.Equal(t, 2, requestCounts["/containers/json"])
|
||||||
|
if tt.expectedPodman {
|
||||||
|
assert.Equal(t, 0, requestCounts["/version"])
|
||||||
|
} else {
|
||||||
|
assert.Equal(t, 1, requestCounts["/version"])
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetDockerStatsRetriesVersionCheckUntilSuccess(t *testing.T) {
|
||||||
|
requestCounts := map[string]int{}
|
||||||
|
versionStatuses := []int{http.StatusServiceUnavailable, http.StatusOK}
|
||||||
|
versionBodies := []string{`"not ready"`, `{"Version":"25.1.0"}`}
|
||||||
|
|
||||||
|
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
requestCounts[r.URL.EscapedPath()]++
|
||||||
|
switch r.URL.EscapedPath() {
|
||||||
|
case "/containers/json":
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
fmt.Fprint(w, `[]`)
|
||||||
|
case "/version":
|
||||||
|
idx := requestCounts["/version"] - 1
|
||||||
|
if idx >= len(versionStatuses) {
|
||||||
|
idx = len(versionStatuses) - 1
|
||||||
|
}
|
||||||
|
w.WriteHeader(versionStatuses[idx])
|
||||||
|
fmt.Fprint(w, versionBodies[idx])
|
||||||
|
default:
|
||||||
|
t.Fatalf("unexpected path: %s", r.URL.EscapedPath())
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
defer server.Close()
|
||||||
|
|
||||||
|
dm := newDockerManagerForVersionTest(server)
|
||||||
|
|
||||||
|
stats, err := dm.getDockerStats(defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Empty(t, stats)
|
||||||
|
assert.False(t, dm.dockerVersionChecked)
|
||||||
|
assert.False(t, dm.goodDockerVersion)
|
||||||
|
assert.Equal(t, 1, requestCounts["/version"])
|
||||||
|
|
||||||
|
stats, err = dm.getDockerStats(defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Empty(t, stats)
|
||||||
|
assert.True(t, dm.dockerVersionChecked)
|
||||||
|
assert.True(t, dm.goodDockerVersion)
|
||||||
|
assert.Equal(t, 2, requestCounts["/containers/json"])
|
||||||
|
assert.Equal(t, 2, requestCounts["/version"])
|
||||||
|
|
||||||
|
stats, err = dm.getDockerStats(defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Empty(t, stats)
|
||||||
|
assert.Equal(t, 3, requestCounts["/containers/json"])
|
||||||
|
assert.Equal(t, 2, requestCounts["/version"])
|
||||||
|
}
|
||||||
|
|
||||||
func TestCycleCpuDeltas(t *testing.T) {
|
func TestCycleCpuDeltas(t *testing.T) {
|
||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
lastCpuContainer: map[uint16]map[string]uint64{
|
lastCpuContainer: map[uint16]map[string]uint64{
|
||||||
@@ -651,6 +875,7 @@ func TestDockerStatsWithMockData(t *testing.T) {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
containerStatsMap: make(map[string]*container.Stats),
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -796,23 +1021,22 @@ func TestNetworkStatsCalculationWithRealData(t *testing.T) {
|
|||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
ctr := &container.ApiInfo{IdShort: "test-container"}
|
ctr := &container.ApiInfo{IdShort: "test-container"}
|
||||||
cacheTimeMs := uint16(30000) // Test with 30 second cache
|
cacheTimeMs := uint16(30000) // Test with 30 second cache
|
||||||
|
|
||||||
// Use exact timing for deterministic results
|
// First call sets baseline (no previous read time, so rates should be 0)
|
||||||
exactly1000msAgo := time.Now().Add(-1000 * time.Millisecond)
|
sent1, recv1 := dm.calculateNetworkStats(ctr, apiStats1, "test", cacheTimeMs)
|
||||||
stats := &container.Stats{
|
|
||||||
PrevReadTime: exactly1000msAgo,
|
|
||||||
}
|
|
||||||
|
|
||||||
// First call sets baseline
|
|
||||||
sent1, recv1 := dm.calculateNetworkStats(ctr, apiStats1, stats, true, "test", cacheTimeMs)
|
|
||||||
assert.Equal(t, uint64(0), sent1)
|
assert.Equal(t, uint64(0), sent1)
|
||||||
assert.Equal(t, uint64(0), recv1)
|
assert.Equal(t, uint64(0), recv1)
|
||||||
|
|
||||||
// Cycle to establish baseline for this cache time
|
// Record read time and cycle to establish baseline for this cache time
|
||||||
|
exactly1000msAgo := time.Now().Add(-1000 * time.Millisecond)
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs] = map[string]time.Time{
|
||||||
|
"test-container": exactly1000msAgo,
|
||||||
|
}
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
|
|
||||||
// Calculate expected results precisely
|
// Calculate expected results precisely
|
||||||
@@ -823,7 +1047,7 @@ func TestNetworkStatsCalculationWithRealData(t *testing.T) {
|
|||||||
expectedRecvRate := deltaRecv * 1000 / expectedElapsedMs // Should be exactly 1000000
|
expectedRecvRate := deltaRecv * 1000 / expectedElapsedMs // Should be exactly 1000000
|
||||||
|
|
||||||
// Second call with changed data
|
// Second call with changed data
|
||||||
sent2, recv2 := dm.calculateNetworkStats(ctr, apiStats2, stats, true, "test", cacheTimeMs)
|
sent2, recv2 := dm.calculateNetworkStats(ctr, apiStats2, "test", cacheTimeMs)
|
||||||
|
|
||||||
// Should be exactly the expected rates (no tolerance needed)
|
// Should be exactly the expected rates (no tolerance needed)
|
||||||
assert.Equal(t, expectedSentRate, sent2)
|
assert.Equal(t, expectedSentRate, sent2)
|
||||||
@@ -831,12 +1055,13 @@ func TestNetworkStatsCalculationWithRealData(t *testing.T) {
|
|||||||
|
|
||||||
// Bad speed cap: set absurd delta over 1ms and expect 0 due to cap
|
// Bad speed cap: set absurd delta over 1ms and expect 0 due to cap
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
stats.PrevReadTime = time.Now().Add(-1 * time.Millisecond)
|
dm.lastNetworkReadTime[cacheTimeMs]["test-container"] = time.Now().Add(-1 * time.Millisecond)
|
||||||
apiStats1.Networks["eth0"] = container.NetworkStats{TxBytes: 0, RxBytes: 0}
|
apiStats1.Networks["eth0"] = container.NetworkStats{TxBytes: 0, RxBytes: 0}
|
||||||
apiStats2.Networks["eth0"] = container.NetworkStats{TxBytes: 10 * 1024 * 1024 * 1024, RxBytes: 0} // 10GB delta
|
apiStats2.Networks["eth0"] = container.NetworkStats{TxBytes: 10 * 1024 * 1024 * 1024, RxBytes: 0} // 10GB delta
|
||||||
_, _ = dm.calculateNetworkStats(ctr, apiStats1, stats, true, "test", cacheTimeMs) // baseline
|
_, _ = dm.calculateNetworkStats(ctr, apiStats1, "test", cacheTimeMs) // baseline
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
sent3, recv3 := dm.calculateNetworkStats(ctr, apiStats2, stats, true, "test", cacheTimeMs)
|
dm.lastNetworkReadTime[cacheTimeMs]["test-container"] = time.Now().Add(-1 * time.Millisecond)
|
||||||
|
sent3, recv3 := dm.calculateNetworkStats(ctr, apiStats2, "test", cacheTimeMs)
|
||||||
assert.Equal(t, uint64(0), sent3)
|
assert.Equal(t, uint64(0), sent3)
|
||||||
assert.Equal(t, uint64(0), recv3)
|
assert.Equal(t, uint64(0), recv3)
|
||||||
}
|
}
|
||||||
@@ -857,6 +1082,7 @@ func TestContainerStatsEndToEndWithRealData(t *testing.T) {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
containerStatsMap: make(map[string]*container.Stats),
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -978,6 +1204,7 @@ func TestDockerStatsWorkflow(t *testing.T) {
|
|||||||
lastCpuSystem: make(map[uint16]map[string]uint64),
|
lastCpuSystem: make(map[uint16]map[string]uint64),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
containerStatsMap: make(map[string]*container.Stats),
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1242,6 +1469,7 @@ func TestUpdateContainerStatsUsesPodmanInspectHealthFallback(t *testing.T) {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
ctr := &container.ApiInfo{
|
ctr := &container.ApiInfo{
|
||||||
|
|||||||
13
agent/gpu.go
13
agent/gpu.go
@@ -461,7 +461,7 @@ func (gm *GPUManager) discoverGpuCapabilities() gpuCapabilities {
|
|||||||
caps.hasNvtop = true
|
caps.hasNvtop = true
|
||||||
}
|
}
|
||||||
if runtime.GOOS == "darwin" {
|
if runtime.GOOS == "darwin" {
|
||||||
if _, err := exec.LookPath(macmonCmd); err == nil {
|
if _, err := utils.LookPathHomebrew(macmonCmd); err == nil {
|
||||||
caps.hasMacmon = true
|
caps.hasMacmon = true
|
||||||
}
|
}
|
||||||
if _, err := exec.LookPath(powermetricsCmd); err == nil {
|
if _, err := exec.LookPath(powermetricsCmd); err == nil {
|
||||||
@@ -542,7 +542,7 @@ func (gm *GPUManager) collectorDefinitions(caps gpuCapabilities) map[collectorSo
|
|||||||
return map[collectorSource]collectorDefinition{
|
return map[collectorSource]collectorDefinition{
|
||||||
collectorSourceNVML: {
|
collectorSourceNVML: {
|
||||||
group: collectorGroupNvidia,
|
group: collectorGroupNvidia,
|
||||||
available: caps.hasNvidiaSmi,
|
available: true,
|
||||||
start: func(_ func()) bool {
|
start: func(_ func()) bool {
|
||||||
return gm.startNvmlCollector()
|
return gm.startNvmlCollector()
|
||||||
},
|
},
|
||||||
@@ -734,9 +734,6 @@ func NewGPUManager() (*GPUManager, error) {
|
|||||||
}
|
}
|
||||||
var gm GPUManager
|
var gm GPUManager
|
||||||
caps := gm.discoverGpuCapabilities()
|
caps := gm.discoverGpuCapabilities()
|
||||||
if !hasAnyGpuCollector(caps) {
|
|
||||||
return nil, fmt.Errorf(noGPUFoundMsg)
|
|
||||||
}
|
|
||||||
gm.GpuDataMap = make(map[string]*system.GPUData)
|
gm.GpuDataMap = make(map[string]*system.GPUData)
|
||||||
|
|
||||||
// Jetson devices should always use tegrastats (ignore GPU_COLLECTOR).
|
// Jetson devices should always use tegrastats (ignore GPU_COLLECTOR).
|
||||||
@@ -745,7 +742,7 @@ func NewGPUManager() (*GPUManager, error) {
|
|||||||
return &gm, nil
|
return &gm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// if GPU_COLLECTOR is set, start user-defined collectors.
|
// Respect explicit collector selection before capability auto-detection.
|
||||||
if collectorConfig, ok := utils.GetEnv("GPU_COLLECTOR"); ok && strings.TrimSpace(collectorConfig) != "" {
|
if collectorConfig, ok := utils.GetEnv("GPU_COLLECTOR"); ok && strings.TrimSpace(collectorConfig) != "" {
|
||||||
priorities := parseCollectorPriority(collectorConfig)
|
priorities := parseCollectorPriority(collectorConfig)
|
||||||
if gm.startCollectorsByPriority(priorities, caps) == 0 {
|
if gm.startCollectorsByPriority(priorities, caps) == 0 {
|
||||||
@@ -754,6 +751,10 @@ func NewGPUManager() (*GPUManager, error) {
|
|||||||
return &gm, nil
|
return &gm, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if !hasAnyGpuCollector(caps) {
|
||||||
|
return nil, fmt.Errorf(noGPUFoundMsg)
|
||||||
|
}
|
||||||
|
|
||||||
// auto-detect and start collectors when GPU_COLLECTOR is unset.
|
// auto-detect and start collectors when GPU_COLLECTOR is unset.
|
||||||
if gm.startCollectorsByPriority(gm.resolveLegacyCollectorPriority(caps), caps) == 0 {
|
if gm.startCollectorsByPriority(gm.resolveLegacyCollectorPriority(caps), caps) == 0 {
|
||||||
return nil, fmt.Errorf(noGPUFoundMsg)
|
return nil, fmt.Errorf(noGPUFoundMsg)
|
||||||
|
|||||||
@@ -156,6 +156,7 @@ func (gm *GPUManager) updateAmdGpuData(cardPath string) bool {
|
|||||||
func readSysfsFloat(path string) (float64, error) {
|
func readSysfsFloat(path string) (float64, error) {
|
||||||
val, err := utils.ReadStringFileLimited(path, 64)
|
val, err := utils.ReadStringFileLimited(path, 64)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
slog.Debug("Failed to read sysfs value", "path", path, "error", err)
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
return strconv.ParseFloat(val, 64)
|
return strconv.ParseFloat(val, 64)
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -171,7 +172,11 @@ type macmonSample struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (gm *GPUManager) collectMacmonPipe() (err error) {
|
func (gm *GPUManager) collectMacmonPipe() (err error) {
|
||||||
cmd := exec.Command(macmonCmd, "pipe", "-i", strconv.Itoa(macmonIntervalMs))
|
macmonPath, err := utils.LookPathHomebrew(macmonCmd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cmd := exec.Command(macmonPath, "pipe", "-i", strconv.Itoa(macmonIntervalMs))
|
||||||
// Avoid blocking if macmon writes to stderr.
|
// Avoid blocking if macmon writes to stderr.
|
||||||
cmd.Stderr = io.Discard
|
cmd.Stderr = io.Discard
|
||||||
stdout, err := cmd.StdoutPipe()
|
stdout, err := cmd.StdoutPipe()
|
||||||
|
|||||||
@@ -1461,6 +1461,25 @@ func TestNewGPUManagerConfiguredCollectorsMustStart(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCollectorDefinitionsNvmlDoesNotRequireNvidiaSmi(t *testing.T) {
|
||||||
|
gm := &GPUManager{}
|
||||||
|
definitions := gm.collectorDefinitions(gpuCapabilities{})
|
||||||
|
require.Contains(t, definitions, collectorSourceNVML)
|
||||||
|
assert.True(t, definitions[collectorSourceNVML].available)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewGPUManagerConfiguredNvmlBypassesCapabilityGate(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
t.Setenv("PATH", dir)
|
||||||
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvml")
|
||||||
|
|
||||||
|
gm, err := NewGPUManager()
|
||||||
|
require.Nil(t, gm)
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "no configured GPU collectors are available")
|
||||||
|
assert.NotContains(t, err.Error(), noGPUFoundMsg)
|
||||||
|
}
|
||||||
|
|
||||||
func TestNewGPUManagerJetsonIgnoresCollectorConfig(t *testing.T) {
|
func TestNewGPUManagerJetsonIgnoresCollectorConfig(t *testing.T) {
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
t.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
|
|||||||
@@ -8,6 +8,6 @@
|
|||||||
</PropertyGroup>
|
</PropertyGroup>
|
||||||
|
|
||||||
<ItemGroup>
|
<ItemGroup>
|
||||||
<PackageReference Include="LibreHardwareMonitorLib" Version="0.9.5" />
|
<PackageReference Include="LibreHardwareMonitorLib" Version="0.9.6" />
|
||||||
</ItemGroup>
|
</ItemGroup>
|
||||||
</Project>
|
</Project>
|
||||||
|
|||||||
@@ -2,12 +2,14 @@ package agent
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"path"
|
"path"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/agent/utils"
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
@@ -17,13 +19,20 @@ import (
|
|||||||
"github.com/shirou/gopsutil/v4/sensors"
|
"github.com/shirou/gopsutil/v4/sensors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var errTemperatureFetchTimeout = errors.New("temperature collection timed out")
|
||||||
|
|
||||||
|
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
||||||
|
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
||||||
|
|
||||||
type SensorConfig struct {
|
type SensorConfig struct {
|
||||||
context context.Context
|
context context.Context
|
||||||
sensors map[string]struct{}
|
sensors map[string]struct{}
|
||||||
primarySensor string
|
primarySensor string
|
||||||
|
timeout time.Duration
|
||||||
isBlacklist bool
|
isBlacklist bool
|
||||||
hasWildcards bool
|
hasWildcards bool
|
||||||
skipCollection bool
|
skipCollection bool
|
||||||
|
firstRun bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Agent) newSensorConfig() *SensorConfig {
|
func (a *Agent) newSensorConfig() *SensorConfig {
|
||||||
@@ -31,20 +40,29 @@ func (a *Agent) newSensorConfig() *SensorConfig {
|
|||||||
sysSensors, _ := utils.GetEnv("SYS_SENSORS")
|
sysSensors, _ := utils.GetEnv("SYS_SENSORS")
|
||||||
sensorsEnvVal, sensorsSet := utils.GetEnv("SENSORS")
|
sensorsEnvVal, sensorsSet := utils.GetEnv("SENSORS")
|
||||||
skipCollection := sensorsSet && sensorsEnvVal == ""
|
skipCollection := sensorsSet && sensorsEnvVal == ""
|
||||||
|
sensorsTimeout, _ := utils.GetEnv("SENSORS_TIMEOUT")
|
||||||
|
|
||||||
return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, skipCollection)
|
return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, sensorsTimeout, skipCollection)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
|
||||||
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
|
||||||
|
|
||||||
// newSensorConfigWithEnv creates a SensorConfig with the provided environment variables
|
// newSensorConfigWithEnv creates a SensorConfig with the provided environment variables
|
||||||
// sensorsSet indicates if the SENSORS environment variable was explicitly set (even to empty string)
|
// sensorsSet indicates if the SENSORS environment variable was explicitly set (even to empty string)
|
||||||
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal string, skipCollection bool) *SensorConfig {
|
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, sensorsTimeout string, skipCollection bool) *SensorConfig {
|
||||||
|
timeout := 2 * time.Second
|
||||||
|
if sensorsTimeout != "" {
|
||||||
|
if d, err := time.ParseDuration(sensorsTimeout); err == nil {
|
||||||
|
timeout = d
|
||||||
|
} else {
|
||||||
|
slog.Warn("Invalid SENSORS_TIMEOUT", "value", sensorsTimeout)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
config := &SensorConfig{
|
config := &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: primarySensor,
|
primarySensor: primarySensor,
|
||||||
|
timeout: timeout,
|
||||||
skipCollection: skipCollection,
|
skipCollection: skipCollection,
|
||||||
|
firstRun: true,
|
||||||
sensors: make(map[string]struct{}),
|
sensors: make(map[string]struct{}),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -86,10 +104,12 @@ func (a *Agent) updateTemperatures(systemStats *system.Stats) {
|
|||||||
// reset high temp
|
// reset high temp
|
||||||
a.systemInfo.DashboardTemp = 0
|
a.systemInfo.DashboardTemp = 0
|
||||||
|
|
||||||
temps, err := a.getTempsWithPanicRecovery(getSensorTemps)
|
temps, err := a.getTempsWithTimeout(getSensorTemps)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// retry once on panic (gopsutil/issues/1832)
|
// retry once on panic (gopsutil/issues/1832)
|
||||||
temps, err = a.getTempsWithPanicRecovery(getSensorTemps)
|
if !errors.Is(err, errTemperatureFetchTimeout) {
|
||||||
|
temps, err = a.getTempsWithTimeout(getSensorTemps)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("Error updating temperatures", "err", err)
|
slog.Warn("Error updating temperatures", "err", err)
|
||||||
if len(systemStats.Temperatures) > 0 {
|
if len(systemStats.Temperatures) > 0 {
|
||||||
@@ -152,6 +172,34 @@ func (a *Agent) getTempsWithPanicRecovery(getTemps getTempsFn) (temps []sensors.
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (a *Agent) getTempsWithTimeout(getTemps getTempsFn) ([]sensors.TemperatureStat, error) {
|
||||||
|
type result struct {
|
||||||
|
temps []sensors.TemperatureStat
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use a longer timeout on the first run to allow for initialization
|
||||||
|
// (e.g. Windows LHM subprocess startup)
|
||||||
|
timeout := a.sensorConfig.timeout
|
||||||
|
if a.sensorConfig.firstRun {
|
||||||
|
a.sensorConfig.firstRun = false
|
||||||
|
timeout = 10 * time.Second
|
||||||
|
}
|
||||||
|
|
||||||
|
resultCh := make(chan result, 1)
|
||||||
|
go func() {
|
||||||
|
temps, err := a.getTempsWithPanicRecovery(getTemps)
|
||||||
|
resultCh <- result{temps: temps, err: err}
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case res := <-resultCh:
|
||||||
|
return res.temps, res.err
|
||||||
|
case <-time.After(timeout):
|
||||||
|
return nil, errTemperatureFetchTimeout
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// isValidSensor checks if a sensor is valid based on the sensor name and the sensor config
|
// isValidSensor checks if a sensor is valid based on the sensor name and the sensor config
|
||||||
func isValidSensor(sensorName string, config *SensorConfig) bool {
|
func isValidSensor(sensorName string, config *SensorConfig) bool {
|
||||||
// if no sensors configured, everything is valid
|
// if no sensors configured, everything is valid
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
@@ -167,6 +168,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
primarySensor string
|
primarySensor string
|
||||||
sysSensors string
|
sysSensors string
|
||||||
sensors string
|
sensors string
|
||||||
|
sensorsTimeout string
|
||||||
skipCollection bool
|
skipCollection bool
|
||||||
expectedConfig *SensorConfig
|
expectedConfig *SensorConfig
|
||||||
}{
|
}{
|
||||||
@@ -178,12 +180,37 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "",
|
primarySensor: "",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{},
|
sensors: map[string]struct{}{},
|
||||||
isBlacklist: false,
|
isBlacklist: false,
|
||||||
hasWildcards: false,
|
hasWildcards: false,
|
||||||
skipCollection: false,
|
skipCollection: false,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "Custom timeout",
|
||||||
|
primarySensor: "",
|
||||||
|
sysSensors: "",
|
||||||
|
sensors: "",
|
||||||
|
sensorsTimeout: "5s",
|
||||||
|
expectedConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 5 * time.Second,
|
||||||
|
sensors: map[string]struct{}{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Invalid timeout falls back to default",
|
||||||
|
primarySensor: "",
|
||||||
|
sysSensors: "",
|
||||||
|
sensors: "",
|
||||||
|
sensorsTimeout: "notaduration",
|
||||||
|
expectedConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 2 * time.Second,
|
||||||
|
sensors: map[string]struct{}{},
|
||||||
|
},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
name: "Explicitly set to empty string",
|
name: "Explicitly set to empty string",
|
||||||
primarySensor: "",
|
primarySensor: "",
|
||||||
@@ -193,6 +220,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "",
|
primarySensor: "",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{},
|
sensors: map[string]struct{}{},
|
||||||
isBlacklist: false,
|
isBlacklist: false,
|
||||||
hasWildcards: false,
|
hasWildcards: false,
|
||||||
@@ -207,6 +235,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{},
|
sensors: map[string]struct{}{},
|
||||||
isBlacklist: false,
|
isBlacklist: false,
|
||||||
hasWildcards: false,
|
hasWildcards: false,
|
||||||
@@ -220,6 +249,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_temp": {},
|
"cpu_temp": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -236,6 +266,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_temp": {},
|
"cpu_temp": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -252,6 +283,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_*": {},
|
"cpu_*": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -268,6 +300,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_*": {},
|
"cpu_*": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -283,6 +316,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
sensors: "cpu_temp",
|
sensors: "cpu_temp",
|
||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_temp": {},
|
"cpu_temp": {},
|
||||||
},
|
},
|
||||||
@@ -294,7 +328,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
result := agent.newSensorConfigWithEnv(tt.primarySensor, tt.sysSensors, tt.sensors, tt.skipCollection)
|
result := agent.newSensorConfigWithEnv(tt.primarySensor, tt.sysSensors, tt.sensors, tt.sensorsTimeout, tt.skipCollection)
|
||||||
|
|
||||||
// Check primary sensor
|
// Check primary sensor
|
||||||
assert.Equal(t, tt.expectedConfig.primarySensor, result.primarySensor)
|
assert.Equal(t, tt.expectedConfig.primarySensor, result.primarySensor)
|
||||||
@@ -313,6 +347,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
// Check flags
|
// Check flags
|
||||||
assert.Equal(t, tt.expectedConfig.isBlacklist, result.isBlacklist)
|
assert.Equal(t, tt.expectedConfig.isBlacklist, result.isBlacklist)
|
||||||
assert.Equal(t, tt.expectedConfig.hasWildcards, result.hasWildcards)
|
assert.Equal(t, tt.expectedConfig.hasWildcards, result.hasWildcards)
|
||||||
|
assert.Equal(t, tt.expectedConfig.timeout, result.timeout)
|
||||||
|
|
||||||
// Check context
|
// Check context
|
||||||
if tt.sysSensors != "" {
|
if tt.sysSensors != "" {
|
||||||
@@ -332,12 +367,14 @@ func TestNewSensorConfig(t *testing.T) {
|
|||||||
t.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", "test_primary")
|
t.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", "test_primary")
|
||||||
t.Setenv("BESZEL_AGENT_SYS_SENSORS", "/test/path")
|
t.Setenv("BESZEL_AGENT_SYS_SENSORS", "/test/path")
|
||||||
t.Setenv("BESZEL_AGENT_SENSORS", "test_sensor1,test_*,test_sensor3")
|
t.Setenv("BESZEL_AGENT_SENSORS", "test_sensor1,test_*,test_sensor3")
|
||||||
|
t.Setenv("BESZEL_AGENT_SENSORS_TIMEOUT", "7s")
|
||||||
|
|
||||||
agent := &Agent{}
|
agent := &Agent{}
|
||||||
result := agent.newSensorConfig()
|
result := agent.newSensorConfig()
|
||||||
|
|
||||||
// Verify results
|
// Verify results
|
||||||
assert.Equal(t, "test_primary", result.primarySensor)
|
assert.Equal(t, "test_primary", result.primarySensor)
|
||||||
|
assert.Equal(t, 7*time.Second, result.timeout)
|
||||||
assert.NotNil(t, result.sensors)
|
assert.NotNil(t, result.sensors)
|
||||||
assert.Equal(t, 3, len(result.sensors))
|
assert.Equal(t, 3, len(result.sensors))
|
||||||
assert.True(t, result.hasWildcards)
|
assert.True(t, result.hasWildcards)
|
||||||
@@ -526,3 +563,59 @@ func TestGetTempsWithPanicRecovery(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGetTempsWithTimeout(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
sensorConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 10 * time.Millisecond,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("returns temperatures before timeout", func(t *testing.T) {
|
||||||
|
temps, err := agent.getTempsWithTimeout(func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
return []sensors.TemperatureStat{{SensorKey: "cpu_temp", Temperature: 42}}, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, temps, 1)
|
||||||
|
assert.Equal(t, "cpu_temp", temps[0].SensorKey)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns timeout error when collector hangs", func(t *testing.T) {
|
||||||
|
temps, err := agent.getTempsWithTimeout(func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
return []sensors.TemperatureStat{{SensorKey: "cpu_temp", Temperature: 42}}, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Nil(t, temps)
|
||||||
|
assert.ErrorIs(t, err, errTemperatureFetchTimeout)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateTemperaturesSkipsOnTimeout(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
systemInfo: system.Info{DashboardTemp: 99},
|
||||||
|
sensorConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 10 * time.Millisecond,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Cleanup(func() {
|
||||||
|
getSensorTemps = sensors.TemperaturesWithContext
|
||||||
|
})
|
||||||
|
getSensorTemps = func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
stats := &system.Stats{
|
||||||
|
Temperatures: map[string]float64{"stale": 50},
|
||||||
|
}
|
||||||
|
|
||||||
|
agent.updateTemperatures(stats)
|
||||||
|
|
||||||
|
assert.Equal(t, 0.0, agent.systemInfo.DashboardTemp)
|
||||||
|
assert.Equal(t, map[string]float64{}, stats.Temperatures)
|
||||||
|
}
|
||||||
|
|||||||
@@ -193,7 +193,7 @@ func (a *Agent) handleSSHRequest(w io.Writer, req *common.HubRequest[cbor.RawMes
|
|||||||
|
|
||||||
// handleLegacyStats serves the legacy one-shot stats payload for older hubs
|
// handleLegacyStats serves the legacy one-shot stats payload for older hubs
|
||||||
func (a *Agent) handleLegacyStats(w io.Writer, hubVersion semver.Version) error {
|
func (a *Agent) handleLegacyStats(w io.Writer, hubVersion semver.Version) error {
|
||||||
stats := a.gatherStats(common.DataRequestOptions{CacheTimeMs: 60_000})
|
stats := a.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||||
return a.writeToSession(w, stats, hubVersion)
|
return a.writeToSession(w, stats, hubVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -31,6 +31,9 @@ type SmartManager struct {
|
|||||||
lastScanTime time.Time
|
lastScanTime time.Time
|
||||||
smartctlPath string
|
smartctlPath string
|
||||||
excludedDevices map[string]struct{}
|
excludedDevices map[string]struct{}
|
||||||
|
darwinNvmeOnce sync.Once
|
||||||
|
darwinNvmeCapacity map[string]uint64 // serial → bytes cache, written once via darwinNvmeOnce
|
||||||
|
darwinNvmeProvider func() ([]byte, error) // overridable for testing
|
||||||
}
|
}
|
||||||
|
|
||||||
type scanOutput struct {
|
type scanOutput struct {
|
||||||
@@ -1033,6 +1036,52 @@ func parseScsiGigabytesProcessed(value string) int64 {
|
|||||||
return parsed
|
return parsed
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// lookupDarwinNvmeCapacity returns the capacity in bytes for a given NVMe serial number on Darwin.
|
||||||
|
// It uses system_profiler SPNVMeDataType to get capacity since Apple SSDs don't report user_capacity
|
||||||
|
// via smartctl. Results are cached after the first call via sync.Once.
|
||||||
|
func (sm *SmartManager) lookupDarwinNvmeCapacity(serial string) uint64 {
|
||||||
|
sm.darwinNvmeOnce.Do(func() {
|
||||||
|
sm.darwinNvmeCapacity = make(map[string]uint64)
|
||||||
|
|
||||||
|
provider := sm.darwinNvmeProvider
|
||||||
|
if provider == nil {
|
||||||
|
provider = func() ([]byte, error) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
return exec.CommandContext(ctx, "system_profiler", "SPNVMeDataType", "-json").Output()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out, err := provider()
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("system_profiler NVMe lookup failed", "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var result struct {
|
||||||
|
SPNVMeDataType []struct {
|
||||||
|
Items []struct {
|
||||||
|
DeviceSerial string `json:"device_serial"`
|
||||||
|
SizeInBytes uint64 `json:"size_in_bytes"`
|
||||||
|
} `json:"_items"`
|
||||||
|
} `json:"SPNVMeDataType"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(out, &result); err != nil {
|
||||||
|
slog.Debug("system_profiler NVMe parse failed", "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, controller := range result.SPNVMeDataType {
|
||||||
|
for _, item := range controller.Items {
|
||||||
|
if item.DeviceSerial != "" && item.SizeInBytes > 0 {
|
||||||
|
sm.darwinNvmeCapacity[item.DeviceSerial] = item.SizeInBytes
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return sm.darwinNvmeCapacity[serial]
|
||||||
|
}
|
||||||
|
|
||||||
// parseSmartForNvme parses the output of smartctl --all -j /dev/nvmeX and updates the SmartDataMap
|
// parseSmartForNvme parses the output of smartctl --all -j /dev/nvmeX and updates the SmartDataMap
|
||||||
// Returns hasValidData and exitStatus
|
// Returns hasValidData and exitStatus
|
||||||
func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
||||||
@@ -1069,6 +1118,12 @@ func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
|||||||
smartData.SerialNumber = data.SerialNumber
|
smartData.SerialNumber = data.SerialNumber
|
||||||
smartData.FirmwareVersion = data.FirmwareVersion
|
smartData.FirmwareVersion = data.FirmwareVersion
|
||||||
smartData.Capacity = data.UserCapacity.Bytes
|
smartData.Capacity = data.UserCapacity.Bytes
|
||||||
|
if smartData.Capacity == 0 {
|
||||||
|
smartData.Capacity = data.NVMeTotalCapacity
|
||||||
|
}
|
||||||
|
if smartData.Capacity == 0 && (runtime.GOOS == "darwin" || sm.darwinNvmeProvider != nil) {
|
||||||
|
smartData.Capacity = sm.lookupDarwinNvmeCapacity(data.SerialNumber)
|
||||||
|
}
|
||||||
smartData.Temperature = data.NVMeSmartHealthInformationLog.Temperature
|
smartData.Temperature = data.NVMeSmartHealthInformationLog.Temperature
|
||||||
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
||||||
smartData.DiskName = data.Device.Name
|
smartData.DiskName = data.Device.Name
|
||||||
@@ -1104,32 +1159,21 @@ func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
|||||||
|
|
||||||
// detectSmartctl checks if smartctl is installed, returns an error if not
|
// detectSmartctl checks if smartctl is installed, returns an error if not
|
||||||
func (sm *SmartManager) detectSmartctl() (string, error) {
|
func (sm *SmartManager) detectSmartctl() (string, error) {
|
||||||
isWindows := runtime.GOOS == "windows"
|
if runtime.GOOS == "windows" {
|
||||||
|
|
||||||
// Load embedded smartctl.exe for Windows amd64 builds.
|
// Load embedded smartctl.exe for Windows amd64 builds.
|
||||||
if isWindows && runtime.GOARCH == "amd64" {
|
if runtime.GOARCH == "amd64" {
|
||||||
if path, err := ensureEmbeddedSmartctl(); err == nil {
|
if path, err := ensureEmbeddedSmartctl(); err == nil {
|
||||||
return path, nil
|
return path, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Try to find smartctl in the default installation location
|
||||||
if path, err := exec.LookPath("smartctl"); err == nil {
|
const location = "C:\\Program Files\\smartmontools\\bin\\smartctl.exe"
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
locations := []string{}
|
|
||||||
if isWindows {
|
|
||||||
locations = append(locations,
|
|
||||||
"C:\\Program Files\\smartmontools\\bin\\smartctl.exe",
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
locations = append(locations, "/opt/homebrew/bin/smartctl")
|
|
||||||
}
|
|
||||||
for _, location := range locations {
|
|
||||||
if _, err := os.Stat(location); err == nil {
|
if _, err := os.Stat(location); err == nil {
|
||||||
return location, nil
|
return location, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return "", errors.New("smartctl not found")
|
|
||||||
|
return utils.LookPathHomebrew("smartctl")
|
||||||
}
|
}
|
||||||
|
|
||||||
// isNvmeControllerPath checks if the path matches an NVMe controller pattern
|
// isNvmeControllerPath checks if the path matches an NVMe controller pattern
|
||||||
|
|||||||
@@ -1199,3 +1199,81 @@ func TestIsNvmeControllerPath(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParseSmartForNvmeAppleSSD(t *testing.T) {
|
||||||
|
// Apple SSDs don't report user_capacity via smartctl; capacity should be fetched
|
||||||
|
// from system_profiler via the darwinNvmeProvider fallback.
|
||||||
|
fixturePath := filepath.Join("test-data", "smart", "apple_nvme.json")
|
||||||
|
data, err := os.ReadFile(fixturePath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
providerCalls := 0
|
||||||
|
fakeProvider := func() ([]byte, error) {
|
||||||
|
providerCalls++
|
||||||
|
return []byte(`{
|
||||||
|
"SPNVMeDataType": [{
|
||||||
|
"_items": [{
|
||||||
|
"device_serial": "0ba0147940253c15",
|
||||||
|
"size_in_bytes": 251000193024
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}`), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{
|
||||||
|
SmartDataMap: make(map[string]*smart.SmartData),
|
||||||
|
darwinNvmeProvider: fakeProvider,
|
||||||
|
}
|
||||||
|
|
||||||
|
hasData, _ := sm.parseSmartForNvme(data)
|
||||||
|
require.True(t, hasData)
|
||||||
|
|
||||||
|
deviceData, ok := sm.SmartDataMap["0ba0147940253c15"]
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, "APPLE SSD AP0256Q", deviceData.ModelName)
|
||||||
|
assert.Equal(t, uint64(251000193024), deviceData.Capacity)
|
||||||
|
assert.Equal(t, uint8(42), deviceData.Temperature)
|
||||||
|
assert.Equal(t, "PASSED", deviceData.SmartStatus)
|
||||||
|
assert.Equal(t, 1, providerCalls, "system_profiler should be called once")
|
||||||
|
|
||||||
|
// Second parse: provider should NOT be called again (cache hit)
|
||||||
|
_, _ = sm.parseSmartForNvme(data)
|
||||||
|
assert.Equal(t, 1, providerCalls, "system_profiler should not be called again after caching")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLookupDarwinNvmeCapacityMultipleDisks(t *testing.T) {
|
||||||
|
fakeProvider := func() ([]byte, error) {
|
||||||
|
return []byte(`{
|
||||||
|
"SPNVMeDataType": [
|
||||||
|
{
|
||||||
|
"_items": [
|
||||||
|
{"device_serial": "serial-disk0", "size_in_bytes": 251000193024},
|
||||||
|
{"device_serial": "serial-disk1", "size_in_bytes": 1000204886016}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"_items": [
|
||||||
|
{"device_serial": "serial-disk2", "size_in_bytes": 512110190592}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}`), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{darwinNvmeProvider: fakeProvider}
|
||||||
|
assert.Equal(t, uint64(251000193024), sm.lookupDarwinNvmeCapacity("serial-disk0"))
|
||||||
|
assert.Equal(t, uint64(1000204886016), sm.lookupDarwinNvmeCapacity("serial-disk1"))
|
||||||
|
assert.Equal(t, uint64(512110190592), sm.lookupDarwinNvmeCapacity("serial-disk2"))
|
||||||
|
assert.Equal(t, uint64(0), sm.lookupDarwinNvmeCapacity("unknown-serial"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLookupDarwinNvmeCapacityProviderError(t *testing.T) {
|
||||||
|
fakeProvider := func() ([]byte, error) {
|
||||||
|
return nil, errors.New("system_profiler not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{darwinNvmeProvider: fakeProvider}
|
||||||
|
assert.Equal(t, uint64(0), sm.lookupDarwinNvmeCapacity("any-serial"))
|
||||||
|
// Cache should be initialized even on error so we don't retry (Once already fired)
|
||||||
|
assert.NotNil(t, sm.darwinNvmeCapacity)
|
||||||
|
}
|
||||||
|
|||||||
@@ -8,7 +8,6 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
"github.com/henrygd/beszel/agent/battery"
|
"github.com/henrygd/beszel/agent/battery"
|
||||||
@@ -23,13 +22,6 @@ import (
|
|||||||
"github.com/shirou/gopsutil/v4/mem"
|
"github.com/shirou/gopsutil/v4/mem"
|
||||||
)
|
)
|
||||||
|
|
||||||
// prevDisk stores previous per-device disk counters for a given cache interval
|
|
||||||
type prevDisk struct {
|
|
||||||
readBytes uint64
|
|
||||||
writeBytes uint64
|
|
||||||
at time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sets initial / non-changing values about the host system
|
// Sets initial / non-changing values about the host system
|
||||||
func (a *Agent) refreshSystemDetails() {
|
func (a *Agent) refreshSystemDetails() {
|
||||||
a.systemInfo.AgentVersion = beszel.Version
|
a.systemInfo.AgentVersion = beszel.Version
|
||||||
@@ -115,6 +107,26 @@ func (a *Agent) refreshSystemDetails() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// attachSystemDetails returns details only for fresh default-interval responses.
|
||||||
|
func (a *Agent) attachSystemDetails(data *system.CombinedData, cacheTimeMs uint16, includeRequested bool) *system.CombinedData {
|
||||||
|
if cacheTimeMs != defaultDataCacheTimeMs || (!includeRequested && !a.detailsDirty) {
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy data to avoid adding details to the original cached struct
|
||||||
|
response := *data
|
||||||
|
response.Details = &a.systemDetails
|
||||||
|
a.detailsDirty = false
|
||||||
|
return &response
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateSystemDetails applies a mutation to the static details payload and marks
|
||||||
|
// it for inclusion on the next fresh default-interval response.
|
||||||
|
func (a *Agent) updateSystemDetails(updateFunc func(details *system.Details)) {
|
||||||
|
updateFunc(&a.systemDetails)
|
||||||
|
a.detailsDirty = true
|
||||||
|
}
|
||||||
|
|
||||||
// Returns current info, stats about the host system
|
// Returns current info, stats about the host system
|
||||||
func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
||||||
var systemStats system.Stats
|
var systemStats system.Stats
|
||||||
|
|||||||
61
agent/system_test.go
Normal file
61
agent/system_test.go
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGatherStatsDoesNotAttachDetailsToCachedRequests(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
cache: NewSystemDataCache(),
|
||||||
|
systemDetails: system.Details{Hostname: "updated-host", Podman: true},
|
||||||
|
detailsDirty: true,
|
||||||
|
}
|
||||||
|
cached := &system.CombinedData{
|
||||||
|
Info: system.Info{Hostname: "cached-host"},
|
||||||
|
}
|
||||||
|
agent.cache.Set(cached, defaultDataCacheTimeMs)
|
||||||
|
|
||||||
|
response := agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||||
|
|
||||||
|
assert.Same(t, cached, response)
|
||||||
|
assert.Nil(t, response.Details)
|
||||||
|
assert.True(t, agent.detailsDirty)
|
||||||
|
assert.Equal(t, "cached-host", response.Info.Hostname)
|
||||||
|
assert.Nil(t, cached.Details)
|
||||||
|
|
||||||
|
secondResponse := agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||||
|
assert.Same(t, cached, secondResponse)
|
||||||
|
assert.Nil(t, secondResponse.Details)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateSystemDetailsMarksDetailsDirty(t *testing.T) {
|
||||||
|
agent := &Agent{}
|
||||||
|
|
||||||
|
agent.updateSystemDetails(func(details *system.Details) {
|
||||||
|
details.Hostname = "updated-host"
|
||||||
|
details.Podman = true
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.True(t, agent.detailsDirty)
|
||||||
|
assert.Equal(t, "updated-host", agent.systemDetails.Hostname)
|
||||||
|
assert.True(t, agent.systemDetails.Podman)
|
||||||
|
|
||||||
|
original := &system.CombinedData{}
|
||||||
|
realTimeResponse := agent.attachSystemDetails(original, 1000, true)
|
||||||
|
assert.Same(t, original, realTimeResponse)
|
||||||
|
assert.Nil(t, realTimeResponse.Details)
|
||||||
|
assert.True(t, agent.detailsDirty)
|
||||||
|
|
||||||
|
response := agent.attachSystemDetails(original, defaultDataCacheTimeMs, false)
|
||||||
|
require.NotNil(t, response.Details)
|
||||||
|
assert.NotSame(t, original, response)
|
||||||
|
assert.Equal(t, "updated-host", response.Details.Hostname)
|
||||||
|
assert.True(t, response.Details.Podman)
|
||||||
|
assert.False(t, agent.detailsDirty)
|
||||||
|
assert.Nil(t, original.Details)
|
||||||
|
}
|
||||||
51
agent/test-data/smart/apple_nvme.json
Normal file
51
agent/test-data/smart/apple_nvme.json
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
{
|
||||||
|
"json_format_version": [1, 0],
|
||||||
|
"smartctl": {
|
||||||
|
"version": [7, 4],
|
||||||
|
"argv": ["smartctl", "-aix", "-j", "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1"],
|
||||||
|
"exit_status": 4
|
||||||
|
},
|
||||||
|
"device": {
|
||||||
|
"name": "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1",
|
||||||
|
"info_name": "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1",
|
||||||
|
"type": "nvme",
|
||||||
|
"protocol": "NVMe"
|
||||||
|
},
|
||||||
|
"model_name": "APPLE SSD AP0256Q",
|
||||||
|
"serial_number": "0ba0147940253c15",
|
||||||
|
"firmware_version": "555",
|
||||||
|
"smart_support": {
|
||||||
|
"available": true,
|
||||||
|
"enabled": true
|
||||||
|
},
|
||||||
|
"smart_status": {
|
||||||
|
"passed": true,
|
||||||
|
"nvme": {
|
||||||
|
"value": 0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nvme_smart_health_information_log": {
|
||||||
|
"critical_warning": 0,
|
||||||
|
"temperature": 42,
|
||||||
|
"available_spare": 100,
|
||||||
|
"available_spare_threshold": 99,
|
||||||
|
"percentage_used": 1,
|
||||||
|
"data_units_read": 270189386,
|
||||||
|
"data_units_written": 166753862,
|
||||||
|
"host_reads": 7543766995,
|
||||||
|
"host_writes": 3761621926,
|
||||||
|
"controller_busy_time": 0,
|
||||||
|
"power_cycles": 366,
|
||||||
|
"power_on_hours": 2850,
|
||||||
|
"unsafe_shutdowns": 195,
|
||||||
|
"media_errors": 0,
|
||||||
|
"num_err_log_entries": 0
|
||||||
|
},
|
||||||
|
"temperature": {
|
||||||
|
"current": 42
|
||||||
|
},
|
||||||
|
"power_cycle_count": 366,
|
||||||
|
"power_on_time": {
|
||||||
|
"hours": 2850
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,9 +1,14 @@
|
|||||||
|
// Package utils provides utility functions for the agent.
|
||||||
package utils
|
package utils
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
@@ -65,6 +70,9 @@ func ReadStringFileLimited(path string, maxSize int) (string, error) {
|
|||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
if n < 0 {
|
||||||
|
return "", fmt.Errorf("%s returned negative bytes: %d", path, n)
|
||||||
|
}
|
||||||
return strings.TrimSpace(string(buf[:n])), nil
|
return strings.TrimSpace(string(buf[:n])), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -86,3 +94,24 @@ func ReadUintFile(path string) (uint64, bool) {
|
|||||||
}
|
}
|
||||||
return parsed, true
|
return parsed, true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LookPathHomebrew is like exec.LookPath but also checks Homebrew paths.
|
||||||
|
func LookPathHomebrew(file string) (string, error) {
|
||||||
|
foundPath, lookPathErr := exec.LookPath(file)
|
||||||
|
if lookPathErr == nil {
|
||||||
|
return foundPath, nil
|
||||||
|
}
|
||||||
|
var homebrewPath string
|
||||||
|
switch runtime.GOOS {
|
||||||
|
case "darwin":
|
||||||
|
homebrewPath = filepath.Join("/opt", "homebrew", "bin", file)
|
||||||
|
case "linux":
|
||||||
|
homebrewPath = filepath.Join("/home", "linuxbrew", ".linuxbrew", "bin", file)
|
||||||
|
}
|
||||||
|
if homebrewPath != "" {
|
||||||
|
if _, err := os.Stat(homebrewPath); err == nil {
|
||||||
|
return homebrewPath, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", lookPathErr
|
||||||
|
}
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import "github.com/blang/semver"
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// Version is the current version of the application.
|
// Version is the current version of the application.
|
||||||
Version = "0.18.4"
|
Version = "0.18.7"
|
||||||
// AppName is the name of the application.
|
// AppName is the name of the application.
|
||||||
AppName = "beszel"
|
AppName = "beszel"
|
||||||
)
|
)
|
||||||
|
|||||||
44
go.mod
44
go.mod
@@ -5,24 +5,24 @@ go 1.26.1
|
|||||||
require (
|
require (
|
||||||
github.com/blang/semver v3.5.1+incompatible
|
github.com/blang/semver v3.5.1+incompatible
|
||||||
github.com/coreos/go-systemd/v22 v22.7.0
|
github.com/coreos/go-systemd/v22 v22.7.0
|
||||||
github.com/distatus/battery v0.11.0
|
github.com/ebitengine/purego v0.10.0
|
||||||
github.com/ebitengine/purego v0.9.1
|
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0
|
github.com/fxamacker/cbor/v2 v2.9.0
|
||||||
github.com/gliderlabs/ssh v0.3.8
|
github.com/gliderlabs/ssh v0.3.8
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/lxzan/gws v1.8.9
|
github.com/lxzan/gws v1.9.1
|
||||||
github.com/nicholas-fedor/shoutrrr v0.13.2
|
github.com/nicholas-fedor/shoutrrr v0.14.3
|
||||||
github.com/pocketbase/dbx v1.12.0
|
github.com/pocketbase/dbx v1.12.0
|
||||||
github.com/pocketbase/pocketbase v0.36.4
|
github.com/pocketbase/pocketbase v0.36.8
|
||||||
github.com/shirou/gopsutil/v4 v4.26.1
|
github.com/shirou/gopsutil/v4 v4.26.3
|
||||||
github.com/spf13/cast v1.10.0
|
github.com/spf13/cast v1.10.0
|
||||||
github.com/spf13/cobra v1.10.2
|
github.com/spf13/cobra v1.10.2
|
||||||
github.com/spf13/pflag v1.0.10
|
github.com/spf13/pflag v1.0.10
|
||||||
github.com/stretchr/testify v1.11.1
|
github.com/stretchr/testify v1.11.1
|
||||||
golang.org/x/crypto v0.48.0
|
golang.org/x/crypto v0.49.0
|
||||||
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90
|
||||||
golang.org/x/sys v0.41.0
|
golang.org/x/sys v0.42.0
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
|
howett.net/plist v1.0.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
@@ -30,10 +30,10 @@ require (
|
|||||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
github.com/disintegration/imaging v1.6.2 // indirect
|
github.com/disintegration/imaging v1.6.2 // indirect
|
||||||
github.com/dolthub/maphash v0.1.0 // indirect
|
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
|
github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
|
||||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/fatih/color v1.18.0 // indirect
|
github.com/eclipse/paho.golang v0.23.0 // indirect
|
||||||
|
github.com/fatih/color v1.19.0 // indirect
|
||||||
github.com/gabriel-vasile/mimetype v1.4.13 // indirect
|
github.com/gabriel-vasile/mimetype v1.4.13 // indirect
|
||||||
github.com/ganigeorgiev/fexpr v0.5.0 // indirect
|
github.com/ganigeorgiev/fexpr v0.5.0 // indirect
|
||||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||||
@@ -41,9 +41,10 @@ require (
|
|||||||
github.com/go-sql-driver/mysql v1.9.1 // indirect
|
github.com/go-sql-driver/mysql v1.9.1 // indirect
|
||||||
github.com/godbus/dbus/v5 v5.2.2 // indirect
|
github.com/godbus/dbus/v5 v5.2.2 // indirect
|
||||||
github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
|
github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
|
||||||
|
github.com/gorilla/websocket v1.5.3 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/klauspost/compress v1.18.4 // indirect
|
github.com/klauspost/compress v1.18.5 // indirect
|
||||||
github.com/lufia/plan9stats v0.0.0-20260216142805-b3301c5f2a88 // indirect
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749 // indirect
|
||||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||||
@@ -54,15 +55,14 @@ require (
|
|||||||
github.com/tklauser/numcpus v0.11.0 // indirect
|
github.com/tklauser/numcpus v0.11.0 // indirect
|
||||||
github.com/x448/float16 v0.8.4 // indirect
|
github.com/x448/float16 v0.8.4 // indirect
|
||||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||||
golang.org/x/image v0.36.0 // indirect
|
golang.org/x/image v0.38.0 // indirect
|
||||||
golang.org/x/net v0.50.0 // indirect
|
golang.org/x/net v0.52.0 // indirect
|
||||||
golang.org/x/oauth2 v0.35.0 // indirect
|
golang.org/x/oauth2 v0.36.0 // indirect
|
||||||
golang.org/x/sync v0.19.0 // indirect
|
golang.org/x/sync v0.20.0 // indirect
|
||||||
golang.org/x/term v0.40.0 // indirect
|
golang.org/x/term v0.41.0 // indirect
|
||||||
golang.org/x/text v0.34.0 // indirect
|
golang.org/x/text v0.35.0 // indirect
|
||||||
howett.net/plist v1.0.1 // indirect
|
modernc.org/libc v1.70.0 // indirect
|
||||||
modernc.org/libc v1.67.6 // indirect
|
|
||||||
modernc.org/mathutil v1.7.1 // indirect
|
modernc.org/mathutil v1.7.1 // indirect
|
||||||
modernc.org/memory v1.11.0 // indirect
|
modernc.org/memory v1.11.0 // indirect
|
||||||
modernc.org/sqlite v1.45.0 // indirect
|
modernc.org/sqlite v1.48.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
114
go.sum
114
go.sum
@@ -17,18 +17,16 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
|||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
|
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
|
||||||
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
||||||
github.com/distatus/battery v0.11.0 h1:KJk89gz90Iq/wJtbjjM9yUzBXV+ASV/EG2WOOL7N8lc=
|
|
||||||
github.com/distatus/battery v0.11.0/go.mod h1:KmVkE8A8hpIX4T78QRdMktYpEp35QfOL8A8dwZBxq2k=
|
|
||||||
github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
|
|
||||||
github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
|
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8=
|
github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8=
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
|
github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
|
||||||
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
github.com/eclipse/paho.golang v0.23.0 h1:KHgl2wz6EJo7cMBmkuhpt7C576vP+kpPv7jjvSyR6Mk=
|
||||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
github.com/eclipse/paho.golang v0.23.0/go.mod h1:nQRhTkoZv8EAiNs5UU0/WdQIx2NrnWUpL9nsGJTQN04=
|
||||||
|
github.com/fatih/color v1.19.0 h1:Zp3PiM21/9Ld6FzSKyL5c/BULoe/ONr9KlbYVOfG8+w=
|
||||||
|
github.com/fatih/color v1.19.0/go.mod h1:zNk67I0ZUT1bEGsSGyCZYZNrHuTkJJB+r6Q9VuMi0LE=
|
||||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||||
@@ -58,10 +56,12 @@ github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArs
|
|||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc=
|
github.com/google/pprof v0.0.0-20260302011040-a15ffb7f9dcc h1:VBbFa1lDYWEeV5FZKUiYKYT0VxCp9twUmmaq9eb8sXw=
|
||||||
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
|
github.com/google/pprof v0.0.0-20260302011040-a15ffb7f9dcc/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||||
|
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
@@ -69,24 +69,24 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf
|
|||||||
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
|
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
|
||||||
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
|
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
|
||||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||||
github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
|
github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=
|
||||||
github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/lufia/plan9stats v0.0.0-20260216142805-b3301c5f2a88 h1:PTw+yKnXcOFCR6+8hHTyWBeQ/P4Nb7dd4/0ohEcWQuM=
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749 h1:Qj3hTcdWH8uMZDI41HNuTuJN525C7NBrbtH5kSO6fPk=
|
||||||
github.com/lufia/plan9stats v0.0.0-20260216142805-b3301c5f2a88/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||||
github.com/lxzan/gws v1.8.9 h1:VU3SGUeWlQrEwfUSfokcZep8mdg/BrUF+y73YYshdBM=
|
github.com/lxzan/gws v1.9.1 h1:4lbIp4cW0hOLP3ejFHR/uWRy741AURx7oKkNNi2OT9o=
|
||||||
github.com/lxzan/gws v1.8.9/go.mod h1:d9yHaR1eDTBHagQC6KY7ycUOaz5KWeqQtP3xu7aMK8Y=
|
github.com/lxzan/gws v1.9.1/go.mod h1:gXHSCPmTGryWJ4icuqy8Yho32E4YIMHH0fkDRYJRbdc=
|
||||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.13.2 h1:hfsYBIqSFYGg92pZP5CXk/g7/OJIkLYmiUnRl+AD1IA=
|
github.com/nicholas-fedor/shoutrrr v0.14.3 h1:aBX2iw9a7jl5wfHd3bi9LnS5ucoYIy6KcLH9XVF+gig=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.13.2/go.mod h1:ZqzV3gY/Wj6AvWs1etlO7+yKbh4iptSbeL8avBpMQbA=
|
github.com/nicholas-fedor/shoutrrr v0.14.3/go.mod h1:U7IywBkLpBV7rgn8iLbQ9/LklJG1gm24bFv5cXXsDKs=
|
||||||
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
|
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
|
||||||
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
|
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
|
||||||
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
|
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
|
||||||
@@ -96,8 +96,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
|||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pocketbase/dbx v1.12.0 h1:/oLErM+A0b4xI0PWTGPqSDVjzix48PqI/bng2l0PzoA=
|
github.com/pocketbase/dbx v1.12.0 h1:/oLErM+A0b4xI0PWTGPqSDVjzix48PqI/bng2l0PzoA=
|
||||||
github.com/pocketbase/dbx v1.12.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
github.com/pocketbase/dbx v1.12.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
||||||
github.com/pocketbase/pocketbase v0.36.4 h1:zTjRZbp2WfTOJJfb+pFRWa200UaQwxZYt8RzkFMlAZ4=
|
github.com/pocketbase/pocketbase v0.36.8 h1:gCNqoesZ44saYOD3J7edhi5nDwUWKyQG7boM/kVwz2c=
|
||||||
github.com/pocketbase/pocketbase v0.36.4/go.mod h1:9CiezhRudd9FZGa5xZa53QZBTNxc5vvw/FGG+diAECI=
|
github.com/pocketbase/pocketbase v0.36.8/go.mod h1:OY4WaXbP0WnF/EXoBbboWJK+ZSZ1A85tiA0sjrTKxTA=
|
||||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||||
@@ -105,8 +105,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
|
|||||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/shirou/gopsutil/v4 v4.26.1 h1:TOkEyriIXk2HX9d4isZJtbjXbEjf5qyKPAzbzY0JWSo=
|
github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc=
|
||||||
github.com/shirou/gopsutil/v4 v4.26.1/go.mod h1:medLI9/UNAb0dOI9Q3/7yWSqKkj00u+1tgY8nvv41pc=
|
github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||||
@@ -115,6 +115,8 @@ github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
|||||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4=
|
||||||
|
github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
@@ -126,44 +128,44 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
|||||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||||
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
|
||||||
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
|
||||||
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa h1:Zt3DZoOFFYkKhDT3v7Lm9FDMEV06GpzjG2jrqW+QTE0=
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA=
|
||||||
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA=
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ=
|
||||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/image v0.36.0 h1:Iknbfm1afbgtwPTmHnS2gTM/6PPZfH+z2EFuOkSbqwc=
|
golang.org/x/image v0.38.0 h1:5l+q+Y9JDC7mBOMjo4/aPhMDcxEptsX+Tt3GgRQRPuE=
|
||||||
golang.org/x/image v0.36.0/go.mod h1:YsWD2TyyGKiIX1kZlu9QfKIsQ4nAAK9bdgdrIsE7xy4=
|
golang.org/x/image v0.38.0/go.mod h1:/3f6vaXC+6CEanU4KJxbcUZyEePbyKbaLoDOe4ehFYY=
|
||||||
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
|
golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI=
|
||||||
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
|
golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY=
|
||||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=
|
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
|
||||||
golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
|
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
|
||||||
golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ=
|
golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs=
|
||||||
golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q=
|
||||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
|
||||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
|
||||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||||
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
|
golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU=
|
||||||
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
|
||||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
|
golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s=
|
||||||
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0=
|
||||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
|
||||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
@@ -175,18 +177,18 @@ howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM=
|
|||||||
howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||||
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||||
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
modernc.org/ccgo/v4 v4.32.0 h1:hjG66bI/kqIPX1b2yT6fr/jt+QedtP2fqojG2VrFuVw=
|
||||||
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
modernc.org/ccgo/v4 v4.32.0/go.mod h1:6F08EBCx5uQc38kMGl+0Nm0oWczoo1c7cgpzEry7Uc0=
|
||||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
modernc.org/fileutil v1.4.0 h1:j6ZzNTftVS054gi281TyLjHPp6CPHr2KCxEXjEbD6SM=
|
||||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
modernc.org/fileutil v1.4.0/go.mod h1:EqdKFDxiByqxLk8ozOxObDSfcVOv/54xDs/DUHdvCUU=
|
||||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||||
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
modernc.org/gc/v3 v3.1.2 h1:ZtDCnhonXSZexk/AYsegNRV1lJGgaNZJuKjJSWKyEqo=
|
||||||
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
modernc.org/gc/v3 v3.1.2/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||||
modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI=
|
modernc.org/libc v1.70.0 h1:U58NawXqXbgpZ/dcdS9kMshu08aiA6b7gusEusqzNkw=
|
||||||
modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE=
|
modernc.org/libc v1.70.0/go.mod h1:OVmxFGP1CI/Z4L3E0Q3Mf1PDE0BucwMkcXjjLntvHJo=
|
||||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||||
@@ -195,8 +197,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
|||||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||||
modernc.org/sqlite v1.45.0 h1:r51cSGzKpbptxnby+EIIz5fop4VuE4qFoVEjNvWoObs=
|
modernc.org/sqlite v1.48.0 h1:ElZyLop3Q2mHYk5IFPPXADejZrlHu7APbpB0sF78bq4=
|
||||||
modernc.org/sqlite v1.45.0/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=
|
modernc.org/sqlite v1.48.0/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
|
||||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||||
|
|||||||
@@ -302,21 +302,6 @@ func (am *AlertManager) SendShoutrrrAlert(notificationUrl, title, message, link,
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (am *AlertManager) SendTestNotification(e *core.RequestEvent) error {
|
|
||||||
var data struct {
|
|
||||||
URL string `json:"url"`
|
|
||||||
}
|
|
||||||
err := e.BindBody(&data)
|
|
||||||
if err != nil || data.URL == "" {
|
|
||||||
return e.BadRequestError("URL is required", err)
|
|
||||||
}
|
|
||||||
err = am.SendShoutrrrAlert(data.URL, "Test Alert", "This is a notification from Beszel.", am.hub.Settings().Meta.AppURL, "View Beszel")
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(200, map[string]string{"err": err.Error()})
|
|
||||||
}
|
|
||||||
return e.JSON(200, map[string]bool{"err": false})
|
|
||||||
}
|
|
||||||
|
|
||||||
// setAlertTriggered updates the "triggered" status of an alert record in the database
|
// setAlertTriggered updates the "triggered" status of an alert record in the database
|
||||||
func (am *AlertManager) setAlertTriggered(alert CachedAlertData, triggered bool) error {
|
func (am *AlertManager) setAlertTriggered(alert CachedAlertData, triggered bool) error {
|
||||||
alertRecord, err := am.hub.FindRecordById("alerts", alert.Id)
|
alertRecord, err := am.hub.FindRecordById("alerts", alert.Id)
|
||||||
|
|||||||
@@ -3,7 +3,11 @@ package alerts
|
|||||||
import (
|
import (
|
||||||
"database/sql"
|
"database/sql"
|
||||||
"errors"
|
"errors"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/pocketbase/dbx"
|
"github.com/pocketbase/dbx"
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
@@ -117,3 +121,72 @@ func DeleteUserAlerts(e *core.RequestEvent) error {
|
|||||||
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"success": true, "count": numDeleted})
|
return e.JSON(http.StatusOK, map[string]any{"success": true, "count": numDeleted})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SendTestNotification handles API request to send a test notification to a specified Shoutrrr URL
|
||||||
|
func (am *AlertManager) SendTestNotification(e *core.RequestEvent) error {
|
||||||
|
var data struct {
|
||||||
|
URL string `json:"url"`
|
||||||
|
}
|
||||||
|
err := e.BindBody(&data)
|
||||||
|
if err != nil || data.URL == "" {
|
||||||
|
return e.BadRequestError("URL is required", err)
|
||||||
|
}
|
||||||
|
// Only allow admins to send test notifications to internal URLs
|
||||||
|
if !e.Auth.IsSuperuser() && e.Auth.GetString("role") != "admin" {
|
||||||
|
internalURL, err := isInternalURL(data.URL)
|
||||||
|
if err != nil {
|
||||||
|
return e.BadRequestError(err.Error(), nil)
|
||||||
|
}
|
||||||
|
if internalURL {
|
||||||
|
return e.ForbiddenError("Only admins can send to internal destinations", nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err = am.SendShoutrrrAlert(data.URL, "Test Alert", "This is a notification from Beszel.", am.hub.Settings().Meta.AppURL, "View Beszel")
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(200, map[string]string{"err": err.Error()})
|
||||||
|
}
|
||||||
|
return e.JSON(200, map[string]bool{"err": false})
|
||||||
|
}
|
||||||
|
|
||||||
|
// isInternalURL checks if the given shoutrrr URL points to an internal destination (localhost or private IP)
|
||||||
|
func isInternalURL(rawURL string) (bool, error) {
|
||||||
|
parsedURL, err := url.Parse(rawURL)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
host := parsedURL.Hostname()
|
||||||
|
if host == "" {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.EqualFold(host, "localhost") {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if ip := net.ParseIP(host); ip != nil {
|
||||||
|
return isInternalIP(ip), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Some Shoutrrr URLs use the host position for service identifiers rather than a
|
||||||
|
// network hostname (for example, discord://token@webhookid). Restrict DNS lookups
|
||||||
|
// to names that look like actual hostnames so valid service URLs keep working.
|
||||||
|
if !strings.Contains(host, ".") {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ips, err := net.LookupIP(host)
|
||||||
|
if err != nil {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if slices.ContainsFunc(ips, isInternalIP) {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isInternalIP(ip net.IP) bool {
|
||||||
|
return ip.IsPrivate() || ip.IsLoopback() || ip.IsUnspecified()
|
||||||
|
}
|
||||||
|
|||||||
501
internal/alerts/alerts_api_test.go
Normal file
501
internal/alerts/alerts_api_test.go
Normal file
@@ -0,0 +1,501 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||||
|
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
||||||
|
func jsonReader(v any) io.Reader {
|
||||||
|
data, err := json.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return bytes.NewReader(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsInternalURL(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
url string
|
||||||
|
internal bool
|
||||||
|
}{
|
||||||
|
{name: "loopback ipv4", url: "generic://127.0.0.1", internal: true},
|
||||||
|
{name: "localhost hostname", url: "generic://localhost", internal: true},
|
||||||
|
{name: "localhost hostname", url: "generic+http://localhost/api/v1/postStuff", internal: true},
|
||||||
|
{name: "localhost hostname", url: "generic+http://127.0.0.1:8080/api/v1/postStuff", internal: true},
|
||||||
|
{name: "localhost hostname", url: "generic+https://beszel.dev/api/v1/postStuff", internal: false},
|
||||||
|
{name: "public ipv4", url: "generic://8.8.8.8", internal: false},
|
||||||
|
{name: "token style service url", url: "discord://abc123@123456789", internal: false},
|
||||||
|
{name: "single label service url", url: "slack://token@team/channel", internal: false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, testCase := range testCases {
|
||||||
|
t.Run(testCase.name, func(t *testing.T) {
|
||||||
|
internal, err := alerts.IsInternalURL(testCase.url)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, testCase.internal, internal)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUserAlertsApi(t *testing.T) {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
user1, _ := beszelTests.CreateUser(hub, "alertstest@example.com", "password")
|
||||||
|
user1Token, _ := user1.NewAuthToken()
|
||||||
|
|
||||||
|
user2, _ := beszelTests.CreateUser(hub, "alertstest2@example.com", "password")
|
||||||
|
user2Token, _ := user2.NewAuthToken()
|
||||||
|
|
||||||
|
system1, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "system1",
|
||||||
|
"users": []string{user1.Id},
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
})
|
||||||
|
|
||||||
|
system2, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "system2",
|
||||||
|
"users": []string{user1.Id, user2.Id},
|
||||||
|
"host": "127.0.0.2",
|
||||||
|
})
|
||||||
|
|
||||||
|
userRecords, _ := hub.CountRecords("users")
|
||||||
|
assert.EqualValues(t, 2, userRecords, "all users should be created")
|
||||||
|
|
||||||
|
systemRecords, _ := hub.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemRecords, "all systems should be created")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
// {
|
||||||
|
// Name: "GET not implemented - returns index",
|
||||||
|
// Method: http.MethodGet,
|
||||||
|
// URL: "/api/beszel/user-alerts",
|
||||||
|
// ExpectedStatus: 200,
|
||||||
|
// ExpectedContent: []string{"<html ", "globalThis.BESZEL"},
|
||||||
|
// TestAppFactory: testAppFactory,
|
||||||
|
// },
|
||||||
|
{
|
||||||
|
Name: "POST no auth",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST no body",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Bad data"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST bad data",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Bad data"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"invalidField": "this should cause validation error",
|
||||||
|
"threshold": "not a number",
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST malformed JSON",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Bad data"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: strings.NewReader(`{"alertType": "cpu", "threshold": 80, "enabled": true,}`),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST valid alert data multiple systems",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 69,
|
||||||
|
"min": 9,
|
||||||
|
"systems": []string{system1.Id, system2.Id},
|
||||||
|
"overwrite": false,
|
||||||
|
}),
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
// check total alerts
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||||
|
// check alert has correct values
|
||||||
|
matchingAlerts, _ := app.CountRecords("alerts", dbx.HashExp{"name": "CPU", "user": user1.Id, "system": system1.Id, "value": 69, "min": 9})
|
||||||
|
assert.EqualValues(t, 1, matchingAlerts, "should have 1 alert")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST valid alert data single system",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "Memory",
|
||||||
|
"systems": []string{system1.Id},
|
||||||
|
"value": 90,
|
||||||
|
"min": 10,
|
||||||
|
}),
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
user1Alerts, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||||
|
assert.EqualValues(t, 3, user1Alerts, "should have 3 alerts")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "Overwrite: false, should not overwrite existing alert",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 45,
|
||||||
|
"min": 5,
|
||||||
|
"systems": []string{system1.Id},
|
||||||
|
"overwrite": false,
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system1.Id,
|
||||||
|
"user": user1.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||||
|
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user1.Id})
|
||||||
|
assert.EqualValues(t, 80, alert.Get("value"), "should have 80 as value")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "Overwrite: true, should overwrite existing alert",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 45,
|
||||||
|
"min": 5,
|
||||||
|
"systems": []string{system2.Id},
|
||||||
|
"overwrite": true,
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system2.Id,
|
||||||
|
"user": user2.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||||
|
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user2.Id})
|
||||||
|
assert.EqualValues(t, 45, alert.Get("value"), "should have 45 as value")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE no auth",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system1.Id},
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system1.Id,
|
||||||
|
"user": user1.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE alert",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system1.Id},
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system1.Id,
|
||||||
|
"user": user1.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.Zero(t, alerts, "should have 0 alerts")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE alert multiple systems",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"count\":2", "\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "Memory",
|
||||||
|
"systems": []string{system1.Id, system2.Id},
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
for _, systemId := range []string{system1.Id, system2.Id} {
|
||||||
|
_, err := beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "Memory",
|
||||||
|
"system": systemId,
|
||||||
|
"user": user1.Id,
|
||||||
|
"value": 90,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
assert.NoError(t, err, "should create alert")
|
||||||
|
}
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.Zero(t, alerts, "should have 0 alerts")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 2 should not be able to delete alert of user 1",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system2.Id},
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.ClearCollection(t, app, "alerts")
|
||||||
|
for _, user := range []string{user1.Id, user2.Id} {
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system2.Id,
|
||||||
|
"user": user,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
alerts, _ := app.CountRecords("alerts")
|
||||||
|
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||||
|
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||||
|
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
||||||
|
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
||||||
|
assert.EqualValues(t, 1, user2AlertCount, "should have 1 alert")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||||
|
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
||||||
|
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
||||||
|
assert.Zero(t, user2AlertCount, "should have 0 alerts")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func TestSendTestNotification(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
userToken, err := user.NewAuthToken()
|
||||||
|
|
||||||
|
adminUser, err := beszelTests.CreateUserWithRole(hub, "admin@example.com", "password123", "admin")
|
||||||
|
assert.NoError(t, err, "Failed to create admin user")
|
||||||
|
adminUserToken, err := adminUser.NewAuthToken()
|
||||||
|
|
||||||
|
superuser, err := beszelTests.CreateSuperuser(hub, "superuser@example.com", "password123")
|
||||||
|
assert.NoError(t, err, "Failed to create superuser")
|
||||||
|
superuserToken, err := superuser.NewAuthToken()
|
||||||
|
assert.NoError(t, err, "Failed to create superuser auth token")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - no auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://127.0.0.1",
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - with external auth should succeed",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://8.8.8.8",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"err\":"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - local url with user auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://localhost:8010",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"Only admins"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - internal url with user auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic+http://192.168.0.5",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"Only admins"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - internal url with admin auth should succeed",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": adminUserToken,
|
||||||
|
},
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://127.0.0.1",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"err\":"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - internal url with superuser auth should succeed",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": superuserToken,
|
||||||
|
},
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://127.0.0.1",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"err\":"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -109,6 +109,18 @@ func (am *AlertManager) cancelPendingAlert(alertID string) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CancelPendingStatusAlerts cancels all pending status alert timers for a given system.
|
||||||
|
// This is called when a system is paused to prevent delayed alerts from firing.
|
||||||
|
func (am *AlertManager) CancelPendingStatusAlerts(systemID string) {
|
||||||
|
am.pendingAlerts.Range(func(key, value any) bool {
|
||||||
|
info := value.(*alertInfo)
|
||||||
|
if info.alertData.SystemID == systemID {
|
||||||
|
am.cancelPendingAlert(key.(string))
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// processPendingAlert sends a "down" alert if the pending alert has expired and the system is still down.
|
// processPendingAlert sends a "down" alert if the pending alert has expired and the system is still down.
|
||||||
func (am *AlertManager) processPendingAlert(alertID string) {
|
func (am *AlertManager) processPendingAlert(alertID string) {
|
||||||
value, loaded := am.pendingAlerts.LoadAndDelete(alertID)
|
value, loaded := am.pendingAlerts.LoadAndDelete(alertID)
|
||||||
|
|||||||
@@ -941,3 +941,68 @@ func TestStatusAlertClearedBeforeSend(t *testing.T) {
|
|||||||
assert.EqualValues(t, 0, alertHistoryCount, "Should have no unresolved alert history records since alert never triggered")
|
assert.EqualValues(t, 0, alertHistoryCount, "Should have no unresolved alert history records since alert never triggered")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCancelPendingStatusAlertsClearsAllAlertsForSystem(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||||
|
require.NoError(t, hub.Save(userSettings))
|
||||||
|
|
||||||
|
systemCollection, err := hub.FindCollectionByNameOrId("systems")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
system1 := core.NewRecord(systemCollection)
|
||||||
|
system1.Set("name", "system-1")
|
||||||
|
system1.Set("status", "up")
|
||||||
|
system1.Set("host", "127.0.0.1")
|
||||||
|
system1.Set("users", []string{user.Id})
|
||||||
|
require.NoError(t, hub.Save(system1))
|
||||||
|
|
||||||
|
system2 := core.NewRecord(systemCollection)
|
||||||
|
system2.Set("name", "system-2")
|
||||||
|
system2.Set("status", "up")
|
||||||
|
system2.Set("host", "127.0.0.2")
|
||||||
|
system2.Set("users", []string{user.Id})
|
||||||
|
require.NoError(t, hub.Save(system2))
|
||||||
|
|
||||||
|
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
alert1 := core.NewRecord(alertCollection)
|
||||||
|
alert1.Set("user", user.Id)
|
||||||
|
alert1.Set("system", system1.Id)
|
||||||
|
alert1.Set("name", "Status")
|
||||||
|
alert1.Set("triggered", false)
|
||||||
|
alert1.Set("min", 5)
|
||||||
|
require.NoError(t, hub.Save(alert1))
|
||||||
|
|
||||||
|
alert2 := core.NewRecord(alertCollection)
|
||||||
|
alert2.Set("user", user.Id)
|
||||||
|
alert2.Set("system", system2.Id)
|
||||||
|
alert2.Set("name", "Status")
|
||||||
|
alert2.Set("triggered", false)
|
||||||
|
alert2.Set("min", 5)
|
||||||
|
require.NoError(t, hub.Save(alert2))
|
||||||
|
|
||||||
|
am := alerts.NewTestAlertManagerWithoutWorker(hub)
|
||||||
|
initialEmailCount := hub.TestMailer.TotalSend()
|
||||||
|
|
||||||
|
// Both systems go down
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("down", system1))
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("down", system2))
|
||||||
|
assert.Equal(t, 2, am.GetPendingAlertsCount(), "both systems should have pending alerts")
|
||||||
|
|
||||||
|
// System 1 is paused — cancel its pending alerts
|
||||||
|
am.CancelPendingStatusAlerts(system1.Id)
|
||||||
|
assert.Equal(t, 1, am.GetPendingAlertsCount(), "only system2 alert should remain pending after pausing system1")
|
||||||
|
|
||||||
|
// Expire and process remaining alerts — only system2 should fire
|
||||||
|
am.ForceExpirePendingAlerts()
|
||||||
|
processed, err := am.ProcessPendingAlerts()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Len(t, processed, 1, "only the non-paused system's alert should be processed")
|
||||||
|
assert.Equal(t, initialEmailCount+1, hub.TestMailer.TotalSend(), "only system2 should send a down notification")
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,11 +3,6 @@
|
|||||||
package alerts_test
|
package alerts_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
"testing"
|
||||||
"testing/synctest"
|
"testing/synctest"
|
||||||
"time"
|
"time"
|
||||||
@@ -16,359 +11,9 @@ import (
|
|||||||
|
|
||||||
"github.com/henrygd/beszel/internal/alerts"
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
"github.com/pocketbase/dbx"
|
"github.com/pocketbase/dbx"
|
||||||
"github.com/pocketbase/pocketbase/core"
|
|
||||||
pbTests "github.com/pocketbase/pocketbase/tests"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
|
||||||
func jsonReader(v any) io.Reader {
|
|
||||||
data, err := json.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return bytes.NewReader(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestUserAlertsApi(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
user1, _ := beszelTests.CreateUser(hub, "alertstest@example.com", "password")
|
|
||||||
user1Token, _ := user1.NewAuthToken()
|
|
||||||
|
|
||||||
user2, _ := beszelTests.CreateUser(hub, "alertstest2@example.com", "password")
|
|
||||||
user2Token, _ := user2.NewAuthToken()
|
|
||||||
|
|
||||||
system1, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "system1",
|
|
||||||
"users": []string{user1.Id},
|
|
||||||
"host": "127.0.0.1",
|
|
||||||
})
|
|
||||||
|
|
||||||
system2, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "system2",
|
|
||||||
"users": []string{user1.Id, user2.Id},
|
|
||||||
"host": "127.0.0.2",
|
|
||||||
})
|
|
||||||
|
|
||||||
userRecords, _ := hub.CountRecords("users")
|
|
||||||
assert.EqualValues(t, 2, userRecords, "all users should be created")
|
|
||||||
|
|
||||||
systemRecords, _ := hub.CountRecords("systems")
|
|
||||||
assert.EqualValues(t, 2, systemRecords, "all systems should be created")
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
// {
|
|
||||||
// Name: "GET not implemented - returns index",
|
|
||||||
// Method: http.MethodGet,
|
|
||||||
// URL: "/api/beszel/user-alerts",
|
|
||||||
// ExpectedStatus: 200,
|
|
||||||
// ExpectedContent: []string{"<html ", "globalThis.BESZEL"},
|
|
||||||
// TestAppFactory: testAppFactory,
|
|
||||||
// },
|
|
||||||
{
|
|
||||||
Name: "POST no auth",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST no body",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"Bad data"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST bad data",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"Bad data"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"invalidField": "this should cause validation error",
|
|
||||||
"threshold": "not a number",
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST malformed JSON",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"Bad data"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: strings.NewReader(`{"alertType": "cpu", "threshold": 80, "enabled": true,}`),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST valid alert data multiple systems",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 69,
|
|
||||||
"min": 9,
|
|
||||||
"systems": []string{system1.Id, system2.Id},
|
|
||||||
"overwrite": false,
|
|
||||||
}),
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
// check total alerts
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
|
||||||
// check alert has correct values
|
|
||||||
matchingAlerts, _ := app.CountRecords("alerts", dbx.HashExp{"name": "CPU", "user": user1.Id, "system": system1.Id, "value": 69, "min": 9})
|
|
||||||
assert.EqualValues(t, 1, matchingAlerts, "should have 1 alert")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST valid alert data single system",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "Memory",
|
|
||||||
"systems": []string{system1.Id},
|
|
||||||
"value": 90,
|
|
||||||
"min": 10,
|
|
||||||
}),
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
user1Alerts, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
|
||||||
assert.EqualValues(t, 3, user1Alerts, "should have 3 alerts")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "Overwrite: false, should not overwrite existing alert",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 45,
|
|
||||||
"min": 5,
|
|
||||||
"systems": []string{system1.Id},
|
|
||||||
"overwrite": false,
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system1.Id,
|
|
||||||
"user": user1.Id,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
|
||||||
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user1.Id})
|
|
||||||
assert.EqualValues(t, 80, alert.Get("value"), "should have 80 as value")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "Overwrite: true, should overwrite existing alert",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user2Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 45,
|
|
||||||
"min": 5,
|
|
||||||
"systems": []string{system2.Id},
|
|
||||||
"overwrite": true,
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system2.Id,
|
|
||||||
"user": user2.Id,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
|
||||||
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user2.Id})
|
|
||||||
assert.EqualValues(t, 45, alert.Get("value"), "should have 45 as value")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE no auth",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system1.Id},
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system1.Id,
|
|
||||||
"user": user1.Id,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE alert",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system1.Id},
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system1.Id,
|
|
||||||
"user": user1.Id,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.Zero(t, alerts, "should have 0 alerts")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE alert multiple systems",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user1Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"count\":2", "\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "Memory",
|
|
||||||
"systems": []string{system1.Id, system2.Id},
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
for _, systemId := range []string{system1.Id, system2.Id} {
|
|
||||||
_, err := beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "Memory",
|
|
||||||
"system": systemId,
|
|
||||||
"user": user1.Id,
|
|
||||||
"value": 90,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
assert.NoError(t, err, "should create alert")
|
|
||||||
}
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.Zero(t, alerts, "should have 0 alerts")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "User 2 should not be able to delete alert of user 1",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": user2Token,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system2.Id},
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.ClearCollection(t, app, "alerts")
|
|
||||||
for _, user := range []string{user1.Id, user2.Id} {
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system2.Id,
|
|
||||||
"user": user,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
alerts, _ := app.CountRecords("alerts")
|
|
||||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
|
||||||
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
|
||||||
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
|
||||||
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
|
||||||
assert.EqualValues(t, 1, user2AlertCount, "should have 1 alert")
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
|
||||||
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
|
||||||
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
|
||||||
assert.Zero(t, user2AlertCount, "should have 0 alerts")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAlertsHistory(t *testing.T) {
|
func TestAlertsHistory(t *testing.T) {
|
||||||
synctest.Test(t, func(t *testing.T) {
|
synctest.Test(t, func(t *testing.T) {
|
||||||
hub, user := beszelTests.GetHubWithUser(t)
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
|||||||
@@ -95,3 +95,7 @@ func (am *AlertManager) RestorePendingStatusAlerts() error {
|
|||||||
func (am *AlertManager) SetAlertTriggered(alert CachedAlertData, triggered bool) error {
|
func (am *AlertManager) SetAlertTriggered(alert CachedAlertData, triggered bool) error {
|
||||||
return am.setAlertTriggered(alert, triggered)
|
return am.setAlertTriggered(alert, triggered)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func IsInternalURL(rawURL string) (bool, error) {
|
||||||
|
return isInternalURL(rawURL)
|
||||||
|
}
|
||||||
|
|||||||
@@ -195,6 +195,6 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := a.Start(serverConfig); err != nil {
|
if err := a.Start(serverConfig); err != nil {
|
||||||
log.Fatal("Failed to start server: ", err)
|
log.Fatal("Failed to start: ", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -28,8 +28,8 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
baseApp := getBaseApp()
|
baseApp := getBaseApp()
|
||||||
h, _ := hub.NewHub(baseApp)
|
hub := hub.NewHub(baseApp)
|
||||||
if err := h.StartHub(); err != nil {
|
if err := hub.StartHub(); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -494,7 +494,7 @@ type SmartInfoForNvme struct {
|
|||||||
FirmwareVersion string `json:"firmware_version"`
|
FirmwareVersion string `json:"firmware_version"`
|
||||||
// NVMePCIVendor NVMePCIVendor `json:"nvme_pci_vendor"`
|
// NVMePCIVendor NVMePCIVendor `json:"nvme_pci_vendor"`
|
||||||
// NVMeIEEEOUIIdentifier uint32 `json:"nvme_ieee_oui_identifier"`
|
// NVMeIEEEOUIIdentifier uint32 `json:"nvme_ieee_oui_identifier"`
|
||||||
// NVMeTotalCapacity uint64 `json:"nvme_total_capacity"`
|
NVMeTotalCapacity uint64 `json:"nvme_total_capacity"`
|
||||||
// NVMeUnallocatedCapacity uint64 `json:"nvme_unallocated_capacity"`
|
// NVMeUnallocatedCapacity uint64 `json:"nvme_unallocated_capacity"`
|
||||||
// NVMeControllerID uint16 `json:"nvme_controller_id"`
|
// NVMeControllerID uint16 `json:"nvme_controller_id"`
|
||||||
// NVMeVersion VersionStringInfo `json:"nvme_version"`
|
// NVMeVersion VersionStringInfo `json:"nvme_version"`
|
||||||
|
|||||||
@@ -48,6 +48,8 @@ type Stats struct {
|
|||||||
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
|
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
|
||||||
CpuBreakdown []float64 `json:"cpub,omitempty" cbor:"33,keyasint,omitempty"` // [user, system, iowait, steal, idle]
|
CpuBreakdown []float64 `json:"cpub,omitempty" cbor:"33,keyasint,omitempty"` // [user, system, iowait, steal, idle]
|
||||||
CpuCoresUsage Uint8Slice `json:"cpus,omitempty" cbor:"34,keyasint,omitempty"` // per-core busy usage [CPU0..]
|
CpuCoresUsage Uint8Slice `json:"cpus,omitempty" cbor:"34,keyasint,omitempty"` // per-core busy usage [CPU0..]
|
||||||
|
DiskIoStats [6]float64 `json:"dios,omitzero" cbor:"35,keyasint,omitzero"` // [read time %, write time %, io utilization %, r_await ms, w_await ms, weighted io %]
|
||||||
|
MaxDiskIoStats [6]float64 `json:"diosm,omitzero" cbor:"-"` // max values for DiskIoStats
|
||||||
}
|
}
|
||||||
|
|
||||||
// Uint8Slice wraps []uint8 to customize JSON encoding while keeping CBOR efficient.
|
// Uint8Slice wraps []uint8 to customize JSON encoding while keeping CBOR efficient.
|
||||||
@@ -97,6 +99,8 @@ type FsStats struct {
|
|||||||
DiskWriteBytes uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
|
DiskWriteBytes uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
|
||||||
MaxDiskReadBytes uint64 `json:"rbm,omitempty" cbor:"-"`
|
MaxDiskReadBytes uint64 `json:"rbm,omitempty" cbor:"-"`
|
||||||
MaxDiskWriteBytes uint64 `json:"wbm,omitempty" cbor:"-"`
|
MaxDiskWriteBytes uint64 `json:"wbm,omitempty" cbor:"-"`
|
||||||
|
DiskIoStats [6]float64 `json:"dios,omitzero" cbor:"8,keyasint,omitzero"` // [read time %, write time %, io utilization %, r_await ms, w_await ms, weighted io %]
|
||||||
|
MaxDiskIoStats [6]float64 `json:"diosm,omitzero" cbor:"-"` // max values for DiskIoStats
|
||||||
}
|
}
|
||||||
|
|
||||||
type NetIoStats struct {
|
type NetIoStats struct {
|
||||||
|
|||||||
@@ -110,21 +110,13 @@ func (p *updater) update() (updated bool, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var latest *release
|
var latest *release
|
||||||
var useMirror bool
|
|
||||||
|
|
||||||
// Determine the API endpoint based on UseMirror flag
|
apiURL := getApiURL(p.config.UseMirror, p.config.Owner, p.config.Repo)
|
||||||
apiURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", p.config.Owner, p.config.Repo)
|
|
||||||
if p.config.UseMirror {
|
if p.config.UseMirror {
|
||||||
useMirror = true
|
|
||||||
apiURL = fmt.Sprintf("https://gh.beszel.dev/repos/%s/%s/releases/latest?api=true", p.config.Owner, p.config.Repo)
|
|
||||||
ColorPrint(ColorYellow, "Using mirror for update.")
|
ColorPrint(ColorYellow, "Using mirror for update.")
|
||||||
}
|
}
|
||||||
|
|
||||||
latest, err = fetchLatestRelease(
|
latest, err = FetchLatestRelease(p.config.Context, p.config.HttpClient, apiURL)
|
||||||
p.config.Context,
|
|
||||||
p.config.HttpClient,
|
|
||||||
apiURL,
|
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -150,7 +142,7 @@ func (p *updater) update() (updated bool, err error) {
|
|||||||
|
|
||||||
// download the release asset
|
// download the release asset
|
||||||
assetPath := filepath.Join(releaseDir, asset.Name)
|
assetPath := filepath.Join(releaseDir, asset.Name)
|
||||||
if err := downloadFile(p.config.Context, p.config.HttpClient, asset.DownloadUrl, assetPath, useMirror); err != nil {
|
if err := downloadFile(p.config.Context, p.config.HttpClient, asset.DownloadUrl, assetPath, p.config.UseMirror); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -226,11 +218,11 @@ func (p *updater) update() (updated bool, err error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func fetchLatestRelease(
|
func FetchLatestRelease(ctx context.Context, client HttpClient, url string) (*release, error) {
|
||||||
ctx context.Context,
|
if url == "" {
|
||||||
client HttpClient,
|
url = getApiURL(false, "henrygd", "beszel")
|
||||||
url string,
|
}
|
||||||
) (*release, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -375,3 +367,10 @@ func isGlibc() bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getApiURL(useMirror bool, owner, repo string) string {
|
||||||
|
if useMirror {
|
||||||
|
return fmt.Sprintf("https://gh.beszel.dev/repos/%s/%s/releases/latest?api=true", owner, repo)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", owner, repo)
|
||||||
|
}
|
||||||
|
|||||||
@@ -32,8 +32,7 @@ func createTestHub(t testing.TB) (*Hub, *pbtests.TestApp, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
h, err := NewHub(testApp)
|
return NewHub(testApp), testApp, err
|
||||||
return h, testApp, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// cleanupTestHub stops background system goroutines before tearing down the app.
|
// cleanupTestHub stops background system goroutines before tearing down the app.
|
||||||
|
|||||||
391
internal/hub/api.go
Normal file
391
internal/hub/api.go
Normal file
@@ -0,0 +1,391 @@
|
|||||||
|
package hub
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/blang/semver"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
|
"github.com/henrygd/beszel/internal/ghupdate"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/config"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/systems"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/apis"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UpdateInfo holds information about the latest update check
|
||||||
|
type UpdateInfo struct {
|
||||||
|
lastCheck time.Time
|
||||||
|
Version string `json:"v"`
|
||||||
|
Url string `json:"url"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var containerIDPattern = regexp.MustCompile(`^[a-fA-F0-9]{12,64}$`)
|
||||||
|
|
||||||
|
// Middleware to allow only admin role users
|
||||||
|
var requireAdminRole = customAuthMiddleware(func(e *core.RequestEvent) bool {
|
||||||
|
return e.Auth.GetString("role") == "admin"
|
||||||
|
})
|
||||||
|
|
||||||
|
// Middleware to exclude readonly users
|
||||||
|
var excludeReadOnlyRole = customAuthMiddleware(func(e *core.RequestEvent) bool {
|
||||||
|
return e.Auth.GetString("role") != "readonly"
|
||||||
|
})
|
||||||
|
|
||||||
|
// customAuthMiddleware handles boilerplate for custom authentication middlewares. fn should
|
||||||
|
// return true if the request is allowed, false otherwise. e.Auth is guaranteed to be non-nil.
|
||||||
|
func customAuthMiddleware(fn func(*core.RequestEvent) bool) func(*core.RequestEvent) error {
|
||||||
|
return func(e *core.RequestEvent) error {
|
||||||
|
if e.Auth == nil {
|
||||||
|
return e.UnauthorizedError("The request requires valid record authorization token.", nil)
|
||||||
|
}
|
||||||
|
if !fn(e) {
|
||||||
|
return e.ForbiddenError("The authorized record is not allowed to perform this action.", nil)
|
||||||
|
}
|
||||||
|
return e.Next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// registerMiddlewares registers custom middlewares
|
||||||
|
func (h *Hub) registerMiddlewares(se *core.ServeEvent) {
|
||||||
|
// authorizes request with user matching the provided email
|
||||||
|
authorizeRequestWithEmail := func(e *core.RequestEvent, email string) (err error) {
|
||||||
|
if e.Auth != nil || email == "" {
|
||||||
|
return e.Next()
|
||||||
|
}
|
||||||
|
isAuthRefresh := e.Request.URL.Path == "/api/collections/users/auth-refresh" && e.Request.Method == http.MethodPost
|
||||||
|
e.Auth, err = e.App.FindAuthRecordByEmail("users", email)
|
||||||
|
if err != nil || !isAuthRefresh {
|
||||||
|
return e.Next()
|
||||||
|
}
|
||||||
|
// auth refresh endpoint, make sure token is set in header
|
||||||
|
token, _ := e.Auth.NewAuthToken()
|
||||||
|
e.Request.Header.Set("Authorization", token)
|
||||||
|
return e.Next()
|
||||||
|
}
|
||||||
|
// authenticate with trusted header
|
||||||
|
if autoLogin, _ := utils.GetEnv("AUTO_LOGIN"); autoLogin != "" {
|
||||||
|
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
||||||
|
return authorizeRequestWithEmail(e, autoLogin)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
// authenticate with trusted header
|
||||||
|
if trustedHeader, _ := utils.GetEnv("TRUSTED_AUTH_HEADER"); trustedHeader != "" {
|
||||||
|
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
||||||
|
return authorizeRequestWithEmail(e, e.Request.Header.Get(trustedHeader))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// registerApiRoutes registers custom API routes
|
||||||
|
func (h *Hub) registerApiRoutes(se *core.ServeEvent) error {
|
||||||
|
// auth protected routes
|
||||||
|
apiAuth := se.Router.Group("/api/beszel")
|
||||||
|
apiAuth.Bind(apis.RequireAuth())
|
||||||
|
// auth optional routes
|
||||||
|
apiNoAuth := se.Router.Group("/api/beszel")
|
||||||
|
|
||||||
|
// create first user endpoint only needed if no users exist
|
||||||
|
if totalUsers, _ := se.App.CountRecords("users"); totalUsers == 0 {
|
||||||
|
apiNoAuth.POST("/create-user", h.um.CreateFirstUser)
|
||||||
|
}
|
||||||
|
// check if first time setup on login page
|
||||||
|
apiNoAuth.GET("/first-run", func(e *core.RequestEvent) error {
|
||||||
|
total, err := e.App.CountRecords("users")
|
||||||
|
return e.JSON(http.StatusOK, map[string]bool{"firstRun": err == nil && total == 0})
|
||||||
|
})
|
||||||
|
// get public key and version
|
||||||
|
apiAuth.GET("/info", h.getInfo)
|
||||||
|
apiAuth.GET("/getkey", h.getInfo) // deprecated - keep for compatibility w/ integrations
|
||||||
|
// check for updates
|
||||||
|
if optIn, _ := utils.GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||||
|
var updateInfo UpdateInfo
|
||||||
|
apiAuth.GET("/update", updateInfo.getUpdate)
|
||||||
|
}
|
||||||
|
// send test notification
|
||||||
|
apiAuth.POST("/test-notification", h.SendTestNotification)
|
||||||
|
// heartbeat status and test
|
||||||
|
apiAuth.GET("/heartbeat-status", h.getHeartbeatStatus).BindFunc(requireAdminRole)
|
||||||
|
apiAuth.POST("/test-heartbeat", h.testHeartbeat).BindFunc(requireAdminRole)
|
||||||
|
// get config.yml content
|
||||||
|
apiAuth.GET("/config-yaml", config.GetYamlConfig).BindFunc(requireAdminRole)
|
||||||
|
// handle agent websocket connection
|
||||||
|
apiNoAuth.GET("/agent-connect", h.handleAgentConnect)
|
||||||
|
// get or create universal tokens
|
||||||
|
apiAuth.GET("/universal-token", h.getUniversalToken).BindFunc(excludeReadOnlyRole)
|
||||||
|
// update / delete user alerts
|
||||||
|
apiAuth.POST("/user-alerts", alerts.UpsertUserAlerts)
|
||||||
|
apiAuth.DELETE("/user-alerts", alerts.DeleteUserAlerts)
|
||||||
|
// refresh SMART devices for a system
|
||||||
|
apiAuth.POST("/smart/refresh", h.refreshSmartData).BindFunc(excludeReadOnlyRole)
|
||||||
|
// get systemd service details
|
||||||
|
apiAuth.GET("/systemd/info", h.getSystemdInfo)
|
||||||
|
// /containers routes
|
||||||
|
if enabled, _ := utils.GetEnv("CONTAINER_DETAILS"); enabled != "false" {
|
||||||
|
// get container logs
|
||||||
|
apiAuth.GET("/containers/logs", h.getContainerLogs)
|
||||||
|
// get container info
|
||||||
|
apiAuth.GET("/containers/info", h.getContainerInfo)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getInfo returns data needed by authenticated users, such as the public key and current version
|
||||||
|
func (h *Hub) getInfo(e *core.RequestEvent) error {
|
||||||
|
type infoResponse struct {
|
||||||
|
Key string `json:"key"`
|
||||||
|
Version string `json:"v"`
|
||||||
|
CheckUpdate bool `json:"cu"`
|
||||||
|
}
|
||||||
|
info := infoResponse{
|
||||||
|
Key: h.pubKey,
|
||||||
|
Version: beszel.Version,
|
||||||
|
}
|
||||||
|
if optIn, _ := utils.GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||||
|
info.CheckUpdate = true
|
||||||
|
}
|
||||||
|
return e.JSON(http.StatusOK, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getUpdate checks for the latest release on GitHub and returns update info if a newer version is available
|
||||||
|
func (info *UpdateInfo) getUpdate(e *core.RequestEvent) error {
|
||||||
|
if time.Since(info.lastCheck) < 6*time.Hour {
|
||||||
|
return e.JSON(http.StatusOK, info)
|
||||||
|
}
|
||||||
|
info.lastCheck = time.Now()
|
||||||
|
latestRelease, err := ghupdate.FetchLatestRelease(context.Background(), http.DefaultClient, "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
currentVersion, err := semver.Parse(strings.TrimPrefix(beszel.Version, "v"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
latestVersion, err := semver.Parse(strings.TrimPrefix(latestRelease.Tag, "v"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if latestVersion.GT(currentVersion) {
|
||||||
|
info.Version = strings.TrimPrefix(latestRelease.Tag, "v")
|
||||||
|
info.Url = latestRelease.Url
|
||||||
|
}
|
||||||
|
return e.JSON(http.StatusOK, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUniversalToken handles the universal token API endpoint (create, read, delete)
|
||||||
|
func (h *Hub) getUniversalToken(e *core.RequestEvent) error {
|
||||||
|
if e.Auth.IsSuperuser() {
|
||||||
|
return e.ForbiddenError("Superusers cannot use universal tokens", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
tokenMap := universalTokenMap.GetMap()
|
||||||
|
userID := e.Auth.Id
|
||||||
|
query := e.Request.URL.Query()
|
||||||
|
token := query.Get("token")
|
||||||
|
enable := query.Get("enable")
|
||||||
|
permanent := query.Get("permanent")
|
||||||
|
|
||||||
|
// helper for deleting any existing permanent token record for this user
|
||||||
|
deletePermanent := func() error {
|
||||||
|
rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
|
||||||
|
if err != nil {
|
||||||
|
return nil // no record
|
||||||
|
}
|
||||||
|
return h.Delete(rec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// helper for upserting a permanent token record for this user
|
||||||
|
upsertPermanent := func(token string) error {
|
||||||
|
rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
|
||||||
|
if err == nil {
|
||||||
|
rec.Set("token", token)
|
||||||
|
return h.Save(rec)
|
||||||
|
}
|
||||||
|
|
||||||
|
col, err := h.FindCachedCollectionByNameOrId("universal_tokens")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
newRec := core.NewRecord(col)
|
||||||
|
newRec.Set("user", userID)
|
||||||
|
newRec.Set("token", token)
|
||||||
|
return h.Save(newRec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disable universal tokens (both ephemeral and permanent)
|
||||||
|
if enable == "0" {
|
||||||
|
tokenMap.RemovebyValue(userID)
|
||||||
|
_ = deletePermanent()
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enable universal token (ephemeral or permanent)
|
||||||
|
if enable == "1" {
|
||||||
|
if token == "" {
|
||||||
|
token = uuid.New().String()
|
||||||
|
}
|
||||||
|
|
||||||
|
if permanent == "1" {
|
||||||
|
// make token permanent (persist across restarts)
|
||||||
|
tokenMap.RemovebyValue(userID)
|
||||||
|
if err := upsertPermanent(token); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": true})
|
||||||
|
}
|
||||||
|
|
||||||
|
// default: ephemeral mode (1 hour)
|
||||||
|
_ = deletePermanent()
|
||||||
|
tokenMap.Set(token, userID, time.Hour)
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read current state
|
||||||
|
// Prefer permanent token if it exists.
|
||||||
|
if rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID}); err == nil {
|
||||||
|
dbToken := rec.GetString("token")
|
||||||
|
// If no token was provided, or the caller is asking about their permanent token, return it.
|
||||||
|
if token == "" || token == dbToken {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": dbToken, "active": true, "permanent": true})
|
||||||
|
}
|
||||||
|
// Token doesn't match their permanent token (avoid leaking other info)
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
|
||||||
|
}
|
||||||
|
|
||||||
|
// No permanent token; fall back to ephemeral token map.
|
||||||
|
if token == "" {
|
||||||
|
// return existing token if it exists
|
||||||
|
if token, _, ok := tokenMap.GetByValue(userID); ok {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
|
||||||
|
}
|
||||||
|
// if no token is provided, generate a new one
|
||||||
|
token = uuid.New().String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Token is considered active only if it belongs to the current user.
|
||||||
|
activeUser, ok := tokenMap.GetOk(token)
|
||||||
|
active := ok && activeUser == userID
|
||||||
|
response := map[string]any{"token": token, "active": active, "permanent": false}
|
||||||
|
return e.JSON(http.StatusOK, response)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getHeartbeatStatus returns current heartbeat configuration and whether it's enabled
|
||||||
|
func (h *Hub) getHeartbeatStatus(e *core.RequestEvent) error {
|
||||||
|
if h.hb == nil {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{
|
||||||
|
"enabled": false,
|
||||||
|
"msg": "Set HEARTBEAT_URL to enable outbound heartbeat monitoring",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
cfg := h.hb.GetConfig()
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{
|
||||||
|
"enabled": true,
|
||||||
|
"url": cfg.URL,
|
||||||
|
"interval": cfg.Interval,
|
||||||
|
"method": cfg.Method,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// testHeartbeat triggers a single heartbeat ping and returns the result
|
||||||
|
func (h *Hub) testHeartbeat(e *core.RequestEvent) error {
|
||||||
|
if h.hb == nil {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{
|
||||||
|
"err": "Heartbeat not configured. Set HEARTBEAT_URL environment variable.",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err := h.hb.Send(); err != nil {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"err": err.Error()})
|
||||||
|
}
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"err": false})
|
||||||
|
}
|
||||||
|
|
||||||
|
// containerRequestHandler handles both container logs and info requests
|
||||||
|
func (h *Hub) containerRequestHandler(e *core.RequestEvent, fetchFunc func(*systems.System, string) (string, error), responseKey string) error {
|
||||||
|
systemID := e.Request.URL.Query().Get("system")
|
||||||
|
containerID := e.Request.URL.Query().Get("container")
|
||||||
|
|
||||||
|
if systemID == "" || containerID == "" || !containerIDPattern.MatchString(containerID) {
|
||||||
|
return e.BadRequestError("Invalid system or container parameter", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||||
|
return e.NotFoundError("", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := fetchFunc(system, containerID)
|
||||||
|
if err != nil {
|
||||||
|
return e.InternalServerError("", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.JSON(http.StatusOK, map[string]string{responseKey: data})
|
||||||
|
}
|
||||||
|
|
||||||
|
// getContainerLogs handles GET /api/beszel/containers/logs requests
|
||||||
|
func (h *Hub) getContainerLogs(e *core.RequestEvent) error {
|
||||||
|
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
||||||
|
return system.FetchContainerLogsFromAgent(containerID)
|
||||||
|
}, "logs")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Hub) getContainerInfo(e *core.RequestEvent) error {
|
||||||
|
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
||||||
|
return system.FetchContainerInfoFromAgent(containerID)
|
||||||
|
}, "info")
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSystemdInfo handles GET /api/beszel/systemd/info requests
|
||||||
|
func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
||||||
|
query := e.Request.URL.Query()
|
||||||
|
systemID := query.Get("system")
|
||||||
|
serviceName := query.Get("service")
|
||||||
|
|
||||||
|
if systemID == "" || serviceName == "" {
|
||||||
|
return e.BadRequestError("Invalid system or service parameter", nil)
|
||||||
|
}
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||||
|
return e.NotFoundError("", nil)
|
||||||
|
}
|
||||||
|
// verify service exists before fetching details
|
||||||
|
_, err = e.App.FindFirstRecordByFilter("systemd_services", "system = {:system} && name = {:name}", dbx.Params{
|
||||||
|
"system": systemID,
|
||||||
|
"name": serviceName,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return e.NotFoundError("", err)
|
||||||
|
}
|
||||||
|
details, err := system.FetchSystemdInfoFromAgent(serviceName)
|
||||||
|
if err != nil {
|
||||||
|
return e.InternalServerError("", err)
|
||||||
|
}
|
||||||
|
e.Response.Header().Set("Cache-Control", "public, max-age=60")
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"details": details})
|
||||||
|
}
|
||||||
|
|
||||||
|
// refreshSmartData handles POST /api/beszel/smart/refresh requests
|
||||||
|
// Fetches fresh SMART data from the agent and updates the collection
|
||||||
|
func (h *Hub) refreshSmartData(e *core.RequestEvent) error {
|
||||||
|
systemID := e.Request.URL.Query().Get("system")
|
||||||
|
if systemID == "" {
|
||||||
|
return e.BadRequestError("Invalid system parameter", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||||
|
return e.NotFoundError("", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := system.FetchAndSaveSmartDevices(); err != nil {
|
||||||
|
return e.InternalServerError("", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.JSON(http.StatusOK, map[string]string{"status": "ok"})
|
||||||
|
}
|
||||||
972
internal/hub/api_test.go
Normal file
972
internal/hub/api_test.go
Normal file
@@ -0,0 +1,972 @@
|
|||||||
|
package hub_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/migrations"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
||||||
|
func jsonReader(v any) io.Reader {
|
||||||
|
data, err := json.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return bytes.NewReader(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestApiRoutesAuthentication(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
userToken, err := user.NewAuthToken()
|
||||||
|
require.NoError(t, err, "Failed to create auth token")
|
||||||
|
|
||||||
|
// Create test user and get auth token
|
||||||
|
user2, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
||||||
|
require.NoError(t, err, "Failed to create test user")
|
||||||
|
user2Token, err := user2.NewAuthToken()
|
||||||
|
require.NoError(t, err, "Failed to create user2 auth token")
|
||||||
|
|
||||||
|
adminUser, err := beszelTests.CreateUserWithRole(hub, "admin@example.com", "password123", "admin")
|
||||||
|
require.NoError(t, err, "Failed to create admin user")
|
||||||
|
adminUserToken, err := adminUser.NewAuthToken()
|
||||||
|
|
||||||
|
readOnlyUser, err := beszelTests.CreateUserWithRole(hub, "readonly@example.com", "password123", "readonly")
|
||||||
|
require.NoError(t, err, "Failed to create readonly user")
|
||||||
|
readOnlyUserToken, err := readOnlyUser.NewAuthToken()
|
||||||
|
require.NoError(t, err, "Failed to create readonly user auth token")
|
||||||
|
|
||||||
|
superuser, err := beszelTests.CreateSuperuser(hub, "superuser@example.com", "password123")
|
||||||
|
require.NoError(t, err, "Failed to create superuser")
|
||||||
|
superuserToken, err := superuser.NewAuthToken()
|
||||||
|
require.NoError(t, err, "Failed to create superuser auth token")
|
||||||
|
|
||||||
|
// Create test system
|
||||||
|
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "test-system",
|
||||||
|
"users": []string{user.Id},
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
})
|
||||||
|
require.NoError(t, err, "Failed to create test system")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
// Auth Protected Routes - Should require authentication
|
||||||
|
{
|
||||||
|
Name: "GET /config-yaml - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/config-yaml",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /config-yaml - with user auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/config-yaml",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /config-yaml - with admin auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/config-yaml",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": adminUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"test-system"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /heartbeat-status - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/heartbeat-status",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /heartbeat-status - with user auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/heartbeat-status",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /heartbeat-status - with admin auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/heartbeat-status",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": adminUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{`"enabled":false`},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-heartbeat - with user auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-heartbeat",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-heartbeat - with admin auth should report disabled state",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-heartbeat",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": adminUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"Heartbeat not configured"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - with auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"active", "token", "permanent"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - enable permanent should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token?enable=1&permanent=1&token=permanent-token-123",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"permanent\":true", "permanent-token-123"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - superuser should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": superuserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"Superusers cannot use universal tokens"},
|
||||||
|
TestAppFactory: func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - with readonly auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": readOnlyUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /smart/refresh - missing system should fail 400 with user auth",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/smart/refresh",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "system", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /smart/refresh - with readonly auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": readOnlyUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /smart/refresh - non-user system should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /smart/refresh - good user should pass validation",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 500,
|
||||||
|
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /user-alerts - no auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /user-alerts - with auth should succeed",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE /user-alerts - no auth should fail",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE /user-alerts - with auth should succeed",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
// Create an alert to delete
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=test-system&container=abababababab",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - request for valid non-user system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/containers/logs?system=%s&container=abababababab", system.Id),
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - request for valid non-user system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/containers/info?system=%s&container=abababababab", system.Id),
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - SHARE_ALL_SYSTEMS allows non-member user",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/containers/info?system=%s&container=abababababab", system.Id),
|
||||||
|
ExpectedStatus: 500,
|
||||||
|
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but missing system param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?container=abababababab",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but missing container param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=test-system",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but invalid system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=invalid-system&container=0123456789ab",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - traversal container should fail validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=" + system.Id + "&container=..%2F..%2Fversion",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - traversal container should fail validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=../../version?x=",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - non-hex container should fail validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=container_name",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - good user should pass validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=" + system.Id + "&container=0123456789ab",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 500,
|
||||||
|
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - good user should pass validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=0123456789ab",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 500,
|
||||||
|
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
// /systemd routes
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - request for valid non-user system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user2Token,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - with auth but missing system param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/systemd/info?service=nginx.service",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - with auth but missing service param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"Invalid", "parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - with auth but invalid system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/systemd/info?system=invalid-system&service=nginx.service",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - service not in systemd_services collection should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=notregistered.service", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /systemd/info - with auth and existing service record should pass validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 500,
|
||||||
|
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.CreateRecord(app, "systemd_services", map[string]any{
|
||||||
|
"system": system.Id,
|
||||||
|
"name": "nginx.service",
|
||||||
|
"state": 0,
|
||||||
|
"sub": 1,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
},
|
||||||
|
|
||||||
|
// Auth Optional Routes - Should work without authentication
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - with auth should also succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /info - should return the same as /getkey",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/info",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /first-run - no auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/first-run",
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"firstRun\":false"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /first-run - with auth should also succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/first-run",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"firstRun\":false"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /agent-connect - no auth should succeed (websocket upgrade fails but route is accessible)",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/agent-connect",
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - invalid auth token should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://127.0.0.1",
|
||||||
|
}),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "invalid-token",
|
||||||
|
},
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /user-alerts - invalid auth token should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "invalid-token",
|
||||||
|
},
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
// this works but diff behavior on prod vs dev.
|
||||||
|
// dev returns 502; prod returns 200 with static html page 404
|
||||||
|
// TODO: align dev and prod behavior and re-enable this test
|
||||||
|
// {
|
||||||
|
// Name: "GET /update - shouldn't exist without CHECK_UPDATES env var",
|
||||||
|
// Method: http.MethodGet,
|
||||||
|
// URL: "/api/beszel/update",
|
||||||
|
// NotExpectedContent: []string{"v:", "\"v\":"},
|
||||||
|
// ExpectedStatus: 502,
|
||||||
|
// TestAppFactory: testAppFactory,
|
||||||
|
// },
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
func TestFirstUserCreation(t *testing.T) {
|
||||||
|
t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
testAppFactoryExisting := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "POST /create-user - should be available when no users exist",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/create-user",
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"email": "firstuser@example.com",
|
||||||
|
"password": "password123",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"User created"},
|
||||||
|
TestAppFactory: testAppFactoryExisting,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
userCount, err := hub.CountRecords("users")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Zero(t, userCount, "Should start with no users")
|
||||||
|
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.EqualValues(t, 1, len(superusers), "Should start with one temporary superuser")
|
||||||
|
require.EqualValues(t, migrations.TempAdminEmail, superusers[0].GetString("email"), "Should have created one temporary superuser")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
userCount, err := hub.CountRecords("users")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.EqualValues(t, 1, userCount, "Should have created one user")
|
||||||
|
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.EqualValues(t, 1, len(superusers), "Should have created one superuser")
|
||||||
|
require.EqualValues(t, "firstuser@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /create-user - should not be available when users exist",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/create-user",
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"email": "firstuser@example.com",
|
||||||
|
"password": "password123",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"wasn't found"},
|
||||||
|
TestAppFactory: testAppFactoryExisting,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("CreateUserEndpoint not available when USER_EMAIL, USER_PASSWORD are set", func(t *testing.T) {
|
||||||
|
t.Setenv("BESZEL_HUB_USER_EMAIL", "me@example.com")
|
||||||
|
t.Setenv("BESZEL_HUB_USER_PASSWORD", "password123")
|
||||||
|
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenario := beszelTests.ApiScenario{
|
||||||
|
Name: "POST /create-user - should not be available when USER_EMAIL, USER_PASSWORD are set",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/create-user",
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"wasn't found"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
users, err := hub.FindAllRecords("users")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.EqualValues(t, 1, len(users), "Should start with one user")
|
||||||
|
require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
|
||||||
|
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.EqualValues(t, 1, len(superusers), "Should start with one superuser")
|
||||||
|
require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
users, err := hub.FindAllRecords("users")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.EqualValues(t, 1, len(users), "Should still have one user")
|
||||||
|
require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
|
||||||
|
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.EqualValues(t, 1, len(superusers), "Should still have one superuser")
|
||||||
|
require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
scenario.Test(t)
|
||||||
|
})
|
||||||
|
}
func TestCreateUserEndpointAvailability(t *testing.T) {
|
||||||
|
t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// Ensure no users exist
|
||||||
|
userCount, err := hub.CountRecords("users")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Zero(t, userCount, "Should start with no users")
|
||||||
|
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenario := beszelTests.ApiScenario{
|
||||||
|
Name: "POST /create-user - should be available when no users exist",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/create-user",
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"email": "firstuser@example.com",
|
||||||
|
"password": "password123",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"User created"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
}
|
||||||
|
|
||||||
|
scenario.Test(t)
|
||||||
|
|
||||||
|
// Verify user was created
|
||||||
|
userCount, err = hub.CountRecords("users")
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.EqualValues(t, 1, userCount, "Should have created one user")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("CreateUserEndpoint not available when users exist", func(t *testing.T) {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// Create a user first
|
||||||
|
_, err := beszelTests.CreateUser(hub, "existing@example.com", "password")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenario := beszelTests.ApiScenario{
|
||||||
|
Name: "POST /create-user - should not be available when users exist",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/create-user",
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"email": "another@example.com",
|
||||||
|
"password": "password123",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"wasn't found"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
}
|
||||||
|
|
||||||
|
scenario.Test(t)
|
||||||
|
})
|
||||||
|
}
func TestAutoLoginMiddleware(t *testing.T) {
|
||||||
|
var hubs []*beszelTests.TestHub
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
for _, hub := range hubs {
|
||||||
|
hub.Cleanup()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Setenv("AUTO_LOGIN", "user@test.com")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
hubs = append(hubs, hub)
|
||||||
|
hub.StartHub()
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - without auto login should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - with auto login should fail if no matching user",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - with auto login should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.CreateUser(app, "user@test.com", "password123")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
func TestTrustedHeaderMiddleware(t *testing.T) {
|
||||||
|
var hubs []*beszelTests.TestHub
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
for _, hub := range hubs {
|
||||||
|
hub.Cleanup()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
t.Setenv("TRUSTED_AUTH_HEADER", "X-Beszel-Trusted")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
hubs = append(hubs, hub)
|
||||||
|
hub.StartHub()
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - without trusted header should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - with trusted header should fail if no matching user",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"X-Beszel-Trusted": "user@test.com",
|
||||||
|
},
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - with trusted header should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"X-Beszel-Trusted": "user@test.com",
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
beszelTests.CreateUser(app, "user@test.com", "password123")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateEndpoint(t *testing.T) {
|
||||||
|
t.Setenv("CHECK_UPDATES", "true")
|
||||||
|
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
// Create test user and get auth token
|
||||||
|
// user, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
||||||
|
// require.NoError(t, err, "Failed to create test user")
|
||||||
|
// userToken, err := user.NewAuthToken()
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "update endpoint shouldn't work without auth",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/update",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
// leave this out for now since it actually makes a request to github
|
||||||
|
// {
|
||||||
|
// Name: "GET /update - with valid auth should succeed",
|
||||||
|
// Method: http.MethodGet,
|
||||||
|
// URL: "/api/beszel/update",
|
||||||
|
// Headers: map[string]string{
|
||||||
|
// "Authorization": userToken,
|
||||||
|
// },
|
||||||
|
// ExpectedStatus: 200,
|
||||||
|
// ExpectedContent: []string{`"v":`},
|
||||||
|
// TestAppFactory: testAppFactory,
|
||||||
|
// },
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,6 +1,9 @@
|
|||||||
package hub
|
package hub
|
||||||
|
|
||||||
import "github.com/pocketbase/pocketbase/core"
|
import (
|
||||||
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
)
|
||||||
|
|
||||||
type collectionRules struct {
|
type collectionRules struct {
|
||||||
list *string
|
list *string
|
||||||
@@ -22,11 +25,11 @@ func setCollectionAuthSettings(app core.App) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// disable email auth if DISABLE_PASSWORD_AUTH env var is set
|
// disable email auth if DISABLE_PASSWORD_AUTH env var is set
|
||||||
disablePasswordAuth, _ := GetEnv("DISABLE_PASSWORD_AUTH")
|
disablePasswordAuth, _ := utils.GetEnv("DISABLE_PASSWORD_AUTH")
|
||||||
usersCollection.PasswordAuth.Enabled = disablePasswordAuth != "true"
|
usersCollection.PasswordAuth.Enabled = disablePasswordAuth != "true"
|
||||||
usersCollection.PasswordAuth.IdentityFields = []string{"email"}
|
usersCollection.PasswordAuth.IdentityFields = []string{"email"}
|
||||||
// allow oauth user creation if USER_CREATION is set
|
// allow oauth user creation if USER_CREATION is set
|
||||||
if userCreation, _ := GetEnv("USER_CREATION"); userCreation == "true" {
|
if userCreation, _ := utils.GetEnv("USER_CREATION"); userCreation == "true" {
|
||||||
cr := "@request.context = 'oauth2'"
|
cr := "@request.context = 'oauth2'"
|
||||||
usersCollection.CreateRule = &cr
|
usersCollection.CreateRule = &cr
|
||||||
} else {
|
} else {
|
||||||
@@ -34,7 +37,7 @@ func setCollectionAuthSettings(app core.App) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// enable mfaOtp mfa if MFA_OTP env var is set
|
// enable mfaOtp mfa if MFA_OTP env var is set
|
||||||
mfaOtp, _ := GetEnv("MFA_OTP")
|
mfaOtp, _ := utils.GetEnv("MFA_OTP")
|
||||||
usersCollection.OTP.Length = 6
|
usersCollection.OTP.Length = 6
|
||||||
superusersCollection.OTP.Length = 6
|
superusersCollection.OTP.Length = 6
|
||||||
usersCollection.OTP.Enabled = mfaOtp == "true"
|
usersCollection.OTP.Enabled = mfaOtp == "true"
|
||||||
@@ -50,7 +53,7 @@ func setCollectionAuthSettings(app core.App) error {
|
|||||||
|
|
||||||
// When SHARE_ALL_SYSTEMS is enabled, any authenticated user can read
|
// When SHARE_ALL_SYSTEMS is enabled, any authenticated user can read
|
||||||
// system-scoped data. Write rules continue to block readonly users.
|
// system-scoped data. Write rules continue to block readonly users.
|
||||||
shareAllSystems, _ := GetEnv("SHARE_ALL_SYSTEMS")
|
shareAllSystems, _ := utils.GetEnv("SHARE_ALL_SYSTEMS")
|
||||||
|
|
||||||
authenticatedRule := "@request.auth.id != \"\""
|
authenticatedRule := "@request.auth.id != \"\""
|
||||||
systemsMemberRule := authenticatedRule + " && users.id ?= @request.auth.id"
|
systemsMemberRule := authenticatedRule + " && users.id ?= @request.auth.id"
|
||||||
|
|||||||
@@ -279,9 +279,6 @@ func createFingerprintRecord(app core.App, systemID, token string) error {
|
|||||||
|
|
||||||
// Returns the current config.yml file as a JSON object
|
// Returns the current config.yml file as a JSON object
|
||||||
func GetYamlConfig(e *core.RequestEvent) error {
|
func GetYamlConfig(e *core.RequestEvent) error {
|
||||||
if e.Auth.GetString("role") != "admin" {
|
|
||||||
return e.ForbiddenError("Requires admin role", nil)
|
|
||||||
}
|
|
||||||
configContent, err := generateYAML(e.App)
|
configContent, err := generateYAML(e.App)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -6,30 +6,25 @@ import (
|
|||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
|
||||||
"github.com/henrygd/beszel/internal/alerts"
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
"github.com/henrygd/beszel/internal/hub/config"
|
"github.com/henrygd/beszel/internal/hub/config"
|
||||||
"github.com/henrygd/beszel/internal/hub/heartbeat"
|
"github.com/henrygd/beszel/internal/hub/heartbeat"
|
||||||
"github.com/henrygd/beszel/internal/hub/systems"
|
"github.com/henrygd/beszel/internal/hub/systems"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
"github.com/henrygd/beszel/internal/records"
|
"github.com/henrygd/beszel/internal/records"
|
||||||
"github.com/henrygd/beszel/internal/users"
|
"github.com/henrygd/beszel/internal/users"
|
||||||
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/pocketbase/dbx"
|
|
||||||
"github.com/pocketbase/pocketbase"
|
"github.com/pocketbase/pocketbase"
|
||||||
"github.com/pocketbase/pocketbase/apis"
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Hub is the application. It embeds the PocketBase app and keeps references to subcomponents.
|
||||||
type Hub struct {
|
type Hub struct {
|
||||||
core.App
|
core.App
|
||||||
*alerts.AlertManager
|
*alerts.AlertManager
|
||||||
@@ -43,31 +38,41 @@ type Hub struct {
|
|||||||
appURL string
|
appURL string
|
||||||
}
|
}
|
||||||
|
|
||||||
var containerIDPattern = regexp.MustCompile(`^[a-fA-F0-9]{12,64}$`)
|
|
||||||
|
|
||||||
// NewHub creates a new Hub instance with default configuration
|
// NewHub creates a new Hub instance with default configuration
|
||||||
func NewHub(app core.App) (*Hub, error) {
|
func NewHub(app core.App) *Hub {
|
||||||
hub := &Hub{App: app}
|
hub := &Hub{App: app}
|
||||||
hub.AlertManager = alerts.NewAlertManager(hub)
|
hub.AlertManager = alerts.NewAlertManager(hub)
|
||||||
hub.um = users.NewUserManager(hub)
|
hub.um = users.NewUserManager(hub)
|
||||||
hub.rm = records.NewRecordManager(hub)
|
hub.rm = records.NewRecordManager(hub)
|
||||||
hub.sm = systems.NewSystemManager(hub)
|
hub.sm = systems.NewSystemManager(hub)
|
||||||
hub.hb = heartbeat.New(app, GetEnv)
|
hub.hb = heartbeat.New(app, utils.GetEnv)
|
||||||
if hub.hb != nil {
|
if hub.hb != nil {
|
||||||
hub.hbStop = make(chan struct{})
|
hub.hbStop = make(chan struct{})
|
||||||
}
|
}
|
||||||
return hub, initialize(hub)
|
_ = onAfterBootstrapAndMigrations(app, hub.initialize)
|
||||||
|
return hub
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetEnv retrieves an environment variable with a "BESZEL_HUB_" prefix, or falls back to the unprefixed key.
|
// onAfterBootstrapAndMigrations ensures the provided function runs after the database is set up and migrations are applied.
|
||||||
func GetEnv(key string) (value string, exists bool) {
|
// This is a workaround for behavior in PocketBase where onBootstrap runs before migrations, forcing use of onServe for this purpose.
|
||||||
if value, exists = os.LookupEnv("BESZEL_HUB_" + key); exists {
|
// However, PB's tests.TestApp is already bootstrapped, generally doesn't serve, but does handle migrations.
|
||||||
return value, exists
|
// So this ensures that the provided function runs at the right time either way, after DB is ready and migrations are done.
|
||||||
|
func onAfterBootstrapAndMigrations(app core.App, fn func(app core.App) error) error {
|
||||||
|
// pb tests.TestApp is already bootstrapped and doesn't serve
|
||||||
|
if app.IsBootstrapped() {
|
||||||
|
return fn(app)
|
||||||
}
|
}
|
||||||
// Fallback to the old unprefixed key
|
// Must use OnServe because OnBootstrap appears to run before migrations, even if calling e.Next() before anything else
|
||||||
return os.LookupEnv(key)
|
app.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
||||||
|
if err := fn(e.App); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// StartHub sets up event handlers and starts the PocketBase server
|
||||||
func (h *Hub) StartHub() error {
|
func (h *Hub) StartHub() error {
|
||||||
h.App.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
h.App.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
||||||
// sync systems with config
|
// sync systems with config
|
||||||
@@ -112,24 +117,21 @@ func (h *Hub) StartHub() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// initialize sets up initial configuration (collections, settings, etc.)
|
// initialize sets up initial configuration (collections, settings, etc.)
|
||||||
func initialize(hub *Hub) error {
|
func (h *Hub) initialize(app core.App) error {
|
||||||
if !hub.App.IsBootstrapped() {
|
|
||||||
hub.App.Bootstrap()
|
|
||||||
}
|
|
||||||
// set general settings
|
// set general settings
|
||||||
settings := hub.App.Settings()
|
settings := app.Settings()
|
||||||
// batch requests (for alerts)
|
// batch requests (for alerts)
|
||||||
settings.Batch.Enabled = true
|
settings.Batch.Enabled = true
|
||||||
// set URL if APP_URL env is set
|
// set URL if APP_URL env is set
|
||||||
if appURL, isSet := GetEnv("APP_URL"); isSet {
|
if appURL, isSet := utils.GetEnv("APP_URL"); isSet {
|
||||||
hub.appURL = appURL
|
h.appURL = appURL
|
||||||
settings.Meta.AppURL = hub.appURL
|
settings.Meta.AppURL = appURL
|
||||||
}
|
}
|
||||||
if err := hub.App.Save(settings); err != nil {
|
if err := app.Save(settings); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// set auth settings
|
// set auth settings
|
||||||
return setCollectionAuthSettings(hub.App)
|
return setCollectionAuthSettings(app)
|
||||||
}
|
}
|
||||||
|
|
||||||
// registerCronJobs sets up scheduled tasks
|
// registerCronJobs sets up scheduled tasks
|
||||||
@@ -141,296 +143,7 @@ func (h *Hub) registerCronJobs(_ *core.ServeEvent) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// custom middlewares
|
// GetSSHKey generates key pair if it doesn't exist and returns signer
|
||||||
func (h *Hub) registerMiddlewares(se *core.ServeEvent) {
|
|
||||||
// authorizes request with user matching the provided email
|
|
||||||
authorizeRequestWithEmail := func(e *core.RequestEvent, email string) (err error) {
|
|
||||||
if e.Auth != nil || email == "" {
|
|
||||||
return e.Next()
|
|
||||||
}
|
|
||||||
isAuthRefresh := e.Request.URL.Path == "/api/collections/users/auth-refresh" && e.Request.Method == http.MethodPost
|
|
||||||
e.Auth, err = e.App.FindFirstRecordByData("users", "email", email)
|
|
||||||
if err != nil || !isAuthRefresh {
|
|
||||||
return e.Next()
|
|
||||||
}
|
|
||||||
// auth refresh endpoint, make sure token is set in header
|
|
||||||
token, _ := e.Auth.NewAuthToken()
|
|
||||||
e.Request.Header.Set("Authorization", token)
|
|
||||||
return e.Next()
|
|
||||||
}
|
|
||||||
// authenticate with trusted header
|
|
||||||
if autoLogin, _ := GetEnv("AUTO_LOGIN"); autoLogin != "" {
|
|
||||||
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
|
||||||
return authorizeRequestWithEmail(e, autoLogin)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
// authenticate with trusted header
|
|
||||||
if trustedHeader, _ := GetEnv("TRUSTED_AUTH_HEADER"); trustedHeader != "" {
|
|
||||||
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
|
||||||
return authorizeRequestWithEmail(e, e.Request.Header.Get(trustedHeader))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// custom api routes
|
|
||||||
func (h *Hub) registerApiRoutes(se *core.ServeEvent) error {
|
|
||||||
// auth protected routes
|
|
||||||
apiAuth := se.Router.Group("/api/beszel")
|
|
||||||
apiAuth.Bind(apis.RequireAuth())
|
|
||||||
// auth optional routes
|
|
||||||
apiNoAuth := se.Router.Group("/api/beszel")
|
|
||||||
|
|
||||||
// create first user endpoint only needed if no users exist
|
|
||||||
if totalUsers, _ := se.App.CountRecords("users"); totalUsers == 0 {
|
|
||||||
apiNoAuth.POST("/create-user", h.um.CreateFirstUser)
|
|
||||||
}
|
|
||||||
// check if first time setup on login page
|
|
||||||
apiNoAuth.GET("/first-run", func(e *core.RequestEvent) error {
|
|
||||||
total, err := e.App.CountRecords("users")
|
|
||||||
return e.JSON(http.StatusOK, map[string]bool{"firstRun": err == nil && total == 0})
|
|
||||||
})
|
|
||||||
// get public key and version
|
|
||||||
apiAuth.GET("/getkey", func(e *core.RequestEvent) error {
|
|
||||||
return e.JSON(http.StatusOK, map[string]string{"key": h.pubKey, "v": beszel.Version})
|
|
||||||
})
|
|
||||||
// send test notification
|
|
||||||
apiAuth.POST("/test-notification", h.SendTestNotification)
|
|
||||||
// heartbeat status and test
|
|
||||||
apiAuth.GET("/heartbeat-status", h.getHeartbeatStatus)
|
|
||||||
apiAuth.POST("/test-heartbeat", h.testHeartbeat)
|
|
||||||
// get config.yml content
|
|
||||||
apiAuth.GET("/config-yaml", config.GetYamlConfig)
|
|
||||||
// handle agent websocket connection
|
|
||||||
apiNoAuth.GET("/agent-connect", h.handleAgentConnect)
|
|
||||||
// get or create universal tokens
|
|
||||||
apiAuth.GET("/universal-token", h.getUniversalToken)
|
|
||||||
// update / delete user alerts
|
|
||||||
apiAuth.POST("/user-alerts", alerts.UpsertUserAlerts)
|
|
||||||
apiAuth.DELETE("/user-alerts", alerts.DeleteUserAlerts)
|
|
||||||
// refresh SMART devices for a system
|
|
||||||
apiAuth.POST("/smart/refresh", h.refreshSmartData)
|
|
||||||
// get systemd service details
|
|
||||||
apiAuth.GET("/systemd/info", h.getSystemdInfo)
|
|
||||||
// /containers routes
|
|
||||||
if enabled, _ := GetEnv("CONTAINER_DETAILS"); enabled != "false" {
|
|
||||||
// get container logs
|
|
||||||
apiAuth.GET("/containers/logs", h.getContainerLogs)
|
|
||||||
// get container info
|
|
||||||
apiAuth.GET("/containers/info", h.getContainerInfo)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handler for universal token API endpoint (create, read, delete)
|
|
||||||
func (h *Hub) getUniversalToken(e *core.RequestEvent) error {
|
|
||||||
tokenMap := universalTokenMap.GetMap()
|
|
||||||
userID := e.Auth.Id
|
|
||||||
query := e.Request.URL.Query()
|
|
||||||
token := query.Get("token")
|
|
||||||
enable := query.Get("enable")
|
|
||||||
permanent := query.Get("permanent")
|
|
||||||
|
|
||||||
// helper for deleting any existing permanent token record for this user
|
|
||||||
deletePermanent := func() error {
|
|
||||||
rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
|
|
||||||
if err != nil {
|
|
||||||
return nil // no record
|
|
||||||
}
|
|
||||||
return h.Delete(rec)
|
|
||||||
}
|
|
||||||
|
|
||||||
// helper for upserting a permanent token record for this user
|
|
||||||
upsertPermanent := func(token string) error {
|
|
||||||
rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
|
|
||||||
if err == nil {
|
|
||||||
rec.Set("token", token)
|
|
||||||
return h.Save(rec)
|
|
||||||
}
|
|
||||||
|
|
||||||
col, err := h.FindCachedCollectionByNameOrId("universal_tokens")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
newRec := core.NewRecord(col)
|
|
||||||
newRec.Set("user", userID)
|
|
||||||
newRec.Set("token", token)
|
|
||||||
return h.Save(newRec)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disable universal tokens (both ephemeral and permanent)
|
|
||||||
if enable == "0" {
|
|
||||||
tokenMap.RemovebyValue(userID)
|
|
||||||
_ = deletePermanent()
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enable universal token (ephemeral or permanent)
|
|
||||||
if enable == "1" {
|
|
||||||
if token == "" {
|
|
||||||
token = uuid.New().String()
|
|
||||||
}
|
|
||||||
|
|
||||||
if permanent == "1" {
|
|
||||||
// make token permanent (persist across restarts)
|
|
||||||
tokenMap.RemovebyValue(userID)
|
|
||||||
if err := upsertPermanent(token); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": true})
|
|
||||||
}
|
|
||||||
|
|
||||||
// default: ephemeral mode (1 hour)
|
|
||||||
_ = deletePermanent()
|
|
||||||
tokenMap.Set(token, userID, time.Hour)
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read current state
|
|
||||||
// Prefer permanent token if it exists.
|
|
||||||
if rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID}); err == nil {
|
|
||||||
dbToken := rec.GetString("token")
|
|
||||||
// If no token was provided, or the caller is asking about their permanent token, return it.
|
|
||||||
if token == "" || token == dbToken {
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": dbToken, "active": true, "permanent": true})
|
|
||||||
}
|
|
||||||
// Token doesn't match their permanent token (avoid leaking other info)
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
|
|
||||||
}
|
|
||||||
|
|
||||||
// No permanent token; fall back to ephemeral token map.
|
|
||||||
if token == "" {
|
|
||||||
// return existing token if it exists
|
|
||||||
if token, _, ok := tokenMap.GetByValue(userID); ok {
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
|
|
||||||
}
|
|
||||||
// if no token is provided, generate a new one
|
|
||||||
token = uuid.New().String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Token is considered active only if it belongs to the current user.
|
|
||||||
activeUser, ok := tokenMap.GetOk(token)
|
|
||||||
active := ok && activeUser == userID
|
|
||||||
response := map[string]any{"token": token, "active": active, "permanent": false}
|
|
||||||
return e.JSON(http.StatusOK, response)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getHeartbeatStatus returns current heartbeat configuration and whether it's enabled
|
|
||||||
func (h *Hub) getHeartbeatStatus(e *core.RequestEvent) error {
|
|
||||||
if e.Auth.GetString("role") != "admin" {
|
|
||||||
return e.ForbiddenError("Requires admin role", nil)
|
|
||||||
}
|
|
||||||
if h.hb == nil {
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{
|
|
||||||
"enabled": false,
|
|
||||||
"msg": "Set HEARTBEAT_URL to enable outbound heartbeat monitoring",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
cfg := h.hb.GetConfig()
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{
|
|
||||||
"enabled": true,
|
|
||||||
"url": cfg.URL,
|
|
||||||
"interval": cfg.Interval,
|
|
||||||
"method": cfg.Method,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// testHeartbeat triggers a single heartbeat ping and returns the result
|
|
||||||
func (h *Hub) testHeartbeat(e *core.RequestEvent) error {
|
|
||||||
if e.Auth.GetString("role") != "admin" {
|
|
||||||
return e.ForbiddenError("Requires admin role", nil)
|
|
||||||
}
|
|
||||||
if h.hb == nil {
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{
|
|
||||||
"err": "Heartbeat not configured. Set HEARTBEAT_URL environment variable.",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if err := h.hb.Send(); err != nil {
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"err": err.Error()})
|
|
||||||
}
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"err": false})
|
|
||||||
}
|
|
||||||
|
|
||||||
// containerRequestHandler handles both container logs and info requests
|
|
||||||
func (h *Hub) containerRequestHandler(e *core.RequestEvent, fetchFunc func(*systems.System, string) (string, error), responseKey string) error {
|
|
||||||
systemID := e.Request.URL.Query().Get("system")
|
|
||||||
containerID := e.Request.URL.Query().Get("container")
|
|
||||||
|
|
||||||
if systemID == "" || containerID == "" {
|
|
||||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and container parameters are required"})
|
|
||||||
}
|
|
||||||
if !containerIDPattern.MatchString(containerID) {
|
|
||||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "invalid container parameter"})
|
|
||||||
}
|
|
||||||
|
|
||||||
system, err := h.sm.GetSystem(systemID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := fetchFunc(system, containerID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
|
||||||
}
|
|
||||||
|
|
||||||
return e.JSON(http.StatusOK, map[string]string{responseKey: data})
|
|
||||||
}
|
|
||||||
|
|
||||||
// getContainerLogs handles GET /api/beszel/containers/logs requests
|
|
||||||
func (h *Hub) getContainerLogs(e *core.RequestEvent) error {
|
|
||||||
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
|
||||||
return system.FetchContainerLogsFromAgent(containerID)
|
|
||||||
}, "logs")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Hub) getContainerInfo(e *core.RequestEvent) error {
|
|
||||||
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
|
||||||
return system.FetchContainerInfoFromAgent(containerID)
|
|
||||||
}, "info")
|
|
||||||
}
|
|
||||||
|
|
||||||
// getSystemdInfo handles GET /api/beszel/systemd/info requests
|
|
||||||
func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
|
||||||
query := e.Request.URL.Query()
|
|
||||||
systemID := query.Get("system")
|
|
||||||
serviceName := query.Get("service")
|
|
||||||
|
|
||||||
if systemID == "" || serviceName == "" {
|
|
||||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and service parameters are required"})
|
|
||||||
}
|
|
||||||
system, err := h.sm.GetSystem(systemID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
|
||||||
}
|
|
||||||
details, err := system.FetchSystemdInfoFromAgent(serviceName)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
|
||||||
}
|
|
||||||
e.Response.Header().Set("Cache-Control", "public, max-age=60")
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"details": details})
|
|
||||||
}
|
|
||||||
|
|
||||||
// refreshSmartData handles POST /api/beszel/smart/refresh requests
|
|
||||||
// Fetches fresh SMART data from the agent and updates the collection
|
|
||||||
func (h *Hub) refreshSmartData(e *core.RequestEvent) error {
|
|
||||||
systemID := e.Request.URL.Query().Get("system")
|
|
||||||
if systemID == "" {
|
|
||||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system parameter is required"})
|
|
||||||
}
|
|
||||||
|
|
||||||
system, err := h.sm.GetSystem(systemID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch and save SMART devices
|
|
||||||
if err := system.FetchAndSaveSmartDevices(); err != nil {
|
|
||||||
return e.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
|
||||||
}
|
|
||||||
|
|
||||||
return e.JSON(http.StatusOK, map[string]string{"status": "ok"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// generates key pair if it doesn't exist and returns signer
|
|
||||||
func (h *Hub) GetSSHKey(dataDir string) (ssh.Signer, error) {
|
func (h *Hub) GetSSHKey(dataDir string) (ssh.Signer, error) {
|
||||||
if h.signer != nil {
|
if h.signer != nil {
|
||||||
return h.signer, nil
|
return h.signer, nil
|
||||||
|
|||||||
@@ -3,36 +3,20 @@
|
|||||||
package hub_test
|
package hub_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
"encoding/json"
|
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/migrations"
|
|
||||||
beszelTests "github.com/henrygd/beszel/internal/tests"
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
|
||||||
pbTests "github.com/pocketbase/pocketbase/tests"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
|
||||||
func jsonReader(v any) io.Reader {
|
|
||||||
data, err := json.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return bytes.NewReader(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMakeLink(t *testing.T) {
|
func TestMakeLink(t *testing.T) {
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
|
||||||
@@ -265,699 +249,6 @@ func TestGetSSHKey(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestApiRoutesAuthentication(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
// Create test user and get auth token
|
|
||||||
user, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
|
||||||
require.NoError(t, err, "Failed to create test user")
|
|
||||||
|
|
||||||
adminUser, err := beszelTests.CreateRecord(hub, "users", map[string]any{
|
|
||||||
"email": "admin@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
"role": "admin",
|
|
||||||
})
|
|
||||||
require.NoError(t, err, "Failed to create admin user")
|
|
||||||
adminUserToken, err := adminUser.NewAuthToken()
|
|
||||||
|
|
||||||
// superUser, err := beszelTests.CreateRecord(hub, core.CollectionNameSuperusers, map[string]any{
|
|
||||||
// "email": "superuser@example.com",
|
|
||||||
// "password": "password123",
|
|
||||||
// })
|
|
||||||
// require.NoError(t, err, "Failed to create superuser")
|
|
||||||
|
|
||||||
userToken, err := user.NewAuthToken()
|
|
||||||
require.NoError(t, err, "Failed to create auth token")
|
|
||||||
|
|
||||||
// Create test system for user-alerts endpoints
|
|
||||||
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"users": []string{user.Id},
|
|
||||||
"host": "127.0.0.1",
|
|
||||||
})
|
|
||||||
require.NoError(t, err, "Failed to create test system")
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
// Auth Protected Routes - Should require authentication
|
|
||||||
{
|
|
||||||
Name: "POST /test-notification - no auth should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-notification",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"url": "generic://127.0.0.1",
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /test-notification - with auth should succeed",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-notification",
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"url": "generic://127.0.0.1",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"sending message"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /config-yaml - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/config-yaml",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /config-yaml - with user auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/config-yaml",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 403,
|
|
||||||
ExpectedContent: []string{"Requires admin"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /config-yaml - with admin auth should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/config-yaml",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": adminUserToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"test-system"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /heartbeat-status - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/heartbeat-status",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /heartbeat-status - with user auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/heartbeat-status",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 403,
|
|
||||||
ExpectedContent: []string{"Requires admin role"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /heartbeat-status - with admin auth should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/heartbeat-status",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": adminUserToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{`"enabled":false`},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /test-heartbeat - with user auth should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-heartbeat",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 403,
|
|
||||||
ExpectedContent: []string{"Requires admin role"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /test-heartbeat - with admin auth should report disabled state",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-heartbeat",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": adminUserToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"Heartbeat not configured"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /universal-token - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/universal-token",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /universal-token - with auth should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/universal-token",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"active", "token", "permanent"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /universal-token - enable permanent should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/universal-token?enable=1&permanent=1&token=permanent-token-123",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"permanent\":true", "permanent-token-123"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /user-alerts - no auth should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /user-alerts - with auth should succeed",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE /user-alerts - no auth should fail",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE /user-alerts - with auth should succeed",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
// Create an alert to delete
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system.Id,
|
|
||||||
"user": user.Id,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?system=test-system&container=test-container",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - with auth but missing system param should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?container=test-container",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"system and container parameters are required"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - with auth but missing container param should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?system=test-system",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"system and container parameters are required"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - with auth but invalid system should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?system=invalid-system&container=0123456789ab",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"system not found"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - traversal container should fail validation",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?system=" + system.Id + "&container=..%2F..%2Fversion",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"invalid container parameter"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/info - traversal container should fail validation",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=../../version?x=",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"invalid container parameter"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/info - non-hex container should fail validation",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=container_name",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"invalid container parameter"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
|
|
||||||
// Auth Optional Routes - Should work without authentication
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with auth should also succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /first-run - no auth should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/first-run",
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"firstRun\":false"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /first-run - with auth should also succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/first-run",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"firstRun\":false"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /agent-connect - no auth should succeed (websocket upgrade fails but route is accessible)",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/agent-connect",
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /test-notification - invalid auth token should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-notification",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"url": "generic://127.0.0.1",
|
|
||||||
}),
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": "invalid-token",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /user-alerts - invalid auth token should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": "invalid-token",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFirstUserCreation(t *testing.T) {
|
|
||||||
t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
testAppFactoryExisting := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
{
|
|
||||||
Name: "POST /create-user - should be available when no users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "firstuser@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"User created"},
|
|
||||||
TestAppFactory: testAppFactoryExisting,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
userCount, err := hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Zero(t, userCount, "Should start with no users")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should start with one temporary superuser")
|
|
||||||
require.EqualValues(t, migrations.TempAdminEmail, superusers[0].GetString("email"), "Should have created one temporary superuser")
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
userCount, err := hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, userCount, "Should have created one user")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should have created one superuser")
|
|
||||||
require.EqualValues(t, "firstuser@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /create-user - should not be available when users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "firstuser@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"wasn't found"},
|
|
||||||
TestAppFactory: testAppFactoryExisting,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("CreateUserEndpoint not available when USER_EMAIL, USER_PASSWORD are set", func(t *testing.T) {
|
|
||||||
t.Setenv("BESZEL_HUB_USER_EMAIL", "me@example.com")
|
|
||||||
t.Setenv("BESZEL_HUB_USER_PASSWORD", "password123")
|
|
||||||
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario := beszelTests.ApiScenario{
|
|
||||||
Name: "POST /create-user - should not be available when USER_EMAIL, USER_PASSWORD are set",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"wasn't found"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
users, err := hub.FindAllRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(users), "Should start with one user")
|
|
||||||
require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should start with one superuser")
|
|
||||||
require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
users, err := hub.FindAllRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(users), "Should still have one user")
|
|
||||||
require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should still have one superuser")
|
|
||||||
require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario.Test(t)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCreateUserEndpointAvailability(t *testing.T) {
|
|
||||||
t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
// Ensure no users exist
|
|
||||||
userCount, err := hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Zero(t, userCount, "Should start with no users")
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario := beszelTests.ApiScenario{
|
|
||||||
Name: "POST /create-user - should be available when no users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "firstuser@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"User created"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario.Test(t)
|
|
||||||
|
|
||||||
// Verify user was created
|
|
||||||
userCount, err = hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, userCount, "Should have created one user")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("CreateUserEndpoint not available when users exist", func(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
// Create a user first
|
|
||||||
_, err := beszelTests.CreateUser(hub, "existing@example.com", "password")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario := beszelTests.ApiScenario{
|
|
||||||
Name: "POST /create-user - should not be available when users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "another@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"wasn't found"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario.Test(t)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAutoLoginMiddleware(t *testing.T) {
|
|
||||||
var hubs []*beszelTests.TestHub
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
for _, hub := range hubs {
|
|
||||||
hub.Cleanup()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
t.Setenv("AUTO_LOGIN", "user@test.com")
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
hubs = append(hubs, hub)
|
|
||||||
hub.StartHub()
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - without auto login should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with auto login should fail if no matching user",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with auto login should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.CreateUser(app, "user@test.com", "password123")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTrustedHeaderMiddleware(t *testing.T) {
|
|
||||||
var hubs []*beszelTests.TestHub
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
for _, hub := range hubs {
|
|
||||||
hub.Cleanup()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
t.Setenv("TRUSTED_AUTH_HEADER", "X-Beszel-Trusted")
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
hubs = append(hubs, hub)
|
|
||||||
hub.StartHub()
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - without trusted header should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with trusted header should fail if no matching user",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"X-Beszel-Trusted": "user@test.com",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with trusted header should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"X-Beszel-Trusted": "user@test.com",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.CreateUser(app, "user@test.com", "password123")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAppUrl(t *testing.T) {
|
func TestAppUrl(t *testing.T) {
|
||||||
t.Run("no APP_URL does't change app url", func(t *testing.T) {
|
t.Run("no APP_URL does't change app url", func(t *testing.T) {
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
|||||||
42
internal/hub/server.go
Normal file
42
internal/hub/server.go
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
package hub
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PublicAppInfo defines the structure of the public app information that will be injected into the HTML
|
||||||
|
type PublicAppInfo struct {
|
||||||
|
BASE_PATH string
|
||||||
|
HUB_VERSION string
|
||||||
|
HUB_URL string
|
||||||
|
OAUTH_DISABLE_POPUP bool `json:"OAUTH_DISABLE_POPUP,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// modifyIndexHTML injects the public app information into the index.html content
|
||||||
|
func modifyIndexHTML(hub *Hub, html []byte) string {
|
||||||
|
info := getPublicAppInfo(hub)
|
||||||
|
content, err := json.Marshal(info)
|
||||||
|
if err != nil {
|
||||||
|
return string(html)
|
||||||
|
}
|
||||||
|
htmlContent := strings.ReplaceAll(string(html), "./", info.BASE_PATH)
|
||||||
|
return strings.Replace(htmlContent, "\"{info}\"", string(content), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getPublicAppInfo(hub *Hub) PublicAppInfo {
|
||||||
|
parsedURL, _ := url.Parse(hub.appURL)
|
||||||
|
info := PublicAppInfo{
|
||||||
|
BASE_PATH: strings.TrimSuffix(parsedURL.Path, "/") + "/",
|
||||||
|
HUB_VERSION: beszel.Version,
|
||||||
|
HUB_URL: hub.appURL,
|
||||||
|
}
|
||||||
|
if val, _ := utils.GetEnv("OAUTH_DISABLE_POPUP"); val == "true" {
|
||||||
|
info.OAUTH_DISABLE_POPUP = true
|
||||||
|
}
|
||||||
|
return info
|
||||||
|
}
|
||||||
@@ -5,14 +5,11 @@ package hub
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"log/slog"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httputil"
|
"net/http/httputil"
|
||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
|
||||||
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
"github.com/pocketbase/pocketbase/tools/osutils"
|
"github.com/pocketbase/pocketbase/tools/osutils"
|
||||||
)
|
)
|
||||||
@@ -39,7 +36,7 @@ func (rm *responseModifier) RoundTrip(req *http.Request) (*http.Response, error)
|
|||||||
}
|
}
|
||||||
resp.Body.Close()
|
resp.Body.Close()
|
||||||
// Create a new response with the modified body
|
// Create a new response with the modified body
|
||||||
modifiedBody := rm.modifyHTML(string(body))
|
modifiedBody := modifyIndexHTML(rm.hub, body)
|
||||||
resp.Body = io.NopCloser(strings.NewReader(modifiedBody))
|
resp.Body = io.NopCloser(strings.NewReader(modifiedBody))
|
||||||
resp.ContentLength = int64(len(modifiedBody))
|
resp.ContentLength = int64(len(modifiedBody))
|
||||||
resp.Header.Set("Content-Length", fmt.Sprintf("%d", len(modifiedBody)))
|
resp.Header.Set("Content-Length", fmt.Sprintf("%d", len(modifiedBody)))
|
||||||
@@ -47,22 +44,8 @@ func (rm *responseModifier) RoundTrip(req *http.Request) (*http.Response, error)
|
|||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rm *responseModifier) modifyHTML(html string) string {
|
|
||||||
parsedURL, err := url.Parse(rm.hub.appURL)
|
|
||||||
if err != nil {
|
|
||||||
return html
|
|
||||||
}
|
|
||||||
// fix base paths in html if using subpath
|
|
||||||
basePath := strings.TrimSuffix(parsedURL.Path, "/") + "/"
|
|
||||||
html = strings.ReplaceAll(html, "./", basePath)
|
|
||||||
html = strings.Replace(html, "{{V}}", beszel.Version, 1)
|
|
||||||
html = strings.Replace(html, "{{HUB_URL}}", rm.hub.appURL, 1)
|
|
||||||
return html
|
|
||||||
}
|
|
||||||
|
|
||||||
// startServer sets up the development server for Beszel
|
// startServer sets up the development server for Beszel
|
||||||
func (h *Hub) startServer(se *core.ServeEvent) error {
|
func (h *Hub) startServer(se *core.ServeEvent) error {
|
||||||
slog.Info("starting server", "appURL", h.appURL)
|
|
||||||
proxy := httputil.NewSingleHostReverseProxy(&url.URL{
|
proxy := httputil.NewSingleHostReverseProxy(&url.URL{
|
||||||
Scheme: "http",
|
Scheme: "http",
|
||||||
Host: "localhost:5173",
|
Host: "localhost:5173",
|
||||||
|
|||||||
@@ -5,10 +5,9 @@ package hub
|
|||||||
import (
|
import (
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
"github.com/henrygd/beszel/internal/site"
|
"github.com/henrygd/beszel/internal/site"
|
||||||
|
|
||||||
"github.com/pocketbase/pocketbase/apis"
|
"github.com/pocketbase/pocketbase/apis"
|
||||||
@@ -17,22 +16,13 @@ import (
|
|||||||
|
|
||||||
// startServer sets up the production server for Beszel
|
// startServer sets up the production server for Beszel
|
||||||
func (h *Hub) startServer(se *core.ServeEvent) error {
|
func (h *Hub) startServer(se *core.ServeEvent) error {
|
||||||
// parse app url
|
|
||||||
parsedURL, err := url.Parse(h.appURL)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// fix base paths in html if using subpath
|
|
||||||
basePath := strings.TrimSuffix(parsedURL.Path, "/") + "/"
|
|
||||||
indexFile, _ := fs.ReadFile(site.DistDirFS, "index.html")
|
indexFile, _ := fs.ReadFile(site.DistDirFS, "index.html")
|
||||||
html := strings.ReplaceAll(string(indexFile), "./", basePath)
|
html := modifyIndexHTML(h, indexFile)
|
||||||
html = strings.Replace(html, "{{V}}", beszel.Version, 1)
|
|
||||||
html = strings.Replace(html, "{{HUB_URL}}", h.appURL, 1)
|
|
||||||
// set up static asset serving
|
// set up static asset serving
|
||||||
staticPaths := [2]string{"/static/", "/assets/"}
|
staticPaths := [2]string{"/static/", "/assets/"}
|
||||||
serveStatic := apis.Static(site.DistDirFS, false)
|
serveStatic := apis.Static(site.DistDirFS, false)
|
||||||
// get CSP configuration
|
// get CSP configuration
|
||||||
csp, cspExists := GetEnv("CSP")
|
csp, cspExists := utils.GetEnv("CSP")
|
||||||
// add route
|
// add route
|
||||||
se.Router.GET("/{path...}", func(e *core.RequestEvent) error {
|
se.Router.GET("/{path...}", func(e *core.RequestEvent) error {
|
||||||
// serve static assets if path is in staticPaths
|
// serve static assets if path is in staticPaths
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ import (
|
|||||||
|
|
||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
"github.com/henrygd/beszel/internal/hub/transport"
|
"github.com/henrygd/beszel/internal/hub/transport"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
"github.com/henrygd/beszel/internal/hub/ws"
|
"github.com/henrygd/beszel/internal/hub/ws"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/container"
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
@@ -145,6 +146,7 @@ func (sys *System) update() error {
|
|||||||
// update smart interval if it's set on the agent side
|
// update smart interval if it's set on the agent side
|
||||||
if data.Details.SmartInterval > 0 {
|
if data.Details.SmartInterval > 0 {
|
||||||
sys.smartInterval = data.Details.SmartInterval
|
sys.smartInterval = data.Details.SmartInterval
|
||||||
|
sys.manager.hub.Logger().Info("SMART interval updated from agent details", "system", sys.Id, "interval", sys.smartInterval.String())
|
||||||
// make sure we reset expiration of lastFetch to remain as long as the new smart interval
|
// make sure we reset expiration of lastFetch to remain as long as the new smart interval
|
||||||
// to prevent premature expiration leading to new fetch if interval is different.
|
// to prevent premature expiration leading to new fetch if interval is different.
|
||||||
sys.manager.smartFetchMap.UpdateExpiration(sys.Id, sys.smartInterval+time.Minute)
|
sys.manager.smartFetchMap.UpdateExpiration(sys.Id, sys.smartInterval+time.Minute)
|
||||||
@@ -156,11 +158,10 @@ func (sys *System) update() error {
|
|||||||
if sys.smartInterval <= 0 {
|
if sys.smartInterval <= 0 {
|
||||||
sys.smartInterval = time.Hour
|
sys.smartInterval = time.Hour
|
||||||
}
|
}
|
||||||
lastFetch, _ := sys.manager.smartFetchMap.GetOk(sys.Id)
|
if sys.shouldFetchSmart() && sys.smartFetching.CompareAndSwap(false, true) {
|
||||||
if time.Since(time.UnixMilli(lastFetch-1e4)) >= sys.smartInterval && sys.smartFetching.CompareAndSwap(false, true) {
|
sys.manager.hub.Logger().Info("SMART fetch", "system", sys.Id, "interval", sys.smartInterval.String())
|
||||||
go func() {
|
go func() {
|
||||||
defer sys.smartFetching.Store(false)
|
defer sys.smartFetching.Store(false)
|
||||||
sys.manager.smartFetchMap.Set(sys.Id, time.Now().UnixMilli(), sys.smartInterval+time.Minute)
|
|
||||||
_ = sys.FetchAndSaveSmartDevices()
|
_ = sys.FetchAndSaveSmartDevices()
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
@@ -184,7 +185,7 @@ func (sys *System) handlePaused() {
|
|||||||
|
|
||||||
// createRecords updates the system record and adds system_stats and container_stats records
|
// createRecords updates the system record and adds system_stats and container_stats records
|
||||||
func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error) {
|
func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error) {
|
||||||
systemRecord, err := sys.getRecord()
|
systemRecord, err := sys.getRecord(sys.manager.hub)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -343,8 +344,8 @@ func createContainerRecords(app core.App, data []*container.Stats, systemId stri
|
|||||||
|
|
||||||
// getRecord retrieves the system record from the database.
|
// getRecord retrieves the system record from the database.
|
||||||
// If the record is not found, it removes the system from the manager.
|
// If the record is not found, it removes the system from the manager.
|
||||||
func (sys *System) getRecord() (*core.Record, error) {
|
func (sys *System) getRecord(app core.App) (*core.Record, error) {
|
||||||
record, err := sys.manager.hub.FindRecordById("systems", sys.Id)
|
record, err := app.FindRecordById("systems", sys.Id)
|
||||||
if err != nil || record == nil {
|
if err != nil || record == nil {
|
||||||
_ = sys.manager.RemoveSystem(sys.Id)
|
_ = sys.manager.RemoveSystem(sys.Id)
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -352,6 +353,27 @@ func (sys *System) getRecord() (*core.Record, error) {
|
|||||||
return record, nil
|
return record, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// HasUser checks if the given user is in the system's users list.
|
||||||
|
// Returns true if SHARE_ALL_SYSTEMS is enabled (any authenticated user can access any system).
|
||||||
|
func (sys *System) HasUser(app core.App, user *core.Record) bool {
|
||||||
|
if user == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if v, _ := utils.GetEnv("SHARE_ALL_SYSTEMS"); v == "true" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
var recordData = struct {
|
||||||
|
Users string
|
||||||
|
}{}
|
||||||
|
err := app.DB().NewQuery("SELECT users FROM systems WHERE id={:id}").
|
||||||
|
Bind(dbx.Params{"id": sys.Id}).
|
||||||
|
One(&recordData)
|
||||||
|
if err != nil || recordData.Users == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return strings.Contains(recordData.Users, user.Id)
|
||||||
|
}
|
||||||
|
|
||||||
// setDown marks a system as down in the database.
|
// setDown marks a system as down in the database.
|
||||||
// It takes the original error that caused the system to go down and returns any error
|
// It takes the original error that caused the system to go down and returns any error
|
||||||
// encountered during the process of updating the system status.
|
// encountered during the process of updating the system status.
|
||||||
@@ -359,7 +381,7 @@ func (sys *System) setDown(originalError error) error {
|
|||||||
if sys.Status == down || sys.Status == paused {
|
if sys.Status == down || sys.Status == paused {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
record, err := sys.getRecord()
|
record, err := sys.getRecord(sys.manager.hub)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -643,6 +665,7 @@ func (s *System) createSSHClient() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
s.agentVersion, _ = extractAgentVersion(string(s.client.Conn.ServerVersion()))
|
s.agentVersion, _ = extractAgentVersion(string(s.client.Conn.ServerVersion()))
|
||||||
|
s.manager.resetFailedSmartFetchState(s.Id)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -44,7 +44,7 @@ type SystemManager struct {
|
|||||||
hub hubLike // Hub interface for database and alert operations
|
hub hubLike // Hub interface for database and alert operations
|
||||||
systems *store.Store[string, *System] // Thread-safe store of active systems
|
systems *store.Store[string, *System] // Thread-safe store of active systems
|
||||||
sshConfig *ssh.ClientConfig // SSH client configuration for system connections
|
sshConfig *ssh.ClientConfig // SSH client configuration for system connections
|
||||||
smartFetchMap *expirymap.ExpiryMap[int64] // Stores last SMART fetch time per system ID
|
smartFetchMap *expirymap.ExpiryMap[smartFetchState] // Stores last SMART fetch time/result; TTL is only for cleanup
|
||||||
}
|
}
|
||||||
|
|
||||||
// hubLike defines the interface requirements for the hub dependency.
|
// hubLike defines the interface requirements for the hub dependency.
|
||||||
@@ -54,6 +54,7 @@ type hubLike interface {
|
|||||||
GetSSHKey(dataDir string) (ssh.Signer, error)
|
GetSSHKey(dataDir string) (ssh.Signer, error)
|
||||||
HandleSystemAlerts(systemRecord *core.Record, data *system.CombinedData) error
|
HandleSystemAlerts(systemRecord *core.Record, data *system.CombinedData) error
|
||||||
HandleStatusAlerts(status string, systemRecord *core.Record) error
|
HandleStatusAlerts(status string, systemRecord *core.Record) error
|
||||||
|
CancelPendingStatusAlerts(systemID string)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSystemManager creates a new SystemManager instance with the provided hub.
|
// NewSystemManager creates a new SystemManager instance with the provided hub.
|
||||||
@@ -62,7 +63,7 @@ func NewSystemManager(hub hubLike) *SystemManager {
|
|||||||
return &SystemManager{
|
return &SystemManager{
|
||||||
systems: store.New(map[string]*System{}),
|
systems: store.New(map[string]*System{}),
|
||||||
hub: hub,
|
hub: hub,
|
||||||
smartFetchMap: expirymap.New[int64](time.Hour),
|
smartFetchMap: expirymap.New[smartFetchState](time.Hour),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -189,6 +190,7 @@ func (sm *SystemManager) onRecordAfterUpdateSuccess(e *core.RecordEvent) error {
|
|||||||
system.closeSSHConnection()
|
system.closeSSHConnection()
|
||||||
}
|
}
|
||||||
_ = deactivateAlerts(e.App, e.Record.Id)
|
_ = deactivateAlerts(e.App, e.Record.Id)
|
||||||
|
sm.hub.CancelPendingStatusAlerts(e.Record.Id)
|
||||||
return e.Next()
|
return e.Next()
|
||||||
case pending:
|
case pending:
|
||||||
// Resume monitoring, preferring existing WebSocket connection
|
// Resume monitoring, preferring existing WebSocket connection
|
||||||
@@ -306,6 +308,7 @@ func (sm *SystemManager) AddWebSocketSystem(systemId string, agentVersion semver
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
sm.resetFailedSmartFetchState(systemId)
|
||||||
|
|
||||||
system := sm.NewSystem(systemId)
|
system := sm.NewSystem(systemId)
|
||||||
system.WsConn = wsConn
|
system.WsConn = wsConn
|
||||||
@@ -317,6 +320,15 @@ func (sm *SystemManager) AddWebSocketSystem(systemId string, agentVersion semver
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// resetFailedSmartFetchState clears only failed SMART cooldown entries so a fresh
|
||||||
|
// agent reconnect retries SMART discovery immediately after configuration changes.
|
||||||
|
func (sm *SystemManager) resetFailedSmartFetchState(systemID string) {
|
||||||
|
state, ok := sm.smartFetchMap.GetOk(systemID)
|
||||||
|
if ok && !state.Successful {
|
||||||
|
sm.smartFetchMap.Remove(systemID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// createSSHClientConfig initializes the SSH client configuration for connecting to an agent's server
|
// createSSHClientConfig initializes the SSH client configuration for connecting to an agent's server
|
||||||
func (sm *SystemManager) createSSHClientConfig() error {
|
func (sm *SystemManager) createSSHClientConfig() error {
|
||||||
privateKey, err := sm.hub.GetSSHKey("")
|
privateKey, err := sm.hub.GetSSHKey("")
|
||||||
|
|||||||
@@ -19,7 +19,6 @@ type subscriptionInfo struct {
|
|||||||
var (
|
var (
|
||||||
activeSubscriptions = make(map[string]*subscriptionInfo)
|
activeSubscriptions = make(map[string]*subscriptionInfo)
|
||||||
workerRunning bool
|
workerRunning bool
|
||||||
realtimeTicker *time.Ticker
|
|
||||||
tickerStopChan chan struct{}
|
tickerStopChan chan struct{}
|
||||||
realtimeMutex sync.Mutex
|
realtimeMutex sync.Mutex
|
||||||
)
|
)
|
||||||
@@ -70,7 +69,7 @@ func (sm *SystemManager) onRealtimeSubscribeRequest(e *core.RealtimeSubscribeReq
|
|||||||
}
|
}
|
||||||
|
|
||||||
// onRealtimeSubscriptionAdded initializes or starts the realtime worker when the first subscription is added.
|
// onRealtimeSubscriptionAdded initializes or starts the realtime worker when the first subscription is added.
|
||||||
// It ensures only one worker runs at a time and creates the ticker for periodic data fetching.
|
// It ensures only one worker runs at a time.
|
||||||
func (sm *SystemManager) onRealtimeSubscriptionAdded() {
|
func (sm *SystemManager) onRealtimeSubscriptionAdded() {
|
||||||
realtimeMutex.Lock()
|
realtimeMutex.Lock()
|
||||||
defer realtimeMutex.Unlock()
|
defer realtimeMutex.Unlock()
|
||||||
@@ -82,11 +81,6 @@ func (sm *SystemManager) onRealtimeSubscriptionAdded() {
|
|||||||
tickerStopChan = make(chan struct{})
|
tickerStopChan = make(chan struct{})
|
||||||
go sm.startRealtimeWorker()
|
go sm.startRealtimeWorker()
|
||||||
}
|
}
|
||||||
|
|
||||||
// If no ticker exists, create one
|
|
||||||
if realtimeTicker == nil {
|
|
||||||
realtimeTicker = time.NewTicker(1 * time.Second)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkSubscriptions stops the realtime worker when there are no active subscriptions.
|
// checkSubscriptions stops the realtime worker when there are no active subscriptions.
|
||||||
@@ -107,11 +101,6 @@ func (sm *SystemManager) checkSubscriptions() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if realtimeTicker != nil {
|
|
||||||
realtimeTicker.Stop()
|
|
||||||
realtimeTicker = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mark worker as stopped (will be reset when next subscription comes in)
|
// Mark worker as stopped (will be reset when next subscription comes in)
|
||||||
workerRunning = false
|
workerRunning = false
|
||||||
}
|
}
|
||||||
@@ -135,17 +124,16 @@ func (sm *SystemManager) removeRealtimeSubscription(subscription string, options
|
|||||||
// It continuously fetches system data and broadcasts it to subscribed clients via WebSocket.
|
// It continuously fetches system data and broadcasts it to subscribed clients via WebSocket.
|
||||||
func (sm *SystemManager) startRealtimeWorker() {
|
func (sm *SystemManager) startRealtimeWorker() {
|
||||||
sm.fetchRealtimeDataAndNotify()
|
sm.fetchRealtimeDataAndNotify()
|
||||||
|
tick := time.Tick(1 * time.Second)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-tickerStopChan:
|
case <-tickerStopChan:
|
||||||
return
|
return
|
||||||
case <-realtimeTicker.C:
|
case <-tick:
|
||||||
// Check if ticker is still valid (might have been stopped)
|
if len(activeSubscriptions) == 0 {
|
||||||
if realtimeTicker == nil || len(activeSubscriptions) == 0 {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// slog.Debug("activeSubscriptions", "count", len(activeSubscriptions))
|
|
||||||
sm.fetchRealtimeDataAndNotify()
|
sm.fetchRealtimeDataAndNotify()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,18 +4,61 @@ import (
|
|||||||
"database/sql"
|
"database/sql"
|
||||||
"errors"
|
"errors"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/smart"
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
type smartFetchState struct {
|
||||||
|
LastAttempt int64
|
||||||
|
Successful bool
|
||||||
|
}
|
||||||
|
|
||||||
// FetchAndSaveSmartDevices fetches SMART data from the agent and saves it to the database
|
// FetchAndSaveSmartDevices fetches SMART data from the agent and saves it to the database
|
||||||
func (sys *System) FetchAndSaveSmartDevices() error {
|
func (sys *System) FetchAndSaveSmartDevices() error {
|
||||||
smartData, err := sys.FetchSmartDataFromAgent()
|
smartData, err := sys.FetchSmartDataFromAgent()
|
||||||
if err != nil || len(smartData) == 0 {
|
if err != nil {
|
||||||
|
sys.recordSmartFetchResult(err, 0)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return sys.saveSmartDevices(smartData)
|
err = sys.saveSmartDevices(smartData)
|
||||||
|
sys.recordSmartFetchResult(err, len(smartData))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// recordSmartFetchResult stores a cooldown entry for the SMART interval and marks
|
||||||
|
// whether the last fetch produced any devices, so failed setup can retry on reconnect.
|
||||||
|
func (sys *System) recordSmartFetchResult(err error, deviceCount int) {
|
||||||
|
if sys.manager == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
interval := sys.smartFetchInterval()
|
||||||
|
success := err == nil && deviceCount > 0
|
||||||
|
if sys.manager.hub != nil {
|
||||||
|
sys.manager.hub.Logger().Info("SMART fetch result", "system", sys.Id, "success", success, "devices", deviceCount, "interval", interval.String(), "err", err)
|
||||||
|
}
|
||||||
|
sys.manager.smartFetchMap.Set(sys.Id, smartFetchState{LastAttempt: time.Now().UnixMilli(), Successful: success}, interval+time.Minute)
|
||||||
|
}
|
||||||
|
|
||||||
|
// shouldFetchSmart returns true when there is no active SMART cooldown entry for this system.
|
||||||
|
func (sys *System) shouldFetchSmart() bool {
|
||||||
|
if sys.manager == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
state, ok := sys.manager.smartFetchMap.GetOk(sys.Id)
|
||||||
|
if !ok {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return !time.UnixMilli(state.LastAttempt).Add(sys.smartFetchInterval()).After(time.Now())
|
||||||
|
}
|
||||||
|
|
||||||
|
// smartFetchInterval returns the agent-provided SMART interval or the default when unset.
|
||||||
|
func (sys *System) smartFetchInterval() time.Duration {
|
||||||
|
if sys.smartInterval > 0 {
|
||||||
|
return sys.smartInterval
|
||||||
|
}
|
||||||
|
return time.Hour
|
||||||
}
|
}
|
||||||
|
|
||||||
// saveSmartDevices saves SMART device data to the smart_devices collection
|
// saveSmartDevices saves SMART device data to the smart_devices collection
|
||||||
|
|||||||
94
internal/hub/systems/system_smart_test.go
Normal file
94
internal/hub/systems/system_smart_test.go
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package systems
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/hub/expirymap"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestRecordSmartFetchResult(t *testing.T) {
|
||||||
|
sm := &SystemManager{smartFetchMap: expirymap.New[smartFetchState](time.Hour)}
|
||||||
|
t.Cleanup(sm.smartFetchMap.StopCleaner)
|
||||||
|
|
||||||
|
sys := &System{
|
||||||
|
Id: "system-1",
|
||||||
|
manager: sm,
|
||||||
|
smartInterval: time.Hour,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Successful fetch with devices
|
||||||
|
sys.recordSmartFetchResult(nil, 5)
|
||||||
|
state, ok := sm.smartFetchMap.GetOk(sys.Id)
|
||||||
|
assert.True(t, ok, "expected smart fetch result to be stored")
|
||||||
|
assert.True(t, state.Successful, "expected successful fetch state to be recorded")
|
||||||
|
|
||||||
|
// Failed fetch
|
||||||
|
sys.recordSmartFetchResult(errors.New("failed"), 0)
|
||||||
|
state, ok = sm.smartFetchMap.GetOk(sys.Id)
|
||||||
|
assert.True(t, ok, "expected failed smart fetch state to be stored")
|
||||||
|
assert.False(t, state.Successful, "expected failed smart fetch state to be marked unsuccessful")
|
||||||
|
|
||||||
|
// Successful fetch but no devices
|
||||||
|
sys.recordSmartFetchResult(nil, 0)
|
||||||
|
state, ok = sm.smartFetchMap.GetOk(sys.Id)
|
||||||
|
assert.True(t, ok, "expected fetch with zero devices to be stored")
|
||||||
|
assert.False(t, state.Successful, "expected fetch with zero devices to be marked unsuccessful")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestShouldFetchSmart(t *testing.T) {
|
||||||
|
sm := &SystemManager{smartFetchMap: expirymap.New[smartFetchState](time.Hour)}
|
||||||
|
t.Cleanup(sm.smartFetchMap.StopCleaner)
|
||||||
|
|
||||||
|
sys := &System{
|
||||||
|
Id: "system-1",
|
||||||
|
manager: sm,
|
||||||
|
smartInterval: time.Hour,
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.True(t, sys.shouldFetchSmart(), "expected initial smart fetch to be allowed")
|
||||||
|
|
||||||
|
sys.recordSmartFetchResult(errors.New("failed"), 0)
|
||||||
|
assert.False(t, sys.shouldFetchSmart(), "expected smart fetch to be blocked while interval entry exists")
|
||||||
|
|
||||||
|
sm.smartFetchMap.Remove(sys.Id)
|
||||||
|
assert.True(t, sys.shouldFetchSmart(), "expected smart fetch to be allowed after interval entry is cleared")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestShouldFetchSmart_IgnoresExtendedTTLWhenFetchIsDue(t *testing.T) {
|
||||||
|
sm := &SystemManager{smartFetchMap: expirymap.New[smartFetchState](time.Hour)}
|
||||||
|
t.Cleanup(sm.smartFetchMap.StopCleaner)
|
||||||
|
|
||||||
|
sys := &System{
|
||||||
|
Id: "system-1",
|
||||||
|
manager: sm,
|
||||||
|
smartInterval: time.Hour,
|
||||||
|
}
|
||||||
|
|
||||||
|
sm.smartFetchMap.Set(sys.Id, smartFetchState{
|
||||||
|
LastAttempt: time.Now().Add(-2 * time.Hour).UnixMilli(),
|
||||||
|
Successful: true,
|
||||||
|
}, 10*time.Minute)
|
||||||
|
sm.smartFetchMap.UpdateExpiration(sys.Id, 3*time.Hour)
|
||||||
|
|
||||||
|
assert.True(t, sys.shouldFetchSmart(), "expected fetch time to take precedence over updated TTL")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestResetFailedSmartFetchState(t *testing.T) {
|
||||||
|
sm := &SystemManager{smartFetchMap: expirymap.New[smartFetchState](time.Hour)}
|
||||||
|
t.Cleanup(sm.smartFetchMap.StopCleaner)
|
||||||
|
|
||||||
|
sm.smartFetchMap.Set("system-1", smartFetchState{LastAttempt: time.Now().UnixMilli(), Successful: false}, time.Hour)
|
||||||
|
sm.resetFailedSmartFetchState("system-1")
|
||||||
|
_, ok := sm.smartFetchMap.GetOk("system-1")
|
||||||
|
assert.False(t, ok, "expected failed smart fetch state to be cleared on reconnect")
|
||||||
|
|
||||||
|
sm.smartFetchMap.Set("system-1", smartFetchState{LastAttempt: time.Now().UnixMilli(), Successful: true}, time.Hour)
|
||||||
|
sm.resetFailedSmartFetchState("system-1")
|
||||||
|
_, ok = sm.smartFetchMap.GetOk("system-1")
|
||||||
|
assert.True(t, ok, "expected successful smart fetch state to be preserved")
|
||||||
|
}
|
||||||
@@ -421,3 +421,60 @@ func testOld(t *testing.T, hub *tests.TestHub) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestHasUser(t *testing.T) {
|
||||||
|
hub, err := tests.NewTestHub(t.TempDir())
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
sm := hub.GetSystemManager()
|
||||||
|
err = sm.Initialize()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
user1, err := tests.CreateUser(hub, "user1@test.com", "password123")
|
||||||
|
require.NoError(t, err)
|
||||||
|
user2, err := tests.CreateUser(hub, "user2@test.com", "password123")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
systemRecord, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "has-user-test",
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
"port": "33914",
|
||||||
|
"users": []string{user1.Id},
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
sys, err := sm.GetSystemFromStore(systemRecord.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
t.Run("user in list returns true", func(t *testing.T) {
|
||||||
|
assert.True(t, sys.HasUser(hub, user1))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("user not in list returns false", func(t *testing.T) {
|
||||||
|
assert.False(t, sys.HasUser(hub, user2))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("unknown user ID returns false", func(t *testing.T) {
|
||||||
|
assert.False(t, sys.HasUser(hub, nil))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("SHARE_ALL_SYSTEMS=true grants access to non-member", func(t *testing.T) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||||
|
assert.True(t, sys.HasUser(hub, user2))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("BESZEL_HUB_SHARE_ALL_SYSTEMS=true grants access to non-member", func(t *testing.T) {
|
||||||
|
t.Setenv("BESZEL_HUB_SHARE_ALL_SYSTEMS", "true")
|
||||||
|
assert.True(t, sys.HasUser(hub, user2))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("additional user works", func(t *testing.T) {
|
||||||
|
assert.False(t, sys.HasUser(hub, user2))
|
||||||
|
systemRecord.Set("users", []string{user1.Id, user2.Id})
|
||||||
|
err = hub.Save(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, sys.HasUser(hub, user1))
|
||||||
|
assert.True(t, sys.HasUser(hub, user2))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
12
internal/hub/utils/utils.go
Normal file
12
internal/hub/utils/utils.go
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
// Package utils provides utility functions for the hub.
|
||||||
|
package utils
|
||||||
|
|
||||||
|
import "os"
|
||||||
|
|
||||||
|
// GetEnv retrieves an environment variable with a "BESZEL_HUB_" prefix, or falls back to the unprefixed key.
|
||||||
|
func GetEnv(key string) (value string, exists bool) {
|
||||||
|
if value, exists = os.LookupEnv("BESZEL_HUB_" + key); exists {
|
||||||
|
return value, exists
|
||||||
|
}
|
||||||
|
return os.LookupEnv(key)
|
||||||
|
}
|
||||||
@@ -111,6 +111,9 @@ func (ws *WsConn) Close(msg []byte) {
|
|||||||
|
|
||||||
// Ping sends a ping frame to keep the connection alive.
|
// Ping sends a ping frame to keep the connection alive.
|
||||||
func (ws *WsConn) Ping() error {
|
func (ws *WsConn) Ping() error {
|
||||||
|
if ws.conn == nil {
|
||||||
|
return gws.ErrConnClosed
|
||||||
|
}
|
||||||
ws.conn.SetDeadline(time.Now().Add(deadline))
|
ws.conn.SetDeadline(time.Now().Add(deadline))
|
||||||
return ws.conn.WritePing(nil)
|
return ws.conn.WritePing(nil)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,10 +3,8 @@ package records
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
|
||||||
"log"
|
"log"
|
||||||
"math"
|
"math"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/container"
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
@@ -39,16 +37,6 @@ type StatsRecord struct {
|
|||||||
Stats []byte `db:"stats"`
|
Stats []byte `db:"stats"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// global variables for reusing allocations
|
|
||||||
var (
|
|
||||||
statsRecord StatsRecord
|
|
||||||
containerStats []container.Stats
|
|
||||||
sumStats system.Stats
|
|
||||||
tempStats system.Stats
|
|
||||||
queryParams = make(dbx.Params, 1)
|
|
||||||
containerSums = make(map[string]*container.Stats)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Create longer records by averaging shorter records
|
// Create longer records by averaging shorter records
|
||||||
func (rm *RecordManager) CreateLongerRecords() {
|
func (rm *RecordManager) CreateLongerRecords() {
|
||||||
// start := time.Now()
|
// start := time.Now()
|
||||||
@@ -163,41 +151,47 @@ func (rm *RecordManager) CreateLongerRecords() {
|
|||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
|
||||||
statsRecord.Stats = statsRecord.Stats[:0]
|
|
||||||
|
|
||||||
// log.Println("finished creating longer records", "time (ms)", time.Since(start).Milliseconds())
|
// log.Println("finished creating longer records", "time (ms)", time.Since(start).Milliseconds())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Calculate the average stats of a list of system_stats records without reflect
|
// Calculate the average stats of a list of system_stats records without reflect
|
||||||
func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *system.Stats {
|
func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *system.Stats {
|
||||||
// Clear/reset global structs for reuse
|
stats := make([]system.Stats, 0, len(records))
|
||||||
sumStats = system.Stats{}
|
var row StatsRecord
|
||||||
tempStats = system.Stats{}
|
params := make(dbx.Params, 1)
|
||||||
sum := &sumStats
|
for _, rec := range records {
|
||||||
stats := &tempStats
|
row.Stats = row.Stats[:0]
|
||||||
|
params["id"] = rec.Id
|
||||||
|
db.NewQuery("SELECT stats FROM system_stats WHERE id = {:id}").Bind(params).One(&row)
|
||||||
|
var s system.Stats
|
||||||
|
if err := json.Unmarshal(row.Stats, &s); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
stats = append(stats, s)
|
||||||
|
}
|
||||||
|
result := AverageSystemStatsSlice(stats)
|
||||||
|
return &result
|
||||||
|
}
|
||||||
|
|
||||||
|
// AverageSystemStatsSlice computes the average of a slice of system stats.
|
||||||
|
func AverageSystemStatsSlice(records []system.Stats) system.Stats {
|
||||||
|
var sum system.Stats
|
||||||
|
count := float64(len(records))
|
||||||
|
if count == 0 {
|
||||||
|
return sum
|
||||||
|
}
|
||||||
|
|
||||||
// necessary because uint8 is not big enough for the sum
|
// necessary because uint8 is not big enough for the sum
|
||||||
batterySum := 0
|
batterySum := 0
|
||||||
// accumulate per-core usage across records
|
// accumulate per-core usage across records
|
||||||
var cpuCoresSums []uint64
|
var cpuCoresSums []uint64
|
||||||
// accumulate cpu breakdown [user, system, iowait, steal, idle]
|
// accumulate cpu breakdown [user, system, iowait, steal, idle]
|
||||||
var cpuBreakdownSums []float64
|
var cpuBreakdownSums []float64
|
||||||
|
|
||||||
count := float64(len(records))
|
|
||||||
tempCount := float64(0)
|
tempCount := float64(0)
|
||||||
|
|
||||||
// Accumulate totals
|
// Accumulate totals
|
||||||
for _, record := range records {
|
for i := range records {
|
||||||
id := record.Id
|
stats := &records[i]
|
||||||
// clear global statsRecord for reuse
|
|
||||||
statsRecord.Stats = statsRecord.Stats[:0]
|
|
||||||
// reset tempStats each iteration to avoid omitzero fields retaining stale values
|
|
||||||
*stats = system.Stats{}
|
|
||||||
|
|
||||||
queryParams["id"] = id
|
|
||||||
db.NewQuery("SELECT stats FROM system_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
|
|
||||||
if err := json.Unmarshal(statsRecord.Stats, stats); err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
sum.Cpu += stats.Cpu
|
sum.Cpu += stats.Cpu
|
||||||
// accumulate cpu time breakdowns if present
|
// accumulate cpu time breakdowns if present
|
||||||
@@ -205,8 +199,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
if len(cpuBreakdownSums) < len(stats.CpuBreakdown) {
|
if len(cpuBreakdownSums) < len(stats.CpuBreakdown) {
|
||||||
cpuBreakdownSums = append(cpuBreakdownSums, make([]float64, len(stats.CpuBreakdown)-len(cpuBreakdownSums))...)
|
cpuBreakdownSums = append(cpuBreakdownSums, make([]float64, len(stats.CpuBreakdown)-len(cpuBreakdownSums))...)
|
||||||
}
|
}
|
||||||
for i, v := range stats.CpuBreakdown {
|
for j, v := range stats.CpuBreakdown {
|
||||||
cpuBreakdownSums[i] += v
|
cpuBreakdownSums[j] += v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
sum.Mem += stats.Mem
|
sum.Mem += stats.Mem
|
||||||
@@ -230,6 +224,9 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
sum.Bandwidth[1] += stats.Bandwidth[1]
|
sum.Bandwidth[1] += stats.Bandwidth[1]
|
||||||
sum.DiskIO[0] += stats.DiskIO[0]
|
sum.DiskIO[0] += stats.DiskIO[0]
|
||||||
sum.DiskIO[1] += stats.DiskIO[1]
|
sum.DiskIO[1] += stats.DiskIO[1]
|
||||||
|
for i := range stats.DiskIoStats {
|
||||||
|
sum.DiskIoStats[i] += stats.DiskIoStats[i]
|
||||||
|
}
|
||||||
batterySum += int(stats.Battery[0])
|
batterySum += int(stats.Battery[0])
|
||||||
sum.Battery[1] = stats.Battery[1]
|
sum.Battery[1] = stats.Battery[1]
|
||||||
|
|
||||||
@@ -239,8 +236,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
// extend slices to accommodate core count
|
// extend slices to accommodate core count
|
||||||
cpuCoresSums = append(cpuCoresSums, make([]uint64, len(stats.CpuCoresUsage)-len(cpuCoresSums))...)
|
cpuCoresSums = append(cpuCoresSums, make([]uint64, len(stats.CpuCoresUsage)-len(cpuCoresSums))...)
|
||||||
}
|
}
|
||||||
for i, v := range stats.CpuCoresUsage {
|
for j, v := range stats.CpuCoresUsage {
|
||||||
cpuCoresSums[i] += uint64(v)
|
cpuCoresSums[j] += uint64(v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Set peak values
|
// Set peak values
|
||||||
@@ -254,6 +251,9 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
sum.MaxBandwidth[1] = max(sum.MaxBandwidth[1], stats.MaxBandwidth[1], stats.Bandwidth[1])
|
sum.MaxBandwidth[1] = max(sum.MaxBandwidth[1], stats.MaxBandwidth[1], stats.Bandwidth[1])
|
||||||
sum.MaxDiskIO[0] = max(sum.MaxDiskIO[0], stats.MaxDiskIO[0], stats.DiskIO[0])
|
sum.MaxDiskIO[0] = max(sum.MaxDiskIO[0], stats.MaxDiskIO[0], stats.DiskIO[0])
|
||||||
sum.MaxDiskIO[1] = max(sum.MaxDiskIO[1], stats.MaxDiskIO[1], stats.DiskIO[1])
|
sum.MaxDiskIO[1] = max(sum.MaxDiskIO[1], stats.MaxDiskIO[1], stats.DiskIO[1])
|
||||||
|
for i := range stats.DiskIoStats {
|
||||||
|
sum.MaxDiskIoStats[i] = max(sum.MaxDiskIoStats[i], stats.MaxDiskIoStats[i], stats.DiskIoStats[i])
|
||||||
|
}
|
||||||
|
|
||||||
// Accumulate network interfaces
|
// Accumulate network interfaces
|
||||||
if sum.NetworkInterfaces == nil {
|
if sum.NetworkInterfaces == nil {
|
||||||
@@ -299,6 +299,10 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
fs.DiskWriteBytes += value.DiskWriteBytes
|
fs.DiskWriteBytes += value.DiskWriteBytes
|
||||||
fs.MaxDiskReadBytes = max(fs.MaxDiskReadBytes, value.MaxDiskReadBytes, value.DiskReadBytes)
|
fs.MaxDiskReadBytes = max(fs.MaxDiskReadBytes, value.MaxDiskReadBytes, value.DiskReadBytes)
|
||||||
fs.MaxDiskWriteBytes = max(fs.MaxDiskWriteBytes, value.MaxDiskWriteBytes, value.DiskWriteBytes)
|
fs.MaxDiskWriteBytes = max(fs.MaxDiskWriteBytes, value.MaxDiskWriteBytes, value.DiskWriteBytes)
|
||||||
|
for i := range value.DiskIoStats {
|
||||||
|
fs.DiskIoStats[i] += value.DiskIoStats[i]
|
||||||
|
fs.MaxDiskIoStats[i] = max(fs.MaxDiskIoStats[i], value.MaxDiskIoStats[i], value.DiskIoStats[i])
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -333,8 +337,7 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compute averages in place
|
// Compute averages
|
||||||
if count > 0 {
|
|
||||||
sum.Cpu = twoDecimals(sum.Cpu / count)
|
sum.Cpu = twoDecimals(sum.Cpu / count)
|
||||||
sum.Mem = twoDecimals(sum.Mem / count)
|
sum.Mem = twoDecimals(sum.Mem / count)
|
||||||
sum.MemUsed = twoDecimals(sum.MemUsed / count)
|
sum.MemUsed = twoDecimals(sum.MemUsed / count)
|
||||||
@@ -350,6 +353,9 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
|
sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
|
||||||
sum.DiskIO[0] = sum.DiskIO[0] / uint64(count)
|
sum.DiskIO[0] = sum.DiskIO[0] / uint64(count)
|
||||||
sum.DiskIO[1] = sum.DiskIO[1] / uint64(count)
|
sum.DiskIO[1] = sum.DiskIO[1] / uint64(count)
|
||||||
|
for i := range sum.DiskIoStats {
|
||||||
|
sum.DiskIoStats[i] = twoDecimals(sum.DiskIoStats[i] / count)
|
||||||
|
}
|
||||||
sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
|
sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
|
||||||
sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
|
sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
|
||||||
sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
|
sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
|
||||||
@@ -388,6 +394,9 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
|
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
|
||||||
fs.DiskReadBytes = fs.DiskReadBytes / uint64(count)
|
fs.DiskReadBytes = fs.DiskReadBytes / uint64(count)
|
||||||
fs.DiskWriteBytes = fs.DiskWriteBytes / uint64(count)
|
fs.DiskWriteBytes = fs.DiskWriteBytes / uint64(count)
|
||||||
|
for i := range fs.DiskIoStats {
|
||||||
|
fs.DiskIoStats[i] = twoDecimals(fs.DiskIoStats[i] / count)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -430,36 +439,39 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
}
|
}
|
||||||
sum.CpuBreakdown = avg
|
sum.CpuBreakdown = avg
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
return sum
|
return sum
|
||||||
}
|
}
|
||||||
|
|
||||||
// Calculate the average stats of a list of container_stats records
|
// Calculate the average stats of a list of container_stats records
|
||||||
func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds) []container.Stats {
|
func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds) []container.Stats {
|
||||||
// Clear global map for reuse
|
allStats := make([][]container.Stats, 0, len(records))
|
||||||
for k := range containerSums {
|
var row StatsRecord
|
||||||
delete(containerSums, k)
|
params := make(dbx.Params, 1)
|
||||||
}
|
for _, rec := range records {
|
||||||
sums := containerSums
|
row.Stats = row.Stats[:0]
|
||||||
count := float64(len(records))
|
params["id"] = rec.Id
|
||||||
|
db.NewQuery("SELECT stats FROM container_stats WHERE id = {:id}").Bind(params).One(&row)
|
||||||
for i := range records {
|
var cs []container.Stats
|
||||||
id := records[i].Id
|
if err := json.Unmarshal(row.Stats, &cs); err != nil {
|
||||||
// clear global statsRecord for reuse
|
|
||||||
statsRecord.Stats = statsRecord.Stats[:0]
|
|
||||||
// must set to nil (not [:0]) to avoid json.Unmarshal reusing backing array
|
|
||||||
// which causes omitzero fields to inherit stale values from previous iterations
|
|
||||||
containerStats = nil
|
|
||||||
|
|
||||||
queryParams["id"] = id
|
|
||||||
db.NewQuery("SELECT stats FROM container_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
|
|
||||||
|
|
||||||
if err := json.Unmarshal(statsRecord.Stats, &containerStats); err != nil {
|
|
||||||
return []container.Stats{}
|
return []container.Stats{}
|
||||||
}
|
}
|
||||||
|
allStats = append(allStats, cs)
|
||||||
|
}
|
||||||
|
return AverageContainerStatsSlice(allStats)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AverageContainerStatsSlice computes the average of container stats across multiple time periods.
|
||||||
|
func AverageContainerStatsSlice(records [][]container.Stats) []container.Stats {
|
||||||
|
if len(records) == 0 {
|
||||||
|
return []container.Stats{}
|
||||||
|
}
|
||||||
|
sums := make(map[string]*container.Stats)
|
||||||
|
count := float64(len(records))
|
||||||
|
|
||||||
|
for _, containerStats := range records {
|
||||||
for i := range containerStats {
|
for i := range containerStats {
|
||||||
stat := containerStats[i]
|
stat := &containerStats[i]
|
||||||
if _, ok := sums[stat.Name]; !ok {
|
if _, ok := sums[stat.Name]; !ok {
|
||||||
sums[stat.Name] = &container.Stats{Name: stat.Name}
|
sums[stat.Name] = &container.Stats{Name: stat.Name}
|
||||||
}
|
}
|
||||||
@@ -488,133 +500,6 @@ func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete old records
|
|
||||||
func (rm *RecordManager) DeleteOldRecords() {
|
|
||||||
rm.app.RunInTransaction(func(txApp core.App) error {
|
|
||||||
err := deleteOldSystemStats(txApp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = deleteOldContainerRecords(txApp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = deleteOldSystemdServiceRecords(txApp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = deleteOldAlertsHistory(txApp, 200, 250)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = deleteOldQuietHours(txApp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete old alerts history records
|
|
||||||
func deleteOldAlertsHistory(app core.App, countToKeep, countBeforeDeletion int) error {
|
|
||||||
db := app.DB()
|
|
||||||
var users []struct {
|
|
||||||
Id string `db:"user"`
|
|
||||||
}
|
|
||||||
err := db.NewQuery("SELECT user, COUNT(*) as count FROM alerts_history GROUP BY user HAVING count > {:countBeforeDeletion}").Bind(dbx.Params{"countBeforeDeletion": countBeforeDeletion}).All(&users)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, user := range users {
|
|
||||||
_, err = db.NewQuery("DELETE FROM alerts_history WHERE user = {:user} AND id NOT IN (SELECT id FROM alerts_history WHERE user = {:user} ORDER BY created DESC LIMIT {:countToKeep})").Bind(dbx.Params{"user": user.Id, "countToKeep": countToKeep}).Execute()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deletes system_stats records older than what is displayed in the UI
|
|
||||||
func deleteOldSystemStats(app core.App) error {
|
|
||||||
// Collections to process
|
|
||||||
collections := [2]string{"system_stats", "container_stats"}
|
|
||||||
|
|
||||||
// Record types and their retention periods
|
|
||||||
type RecordDeletionData struct {
|
|
||||||
recordType string
|
|
||||||
retention time.Duration
|
|
||||||
}
|
|
||||||
recordData := []RecordDeletionData{
|
|
||||||
{recordType: "1m", retention: time.Hour}, // 1 hour
|
|
||||||
{recordType: "10m", retention: 12 * time.Hour}, // 12 hours
|
|
||||||
{recordType: "20m", retention: 24 * time.Hour}, // 1 day
|
|
||||||
{recordType: "120m", retention: 7 * 24 * time.Hour}, // 7 days
|
|
||||||
{recordType: "480m", retention: 30 * 24 * time.Hour}, // 30 days
|
|
||||||
}
|
|
||||||
|
|
||||||
now := time.Now().UTC()
|
|
||||||
|
|
||||||
for _, collection := range collections {
|
|
||||||
// Build the WHERE clause
|
|
||||||
var conditionParts []string
|
|
||||||
var params dbx.Params = make(map[string]any)
|
|
||||||
for i := range recordData {
|
|
||||||
rd := recordData[i]
|
|
||||||
// Create parameterized condition for this record type
|
|
||||||
dateParam := fmt.Sprintf("date%d", i)
|
|
||||||
conditionParts = append(conditionParts, fmt.Sprintf("(type = '%s' AND created < {:%s})", rd.recordType, dateParam))
|
|
||||||
params[dateParam] = now.Add(-rd.retention)
|
|
||||||
}
|
|
||||||
// Combine conditions with OR
|
|
||||||
conditionStr := strings.Join(conditionParts, " OR ")
|
|
||||||
// Construct and execute the full raw query
|
|
||||||
rawQuery := fmt.Sprintf("DELETE FROM %s WHERE %s", collection, conditionStr)
|
|
||||||
if _, err := app.DB().NewQuery(rawQuery).Bind(params).Execute(); err != nil {
|
|
||||||
return fmt.Errorf("failed to delete from %s: %v", collection, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deletes systemd service records that haven't been updated in the last 20 minutes
|
|
||||||
func deleteOldSystemdServiceRecords(app core.App) error {
|
|
||||||
now := time.Now().UTC()
|
|
||||||
twentyMinutesAgo := now.Add(-20 * time.Minute)
|
|
||||||
|
|
||||||
// Delete systemd service records where updated < twentyMinutesAgo
|
|
||||||
_, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to delete old systemd service records: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deletes container records that haven't been updated in the last 10 minutes
|
|
||||||
func deleteOldContainerRecords(app core.App) error {
|
|
||||||
now := time.Now().UTC()
|
|
||||||
tenMinutesAgo := now.Add(-10 * time.Minute)
|
|
||||||
|
|
||||||
// Delete container records where updated < tenMinutesAgo
|
|
||||||
_, err := app.DB().NewQuery("DELETE FROM containers WHERE updated < {:updated}").Bind(dbx.Params{"updated": tenMinutesAgo.UnixMilli()}).Execute()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to delete old container records: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deletes old quiet hours records where end date has passed
|
|
||||||
func deleteOldQuietHours(app core.App) error {
|
|
||||||
now := time.Now().UTC()
|
|
||||||
_, err := app.DB().NewQuery("DELETE FROM quiet_hours WHERE type = 'one-time' AND end < {:now}").Bind(dbx.Params{"now": now}).Execute()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Round float to two decimals */
|
/* Round float to two decimals */
|
||||||
func twoDecimals(value float64) float64 {
|
func twoDecimals(value float64) float64 {
|
||||||
return math.Round(value*100) / 100
|
return math.Round(value*100) / 100
|
||||||
|
|||||||
820
internal/records/records_averaging_test.go
Normal file
820
internal/records/records_averaging_test.go
Normal file
@@ -0,0 +1,820 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package records_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/henrygd/beszel/internal/records"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_Empty(t *testing.T) {
|
||||||
|
result := records.AverageSystemStatsSlice(nil)
|
||||||
|
assert.Equal(t, system.Stats{}, result)
|
||||||
|
|
||||||
|
result = records.AverageSystemStatsSlice([]system.Stats{})
|
||||||
|
assert.Equal(t, system.Stats{}, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_SingleRecord(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 45.67,
|
||||||
|
Mem: 16.0,
|
||||||
|
MemUsed: 8.5,
|
||||||
|
MemPct: 53.12,
|
||||||
|
MemBuffCache: 2.0,
|
||||||
|
Swap: 4.0,
|
||||||
|
SwapUsed: 1.0,
|
||||||
|
DiskTotal: 500.0,
|
||||||
|
DiskUsed: 250.0,
|
||||||
|
DiskPct: 50.0,
|
||||||
|
DiskReadPs: 100.5,
|
||||||
|
DiskWritePs: 200.75,
|
||||||
|
NetworkSent: 10.5,
|
||||||
|
NetworkRecv: 20.25,
|
||||||
|
LoadAvg: [3]float64{1.5, 2.0, 3.5},
|
||||||
|
Bandwidth: [2]uint64{1000, 2000},
|
||||||
|
DiskIO: [2]uint64{500, 600},
|
||||||
|
Battery: [2]uint8{80, 1},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 45.67, result.Cpu)
|
||||||
|
assert.Equal(t, 16.0, result.Mem)
|
||||||
|
assert.Equal(t, 8.5, result.MemUsed)
|
||||||
|
assert.Equal(t, 53.12, result.MemPct)
|
||||||
|
assert.Equal(t, 2.0, result.MemBuffCache)
|
||||||
|
assert.Equal(t, 4.0, result.Swap)
|
||||||
|
assert.Equal(t, 1.0, result.SwapUsed)
|
||||||
|
assert.Equal(t, 500.0, result.DiskTotal)
|
||||||
|
assert.Equal(t, 250.0, result.DiskUsed)
|
||||||
|
assert.Equal(t, 50.0, result.DiskPct)
|
||||||
|
assert.Equal(t, 100.5, result.DiskReadPs)
|
||||||
|
assert.Equal(t, 200.75, result.DiskWritePs)
|
||||||
|
assert.Equal(t, 10.5, result.NetworkSent)
|
||||||
|
assert.Equal(t, 20.25, result.NetworkRecv)
|
||||||
|
assert.Equal(t, [3]float64{1.5, 2.0, 3.5}, result.LoadAvg)
|
||||||
|
assert.Equal(t, [2]uint64{1000, 2000}, result.Bandwidth)
|
||||||
|
assert.Equal(t, [2]uint64{500, 600}, result.DiskIO)
|
||||||
|
assert.Equal(t, uint8(80), result.Battery[0])
|
||||||
|
assert.Equal(t, uint8(1), result.Battery[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_BasicAveraging(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
Mem: 16.0,
|
||||||
|
MemUsed: 6.0,
|
||||||
|
MemPct: 37.5,
|
||||||
|
MemBuffCache: 1.0,
|
||||||
|
MemZfsArc: 0.5,
|
||||||
|
Swap: 4.0,
|
||||||
|
SwapUsed: 1.0,
|
||||||
|
DiskTotal: 500.0,
|
||||||
|
DiskUsed: 200.0,
|
||||||
|
DiskPct: 40.0,
|
||||||
|
DiskReadPs: 100.0,
|
||||||
|
DiskWritePs: 200.0,
|
||||||
|
NetworkSent: 10.0,
|
||||||
|
NetworkRecv: 20.0,
|
||||||
|
LoadAvg: [3]float64{1.0, 2.0, 3.0},
|
||||||
|
Bandwidth: [2]uint64{1000, 2000},
|
||||||
|
DiskIO: [2]uint64{400, 600},
|
||||||
|
Battery: [2]uint8{80, 1},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 40.0,
|
||||||
|
Mem: 16.0,
|
||||||
|
MemUsed: 10.0,
|
||||||
|
MemPct: 62.5,
|
||||||
|
MemBuffCache: 3.0,
|
||||||
|
MemZfsArc: 1.5,
|
||||||
|
Swap: 4.0,
|
||||||
|
SwapUsed: 3.0,
|
||||||
|
DiskTotal: 500.0,
|
||||||
|
DiskUsed: 300.0,
|
||||||
|
DiskPct: 60.0,
|
||||||
|
DiskReadPs: 200.0,
|
||||||
|
DiskWritePs: 400.0,
|
||||||
|
NetworkSent: 30.0,
|
||||||
|
NetworkRecv: 40.0,
|
||||||
|
LoadAvg: [3]float64{3.0, 4.0, 5.0},
|
||||||
|
Bandwidth: [2]uint64{3000, 4000},
|
||||||
|
DiskIO: [2]uint64{600, 800},
|
||||||
|
Battery: [2]uint8{60, 1},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 30.0, result.Cpu)
|
||||||
|
assert.Equal(t, 16.0, result.Mem)
|
||||||
|
assert.Equal(t, 8.0, result.MemUsed)
|
||||||
|
assert.Equal(t, 50.0, result.MemPct)
|
||||||
|
assert.Equal(t, 2.0, result.MemBuffCache)
|
||||||
|
assert.Equal(t, 1.0, result.MemZfsArc)
|
||||||
|
assert.Equal(t, 4.0, result.Swap)
|
||||||
|
assert.Equal(t, 2.0, result.SwapUsed)
|
||||||
|
assert.Equal(t, 500.0, result.DiskTotal)
|
||||||
|
assert.Equal(t, 250.0, result.DiskUsed)
|
||||||
|
assert.Equal(t, 50.0, result.DiskPct)
|
||||||
|
assert.Equal(t, 150.0, result.DiskReadPs)
|
||||||
|
assert.Equal(t, 300.0, result.DiskWritePs)
|
||||||
|
assert.Equal(t, 20.0, result.NetworkSent)
|
||||||
|
assert.Equal(t, 30.0, result.NetworkRecv)
|
||||||
|
assert.Equal(t, [3]float64{2.0, 3.0, 4.0}, result.LoadAvg)
|
||||||
|
assert.Equal(t, [2]uint64{2000, 3000}, result.Bandwidth)
|
||||||
|
assert.Equal(t, [2]uint64{500, 700}, result.DiskIO)
|
||||||
|
assert.Equal(t, uint8(70), result.Battery[0])
|
||||||
|
assert.Equal(t, uint8(1), result.Battery[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_PeakValues(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
MaxCpu: 25.0,
|
||||||
|
MemUsed: 6.0,
|
||||||
|
MaxMem: 7.0,
|
||||||
|
NetworkSent: 10.0,
|
||||||
|
MaxNetworkSent: 15.0,
|
||||||
|
NetworkRecv: 20.0,
|
||||||
|
MaxNetworkRecv: 25.0,
|
||||||
|
DiskReadPs: 100.0,
|
||||||
|
MaxDiskReadPs: 120.0,
|
||||||
|
DiskWritePs: 200.0,
|
||||||
|
MaxDiskWritePs: 220.0,
|
||||||
|
Bandwidth: [2]uint64{1000, 2000},
|
||||||
|
MaxBandwidth: [2]uint64{1500, 2500},
|
||||||
|
DiskIO: [2]uint64{400, 600},
|
||||||
|
MaxDiskIO: [2]uint64{500, 700},
|
||||||
|
DiskIoStats: [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
|
||||||
|
MaxDiskIoStats: [6]float64{15.0, 25.0, 35.0, 6.0, 9.0, 14.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 40.0,
|
||||||
|
MaxCpu: 50.0,
|
||||||
|
MemUsed: 10.0,
|
||||||
|
MaxMem: 12.0,
|
||||||
|
NetworkSent: 30.0,
|
||||||
|
MaxNetworkSent: 35.0,
|
||||||
|
NetworkRecv: 40.0,
|
||||||
|
MaxNetworkRecv: 45.0,
|
||||||
|
DiskReadPs: 200.0,
|
||||||
|
MaxDiskReadPs: 210.0,
|
||||||
|
DiskWritePs: 400.0,
|
||||||
|
MaxDiskWritePs: 410.0,
|
||||||
|
Bandwidth: [2]uint64{3000, 4000},
|
||||||
|
MaxBandwidth: [2]uint64{3500, 4500},
|
||||||
|
DiskIO: [2]uint64{600, 800},
|
||||||
|
MaxDiskIO: [2]uint64{650, 850},
|
||||||
|
DiskIoStats: [6]float64{50.0, 60.0, 70.0, 15.0, 18.0, 22.0},
|
||||||
|
MaxDiskIoStats: [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 50.0, result.MaxCpu)
|
||||||
|
assert.Equal(t, 12.0, result.MaxMem)
|
||||||
|
assert.Equal(t, 35.0, result.MaxNetworkSent)
|
||||||
|
assert.Equal(t, 45.0, result.MaxNetworkRecv)
|
||||||
|
assert.Equal(t, 210.0, result.MaxDiskReadPs)
|
||||||
|
assert.Equal(t, 410.0, result.MaxDiskWritePs)
|
||||||
|
assert.Equal(t, [2]uint64{3500, 4500}, result.MaxBandwidth)
|
||||||
|
assert.Equal(t, [2]uint64{650, 850}, result.MaxDiskIO)
|
||||||
|
assert.Equal(t, [6]float64{30.0, 40.0, 50.0, 10.0, 13.0, 17.0}, result.DiskIoStats)
|
||||||
|
assert.Equal(t, [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0}, result.MaxDiskIoStats)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_DiskIoStats(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
DiskIoStats: [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
|
||||||
|
MaxDiskIoStats: [6]float64{12.0, 22.0, 32.0, 6.0, 9.0, 13.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
DiskIoStats: [6]float64{30.0, 40.0, 50.0, 15.0, 18.0, 22.0},
|
||||||
|
MaxDiskIoStats: [6]float64{28.0, 38.0, 48.0, 14.0, 17.0, 21.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 30.0,
|
||||||
|
DiskIoStats: [6]float64{20.0, 30.0, 40.0, 10.0, 12.0, 16.0},
|
||||||
|
MaxDiskIoStats: [6]float64{25.0, 35.0, 45.0, 11.0, 13.0, 17.0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
// Average: (10+30+20)/3=20, (20+40+30)/3=30, (30+50+40)/3=40, (5+15+10)/3=10, (8+18+12)/3≈12.67, (12+22+16)/3≈16.67
|
||||||
|
assert.Equal(t, 20.0, result.DiskIoStats[0])
|
||||||
|
assert.Equal(t, 30.0, result.DiskIoStats[1])
|
||||||
|
assert.Equal(t, 40.0, result.DiskIoStats[2])
|
||||||
|
assert.Equal(t, 10.0, result.DiskIoStats[3])
|
||||||
|
assert.Equal(t, 12.67, result.DiskIoStats[4])
|
||||||
|
assert.Equal(t, 16.67, result.DiskIoStats[5])
|
||||||
|
// Max: current DiskIoStats[0] wins for record 2 (30 > MaxDiskIoStats 28)
|
||||||
|
assert.Equal(t, 30.0, result.MaxDiskIoStats[0])
|
||||||
|
assert.Equal(t, 40.0, result.MaxDiskIoStats[1])
|
||||||
|
assert.Equal(t, 50.0, result.MaxDiskIoStats[2])
|
||||||
|
assert.Equal(t, 15.0, result.MaxDiskIoStats[3])
|
||||||
|
assert.Equal(t, 18.0, result.MaxDiskIoStats[4])
|
||||||
|
assert.Equal(t, 22.0, result.MaxDiskIoStats[5])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that current DiskIoStats values are considered when computing MaxDiskIoStats.
|
||||||
|
func TestAverageSystemStatsSlice_DiskIoStatsPeakFromCurrentValues(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, DiskIoStats: [6]float64{95.0, 90.0, 85.0, 50.0, 60.0, 80.0}, MaxDiskIoStats: [6]float64{80.0, 80.0, 80.0, 40.0, 50.0, 70.0}},
|
||||||
|
{Cpu: 20.0, DiskIoStats: [6]float64{10.0, 10.0, 10.0, 5.0, 6.0, 8.0}, MaxDiskIoStats: [6]float64{20.0, 20.0, 20.0, 10.0, 12.0, 16.0}},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
// Current value from first record (95, 90, 85, 50, 60, 80) beats MaxDiskIoStats in both records
|
||||||
|
assert.Equal(t, 95.0, result.MaxDiskIoStats[0])
|
||||||
|
assert.Equal(t, 90.0, result.MaxDiskIoStats[1])
|
||||||
|
assert.Equal(t, 85.0, result.MaxDiskIoStats[2])
|
||||||
|
assert.Equal(t, 50.0, result.MaxDiskIoStats[3])
|
||||||
|
assert.Equal(t, 60.0, result.MaxDiskIoStats[4])
|
||||||
|
assert.Equal(t, 80.0, result.MaxDiskIoStats[5])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that current values are considered when computing peaks
|
||||||
|
// (i.e., current cpu > MaxCpu should still win).
|
||||||
|
func TestAverageSystemStatsSlice_PeakFromCurrentValues(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 95.0, MaxCpu: 80.0, MemUsed: 15.0, MaxMem: 10.0},
|
||||||
|
{Cpu: 10.0, MaxCpu: 20.0, MemUsed: 5.0, MaxMem: 8.0},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 95.0, result.MaxCpu)
|
||||||
|
assert.Equal(t, 15.0, result.MaxMem)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that records without temperature data are excluded from the temperature average.
|
||||||
|
func TestAverageSystemStatsSlice_Temperatures(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
Temperatures: map[string]float64{"cpu": 60.0, "gpu": 70.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
Temperatures: map[string]float64{"cpu": 80.0, "gpu": 90.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// No temperatures - should not affect temp averaging
|
||||||
|
Cpu: 30.0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.Temperatures)
|
||||||
|
// Average over 2 records that had temps, not 3
|
||||||
|
assert.Equal(t, 70.0, result.Temperatures["cpu"])
|
||||||
|
assert.Equal(t, 80.0, result.Temperatures["gpu"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_NetworkInterfaces(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
NetworkInterfaces: map[string][4]uint64{
|
||||||
|
"eth0": {100, 200, 150, 250},
|
||||||
|
"eth1": {50, 60, 70, 80},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
NetworkInterfaces: map[string][4]uint64{
|
||||||
|
"eth0": {200, 400, 300, 500},
|
||||||
|
"eth1": {150, 160, 170, 180},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.NetworkInterfaces)
|
||||||
|
// [0] and [1] are averaged, [2] and [3] are max
|
||||||
|
assert.Equal(t, [4]uint64{150, 300, 300, 500}, result.NetworkInterfaces["eth0"])
|
||||||
|
assert.Equal(t, [4]uint64{100, 110, 170, 180}, result.NetworkInterfaces["eth1"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_ExtraFs(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskTotal: 1000.0,
|
||||||
|
DiskUsed: 400.0,
|
||||||
|
DiskReadPs: 50.0,
|
||||||
|
DiskWritePs: 100.0,
|
||||||
|
MaxDiskReadPS: 60.0,
|
||||||
|
MaxDiskWritePS: 110.0,
|
||||||
|
DiskReadBytes: 5000,
|
||||||
|
DiskWriteBytes: 10000,
|
||||||
|
MaxDiskReadBytes: 6000,
|
||||||
|
MaxDiskWriteBytes: 11000,
|
||||||
|
DiskIoStats: [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
|
||||||
|
MaxDiskIoStats: [6]float64{12.0, 22.0, 32.0, 6.0, 9.0, 13.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskTotal: 1000.0,
|
||||||
|
DiskUsed: 600.0,
|
||||||
|
DiskReadPs: 150.0,
|
||||||
|
DiskWritePs: 200.0,
|
||||||
|
MaxDiskReadPS: 160.0,
|
||||||
|
MaxDiskWritePS: 210.0,
|
||||||
|
DiskReadBytes: 15000,
|
||||||
|
DiskWriteBytes: 20000,
|
||||||
|
MaxDiskReadBytes: 16000,
|
||||||
|
MaxDiskWriteBytes: 21000,
|
||||||
|
DiskIoStats: [6]float64{50.0, 60.0, 70.0, 15.0, 18.0, 22.0},
|
||||||
|
MaxDiskIoStats: [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.ExtraFs)
|
||||||
|
require.NotNil(t, result.ExtraFs["/data"])
|
||||||
|
fs := result.ExtraFs["/data"]
|
||||||
|
assert.Equal(t, 1000.0, fs.DiskTotal)
|
||||||
|
assert.Equal(t, 500.0, fs.DiskUsed)
|
||||||
|
assert.Equal(t, 100.0, fs.DiskReadPs)
|
||||||
|
assert.Equal(t, 150.0, fs.DiskWritePs)
|
||||||
|
assert.Equal(t, 160.0, fs.MaxDiskReadPS)
|
||||||
|
assert.Equal(t, 210.0, fs.MaxDiskWritePS)
|
||||||
|
assert.Equal(t, uint64(10000), fs.DiskReadBytes)
|
||||||
|
assert.Equal(t, uint64(15000), fs.DiskWriteBytes)
|
||||||
|
assert.Equal(t, uint64(16000), fs.MaxDiskReadBytes)
|
||||||
|
assert.Equal(t, uint64(21000), fs.MaxDiskWriteBytes)
|
||||||
|
assert.Equal(t, [6]float64{30.0, 40.0, 50.0, 10.0, 13.0, 17.0}, fs.DiskIoStats)
|
||||||
|
assert.Equal(t, [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0}, fs.MaxDiskIoStats)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that ExtraFs DiskIoStats peak considers current values, not just previous peaks.
|
||||||
|
func TestAverageSystemStatsSlice_ExtraFsDiskIoStatsPeakFromCurrentValues(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskIoStats: [6]float64{95.0, 90.0, 85.0, 50.0, 60.0, 80.0}, // exceeds MaxDiskIoStats
|
||||||
|
MaxDiskIoStats: [6]float64{80.0, 80.0, 80.0, 40.0, 50.0, 70.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskIoStats: [6]float64{10.0, 10.0, 10.0, 5.0, 6.0, 8.0},
|
||||||
|
MaxDiskIoStats: [6]float64{20.0, 20.0, 20.0, 10.0, 12.0, 16.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
fs := result.ExtraFs["/data"]
|
||||||
|
assert.Equal(t, 95.0, fs.MaxDiskIoStats[0])
|
||||||
|
assert.Equal(t, 90.0, fs.MaxDiskIoStats[1])
|
||||||
|
assert.Equal(t, 85.0, fs.MaxDiskIoStats[2])
|
||||||
|
assert.Equal(t, 50.0, fs.MaxDiskIoStats[3])
|
||||||
|
assert.Equal(t, 60.0, fs.MaxDiskIoStats[4])
|
||||||
|
assert.Equal(t, 80.0, fs.MaxDiskIoStats[5])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that extra FS peak values consider current values, not just previous peaks.
|
||||||
|
func TestAverageSystemStatsSlice_ExtraFsPeaksFromCurrentValues(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskReadPs: 500.0, // exceeds MaxDiskReadPS
|
||||||
|
MaxDiskReadPS: 100.0,
|
||||||
|
DiskReadBytes: 50000,
|
||||||
|
MaxDiskReadBytes: 10000,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskReadPs: 50.0,
|
||||||
|
MaxDiskReadPS: 200.0,
|
||||||
|
DiskReadBytes: 5000,
|
||||||
|
MaxDiskReadBytes: 20000,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
fs := result.ExtraFs["/data"]
|
||||||
|
assert.Equal(t, 500.0, fs.MaxDiskReadPS)
|
||||||
|
assert.Equal(t, uint64(50000), fs.MaxDiskReadBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_GPUData(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
GPUData: map[string]system.GPUData{
|
||||||
|
"gpu0": {
|
||||||
|
Name: "RTX 4090",
|
||||||
|
Temperature: 60.0,
|
||||||
|
MemoryUsed: 4.0,
|
||||||
|
MemoryTotal: 24.0,
|
||||||
|
Usage: 30.0,
|
||||||
|
Power: 200.0,
|
||||||
|
Count: 1.0,
|
||||||
|
Engines: map[string]float64{
|
||||||
|
"3D": 50.0,
|
||||||
|
"Video": 10.0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
GPUData: map[string]system.GPUData{
|
||||||
|
"gpu0": {
|
||||||
|
Name: "RTX 4090",
|
||||||
|
Temperature: 80.0,
|
||||||
|
MemoryUsed: 8.0,
|
||||||
|
MemoryTotal: 24.0,
|
||||||
|
Usage: 70.0,
|
||||||
|
Power: 300.0,
|
||||||
|
Count: 1.0,
|
||||||
|
Engines: map[string]float64{
|
||||||
|
"3D": 90.0,
|
||||||
|
"Video": 30.0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.GPUData)
|
||||||
|
gpu := result.GPUData["gpu0"]
|
||||||
|
assert.Equal(t, "RTX 4090", gpu.Name)
|
||||||
|
assert.Equal(t, 70.0, gpu.Temperature)
|
||||||
|
assert.Equal(t, 6.0, gpu.MemoryUsed)
|
||||||
|
assert.Equal(t, 24.0, gpu.MemoryTotal)
|
||||||
|
assert.Equal(t, 50.0, gpu.Usage)
|
||||||
|
assert.Equal(t, 250.0, gpu.Power)
|
||||||
|
assert.Equal(t, 1.0, gpu.Count)
|
||||||
|
require.NotNil(t, gpu.Engines)
|
||||||
|
assert.Equal(t, 70.0, gpu.Engines["3D"])
|
||||||
|
assert.Equal(t, 20.0, gpu.Engines["Video"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_MultipleGPUs(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
GPUData: map[string]system.GPUData{
|
||||||
|
"gpu0": {Name: "GPU A", Usage: 20.0, Temperature: 50.0},
|
||||||
|
"gpu1": {Name: "GPU B", Usage: 60.0, Temperature: 70.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
GPUData: map[string]system.GPUData{
|
||||||
|
"gpu0": {Name: "GPU A", Usage: 40.0, Temperature: 60.0},
|
||||||
|
"gpu1": {Name: "GPU B", Usage: 80.0, Temperature: 80.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.GPUData)
|
||||||
|
assert.Equal(t, 30.0, result.GPUData["gpu0"].Usage)
|
||||||
|
assert.Equal(t, 55.0, result.GPUData["gpu0"].Temperature)
|
||||||
|
assert.Equal(t, 70.0, result.GPUData["gpu1"].Usage)
|
||||||
|
assert.Equal(t, 75.0, result.GPUData["gpu1"].Temperature)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_CpuCoresUsage(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, CpuCoresUsage: system.Uint8Slice{10, 20, 30, 40}},
|
||||||
|
{Cpu: 20.0, CpuCoresUsage: system.Uint8Slice{30, 40, 50, 60}},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.CpuCoresUsage)
|
||||||
|
assert.Equal(t, system.Uint8Slice{20, 30, 40, 50}, result.CpuCoresUsage)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that per-core usage rounds correctly (e.g., 15.5 -> 16 via math.Round).
|
||||||
|
func TestAverageSystemStatsSlice_CpuCoresUsageRounding(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, CpuCoresUsage: system.Uint8Slice{11}},
|
||||||
|
{Cpu: 20.0, CpuCoresUsage: system.Uint8Slice{20}},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.CpuCoresUsage)
|
||||||
|
// (11+20)/2 = 15.5, rounds to 16
|
||||||
|
assert.Equal(t, uint8(16), result.CpuCoresUsage[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_CpuBreakdown(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, CpuBreakdown: []float64{5.0, 3.0, 1.0, 0.5, 90.5}},
|
||||||
|
{Cpu: 20.0, CpuBreakdown: []float64{15.0, 7.0, 3.0, 1.5, 73.5}},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.CpuBreakdown)
|
||||||
|
assert.Equal(t, []float64{10.0, 5.0, 2.0, 1.0, 82.0}, result.CpuBreakdown)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that Battery[1] (charge state) uses the last record's value.
|
||||||
|
func TestAverageSystemStatsSlice_BatteryLastChargeState(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, Battery: [2]uint8{100, 1}}, // charging
|
||||||
|
{Cpu: 20.0, Battery: [2]uint8{90, 0}}, // not charging
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, uint8(95), result.Battery[0])
|
||||||
|
assert.Equal(t, uint8(0), result.Battery[1]) // last record's charge state
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_ThreeRecordsRounding(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, Mem: 8.0},
|
||||||
|
{Cpu: 20.0, Mem: 8.0},
|
||||||
|
{Cpu: 30.0, Mem: 8.0},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 20.0, result.Cpu)
|
||||||
|
assert.Equal(t, 8.0, result.Mem)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests records where some have optional fields and others don't.
|
||||||
|
func TestAverageSystemStatsSlice_MixedOptionalFields(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
CpuCoresUsage: system.Uint8Slice{50, 60},
|
||||||
|
CpuBreakdown: []float64{5.0, 3.0, 1.0, 0.5, 90.5},
|
||||||
|
GPUData: map[string]system.GPUData{
|
||||||
|
"gpu0": {Name: "GPU", Usage: 40.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
// No CpuCoresUsage, CpuBreakdown, or GPUData
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 15.0, result.Cpu)
|
||||||
|
// CpuCoresUsage: only 1 record had it, so sum/2
|
||||||
|
require.NotNil(t, result.CpuCoresUsage)
|
||||||
|
assert.Equal(t, uint8(25), result.CpuCoresUsage[0])
|
||||||
|
assert.Equal(t, uint8(30), result.CpuCoresUsage[1])
|
||||||
|
// CpuBreakdown: only 1 record had it, so sum/2
|
||||||
|
require.NotNil(t, result.CpuBreakdown)
|
||||||
|
assert.Equal(t, 2.5, result.CpuBreakdown[0])
|
||||||
|
// GPUData: only 1 record had it, so sum/2
|
||||||
|
require.NotNil(t, result.GPUData)
|
||||||
|
assert.Equal(t, 20.0, result.GPUData["gpu0"].Usage)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests with 10 records matching the common real-world case (10 x 1m -> 1 x 10m).
|
||||||
|
func TestAverageSystemStatsSlice_TenRecords(t *testing.T) {
|
||||||
|
input := make([]system.Stats, 10)
|
||||||
|
for i := range input {
|
||||||
|
input[i] = system.Stats{
|
||||||
|
Cpu: float64(i * 10), // 0, 10, 20, ..., 90
|
||||||
|
Mem: 16.0,
|
||||||
|
MemUsed: float64(4 + i), // 4, 5, 6, ..., 13
|
||||||
|
MemPct: float64(25 + i), // 25, 26, ..., 34
|
||||||
|
DiskTotal: 500.0,
|
||||||
|
DiskUsed: 250.0,
|
||||||
|
DiskPct: 50.0,
|
||||||
|
NetworkSent: float64(i),
|
||||||
|
NetworkRecv: float64(i * 2),
|
||||||
|
Bandwidth: [2]uint64{uint64(i * 1000), uint64(i * 2000)},
|
||||||
|
LoadAvg: [3]float64{float64(i), float64(i) * 0.5, float64(i) * 0.25},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 45.0, result.Cpu) // avg of 0..90
|
||||||
|
assert.Equal(t, 16.0, result.Mem) // constant
|
||||||
|
assert.Equal(t, 8.5, result.MemUsed) // avg of 4..13
|
||||||
|
assert.Equal(t, 29.5, result.MemPct) // avg of 25..34
|
||||||
|
assert.Equal(t, 500.0, result.DiskTotal)
|
||||||
|
assert.Equal(t, 250.0, result.DiskUsed)
|
||||||
|
assert.Equal(t, 50.0, result.DiskPct)
|
||||||
|
assert.Equal(t, 4.5, result.NetworkSent)
|
||||||
|
assert.Equal(t, 9.0, result.NetworkRecv)
|
||||||
|
assert.Equal(t, [2]uint64{4500, 9000}, result.Bandwidth)
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Container Stats Tests ---
|
||||||
|
|
||||||
|
func TestAverageContainerStatsSlice_Empty(t *testing.T) {
|
||||||
|
result := records.AverageContainerStatsSlice(nil)
|
||||||
|
assert.Equal(t, []container.Stats{}, result)
|
||||||
|
|
||||||
|
result = records.AverageContainerStatsSlice([][]container.Stats{})
|
||||||
|
assert.Equal(t, []container.Stats{}, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageContainerStatsSlice_SingleRecord(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 5.0, Mem: 128.0, Bandwidth: [2]uint64{1000, 2000}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
|
||||||
|
require.Len(t, result, 1)
|
||||||
|
assert.Equal(t, "nginx", result[0].Name)
|
||||||
|
assert.Equal(t, 5.0, result[0].Cpu)
|
||||||
|
assert.Equal(t, 128.0, result[0].Mem)
|
||||||
|
assert.Equal(t, [2]uint64{1000, 2000}, result[0].Bandwidth)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageContainerStatsSlice_BasicAveraging(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 10.0, Mem: 100.0, Bandwidth: [2]uint64{1000, 2000}},
|
||||||
|
{Name: "redis", Cpu: 5.0, Mem: 64.0, Bandwidth: [2]uint64{500, 1000}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 20.0, Mem: 200.0, Bandwidth: [2]uint64{3000, 4000}},
|
||||||
|
{Name: "redis", Cpu: 15.0, Mem: 128.0, Bandwidth: [2]uint64{1500, 2000}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||||
|
|
||||||
|
require.Len(t, result, 2)
|
||||||
|
|
||||||
|
assert.Equal(t, "nginx", result[0].Name)
|
||||||
|
assert.Equal(t, 15.0, result[0].Cpu)
|
||||||
|
assert.Equal(t, 150.0, result[0].Mem)
|
||||||
|
assert.Equal(t, [2]uint64{2000, 3000}, result[0].Bandwidth)
|
||||||
|
|
||||||
|
assert.Equal(t, "redis", result[1].Name)
|
||||||
|
assert.Equal(t, 10.0, result[1].Cpu)
|
||||||
|
assert.Equal(t, 96.0, result[1].Mem)
|
||||||
|
assert.Equal(t, [2]uint64{1000, 1500}, result[1].Bandwidth)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests containers that appear in some records but not all.
|
||||||
|
func TestAverageContainerStatsSlice_ContainerAppearsInSomeRecords(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 10.0, Mem: 100.0},
|
||||||
|
{Name: "redis", Cpu: 5.0, Mem: 64.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 20.0, Mem: 200.0},
|
||||||
|
// redis not present
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||||
|
|
||||||
|
require.Len(t, result, 2)
|
||||||
|
|
||||||
|
assert.Equal(t, "nginx", result[0].Name)
|
||||||
|
assert.Equal(t, 15.0, result[0].Cpu)
|
||||||
|
assert.Equal(t, 150.0, result[0].Mem)
|
||||||
|
|
||||||
|
// redis: sum / count where count = total records (2), not records containing redis
|
||||||
|
assert.Equal(t, "redis", result[1].Name)
|
||||||
|
assert.Equal(t, 2.5, result[1].Cpu)
|
||||||
|
assert.Equal(t, 32.0, result[1].Mem)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests backward compatibility with deprecated NetworkSent/NetworkRecv (MB) when Bandwidth is zero.
|
||||||
|
func TestAverageContainerStatsSlice_DeprecatedNetworkFields(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 10.0, Mem: 100.0, NetworkSent: 1.0, NetworkRecv: 2.0}, // 1 MB, 2 MB
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 20.0, Mem: 200.0, NetworkSent: 3.0, NetworkRecv: 4.0}, // 3 MB, 4 MB
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
|
||||||
|
require.Len(t, result, 1)
|
||||||
|
assert.Equal(t, "nginx", result[0].Name)
|
||||||
|
// avg sent = (1*1048576 + 3*1048576) / 2 = 2*1048576
|
||||||
|
assert.Equal(t, uint64(2*1048576), result[0].Bandwidth[0])
|
||||||
|
// avg recv = (2*1048576 + 4*1048576) / 2 = 3*1048576
|
||||||
|
assert.Equal(t, uint64(3*1048576), result[0].Bandwidth[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that when Bandwidth is set, deprecated NetworkSent/NetworkRecv are ignored.
|
||||||
|
func TestAverageContainerStatsSlice_MixedBandwidthAndDeprecated(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 10.0, Mem: 100.0, Bandwidth: [2]uint64{5000, 6000}, NetworkSent: 99.0, NetworkRecv: 99.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 20.0, Mem: 200.0, Bandwidth: [2]uint64{7000, 8000}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
|
||||||
|
require.Len(t, result, 1)
|
||||||
|
assert.Equal(t, uint64(6000), result[0].Bandwidth[0])
|
||||||
|
assert.Equal(t, uint64(7000), result[0].Bandwidth[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageContainerStatsSlice_ThreeRecords(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{{Name: "app", Cpu: 1.0, Mem: 100.0}},
|
||||||
|
{{Name: "app", Cpu: 2.0, Mem: 200.0}},
|
||||||
|
{{Name: "app", Cpu: 3.0, Mem: 300.0}},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
|
||||||
|
require.Len(t, result, 1)
|
||||||
|
assert.Equal(t, 2.0, result[0].Cpu)
|
||||||
|
assert.Equal(t, 200.0, result[0].Mem)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageContainerStatsSlice_ManyContainers(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "a", Cpu: 10.0, Mem: 100.0},
|
||||||
|
{Name: "b", Cpu: 20.0, Mem: 200.0},
|
||||||
|
{Name: "c", Cpu: 30.0, Mem: 300.0},
|
||||||
|
{Name: "d", Cpu: 40.0, Mem: 400.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{Name: "a", Cpu: 20.0, Mem: 200.0},
|
||||||
|
{Name: "b", Cpu: 30.0, Mem: 300.0},
|
||||||
|
{Name: "c", Cpu: 40.0, Mem: 400.0},
|
||||||
|
{Name: "d", Cpu: 50.0, Mem: 500.0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||||
|
|
||||||
|
require.Len(t, result, 4)
|
||||||
|
assert.Equal(t, 15.0, result[0].Cpu)
|
||||||
|
assert.Equal(t, 25.0, result[1].Cpu)
|
||||||
|
assert.Equal(t, 35.0, result[2].Cpu)
|
||||||
|
assert.Equal(t, 45.0, result[3].Cpu)
|
||||||
|
}
|
||||||
138
internal/records/records_deletion.go
Normal file
138
internal/records/records_deletion.go
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
package records
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Delete old records
|
||||||
|
func (rm *RecordManager) DeleteOldRecords() {
|
||||||
|
rm.app.RunInTransaction(func(txApp core.App) error {
|
||||||
|
err := deleteOldSystemStats(txApp)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Error deleting old system stats", "err", err)
|
||||||
|
}
|
||||||
|
err = deleteOldContainerRecords(txApp)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Error deleting old container records", "err", err)
|
||||||
|
}
|
||||||
|
err = deleteOldSystemdServiceRecords(txApp)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Error deleting old systemd service records", "err", err)
|
||||||
|
}
|
||||||
|
err = deleteOldAlertsHistory(txApp, 200, 250)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Error deleting old alerts history", "err", err)
|
||||||
|
}
|
||||||
|
err = deleteOldQuietHours(txApp)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Error deleting old quiet hours", "err", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete old alerts history records
|
||||||
|
func deleteOldAlertsHistory(app core.App, countToKeep, countBeforeDeletion int) error {
|
||||||
|
db := app.DB()
|
||||||
|
var users []struct {
|
||||||
|
Id string `db:"user"`
|
||||||
|
}
|
||||||
|
err := db.NewQuery("SELECT user, COUNT(*) as count FROM alerts_history GROUP BY user HAVING count > {:countBeforeDeletion}").Bind(dbx.Params{"countBeforeDeletion": countBeforeDeletion}).All(&users)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, user := range users {
|
||||||
|
_, err = db.NewQuery("DELETE FROM alerts_history WHERE user = {:user} AND id NOT IN (SELECT id FROM alerts_history WHERE user = {:user} ORDER BY created DESC LIMIT {:countToKeep})").Bind(dbx.Params{"user": user.Id, "countToKeep": countToKeep}).Execute()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deletes system_stats records older than what is displayed in the UI
|
||||||
|
func deleteOldSystemStats(app core.App) error {
|
||||||
|
// Collections to process
|
||||||
|
collections := [2]string{"system_stats", "container_stats"}
|
||||||
|
|
||||||
|
// Record types and their retention periods
|
||||||
|
type RecordDeletionData struct {
|
||||||
|
recordType string
|
||||||
|
retention time.Duration
|
||||||
|
}
|
||||||
|
recordData := []RecordDeletionData{
|
||||||
|
{recordType: "1m", retention: time.Hour}, // 1 hour
|
||||||
|
{recordType: "10m", retention: 12 * time.Hour}, // 12 hours
|
||||||
|
{recordType: "20m", retention: 24 * time.Hour}, // 1 day
|
||||||
|
{recordType: "120m", retention: 7 * 24 * time.Hour}, // 7 days
|
||||||
|
{recordType: "480m", retention: 30 * 24 * time.Hour}, // 30 days
|
||||||
|
}
|
||||||
|
|
||||||
|
now := time.Now().UTC()
|
||||||
|
|
||||||
|
for _, collection := range collections {
|
||||||
|
// Build the WHERE clause
|
||||||
|
var conditionParts []string
|
||||||
|
var params dbx.Params = make(map[string]any)
|
||||||
|
for i := range recordData {
|
||||||
|
rd := recordData[i]
|
||||||
|
// Create parameterized condition for this record type
|
||||||
|
dateParam := fmt.Sprintf("date%d", i)
|
||||||
|
conditionParts = append(conditionParts, fmt.Sprintf("(type = '%s' AND created < {:%s})", rd.recordType, dateParam))
|
||||||
|
params[dateParam] = now.Add(-rd.retention)
|
||||||
|
}
|
||||||
|
// Combine conditions with OR
|
||||||
|
conditionStr := strings.Join(conditionParts, " OR ")
|
||||||
|
// Construct and execute the full raw query
|
||||||
|
rawQuery := fmt.Sprintf("DELETE FROM %s WHERE %s", collection, conditionStr)
|
||||||
|
if _, err := app.DB().NewQuery(rawQuery).Bind(params).Execute(); err != nil {
|
||||||
|
return fmt.Errorf("failed to delete from %s: %v", collection, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deletes systemd service records that haven't been updated in the last 20 minutes
|
||||||
|
func deleteOldSystemdServiceRecords(app core.App) error {
|
||||||
|
now := time.Now().UTC()
|
||||||
|
twentyMinutesAgo := now.Add(-20 * time.Minute)
|
||||||
|
|
||||||
|
// Delete systemd service records where updated < twentyMinutesAgo
|
||||||
|
_, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to delete old systemd service records: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deletes container records that haven't been updated in the last 10 minutes
|
||||||
|
func deleteOldContainerRecords(app core.App) error {
|
||||||
|
now := time.Now().UTC()
|
||||||
|
tenMinutesAgo := now.Add(-10 * time.Minute)
|
||||||
|
|
||||||
|
// Delete container records where updated < tenMinutesAgo
|
||||||
|
_, err := app.DB().NewQuery("DELETE FROM containers WHERE updated < {:updated}").Bind(dbx.Params{"updated": tenMinutesAgo.UnixMilli()}).Execute()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to delete old container records: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deletes old quiet hours records where end date has passed
|
||||||
|
func deleteOldQuietHours(app core.App) error {
|
||||||
|
now := time.Now().UTC()
|
||||||
|
_, err := app.DB().NewQuery("DELETE FROM quiet_hours WHERE type = 'one-time' AND end < {:now}").Bind(dbx.Params{"now": now}).Execute()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
428
internal/records/records_deletion_test.go
Normal file
428
internal/records/records_deletion_test.go
Normal file
@@ -0,0 +1,428 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package records_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/records"
|
||||||
|
"github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
"github.com/pocketbase/pocketbase/tools/types"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestDeleteOldRecords tests the main DeleteOldRecords function.
//
// It seeds a live test hub with: one expired system_stats record (2h old,
// past the 1h retention for type "1m"), one fresh system_stats record
// (30m old), and 260 alerts_history rows for a single user (above the
// 250-row deletion threshold). It then runs the manager's cleanup and
// checks that only the expired stats and the excess alerts were removed.
func TestDeleteOldRecords(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	rm := records.NewRecordManager(hub)

	// Create test user for alerts history
	user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
	require.NoError(t, err)

	// Create test system that both stats and alerts reference
	system, err := tests.CreateRecord(hub, "systems", map[string]any{
		"name":   "test-system",
		"host":   "localhost",
		"port":   "45876",
		"status": "up",
		"users":  []string{user.Id},
	})
	require.NoError(t, err)

	now := time.Now()

	// Create old system_stats records that should be deleted
	var record *core.Record
	record, err = tests.CreateRecord(hub, "system_stats", map[string]any{
		"system": system.Id,
		"type":   "1m",
		"stats":  `{"cpu": 50.0, "mem": 1024}`,
	})
	require.NoError(t, err)
	// created is autodate field, so we need to set it manually
	// (SetRaw + SaveNoValidate bypasses the autodate behavior)
	record.SetRaw("created", now.UTC().Add(-2*time.Hour).Format(types.DefaultDateLayout))
	err = hub.SaveNoValidate(record)
	require.NoError(t, err)
	require.NotNil(t, record)
	// Sanity-check the backdated timestamp actually stuck (within 1s)
	require.InDelta(t, record.GetDateTime("created").Time().UTC().Unix(), now.UTC().Add(-2*time.Hour).Unix(), 1)
	require.Equal(t, record.Get("system"), system.Id)
	require.Equal(t, record.Get("type"), "1m")

	// Create recent system_stats record that should be kept
	_, err = tests.CreateRecord(hub, "system_stats", map[string]any{
		"system":  system.Id,
		"type":    "1m",
		"stats":   `{"cpu": 30.0, "mem": 512}`,
		"created": now.Add(-30 * time.Minute), // 30 minutes old, should be kept
	})
	require.NoError(t, err)

	// Create many alerts history records to trigger deletion
	for i := range 260 { // More than countBeforeDeletion (250)
		_, err = tests.CreateRecord(hub, "alerts_history", map[string]any{
			"user":    user.Id,
			"name":    "CPU",
			"value":   i + 1,
			"system":  system.Id,
			"created": now.Add(-time.Duration(i) * time.Minute),
		})
		require.NoError(t, err)
	}

	// Count records before deletion
	systemStatsCountBefore, err := hub.CountRecords("system_stats")
	require.NoError(t, err)
	alertsCountBefore, err := hub.CountRecords("alerts_history")
	require.NoError(t, err)

	// Run deletion
	rm.DeleteOldRecords()

	// Count records after deletion
	systemStatsCountAfter, err := hub.CountRecords("system_stats")
	require.NoError(t, err)
	alertsCountAfter, err := hub.CountRecords("alerts_history")
	require.NoError(t, err)

	// Verify old system stats were deleted
	assert.Less(t, systemStatsCountAfter, systemStatsCountBefore, "Old system stats should be deleted")

	// Verify alerts history was trimmed
	assert.Less(t, alertsCountAfter, alertsCountBefore, "Excessive alerts history should be deleted")
	assert.Equal(t, alertsCountAfter, int64(200), "Alerts count should be equal to countToKeep (200)")
}
|
||||||
|
|
||||||
|
// TestDeleteOldSystemStats tests the deleteOldSystemStats function.
//
// For every record type it creates one record inside and one outside the
// retention window, in both the system_stats and container_stats
// collections, backdates them via SetRaw("created", ...) + SaveNoValidate,
// runs the deletion, and then checks existence record-by-record.
func TestDeleteOldSystemStats(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	// Create test system (owned by a test user)
	user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
	require.NoError(t, err)

	system, err := tests.CreateRecord(hub, "systems", map[string]any{
		"name":   "test-system",
		"host":   "localhost",
		"port":   "45876",
		"status": "up",
		"users":  []string{user.Id},
	})
	require.NoError(t, err)

	now := time.Now().UTC()

	// Test data for different record types and their retention periods.
	// ageFromNow places the record either inside or outside the window.
	testCases := []struct {
		recordType   string
		retention    time.Duration
		shouldBeKept bool
		ageFromNow   time.Duration
		description  string
	}{
		{"1m", time.Hour, true, 30 * time.Minute, "1m record within 1 hour should be kept"},
		{"1m", time.Hour, false, 2 * time.Hour, "1m record older than 1 hour should be deleted"},
		{"10m", 12 * time.Hour, true, 6 * time.Hour, "10m record within 12 hours should be kept"},
		{"10m", 12 * time.Hour, false, 24 * time.Hour, "10m record older than 12 hours should be deleted"},
		{"20m", 24 * time.Hour, true, 12 * time.Hour, "20m record within 24 hours should be kept"},
		{"20m", 24 * time.Hour, false, 48 * time.Hour, "20m record older than 24 hours should be deleted"},
		{"120m", 7 * 24 * time.Hour, true, 3 * 24 * time.Hour, "120m record within 7 days should be kept"},
		{"120m", 7 * 24 * time.Hour, false, 10 * 24 * time.Hour, "120m record older than 7 days should be deleted"},
		{"480m", 30 * 24 * time.Hour, true, 15 * 24 * time.Hour, "480m record within 30 days should be kept"},
		{"480m", 30 * 24 * time.Hour, false, 45 * 24 * time.Hour, "480m record older than 30 days should be deleted"},
	}

	// Create test records for both system_stats and container_stats,
	// remembering each record id so it can be looked up after deletion.
	collections := []string{"system_stats", "container_stats"}
	recordIds := make(map[string][]string)

	for _, collection := range collections {
		recordIds[collection] = make([]string, 0)

		for i, tc := range testCases {
			recordTime := now.Add(-tc.ageFromNow)

			// The stats payload shape differs per collection (object vs array)
			var stats string
			if collection == "system_stats" {
				stats = fmt.Sprintf(`{"cpu": %d.0, "mem": %d}`, i*10, i*100)
			} else {
				stats = fmt.Sprintf(`[{"name": "container%d", "cpu": %d.0, "mem": %d}]`, i, i*5, i*50)
			}

			record, err := tests.CreateRecord(hub, collection, map[string]any{
				"system": system.Id,
				"type":   tc.recordType,
				"stats":  stats,
			})
			require.NoError(t, err)
			// created is an autodate field; backdate it manually
			record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
			err = hub.SaveNoValidate(record)
			require.NoError(t, err)
			recordIds[collection] = append(recordIds[collection], record.Id)
		}
	}

	// Run deletion
	err = records.DeleteOldSystemStats(hub)
	require.NoError(t, err)

	// Verify results: each record either still exists or was deleted,
	// matching its test case expectation.
	for _, collection := range collections {
		for i, tc := range testCases {
			recordId := recordIds[collection][i]

			// Try to find the record
			_, err := hub.FindRecordById(collection, recordId)

			if tc.shouldBeKept {
				assert.NoError(t, err, "Record should exist: %s", tc.description)
			} else {
				assert.Error(t, err, "Record should be deleted: %s", tc.description)
			}
		}
	}
}
|
||||||
|
|
||||||
|
// TestDeleteOldAlertsHistory tests the deleteOldAlertsHistory function
|
||||||
|
func TestDeleteOldAlertsHistory(t *testing.T) {
|
||||||
|
hub, err := tests.NewTestHub(t.TempDir())
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// Create test users
|
||||||
|
user1, err := tests.CreateUser(hub, "user1@example.com", "testtesttest")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
user2, err := tests.CreateUser(hub, "user2@example.com", "testtesttest")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "test-system",
|
||||||
|
"host": "localhost",
|
||||||
|
"port": "45876",
|
||||||
|
"status": "up",
|
||||||
|
"users": []string{user1.Id, user2.Id},
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
now := time.Now().UTC()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
user *core.Record
|
||||||
|
alertCount int
|
||||||
|
countToKeep int
|
||||||
|
countBeforeDeletion int
|
||||||
|
expectedAfterDeletion int
|
||||||
|
description string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "User with few alerts (below threshold)",
|
||||||
|
user: user1,
|
||||||
|
alertCount: 100,
|
||||||
|
countToKeep: 50,
|
||||||
|
countBeforeDeletion: 150,
|
||||||
|
expectedAfterDeletion: 100, // No deletion because below threshold
|
||||||
|
description: "User with alerts below countBeforeDeletion should not have any deleted",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "User with many alerts (above threshold)",
|
||||||
|
user: user2,
|
||||||
|
alertCount: 300,
|
||||||
|
countToKeep: 100,
|
||||||
|
countBeforeDeletion: 200,
|
||||||
|
expectedAfterDeletion: 100, // Should be trimmed to countToKeep
|
||||||
|
description: "User with alerts above countBeforeDeletion should be trimmed to countToKeep",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
// Create alerts for this user
|
||||||
|
for i := 0; i < tc.alertCount; i++ {
|
||||||
|
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||||
|
"user": tc.user.Id,
|
||||||
|
"name": "CPU",
|
||||||
|
"value": i + 1,
|
||||||
|
"system": system.Id,
|
||||||
|
"created": now.Add(-time.Duration(i) * time.Minute),
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count before deletion
|
||||||
|
countBefore, err := hub.CountRecords("alerts_history",
|
||||||
|
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, int64(tc.alertCount), countBefore, "Initial count should match")
|
||||||
|
|
||||||
|
// Run deletion
|
||||||
|
err = records.DeleteOldAlertsHistory(hub, tc.countToKeep, tc.countBeforeDeletion)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Count after deletion
|
||||||
|
countAfter, err := hub.CountRecords("alerts_history",
|
||||||
|
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.Equal(t, int64(tc.expectedAfterDeletion), countAfter, tc.description)
|
||||||
|
|
||||||
|
// If deletion occurred, verify the most recent records were kept
|
||||||
|
if tc.expectedAfterDeletion < tc.alertCount {
|
||||||
|
records, err := hub.FindRecordsByFilter("alerts_history",
|
||||||
|
"user = {:user}",
|
||||||
|
"-created", // Order by created DESC
|
||||||
|
tc.countToKeep,
|
||||||
|
0,
|
||||||
|
map[string]any{"user": tc.user.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Len(t, records, tc.expectedAfterDeletion, "Should have exactly countToKeep records")
|
||||||
|
|
||||||
|
// Verify records are in descending order by created time
|
||||||
|
for i := 1; i < len(records); i++ {
|
||||||
|
prev := records[i-1].GetDateTime("created").Time()
|
||||||
|
curr := records[i].GetDateTime("created").Time()
|
||||||
|
assert.True(t, prev.After(curr) || prev.Equal(curr),
|
||||||
|
"Records should be ordered by created time (newest first)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestDeleteOldAlertsHistoryEdgeCases tests edge cases for alerts history deletion
|
||||||
|
func TestDeleteOldAlertsHistoryEdgeCases(t *testing.T) {
|
||||||
|
hub, err := tests.NewTestHub(t.TempDir())
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
t.Run("No users with excessive alerts", func(t *testing.T) {
|
||||||
|
// Create user with few alerts
|
||||||
|
user, err := tests.CreateUser(hub, "few@example.com", "testtesttest")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "test-system",
|
||||||
|
"host": "localhost",
|
||||||
|
"port": "45876",
|
||||||
|
"status": "up",
|
||||||
|
"users": []string{user.Id},
|
||||||
|
})
|
||||||
|
|
||||||
|
// Create only 5 alerts (well below threshold)
|
||||||
|
for i := range 5 {
|
||||||
|
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||||
|
"user": user.Id,
|
||||||
|
"name": "CPU",
|
||||||
|
"value": i + 1,
|
||||||
|
"system": system.Id,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should not error and should not delete anything
|
||||||
|
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
count, err := hub.CountRecords("alerts_history")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, int64(5), count, "All alerts should remain")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Empty alerts_history table", func(t *testing.T) {
|
||||||
|
// Clear any existing alerts
|
||||||
|
_, err := hub.DB().NewQuery("DELETE FROM alerts_history").Execute()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Should not error with empty table
|
||||||
|
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestDeleteOldSystemdServiceRecords tests systemd service cleanup via DeleteOldRecords
|
||||||
|
func TestDeleteOldSystemdServiceRecords(t *testing.T) {
|
||||||
|
hub, err := tests.NewTestHub(t.TempDir())
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
rm := records.NewRecordManager(hub)
|
||||||
|
|
||||||
|
// Create test user and system
|
||||||
|
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "test-system",
|
||||||
|
"host": "localhost",
|
||||||
|
"port": "45876",
|
||||||
|
"status": "up",
|
||||||
|
"users": []string{user.Id},
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
now := time.Now().UTC()
|
||||||
|
|
||||||
|
// Create old systemd service records that should be deleted (older than 20 minutes)
|
||||||
|
oldRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||||
|
"system": system.Id,
|
||||||
|
"name": "nginx.service",
|
||||||
|
"state": 0, // Active
|
||||||
|
"sub": 1, // Running
|
||||||
|
"cpu": 5.0,
|
||||||
|
"cpuPeak": 10.0,
|
||||||
|
"memory": 1024000,
|
||||||
|
"memPeak": 2048000,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
// Set updated time to 25 minutes ago (should be deleted)
|
||||||
|
oldRecord.SetRaw("updated", now.Add(-25*time.Minute).UnixMilli())
|
||||||
|
err = hub.SaveNoValidate(oldRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create recent systemd service record that should be kept (within 20 minutes)
|
||||||
|
recentRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||||
|
"system": system.Id,
|
||||||
|
"name": "apache.service",
|
||||||
|
"state": 1, // Inactive
|
||||||
|
"sub": 0, // Dead
|
||||||
|
"cpu": 2.0,
|
||||||
|
"cpuPeak": 3.0,
|
||||||
|
"memory": 512000,
|
||||||
|
"memPeak": 1024000,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
// Set updated time to 10 minutes ago (should be kept)
|
||||||
|
recentRecord.SetRaw("updated", now.Add(-10*time.Minute).UnixMilli())
|
||||||
|
err = hub.SaveNoValidate(recentRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Count records before deletion
|
||||||
|
countBefore, err := hub.CountRecords("systemd_services")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, int64(2), countBefore, "Should have 2 systemd service records initially")
|
||||||
|
|
||||||
|
// Run deletion via RecordManager
|
||||||
|
rm.DeleteOldRecords()
|
||||||
|
|
||||||
|
// Count records after deletion
|
||||||
|
countAfter, err := hub.CountRecords("systemd_services")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, int64(1), countAfter, "Should have 1 systemd service record after deletion")
|
||||||
|
|
||||||
|
// Verify the correct record was kept
|
||||||
|
remainingRecords, err := hub.FindRecordsByFilter("systemd_services", "", "", 10, 0, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Len(t, remainingRecords, 1, "Should have exactly 1 record remaining")
|
||||||
|
assert.Equal(t, "apache.service", remainingRecords[0].Get("name"), "The recent record should be kept")
|
||||||
|
}
|
||||||
@@ -3,430 +3,15 @@
|
|||||||
package records_test
|
package records_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/records"
|
"github.com/henrygd/beszel/internal/records"
|
||||||
"github.com/henrygd/beszel/internal/tests"
|
"github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
"github.com/pocketbase/dbx"
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
|
||||||
"github.com/pocketbase/pocketbase/tools/types"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestDeleteOldRecords tests the main DeleteOldRecords function
|
|
||||||
func TestDeleteOldRecords(t *testing.T) {
|
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
rm := records.NewRecordManager(hub)
|
|
||||||
|
|
||||||
// Create test user for alerts history
|
|
||||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create test system
|
|
||||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"host": "localhost",
|
|
||||||
"port": "45876",
|
|
||||||
"status": "up",
|
|
||||||
"users": []string{user.Id},
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
// Create old system_stats records that should be deleted
|
|
||||||
var record *core.Record
|
|
||||||
record, err = tests.CreateRecord(hub, "system_stats", map[string]any{
|
|
||||||
"system": system.Id,
|
|
||||||
"type": "1m",
|
|
||||||
"stats": `{"cpu": 50.0, "mem": 1024}`,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
// created is autodate field, so we need to set it manually
|
|
||||||
record.SetRaw("created", now.UTC().Add(-2*time.Hour).Format(types.DefaultDateLayout))
|
|
||||||
err = hub.SaveNoValidate(record)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, record)
|
|
||||||
require.InDelta(t, record.GetDateTime("created").Time().UTC().Unix(), now.UTC().Add(-2*time.Hour).Unix(), 1)
|
|
||||||
require.Equal(t, record.Get("system"), system.Id)
|
|
||||||
require.Equal(t, record.Get("type"), "1m")
|
|
||||||
|
|
||||||
// Create recent system_stats record that should be kept
|
|
||||||
_, err = tests.CreateRecord(hub, "system_stats", map[string]any{
|
|
||||||
"system": system.Id,
|
|
||||||
"type": "1m",
|
|
||||||
"stats": `{"cpu": 30.0, "mem": 512}`,
|
|
||||||
"created": now.Add(-30 * time.Minute), // 30 minutes old, should be kept
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create many alerts history records to trigger deletion
|
|
||||||
for i := range 260 { // More than countBeforeDeletion (250)
|
|
||||||
_, err = tests.CreateRecord(hub, "alerts_history", map[string]any{
|
|
||||||
"user": user.Id,
|
|
||||||
"name": "CPU",
|
|
||||||
"value": i + 1,
|
|
||||||
"system": system.Id,
|
|
||||||
"created": now.Add(-time.Duration(i) * time.Minute),
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count records before deletion
|
|
||||||
systemStatsCountBefore, err := hub.CountRecords("system_stats")
|
|
||||||
require.NoError(t, err)
|
|
||||||
alertsCountBefore, err := hub.CountRecords("alerts_history")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Run deletion
|
|
||||||
rm.DeleteOldRecords()
|
|
||||||
|
|
||||||
// Count records after deletion
|
|
||||||
systemStatsCountAfter, err := hub.CountRecords("system_stats")
|
|
||||||
require.NoError(t, err)
|
|
||||||
alertsCountAfter, err := hub.CountRecords("alerts_history")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify old system stats were deleted
|
|
||||||
assert.Less(t, systemStatsCountAfter, systemStatsCountBefore, "Old system stats should be deleted")
|
|
||||||
|
|
||||||
// Verify alerts history was trimmed
|
|
||||||
assert.Less(t, alertsCountAfter, alertsCountBefore, "Excessive alerts history should be deleted")
|
|
||||||
assert.Equal(t, alertsCountAfter, int64(200), "Alerts count should be equal to countToKeep (200)")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDeleteOldSystemStats tests the deleteOldSystemStats function
|
|
||||||
func TestDeleteOldSystemStats(t *testing.T) {
|
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
// Create test system
|
|
||||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"host": "localhost",
|
|
||||||
"port": "45876",
|
|
||||||
"status": "up",
|
|
||||||
"users": []string{user.Id},
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
now := time.Now().UTC()
|
|
||||||
|
|
||||||
// Test data for different record types and their retention periods
|
|
||||||
testCases := []struct {
|
|
||||||
recordType string
|
|
||||||
retention time.Duration
|
|
||||||
shouldBeKept bool
|
|
||||||
ageFromNow time.Duration
|
|
||||||
description string
|
|
||||||
}{
|
|
||||||
{"1m", time.Hour, true, 30 * time.Minute, "1m record within 1 hour should be kept"},
|
|
||||||
{"1m", time.Hour, false, 2 * time.Hour, "1m record older than 1 hour should be deleted"},
|
|
||||||
{"10m", 12 * time.Hour, true, 6 * time.Hour, "10m record within 12 hours should be kept"},
|
|
||||||
{"10m", 12 * time.Hour, false, 24 * time.Hour, "10m record older than 12 hours should be deleted"},
|
|
||||||
{"20m", 24 * time.Hour, true, 12 * time.Hour, "20m record within 24 hours should be kept"},
|
|
||||||
{"20m", 24 * time.Hour, false, 48 * time.Hour, "20m record older than 24 hours should be deleted"},
|
|
||||||
{"120m", 7 * 24 * time.Hour, true, 3 * 24 * time.Hour, "120m record within 7 days should be kept"},
|
|
||||||
{"120m", 7 * 24 * time.Hour, false, 10 * 24 * time.Hour, "120m record older than 7 days should be deleted"},
|
|
||||||
{"480m", 30 * 24 * time.Hour, true, 15 * 24 * time.Hour, "480m record within 30 days should be kept"},
|
|
||||||
{"480m", 30 * 24 * time.Hour, false, 45 * 24 * time.Hour, "480m record older than 30 days should be deleted"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create test records for both system_stats and container_stats
|
|
||||||
collections := []string{"system_stats", "container_stats"}
|
|
||||||
recordIds := make(map[string][]string)
|
|
||||||
|
|
||||||
for _, collection := range collections {
|
|
||||||
recordIds[collection] = make([]string, 0)
|
|
||||||
|
|
||||||
for i, tc := range testCases {
|
|
||||||
recordTime := now.Add(-tc.ageFromNow)
|
|
||||||
|
|
||||||
var stats string
|
|
||||||
if collection == "system_stats" {
|
|
||||||
stats = fmt.Sprintf(`{"cpu": %d.0, "mem": %d}`, i*10, i*100)
|
|
||||||
} else {
|
|
||||||
stats = fmt.Sprintf(`[{"name": "container%d", "cpu": %d.0, "mem": %d}]`, i, i*5, i*50)
|
|
||||||
}
|
|
||||||
|
|
||||||
record, err := tests.CreateRecord(hub, collection, map[string]any{
|
|
||||||
"system": system.Id,
|
|
||||||
"type": tc.recordType,
|
|
||||||
"stats": stats,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
|
||||||
err = hub.SaveNoValidate(record)
|
|
||||||
require.NoError(t, err)
|
|
||||||
recordIds[collection] = append(recordIds[collection], record.Id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run deletion
|
|
||||||
err = records.DeleteOldSystemStats(hub)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify results
|
|
||||||
for _, collection := range collections {
|
|
||||||
for i, tc := range testCases {
|
|
||||||
recordId := recordIds[collection][i]
|
|
||||||
|
|
||||||
// Try to find the record
|
|
||||||
_, err := hub.FindRecordById(collection, recordId)
|
|
||||||
|
|
||||||
if tc.shouldBeKept {
|
|
||||||
assert.NoError(t, err, "Record should exist: %s", tc.description)
|
|
||||||
} else {
|
|
||||||
assert.Error(t, err, "Record should be deleted: %s", tc.description)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDeleteOldAlertsHistory tests the deleteOldAlertsHistory function
|
|
||||||
func TestDeleteOldAlertsHistory(t *testing.T) {
|
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
// Create test users
|
|
||||||
user1, err := tests.CreateUser(hub, "user1@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
user2, err := tests.CreateUser(hub, "user2@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"host": "localhost",
|
|
||||||
"port": "45876",
|
|
||||||
"status": "up",
|
|
||||||
"users": []string{user1.Id, user2.Id},
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
now := time.Now().UTC()
|
|
||||||
|
|
||||||
testCases := []struct {
|
|
||||||
name string
|
|
||||||
user *core.Record
|
|
||||||
alertCount int
|
|
||||||
countToKeep int
|
|
||||||
countBeforeDeletion int
|
|
||||||
expectedAfterDeletion int
|
|
||||||
description string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "User with few alerts (below threshold)",
|
|
||||||
user: user1,
|
|
||||||
alertCount: 100,
|
|
||||||
countToKeep: 50,
|
|
||||||
countBeforeDeletion: 150,
|
|
||||||
expectedAfterDeletion: 100, // No deletion because below threshold
|
|
||||||
description: "User with alerts below countBeforeDeletion should not have any deleted",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "User with many alerts (above threshold)",
|
|
||||||
user: user2,
|
|
||||||
alertCount: 300,
|
|
||||||
countToKeep: 100,
|
|
||||||
countBeforeDeletion: 200,
|
|
||||||
expectedAfterDeletion: 100, // Should be trimmed to countToKeep
|
|
||||||
description: "User with alerts above countBeforeDeletion should be trimmed to countToKeep",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range testCases {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
// Create alerts for this user
|
|
||||||
for i := 0; i < tc.alertCount; i++ {
|
|
||||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
|
||||||
"user": tc.user.Id,
|
|
||||||
"name": "CPU",
|
|
||||||
"value": i + 1,
|
|
||||||
"system": system.Id,
|
|
||||||
"created": now.Add(-time.Duration(i) * time.Minute),
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count before deletion
|
|
||||||
countBefore, err := hub.CountRecords("alerts_history",
|
|
||||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(tc.alertCount), countBefore, "Initial count should match")
|
|
||||||
|
|
||||||
// Run deletion
|
|
||||||
err = records.DeleteOldAlertsHistory(hub, tc.countToKeep, tc.countBeforeDeletion)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Count after deletion
|
|
||||||
countAfter, err := hub.CountRecords("alerts_history",
|
|
||||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, int64(tc.expectedAfterDeletion), countAfter, tc.description)
|
|
||||||
|
|
||||||
// If deletion occurred, verify the most recent records were kept
|
|
||||||
if tc.expectedAfterDeletion < tc.alertCount {
|
|
||||||
records, err := hub.FindRecordsByFilter("alerts_history",
|
|
||||||
"user = {:user}",
|
|
||||||
"-created", // Order by created DESC
|
|
||||||
tc.countToKeep,
|
|
||||||
0,
|
|
||||||
map[string]any{"user": tc.user.Id})
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, records, tc.expectedAfterDeletion, "Should have exactly countToKeep records")
|
|
||||||
|
|
||||||
// Verify records are in descending order by created time
|
|
||||||
for i := 1; i < len(records); i++ {
|
|
||||||
prev := records[i-1].GetDateTime("created").Time()
|
|
||||||
curr := records[i].GetDateTime("created").Time()
|
|
||||||
assert.True(t, prev.After(curr) || prev.Equal(curr),
|
|
||||||
"Records should be ordered by created time (newest first)")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDeleteOldAlertsHistoryEdgeCases tests edge cases for alerts history deletion
|
|
||||||
func TestDeleteOldAlertsHistoryEdgeCases(t *testing.T) {
|
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
t.Run("No users with excessive alerts", func(t *testing.T) {
|
|
||||||
// Create user with few alerts
|
|
||||||
user, err := tests.CreateUser(hub, "few@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"host": "localhost",
|
|
||||||
"port": "45876",
|
|
||||||
"status": "up",
|
|
||||||
"users": []string{user.Id},
|
|
||||||
})
|
|
||||||
|
|
||||||
// Create only 5 alerts (well below threshold)
|
|
||||||
for i := range 5 {
|
|
||||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
|
||||||
"user": user.Id,
|
|
||||||
"name": "CPU",
|
|
||||||
"value": i + 1,
|
|
||||||
"system": system.Id,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should not error and should not delete anything
|
|
||||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
count, err := hub.CountRecords("alerts_history")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(5), count, "All alerts should remain")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("Empty alerts_history table", func(t *testing.T) {
|
|
||||||
// Clear any existing alerts
|
|
||||||
_, err := hub.DB().NewQuery("DELETE FROM alerts_history").Execute()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Should not error with empty table
|
|
||||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDeleteOldSystemdServiceRecords tests systemd service cleanup via DeleteOldRecords
|
|
||||||
func TestDeleteOldSystemdServiceRecords(t *testing.T) {
|
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
rm := records.NewRecordManager(hub)
|
|
||||||
|
|
||||||
// Create test user and system
|
|
||||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"host": "localhost",
|
|
||||||
"port": "45876",
|
|
||||||
"status": "up",
|
|
||||||
"users": []string{user.Id},
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
now := time.Now().UTC()
|
|
||||||
|
|
||||||
// Create old systemd service records that should be deleted (older than 20 minutes)
|
|
||||||
oldRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
|
||||||
"system": system.Id,
|
|
||||||
"name": "nginx.service",
|
|
||||||
"state": 0, // Active
|
|
||||||
"sub": 1, // Running
|
|
||||||
"cpu": 5.0,
|
|
||||||
"cpuPeak": 10.0,
|
|
||||||
"memory": 1024000,
|
|
||||||
"memPeak": 2048000,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
// Set updated time to 25 minutes ago (should be deleted)
|
|
||||||
oldRecord.SetRaw("updated", now.Add(-25*time.Minute).UnixMilli())
|
|
||||||
err = hub.SaveNoValidate(oldRecord)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create recent systemd service record that should be kept (within 20 minutes)
|
|
||||||
recentRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
|
||||||
"system": system.Id,
|
|
||||||
"name": "apache.service",
|
|
||||||
"state": 1, // Inactive
|
|
||||||
"sub": 0, // Dead
|
|
||||||
"cpu": 2.0,
|
|
||||||
"cpuPeak": 3.0,
|
|
||||||
"memory": 512000,
|
|
||||||
"memPeak": 1024000,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
// Set updated time to 10 minutes ago (should be kept)
|
|
||||||
recentRecord.SetRaw("updated", now.Add(-10*time.Minute).UnixMilli())
|
|
||||||
err = hub.SaveNoValidate(recentRecord)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Count records before deletion
|
|
||||||
countBefore, err := hub.CountRecords("systemd_services")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(2), countBefore, "Should have 2 systemd service records initially")
|
|
||||||
|
|
||||||
// Run deletion via RecordManager
|
|
||||||
rm.DeleteOldRecords()
|
|
||||||
|
|
||||||
// Count records after deletion
|
|
||||||
countAfter, err := hub.CountRecords("systemd_services")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(1), countAfter, "Should have 1 systemd service record after deletion")
|
|
||||||
|
|
||||||
// Verify the correct record was kept
|
|
||||||
remainingRecords, err := hub.FindRecordsByFilter("systemd_services", "", "", 10, 0, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, remainingRecords, 1, "Should have exactly 1 record remaining")
|
|
||||||
assert.Equal(t, "apache.service", remainingRecords[0].Get("name"), "The recent record should be kept")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestRecordManagerCreation tests RecordManager creation
|
// TestRecordManagerCreation tests RecordManager creation
|
||||||
func TestRecordManagerCreation(t *testing.T) {
|
func TestRecordManagerCreation(t *testing.T) {
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
hub, err := tests.NewTestHub(t.TempDir())
|
||||||
|
|||||||
@@ -41,7 +41,7 @@
|
|||||||
"recharts": "^2.15.4",
|
"recharts": "^2.15.4",
|
||||||
"shiki": "^3.13.0",
|
"shiki": "^3.13.0",
|
||||||
"tailwind-merge": "^3.3.1",
|
"tailwind-merge": "^3.3.1",
|
||||||
"valibot": "^0.42.1",
|
"valibot": "^1.3.1",
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@biomejs/biome": "2.2.4",
|
"@biomejs/biome": "2.2.4",
|
||||||
@@ -927,7 +927,7 @@
|
|||||||
|
|
||||||
"util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="],
|
"util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="],
|
||||||
|
|
||||||
"valibot": ["valibot@0.42.1", "", { "peerDependencies": { "typescript": ">=5" } }, "sha512-3keXV29Ar5b//Hqi4MbSdV7lfVp6zuYLZuA9V1PvQUsXqogr+u5lvLPLk3A4f74VUXDnf/JfWMN6sB+koJ/FFw=="],
|
"valibot": ["valibot@1.3.1", "", { "peerDependencies": { "typescript": ">=5" }, "optionalPeers": ["typescript"] }, "sha512-sfdRir/QFM0JaF22hqTroPc5xy4DimuGQVKFrzF1YfGwaS1nJot3Y8VqMdLO2Lg27fMzat2yD3pY5PbAYO39Gg=="],
|
||||||
|
|
||||||
"vfile": ["vfile@6.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile-message": "^4.0.0" } }, "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q=="],
|
"vfile": ["vfile@6.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile-message": "^4.0.0" } }, "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q=="],
|
||||||
|
|
||||||
|
|||||||
@@ -4,15 +4,25 @@
|
|||||||
<meta charset="UTF-8" />
|
<meta charset="UTF-8" />
|
||||||
<link rel="manifest" href="./static/manifest.json" crossorigin="use-credentials" />
|
<link rel="manifest" href="./static/manifest.json" crossorigin="use-credentials" />
|
||||||
<link rel="icon" type="image/svg+xml" href="./static/icon.svg" />
|
<link rel="icon" type="image/svg+xml" href="./static/icon.svg" />
|
||||||
|
<link rel="apple-touch-icon" href="./static/icon.png" />
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0,maximum-scale=1.0, user-scalable=no, viewport-fit=cover" />
|
<meta name="viewport" content="width=device-width, initial-scale=1.0,maximum-scale=1.0, user-scalable=no, viewport-fit=cover" />
|
||||||
<meta name="robots" content="noindex, nofollow" />
|
<meta name="robots" content="noindex, nofollow" />
|
||||||
<title>Beszel</title>
|
<title>Beszel</title>
|
||||||
|
<style>
|
||||||
|
.dark { background: hsl(220 5.5% 9%); color-scheme: dark; }
|
||||||
|
</style>
|
||||||
<script>
|
<script>
|
||||||
globalThis.BESZEL = {
|
(function() {
|
||||||
BASE_PATH: "%BASE_URL%",
|
try {
|
||||||
HUB_VERSION: "{{V}}",
|
var theme = localStorage.getItem('ui-theme');
|
||||||
HUB_URL: "{{HUB_URL}}"
|
var isDark = theme === 'dark' ||
|
||||||
}
|
(theme !== 'light' && window.matchMedia('(prefers-color-scheme: dark)').matches);
|
||||||
|
document.documentElement.classList.add(isDark ? 'dark' : 'light');
|
||||||
|
} catch (e) {}
|
||||||
|
})();
|
||||||
|
</script>
|
||||||
|
<script>
|
||||||
|
globalThis.BESZEL = "{info}"
|
||||||
</script>
|
</script>
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
|
|||||||
407
internal/site/package-lock.json
generated
407
internal/site/package-lock.json
generated
@@ -1,12 +1,12 @@
|
|||||||
{
|
{
|
||||||
"name": "beszel",
|
"name": "beszel",
|
||||||
"version": "0.18.3",
|
"version": "0.18.7",
|
||||||
"lockfileVersion": 3,
|
"lockfileVersion": 3,
|
||||||
"requires": true,
|
"requires": true,
|
||||||
"packages": {
|
"packages": {
|
||||||
"": {
|
"": {
|
||||||
"name": "beszel",
|
"name": "beszel",
|
||||||
"version": "0.18.3",
|
"version": "0.18.7",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@henrygd/queue": "^1.0.7",
|
"@henrygd/queue": "^1.0.7",
|
||||||
"@henrygd/semaphore": "^0.0.2",
|
"@henrygd/semaphore": "^0.0.2",
|
||||||
@@ -44,7 +44,7 @@
|
|||||||
"recharts": "^2.15.4",
|
"recharts": "^2.15.4",
|
||||||
"shiki": "^3.13.0",
|
"shiki": "^3.13.0",
|
||||||
"tailwind-merge": "^3.3.1",
|
"tailwind-merge": "^3.3.1",
|
||||||
"valibot": "^0.42.1"
|
"valibot": "^1.3.1"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@biomejs/biome": "2.2.4",
|
"@biomejs/biome": "2.2.4",
|
||||||
@@ -986,29 +986,6 @@
|
|||||||
"integrity": "sha512-N3W7MKwTRmAxOjeG0NAT18oe2Xn3KdjkpMR6crbkF1UDamMGPjyigqEsefiv+qTaxibtc1a+zXCVzb9YXANVqw==",
|
"integrity": "sha512-N3W7MKwTRmAxOjeG0NAT18oe2Xn3KdjkpMR6crbkF1UDamMGPjyigqEsefiv+qTaxibtc1a+zXCVzb9YXANVqw==",
|
||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
"node_modules/@isaacs/balanced-match": {
|
|
||||||
"version": "4.0.1",
|
|
||||||
"resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz",
|
|
||||||
"integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==",
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"engines": {
|
|
||||||
"node": "20 || >=22"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@isaacs/brace-expansion": {
|
|
||||||
"version": "5.0.0",
|
|
||||||
"resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.0.tgz",
|
|
||||||
"integrity": "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==",
|
|
||||||
"dev": true,
|
|
||||||
"license": "MIT",
|
|
||||||
"dependencies": {
|
|
||||||
"@isaacs/balanced-match": "^4.0.1"
|
|
||||||
},
|
|
||||||
"engines": {
|
|
||||||
"node": "20 || >=22"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/@isaacs/cliui": {
|
"node_modules/@isaacs/cliui": {
|
||||||
"version": "8.0.2",
|
"version": "8.0.2",
|
||||||
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
|
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
|
||||||
@@ -1243,9 +1220,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/@lingui/cli/node_modules/picomatch": {
|
"node_modules/@lingui/cli/node_modules/picomatch": {
|
||||||
"version": "2.3.1",
|
"version": "2.3.2",
|
||||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
|
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
|
||||||
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
|
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
|
||||||
"dev": true,
|
"dev": true,
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"engines": {
|
"engines": {
|
||||||
@@ -2408,9 +2385,9 @@
|
|||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-android-arm-eabi": {
|
"node_modules/@rollup/rollup-android-arm-eabi": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.1.tgz",
|
||||||
"integrity": "sha512-rGmb8qoG/zdmKoYELCBwu7vt+9HxZ7Koos3pD0+sH5fR3u3Wb/jGcpnqxcnWsPEKDUyzeLSqksN8LJtgXjqBYw==",
|
"integrity": "sha512-d6FinEBLdIiK+1uACUttJKfgZREXrF0Qc2SmLII7W2AD8FfiZ9Wjd+rD/iRuf5s5dWrr1GgwXCvPqOuDquOowA==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm"
|
"arm"
|
||||||
],
|
],
|
||||||
@@ -2422,9 +2399,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-android-arm64": {
|
"node_modules/@rollup/rollup-android-arm64": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.1.tgz",
|
||||||
"integrity": "sha512-4e9WtTxrk3gu1DFE+imNJr4WsL13nWbD/Y6wQcyku5qadlKHY3OQ3LJ/INrrjngv2BJIHnIzbqMk1GTAC2P8yQ==",
|
"integrity": "sha512-YjG/EwIDvvYI1YvYbHvDz/BYHtkY4ygUIXHnTdLhG+hKIQFBiosfWiACWortsKPKU/+dUwQQCKQM3qrDe8c9BA==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
@@ -2436,9 +2413,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-darwin-arm64": {
|
"node_modules/@rollup/rollup-darwin-arm64": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.1.tgz",
|
||||||
"integrity": "sha512-+XjmyChHfc4TSs6WUQGmVf7Hkg8ferMAE2aNYYWjiLzAS/T62uOsdfnqv+GHRjq7rKRnYh4mwWb4Hz7h/alp8A==",
|
"integrity": "sha512-mjCpF7GmkRtSJwon+Rq1N8+pI+8l7w5g9Z3vWj4T7abguC4Czwi3Yu/pFaLvA3TTeMVjnu3ctigusqWUfjZzvw==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
@@ -2450,9 +2427,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-darwin-x64": {
|
"node_modules/@rollup/rollup-darwin-x64": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.1.tgz",
|
||||||
"integrity": "sha512-upGEY7Ftw8M6BAJyGwnwMw91rSqXTcOKZnnveKrVWsMTF8/k5mleKSuh7D4v4IV1pLxKAk3Tbs0Lo9qYmii5mQ==",
|
"integrity": "sha512-haZ7hJ1JT4e9hqkoT9R/19XW2QKqjfJVv+i5AGg57S+nLk9lQnJ1F/eZloRO3o9Scy9CM3wQ9l+dkXtcBgN5Ew==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
@@ -2464,9 +2441,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-freebsd-arm64": {
|
"node_modules/@rollup/rollup-freebsd-arm64": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.1.tgz",
|
||||||
"integrity": "sha512-P9ViWakdoynYFUOZhqq97vBrhuvRLAbN/p2tAVJvhLb8SvN7rbBnJQcBu8e/rQts42pXGLVhfsAP0k9KXWa3nQ==",
|
"integrity": "sha512-czw90wpQq3ZsAVBlinZjAYTKduOjTywlG7fEeWKUA7oCmpA8xdTkxZZlwNJKWqILlq0wehoZcJYfBvOyhPTQ6w==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
@@ -2478,9 +2455,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-freebsd-x64": {
|
"node_modules/@rollup/rollup-freebsd-x64": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.1.tgz",
|
||||||
"integrity": "sha512-VLKIwIpnBya5/saccM8JshpbxfyJt0Dsli0PjXozHwbSVaHTvWXJH1bbCwPXxnMzU4zVEfgD1HpW3VQHomi2AQ==",
|
"integrity": "sha512-KVB2rqsxTHuBtfOeySEyzEOB7ltlB/ux38iu2rBQzkjbwRVlkhAGIEDiiYnO2kFOkJp+Z7pUXKyrRRFuFUKt+g==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
@@ -2492,9 +2469,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
|
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.1.tgz",
|
||||||
"integrity": "sha512-3zEuZsXfKaw8n/yF7t8N6NNdhyFw3s8xJTqjbTDXlipwrEHo4GtIKcMJr5Ed29leLpB9AugtAQpAHW0jvtKKaQ==",
|
"integrity": "sha512-L+34Qqil+v5uC0zEubW7uByo78WOCIrBvci69E7sFASRl0X7b/MB6Cqd1lky/CtcSVTydWa2WZwFuWexjS5o6g==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm"
|
"arm"
|
||||||
],
|
],
|
||||||
@@ -2506,9 +2483,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
|
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.1.tgz",
|
||||||
"integrity": "sha512-leo9tOIlKrcBmmEypzunV/2w946JeLbTdDlwEZ7OnnsUyelZ72NMnT4B2vsikSgwQifjnJUbdXzuW4ToN1wV+Q==",
|
"integrity": "sha512-n83O8rt4v34hgFzlkb1ycniJh7IR5RCIqt6mz1VRJD6pmhRi0CXdmfnLu9dIUS6buzh60IvACM842Ffb3xd6Gg==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm"
|
"arm"
|
||||||
],
|
],
|
||||||
@@ -2520,9 +2497,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-linux-arm64-gnu": {
|
"node_modules/@rollup/rollup-linux-arm64-gnu": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.1.tgz",
|
||||||
"integrity": "sha512-Vy/WS4z4jEyvnJm+CnPfExIv5sSKqZrUr98h03hpAMbE2aI0aD2wvK6GiSe8Gx2wGp3eD81cYDpLLBqNb2ydwQ==",
|
"integrity": "sha512-Nql7sTeAzhTAja3QXeAI48+/+GjBJ+QmAH13snn0AJSNL50JsDqotyudHyMbO2RbJkskbMbFJfIJKWA6R1LCJQ==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
@@ -2534,9 +2511,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-linux-arm64-musl": {
|
"node_modules/@rollup/rollup-linux-arm64-musl": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.1.tgz",
|
||||||
"integrity": "sha512-x5Kzn7XTwIssU9UYqWDB9VpLpfHYuXw5c6bJr4Mzv9kIv242vmJHbI5PJJEnmBYitUIfoMCODDhR7KoZLot2VQ==",
|
"integrity": "sha512-+pUymDhd0ys9GcKZPPWlFiZ67sTWV5UU6zOJat02M1+PiuSGDziyRuI/pPue3hoUwm2uGfxdL+trT6Z9rxnlMA==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
@@ -2547,10 +2524,24 @@
|
|||||||
"linux"
|
"linux"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-linux-loongarch64-gnu": {
|
"node_modules/@rollup/rollup-linux-loong64-gnu": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.1.tgz",
|
||||||
"integrity": "sha512-yzCaBbwkkWt/EcgJOKDUdUpMHjhiZT/eDktOPWvSRpqrVE04p0Nd6EGV4/g7MARXXeOqstflqsKuXVM3H9wOIQ==",
|
"integrity": "sha512-VSvgvQeIcsEvY4bKDHEDWcpW4Yw7BtlKG1GUT4FzBUlEKQK0rWHYBqQt6Fm2taXS+1bXvJT6kICu5ZwqKCnvlQ==",
|
||||||
|
"cpu": [
|
||||||
|
"loong64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"linux"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"node_modules/@rollup/rollup-linux-loong64-musl": {
|
||||||
|
"version": "4.60.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.1.tgz",
|
||||||
|
"integrity": "sha512-4LqhUomJqwe641gsPp6xLfhqWMbQV04KtPp7/dIp0nzPxAkNY1AbwL5W0MQpcalLYk07vaW9Kp1PBhdpZYYcEw==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"loong64"
|
"loong64"
|
||||||
],
|
],
|
||||||
@@ -2562,9 +2553,23 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-linux-ppc64-gnu": {
|
"node_modules/@rollup/rollup-linux-ppc64-gnu": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.1.tgz",
|
||||||
"integrity": "sha512-UK0WzWUjMAJccHIeOpPhPcKBqax7QFg47hwZTp6kiMhQHeOYJeaMwzeRZe1q5IiTKsaLnHu9s6toSYVUlZ2QtQ==",
|
"integrity": "sha512-tLQQ9aPvkBxOc/EUT6j3pyeMD6Hb8QF2BTBnCQWP/uu1lhc9AIrIjKnLYMEroIz/JvtGYgI9dF3AxHZNaEH0rw==",
|
||||||
|
"cpu": [
|
||||||
|
"ppc64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"linux"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"node_modules/@rollup/rollup-linux-ppc64-musl": {
|
||||||
|
"version": "4.60.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.1.tgz",
|
||||||
|
"integrity": "sha512-RMxFhJwc9fSXP6PqmAz4cbv3kAyvD1etJFjTx4ONqFP9DkTkXsAMU4v3Vyc5BgzC+anz7nS/9tp4obsKfqkDHg==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"ppc64"
|
"ppc64"
|
||||||
],
|
],
|
||||||
@@ -2576,9 +2581,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
|
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.1.tgz",
|
||||||
"integrity": "sha512-3NADEIlt+aCdCbWVZ7D3tBjBX1lHpXxcvrLt/kdXTiBrOds8APTdtk2yRL2GgmnSVeX4YS1JIf0imFujg78vpw==",
|
"integrity": "sha512-QKgFl+Yc1eEk6MmOBfRHYF6lTxiiiV3/z/BRrbSiW2I7AFTXoBFvdMEyglohPj//2mZS4hDOqeB0H1ACh3sBbg==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"riscv64"
|
"riscv64"
|
||||||
],
|
],
|
||||||
@@ -2590,9 +2595,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-linux-riscv64-musl": {
|
"node_modules/@rollup/rollup-linux-riscv64-musl": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.1.tgz",
|
||||||
"integrity": "sha512-euuwm/QTXAMOcyiFCcrx0/S2jGvFlKJ2Iro8rsmYL53dlblp3LkUQVFzEidHhvIPPvcIsxDhl2wkBE+I6YVGzA==",
|
"integrity": "sha512-RAjXjP/8c6ZtzatZcA1RaQr6O1TRhzC+adn8YZDnChliZHviqIjmvFwHcxi4JKPSDAt6Uhf/7vqcBzQJy0PDJg==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"riscv64"
|
"riscv64"
|
||||||
],
|
],
|
||||||
@@ -2604,9 +2609,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-linux-s390x-gnu": {
|
"node_modules/@rollup/rollup-linux-s390x-gnu": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.1.tgz",
|
||||||
"integrity": "sha512-w8mULUjmPdWLJgmTYJx/W6Qhln1a+yqvgwmGXcQl2vFBkWsKGUBRbtLRuKJUln8Uaimf07zgJNxOhHOvjSQmBQ==",
|
"integrity": "sha512-wcuocpaOlaL1COBYiA89O6yfjlp3RwKDeTIA0hM7OpmhR1Bjo9j31G1uQVpDlTvwxGn2nQs65fBFL5UFd76FcQ==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"s390x"
|
"s390x"
|
||||||
],
|
],
|
||||||
@@ -2618,9 +2623,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-linux-x64-gnu": {
|
"node_modules/@rollup/rollup-linux-x64-gnu": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.1.tgz",
|
||||||
"integrity": "sha512-90taWXCWxTbClWuMZD0DKYohY1EovA+W5iytpE89oUPmT5O1HFdf8cuuVIylE6vCbrGdIGv85lVRzTcpTRZ+kA==",
|
"integrity": "sha512-77PpsFQUCOiZR9+LQEFg9GClyfkNXj1MP6wRnzYs0EeWbPcHs02AXu4xuUbM1zhwn3wqaizle3AEYg5aeoohhg==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
@@ -2632,9 +2637,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-linux-x64-musl": {
|
"node_modules/@rollup/rollup-linux-x64-musl": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.1.tgz",
|
||||||
"integrity": "sha512-2Gu29SkFh1FfTRuN1GR1afMuND2GKzlORQUP3mNMJbqdndOg7gNsa81JnORctazHRokiDzQ5+MLE5XYmZW5VWg==",
|
"integrity": "sha512-5cIATbk5vynAjqqmyBjlciMJl1+R/CwX9oLk/EyiFXDWd95KpHdrOJT//rnUl4cUcskrd0jCCw3wpZnhIHdD9w==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
@@ -2645,10 +2650,38 @@
|
|||||||
"linux"
|
"linux"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"node_modules/@rollup/rollup-openbsd-x64": {
|
||||||
|
"version": "4.60.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.1.tgz",
|
||||||
|
"integrity": "sha512-cl0w09WsCi17mcmWqqglez9Gk8isgeWvoUZ3WiJFYSR3zjBQc2J5/ihSjpl+VLjPqjQ/1hJRcqBfLjssREQILw==",
|
||||||
|
"cpu": [
|
||||||
|
"x64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"openbsd"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"node_modules/@rollup/rollup-openharmony-arm64": {
|
||||||
|
"version": "4.60.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.1.tgz",
|
||||||
|
"integrity": "sha512-4Cv23ZrONRbNtbZa37mLSueXUCtN7MXccChtKpUnQNgF010rjrjfHx3QxkS2PI7LqGT5xXyYs1a7LbzAwT0iCA==",
|
||||||
|
"cpu": [
|
||||||
|
"arm64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"openharmony"
|
||||||
|
]
|
||||||
|
},
|
||||||
"node_modules/@rollup/rollup-win32-arm64-msvc": {
|
"node_modules/@rollup/rollup-win32-arm64-msvc": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.1.tgz",
|
||||||
"integrity": "sha512-6kQFR1WuAO50bxkIlAVeIYsz3RUx+xymwhTo9j94dJ+kmHe9ly7muH23sdfWduD0BA8pD9/yhonUvAjxGh34jQ==",
|
"integrity": "sha512-i1okWYkA4FJICtr7KpYzFpRTHgy5jdDbZiWfvny21iIKky5YExiDXP+zbXzm3dUcFpkEeYNHgQ5fuG236JPq0g==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"arm64"
|
"arm64"
|
||||||
],
|
],
|
||||||
@@ -2660,9 +2693,9 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"node_modules/@rollup/rollup-win32-ia32-msvc": {
|
"node_modules/@rollup/rollup-win32-ia32-msvc": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.1.tgz",
|
||||||
"integrity": "sha512-RUyZZ/mga88lMI3RlXFs4WQ7n3VyU07sPXmMG7/C1NOi8qisUg57Y7LRarqoGoAiopmGmChUhSwfpvQ3H5iGSQ==",
|
"integrity": "sha512-u09m3CuwLzShA0EYKMNiFgcjjzwqtUMLmuCJLeZWjjOYA3IT2Di09KaxGBTP9xVztWyIWjVdsB2E9goMjZvTQg==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"ia32"
|
"ia32"
|
||||||
],
|
],
|
||||||
@@ -2673,10 +2706,24 @@
|
|||||||
"win32"
|
"win32"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
"node_modules/@rollup/rollup-win32-x64-gnu": {
|
||||||
|
"version": "4.60.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.1.tgz",
|
||||||
|
"integrity": "sha512-k+600V9Zl1CM7eZxJgMyTUzmrmhB/0XZnF4pRypKAlAgxmedUA+1v9R+XOFv56W4SlHEzfeMtzujLJD22Uz5zg==",
|
||||||
|
"cpu": [
|
||||||
|
"x64"
|
||||||
|
],
|
||||||
|
"dev": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"os": [
|
||||||
|
"win32"
|
||||||
|
]
|
||||||
|
},
|
||||||
"node_modules/@rollup/rollup-win32-x64-msvc": {
|
"node_modules/@rollup/rollup-win32-x64-msvc": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.1.tgz",
|
||||||
"integrity": "sha512-8a/caCUN4vkTChxkaIJcMtwIVcBhi4X2PQRoT+yCK3qRYaZ7cURrmJFL5Ux9H9RaMIXj9RuihckdmkBX3zZsgg==",
|
"integrity": "sha512-lWMnixq/QzxyhTV6NjQJ4SFo1J6PvOX8vUx5Wb4bBPsEb+8xZ89Bz6kOXpfXj9ak9AHTQVQzlgzBEc1SyM27xQ==",
|
||||||
"cpu": [
|
"cpu": [
|
||||||
"x64"
|
"x64"
|
||||||
],
|
],
|
||||||
@@ -3235,6 +3282,66 @@
|
|||||||
"node": ">=14.0.0"
|
"node": ">=14.0.0"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": {
|
||||||
|
"version": "1.4.5",
|
||||||
|
"dev": true,
|
||||||
|
"inBundle": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"dependencies": {
|
||||||
|
"@emnapi/wasi-threads": "1.0.4",
|
||||||
|
"tslib": "^2.4.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": {
|
||||||
|
"version": "1.4.5",
|
||||||
|
"dev": true,
|
||||||
|
"inBundle": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"dependencies": {
|
||||||
|
"tslib": "^2.4.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": {
|
||||||
|
"version": "1.0.4",
|
||||||
|
"dev": true,
|
||||||
|
"inBundle": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"dependencies": {
|
||||||
|
"tslib": "^2.4.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": {
|
||||||
|
"version": "0.2.12",
|
||||||
|
"dev": true,
|
||||||
|
"inBundle": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"dependencies": {
|
||||||
|
"@emnapi/core": "^1.4.3",
|
||||||
|
"@emnapi/runtime": "^1.4.3",
|
||||||
|
"@tybys/wasm-util": "^0.10.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": {
|
||||||
|
"version": "0.10.0",
|
||||||
|
"dev": true,
|
||||||
|
"inBundle": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"optional": true,
|
||||||
|
"dependencies": {
|
||||||
|
"tslib": "^2.4.0"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": {
|
||||||
|
"version": "2.8.0",
|
||||||
|
"dev": true,
|
||||||
|
"inBundle": true,
|
||||||
|
"license": "0BSD",
|
||||||
|
"optional": true
|
||||||
|
},
|
||||||
"node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
|
"node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
|
||||||
"version": "4.1.12",
|
"version": "4.1.12",
|
||||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.12.tgz",
|
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.12.tgz",
|
||||||
@@ -3589,9 +3696,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/anymatch/node_modules/picomatch": {
|
"node_modules/anymatch/node_modules/picomatch": {
|
||||||
"version": "2.3.1",
|
"version": "2.3.2",
|
||||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
|
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
|
||||||
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
|
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
|
||||||
"dev": true,
|
"dev": true,
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"engines": {
|
"engines": {
|
||||||
@@ -3620,6 +3727,16 @@
|
|||||||
"node": ">=10"
|
"node": ">=10"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/balanced-match": {
|
||||||
|
"version": "4.0.4",
|
||||||
|
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz",
|
||||||
|
"integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==",
|
||||||
|
"dev": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"engines": {
|
||||||
|
"node": "18 || 20 || >=22"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/base64-js": {
|
"node_modules/base64-js": {
|
||||||
"version": "1.5.1",
|
"version": "1.5.1",
|
||||||
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
|
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
|
||||||
@@ -3666,6 +3783,19 @@
|
|||||||
"readable-stream": "^3.4.0"
|
"readable-stream": "^3.4.0"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/brace-expansion": {
|
||||||
|
"version": "5.0.5",
|
||||||
|
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz",
|
||||||
|
"integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==",
|
||||||
|
"dev": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"dependencies": {
|
||||||
|
"balanced-match": "^4.0.2"
|
||||||
|
},
|
||||||
|
"engines": {
|
||||||
|
"node": "18 || 20 || >=22"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/braces": {
|
"node_modules/braces": {
|
||||||
"version": "3.0.3",
|
"version": "3.0.3",
|
||||||
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
|
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
|
||||||
@@ -5072,9 +5202,9 @@
|
|||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
"node_modules/lodash": {
|
"node_modules/lodash": {
|
||||||
"version": "4.17.23",
|
"version": "4.18.1",
|
||||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
|
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.18.1.tgz",
|
||||||
"integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
|
"integrity": "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==",
|
||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
"node_modules/lodash.sortby": {
|
"node_modules/lodash.sortby": {
|
||||||
@@ -5267,9 +5397,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/micromatch/node_modules/picomatch": {
|
"node_modules/micromatch/node_modules/picomatch": {
|
||||||
"version": "2.3.1",
|
"version": "2.3.2",
|
||||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
|
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
|
||||||
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
|
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
|
||||||
"dev": true,
|
"dev": true,
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"engines": {
|
"engines": {
|
||||||
@@ -5290,16 +5420,16 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/minimatch": {
|
"node_modules/minimatch": {
|
||||||
"version": "10.1.1",
|
"version": "10.2.5",
|
||||||
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz",
|
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.5.tgz",
|
||||||
"integrity": "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==",
|
"integrity": "sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg==",
|
||||||
"dev": true,
|
"dev": true,
|
||||||
"license": "BlueOak-1.0.0",
|
"license": "BlueOak-1.0.0",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@isaacs/brace-expansion": "^5.0.0"
|
"brace-expansion": "^5.0.5"
|
||||||
},
|
},
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": "20 || >=22"
|
"node": "18 || 20 || >=22"
|
||||||
},
|
},
|
||||||
"funding": {
|
"funding": {
|
||||||
"url": "https://github.com/sponsors/isaacs"
|
"url": "https://github.com/sponsors/isaacs"
|
||||||
@@ -5575,9 +5705,9 @@
|
|||||||
"license": "ISC"
|
"license": "ISC"
|
||||||
},
|
},
|
||||||
"node_modules/picomatch": {
|
"node_modules/picomatch": {
|
||||||
"version": "4.0.3",
|
"version": "4.0.4",
|
||||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
|
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz",
|
||||||
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
|
"integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==",
|
||||||
"dev": true,
|
"dev": true,
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"engines": {
|
"engines": {
|
||||||
@@ -5956,9 +6086,9 @@
|
|||||||
"license": "ISC"
|
"license": "ISC"
|
||||||
},
|
},
|
||||||
"node_modules/rollup": {
|
"node_modules/rollup": {
|
||||||
"version": "4.48.1",
|
"version": "4.60.1",
|
||||||
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.48.1.tgz",
|
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.1.tgz",
|
||||||
"integrity": "sha512-jVG20NvbhTYDkGAty2/Yh7HK6/q3DGSRH4o8ALKGArmMuaauM9kLfoMZ+WliPwA5+JHr2lTn3g557FxBV87ifg==",
|
"integrity": "sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w==",
|
||||||
"dev": true,
|
"dev": true,
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
@@ -5972,26 +6102,31 @@
|
|||||||
"npm": ">=8.0.0"
|
"npm": ">=8.0.0"
|
||||||
},
|
},
|
||||||
"optionalDependencies": {
|
"optionalDependencies": {
|
||||||
"@rollup/rollup-android-arm-eabi": "4.48.1",
|
"@rollup/rollup-android-arm-eabi": "4.60.1",
|
||||||
"@rollup/rollup-android-arm64": "4.48.1",
|
"@rollup/rollup-android-arm64": "4.60.1",
|
||||||
"@rollup/rollup-darwin-arm64": "4.48.1",
|
"@rollup/rollup-darwin-arm64": "4.60.1",
|
||||||
"@rollup/rollup-darwin-x64": "4.48.1",
|
"@rollup/rollup-darwin-x64": "4.60.1",
|
||||||
"@rollup/rollup-freebsd-arm64": "4.48.1",
|
"@rollup/rollup-freebsd-arm64": "4.60.1",
|
||||||
"@rollup/rollup-freebsd-x64": "4.48.1",
|
"@rollup/rollup-freebsd-x64": "4.60.1",
|
||||||
"@rollup/rollup-linux-arm-gnueabihf": "4.48.1",
|
"@rollup/rollup-linux-arm-gnueabihf": "4.60.1",
|
||||||
"@rollup/rollup-linux-arm-musleabihf": "4.48.1",
|
"@rollup/rollup-linux-arm-musleabihf": "4.60.1",
|
||||||
"@rollup/rollup-linux-arm64-gnu": "4.48.1",
|
"@rollup/rollup-linux-arm64-gnu": "4.60.1",
|
||||||
"@rollup/rollup-linux-arm64-musl": "4.48.1",
|
"@rollup/rollup-linux-arm64-musl": "4.60.1",
|
||||||
"@rollup/rollup-linux-loongarch64-gnu": "4.48.1",
|
"@rollup/rollup-linux-loong64-gnu": "4.60.1",
|
||||||
"@rollup/rollup-linux-ppc64-gnu": "4.48.1",
|
"@rollup/rollup-linux-loong64-musl": "4.60.1",
|
||||||
"@rollup/rollup-linux-riscv64-gnu": "4.48.1",
|
"@rollup/rollup-linux-ppc64-gnu": "4.60.1",
|
||||||
"@rollup/rollup-linux-riscv64-musl": "4.48.1",
|
"@rollup/rollup-linux-ppc64-musl": "4.60.1",
|
||||||
"@rollup/rollup-linux-s390x-gnu": "4.48.1",
|
"@rollup/rollup-linux-riscv64-gnu": "4.60.1",
|
||||||
"@rollup/rollup-linux-x64-gnu": "4.48.1",
|
"@rollup/rollup-linux-riscv64-musl": "4.60.1",
|
||||||
"@rollup/rollup-linux-x64-musl": "4.48.1",
|
"@rollup/rollup-linux-s390x-gnu": "4.60.1",
|
||||||
"@rollup/rollup-win32-arm64-msvc": "4.48.1",
|
"@rollup/rollup-linux-x64-gnu": "4.60.1",
|
||||||
"@rollup/rollup-win32-ia32-msvc": "4.48.1",
|
"@rollup/rollup-linux-x64-musl": "4.60.1",
|
||||||
"@rollup/rollup-win32-x64-msvc": "4.48.1",
|
"@rollup/rollup-openbsd-x64": "4.60.1",
|
||||||
|
"@rollup/rollup-openharmony-arm64": "4.60.1",
|
||||||
|
"@rollup/rollup-win32-arm64-msvc": "4.60.1",
|
||||||
|
"@rollup/rollup-win32-ia32-msvc": "4.60.1",
|
||||||
|
"@rollup/rollup-win32-x64-gnu": "4.60.1",
|
||||||
|
"@rollup/rollup-win32-x64-msvc": "4.60.1",
|
||||||
"fsevents": "~2.3.2"
|
"fsevents": "~2.3.2"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -6290,9 +6425,9 @@
|
|||||||
}
|
}
|
||||||
},
|
},
|
||||||
"node_modules/tar": {
|
"node_modules/tar": {
|
||||||
"version": "7.5.7",
|
"version": "7.5.13",
|
||||||
"resolved": "https://registry.npmjs.org/tar/-/tar-7.5.7.tgz",
|
"resolved": "https://registry.npmjs.org/tar/-/tar-7.5.13.tgz",
|
||||||
"integrity": "sha512-fov56fJiRuThVFXD6o6/Q354S7pnWMJIVlDBYijsTNx6jKSE4pvrDTs6lUnmGvNyfJwFQQwWy3owKz1ucIhveQ==",
|
"integrity": "sha512-tOG/7GyXpFevhXVh8jOPJrmtRpOTsYqUIkVdVooZYJS/z8WhfQUX8RJILmeuJNinGAMSu1veBr4asSHFt5/hng==",
|
||||||
"dev": true,
|
"dev": true,
|
||||||
"license": "BlueOak-1.0.0",
|
"license": "BlueOak-1.0.0",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
@@ -6559,9 +6694,9 @@
|
|||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
"node_modules/valibot": {
|
"node_modules/valibot": {
|
||||||
"version": "0.42.1",
|
"version": "1.3.1",
|
||||||
"resolved": "https://registry.npmjs.org/valibot/-/valibot-0.42.1.tgz",
|
"resolved": "https://registry.npmjs.org/valibot/-/valibot-1.3.1.tgz",
|
||||||
"integrity": "sha512-3keXV29Ar5b//Hqi4MbSdV7lfVp6zuYLZuA9V1PvQUsXqogr+u5lvLPLk3A4f74VUXDnf/JfWMN6sB+koJ/FFw==",
|
"integrity": "sha512-sfdRir/QFM0JaF22hqTroPc5xy4DimuGQVKFrzF1YfGwaS1nJot3Y8VqMdLO2Lg27fMzat2yD3pY5PbAYO39Gg==",
|
||||||
"license": "MIT",
|
"license": "MIT",
|
||||||
"peerDependencies": {
|
"peerDependencies": {
|
||||||
"typescript": ">=5"
|
"typescript": ">=5"
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
{
|
{
|
||||||
"name": "beszel",
|
"name": "beszel",
|
||||||
"private": true,
|
"private": true,
|
||||||
"version": "0.18.4",
|
"version": "0.18.7",
|
||||||
"type": "module",
|
"type": "module",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"dev": "vite --host",
|
"dev": "vite --host",
|
||||||
@@ -52,7 +52,7 @@
|
|||||||
"recharts": "^2.15.4",
|
"recharts": "^2.15.4",
|
||||||
"shiki": "^3.13.0",
|
"shiki": "^3.13.0",
|
||||||
"tailwind-merge": "^3.3.1",
|
"tailwind-merge": "^3.3.1",
|
||||||
"valibot": "^0.42.1"
|
"valibot": "^1.3.1"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@biomejs/biome": "2.2.4",
|
"@biomejs/biome": "2.2.4",
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
import { msg, t } from "@lingui/core/macro"
|
import { t } from "@lingui/core/macro"
|
||||||
import { Trans } from "@lingui/react/macro"
|
import { Trans } from "@lingui/react/macro"
|
||||||
import { useStore } from "@nanostores/react"
|
import { useStore } from "@nanostores/react"
|
||||||
import { getPagePath } from "@nanostores/router"
|
import { getPagePath } from "@nanostores/router"
|
||||||
import { ChevronDownIcon, ExternalLinkIcon, PlusIcon } from "lucide-react"
|
import { ChevronDownIcon, ExternalLinkIcon } from "lucide-react"
|
||||||
import { memo, useEffect, useRef, useState } from "react"
|
import { memo, useEffect, useRef, useState } from "react"
|
||||||
import { Button } from "@/components/ui/button"
|
import { Button } from "@/components/ui/button"
|
||||||
import {
|
import {
|
||||||
@@ -12,7 +12,6 @@ import {
|
|||||||
DialogFooter,
|
DialogFooter,
|
||||||
DialogHeader,
|
DialogHeader,
|
||||||
DialogTitle,
|
DialogTitle,
|
||||||
DialogTrigger,
|
|
||||||
} from "@/components/ui/dialog"
|
} from "@/components/ui/dialog"
|
||||||
import { Input } from "@/components/ui/input"
|
import { Input } from "@/components/ui/input"
|
||||||
import { Label } from "@/components/ui/label"
|
import { Label } from "@/components/ui/label"
|
||||||
@@ -35,28 +34,19 @@ import { DropdownMenu, DropdownMenuTrigger } from "./ui/dropdown-menu"
|
|||||||
import { AppleIcon, DockerIcon, FreeBsdIcon, TuxIcon, WindowsIcon } from "./ui/icons"
|
import { AppleIcon, DockerIcon, FreeBsdIcon, TuxIcon, WindowsIcon } from "./ui/icons"
|
||||||
import { InputCopy } from "./ui/input-copy"
|
import { InputCopy } from "./ui/input-copy"
|
||||||
|
|
||||||
export function AddSystemButton({ className }: { className?: string }) {
|
// To avoid a refactor of the dialog, we will just keep this function as a "skeleton" for the actual dialog
|
||||||
if (isReadOnlyUser()) {
|
export function AddSystemDialog({ open, setOpen }: { open: boolean; setOpen: (open: boolean) => void }) {
|
||||||
return null
|
|
||||||
}
|
|
||||||
const [open, setOpen] = useState(false)
|
|
||||||
const opened = useRef(false)
|
const opened = useRef(false)
|
||||||
if (open) {
|
if (open) {
|
||||||
opened.current = true
|
opened.current = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (isReadOnlyUser()) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Dialog open={open} onOpenChange={setOpen}>
|
<Dialog open={open} onOpenChange={setOpen}>
|
||||||
<DialogTrigger asChild>
|
|
||||||
<Button variant="outline" className={cn("flex gap-1 max-xs:h-[2.4rem]", className)}>
|
|
||||||
<PlusIcon className="h-4 w-4 450:-ms-1" />
|
|
||||||
<span className="hidden 450:inline">
|
|
||||||
<Trans>
|
|
||||||
Add <span className="hidden sm:inline">System</span>
|
|
||||||
</Trans>
|
|
||||||
</span>
|
|
||||||
</Button>
|
|
||||||
</DialogTrigger>
|
|
||||||
{opened.current && <SystemDialog setOpen={setOpen} />}
|
{opened.current && <SystemDialog setOpen={setOpen} />}
|
||||||
</Dialog>
|
</Dialog>
|
||||||
)
|
)
|
||||||
@@ -276,7 +266,13 @@ export const SystemDialog = ({ setOpen, system }: { setOpen: (open: boolean) =>
|
|||||||
/>
|
/>
|
||||||
</TabsContent>
|
</TabsContent>
|
||||||
{/* Save */}
|
{/* Save */}
|
||||||
<Button>{system ? <Trans>Save system</Trans> : <Trans>Add system</Trans>}</Button>
|
<Button>
|
||||||
|
{system ? (
|
||||||
|
<Trans>Save {{ foo: systemTranslation }}</Trans>
|
||||||
|
) : (
|
||||||
|
<Trans>Add {{ foo: systemTranslation }}</Trans>
|
||||||
|
)}
|
||||||
|
</Button>
|
||||||
</DialogFooter>
|
</DialogFooter>
|
||||||
</form>
|
</form>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ export default memo(function AlertsButton({ system }: { system: SystemRecord })
|
|||||||
<SheetTrigger asChild>
|
<SheetTrigger asChild>
|
||||||
<Button variant="ghost" size="icon" aria-label={t`Alerts`} data-nolink onClick={() => setOpened(true)}>
|
<Button variant="ghost" size="icon" aria-label={t`Alerts`} data-nolink onClick={() => setOpened(true)}>
|
||||||
<BellIcon
|
<BellIcon
|
||||||
className={cn("h-[1.2em] w-[1.2em] pointer-events-none", {
|
className={cn("size-[1.2em] pointer-events-none", {
|
||||||
"fill-primary": hasSystemAlert,
|
"fill-primary": hasSystemAlert,
|
||||||
})}
|
})}
|
||||||
/>
|
/>
|
||||||
|
|||||||
@@ -2,11 +2,13 @@ import { t } from "@lingui/core/macro"
|
|||||||
import { Plural, Trans } from "@lingui/react/macro"
|
import { Plural, Trans } from "@lingui/react/macro"
|
||||||
import { useStore } from "@nanostores/react"
|
import { useStore } from "@nanostores/react"
|
||||||
import { getPagePath } from "@nanostores/router"
|
import { getPagePath } from "@nanostores/router"
|
||||||
import { GlobeIcon, ServerIcon } from "lucide-react"
|
import { ChevronDownIcon, GlobeIcon, ServerIcon } from "lucide-react"
|
||||||
import { lazy, memo, Suspense, useMemo, useState } from "react"
|
import { lazy, memo, Suspense, useMemo, useState } from "react"
|
||||||
import { $router, Link } from "@/components/router"
|
import { $router, Link } from "@/components/router"
|
||||||
|
import { Button } from "@/components/ui/button"
|
||||||
import { Checkbox } from "@/components/ui/checkbox"
|
import { Checkbox } from "@/components/ui/checkbox"
|
||||||
import { DialogDescription, DialogHeader, DialogTitle } from "@/components/ui/dialog"
|
import { DialogDescription, DialogHeader, DialogTitle } from "@/components/ui/dialog"
|
||||||
|
import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu"
|
||||||
import { Input } from "@/components/ui/input"
|
import { Input } from "@/components/ui/input"
|
||||||
import { Switch } from "@/components/ui/switch"
|
import { Switch } from "@/components/ui/switch"
|
||||||
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"
|
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"
|
||||||
@@ -64,11 +66,57 @@ const deleteAlerts = debounce(async ({ name, systems }: { name: string; systems:
|
|||||||
|
|
||||||
export const AlertDialogContent = memo(function AlertDialogContent({ system }: { system: SystemRecord }) {
|
export const AlertDialogContent = memo(function AlertDialogContent({ system }: { system: SystemRecord }) {
|
||||||
const alerts = useStore($alerts)
|
const alerts = useStore($alerts)
|
||||||
|
const systems = useStore($systems)
|
||||||
const [overwriteExisting, setOverwriteExisting] = useState<boolean | "indeterminate">(false)
|
const [overwriteExisting, setOverwriteExisting] = useState<boolean | "indeterminate">(false)
|
||||||
const [currentTab, setCurrentTab] = useState("system")
|
const [currentTab, setCurrentTab] = useState("system")
|
||||||
|
// copyKey is used to force remount AlertContent components with
|
||||||
|
// new alert data after copying alerts from another system
|
||||||
|
const [copyKey, setCopyKey] = useState(0)
|
||||||
|
|
||||||
const systemAlerts = alerts[system.id] ?? new Map()
|
const systemAlerts = alerts[system.id] ?? new Map()
|
||||||
|
|
||||||
|
// Systems that have at least one alert configured (excluding the current system)
|
||||||
|
const systemsWithAlerts = useMemo(
|
||||||
|
() => systems.filter((s) => s.id !== system.id && alerts[s.id]?.size),
|
||||||
|
[systems, alerts, system.id]
|
||||||
|
)
|
||||||
|
|
||||||
|
async function copyAlertsFromSystem(sourceSystemId: string) {
|
||||||
|
const sourceAlerts = $alerts.get()[sourceSystemId]
|
||||||
|
if (!sourceAlerts?.size) return
|
||||||
|
try {
|
||||||
|
const currentTargetAlerts = $alerts.get()[system.id] ?? new Map()
|
||||||
|
// Alert names present on target but absent from source should be deleted
|
||||||
|
const namesToDelete = Array.from(currentTargetAlerts.keys()).filter((name) => !sourceAlerts.has(name))
|
||||||
|
await Promise.all([
|
||||||
|
...Array.from(sourceAlerts.values()).map(({ name, value, min }) =>
|
||||||
|
pb.send<{ success: boolean }>(endpoint, {
|
||||||
|
method: "POST",
|
||||||
|
body: { name, value, min, systems: [system.id], overwrite: true },
|
||||||
|
requestKey: name,
|
||||||
|
})
|
||||||
|
),
|
||||||
|
...namesToDelete.map((name) =>
|
||||||
|
pb.send<{ success: boolean }>(endpoint, {
|
||||||
|
method: "DELETE",
|
||||||
|
body: { name, systems: [system.id] },
|
||||||
|
requestKey: name,
|
||||||
|
})
|
||||||
|
),
|
||||||
|
])
|
||||||
|
// Optimistically update the store so components re-mount with correct data
|
||||||
|
// before the realtime subscription event arrives.
|
||||||
|
const newSystemAlerts = new Map<string, AlertRecord>()
|
||||||
|
for (const alert of sourceAlerts.values()) {
|
||||||
|
newSystemAlerts.set(alert.name, { ...alert, system: system.id, triggered: false })
|
||||||
|
}
|
||||||
|
$alerts.setKey(system.id, newSystemAlerts)
|
||||||
|
setCopyKey((k) => k + 1)
|
||||||
|
} catch (error) {
|
||||||
|
failedUpdateToast(error)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// We need to keep a copy of alerts when we switch to global tab. If we always compare to
|
// We need to keep a copy of alerts when we switch to global tab. If we always compare to
|
||||||
// current alerts, it will only be updated when first checked, then won't be updated because
|
// current alerts, it will only be updated when first checked, then won't be updated because
|
||||||
// after that it exists.
|
// after that it exists.
|
||||||
@@ -93,7 +141,8 @@ export const AlertDialogContent = memo(function AlertDialogContent({ system }: {
|
|||||||
</DialogDescription>
|
</DialogDescription>
|
||||||
</DialogHeader>
|
</DialogHeader>
|
||||||
<Tabs defaultValue="system" onValueChange={setCurrentTab}>
|
<Tabs defaultValue="system" onValueChange={setCurrentTab}>
|
||||||
<TabsList className="mb-1 -mt-0.5">
|
<div className="flex items-center justify-between mb-1 -mt-0.5">
|
||||||
|
<TabsList>
|
||||||
<TabsTrigger value="system">
|
<TabsTrigger value="system">
|
||||||
<ServerIcon className="me-2 h-3.5 w-3.5" />
|
<ServerIcon className="me-2 h-3.5 w-3.5" />
|
||||||
<span className="truncate max-w-60">{system.name}</span>
|
<span className="truncate max-w-60">{system.name}</span>
|
||||||
@@ -103,8 +152,26 @@ export const AlertDialogContent = memo(function AlertDialogContent({ system }: {
|
|||||||
<Trans>All Systems</Trans>
|
<Trans>All Systems</Trans>
|
||||||
</TabsTrigger>
|
</TabsTrigger>
|
||||||
</TabsList>
|
</TabsList>
|
||||||
|
{systemsWithAlerts.length > 0 && currentTab === "system" && (
|
||||||
|
<DropdownMenu>
|
||||||
|
<DropdownMenuTrigger asChild>
|
||||||
|
<Button variant="ghost" size="sm" className="text-muted-foreground text-xs gap-1.5">
|
||||||
|
<Trans context="Copy alerts from another system">Copy from</Trans>
|
||||||
|
<ChevronDownIcon className="h-3.5 w-3.5" />
|
||||||
|
</Button>
|
||||||
|
</DropdownMenuTrigger>
|
||||||
|
<DropdownMenuContent align="end" className="max-h-100 overflow-auto">
|
||||||
|
{systemsWithAlerts.map((s) => (
|
||||||
|
<DropdownMenuItem key={s.id} className="min-w-44" onSelect={() => copyAlertsFromSystem(s.id)}>
|
||||||
|
{s.name}
|
||||||
|
</DropdownMenuItem>
|
||||||
|
))}
|
||||||
|
</DropdownMenuContent>
|
||||||
|
</DropdownMenu>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
<TabsContent value="system">
|
<TabsContent value="system">
|
||||||
<div className="grid gap-3">
|
<div key={copyKey} className="grid gap-3">
|
||||||
{alertKeys.map((name) => (
|
{alertKeys.map((name) => (
|
||||||
<AlertContent
|
<AlertContent
|
||||||
key={name}
|
key={name}
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import { useMemo } from "react"
|
import { type ReactNode, useEffect, useMemo, useState } from "react"
|
||||||
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
||||||
import {
|
import {
|
||||||
ChartContainer,
|
ChartContainer,
|
||||||
@@ -11,18 +11,23 @@ import {
|
|||||||
import { chartMargin, cn, formatShortDate } from "@/lib/utils"
|
import { chartMargin, cn, formatShortDate } from "@/lib/utils"
|
||||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
import { useYAxisWidth } from "./hooks"
|
import { useYAxisWidth } from "./hooks"
|
||||||
import { AxisDomain } from "recharts/types/util/types"
|
import type { AxisDomain } from "recharts/types/util/types"
|
||||||
|
import { useIntersectionObserver } from "@/lib/use-intersection-observer"
|
||||||
|
|
||||||
export type DataPoint = {
|
export type DataPoint<T = SystemStatsRecord> = {
|
||||||
label: string
|
label: string
|
||||||
dataKey: (data: SystemStatsRecord) => number | undefined
|
dataKey: (data: T) => number | null | undefined
|
||||||
color: number | string
|
color: number | string
|
||||||
opacity: number
|
opacity: number
|
||||||
stackId?: string | number
|
stackId?: string | number
|
||||||
|
order?: number
|
||||||
|
strokeOpacity?: number
|
||||||
|
activeDot?: boolean
|
||||||
}
|
}
|
||||||
|
|
||||||
export default function AreaChartDefault({
|
export default function AreaChartDefault({
|
||||||
chartData,
|
chartData,
|
||||||
|
customData,
|
||||||
max,
|
max,
|
||||||
maxToggled,
|
maxToggled,
|
||||||
tickFormatter,
|
tickFormatter,
|
||||||
@@ -34,35 +39,88 @@ export default function AreaChartDefault({
|
|||||||
showTotal = false,
|
showTotal = false,
|
||||||
reverseStackOrder = false,
|
reverseStackOrder = false,
|
||||||
hideYAxis = false,
|
hideYAxis = false,
|
||||||
}: // logRender = false,
|
filter,
|
||||||
{
|
truncate = false,
|
||||||
|
chartProps,
|
||||||
|
}: {
|
||||||
chartData: ChartData
|
chartData: ChartData
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: accepts different data source types (systemStats or containerData)
|
||||||
|
customData?: any[]
|
||||||
max?: number
|
max?: number
|
||||||
maxToggled?: boolean
|
maxToggled?: boolean
|
||||||
tickFormatter: (value: number, index: number) => string
|
tickFormatter: (value: number, index: number) => string
|
||||||
contentFormatter: ({ value, payload }: { value: number; payload: SystemStatsRecord }) => string
|
// biome-ignore lint/suspicious/noExplicitAny: recharts tooltip item interop
|
||||||
dataPoints?: DataPoint[]
|
contentFormatter: (item: any, key: string) => ReactNode
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: accepts DataPoint with different generic types
|
||||||
|
dataPoints?: DataPoint<any>[]
|
||||||
domain?: AxisDomain
|
domain?: AxisDomain
|
||||||
legend?: boolean
|
legend?: boolean
|
||||||
showTotal?: boolean
|
showTotal?: boolean
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: recharts tooltip item interop
|
||||||
itemSorter?: (a: any, b: any) => number
|
itemSorter?: (a: any, b: any) => number
|
||||||
reverseStackOrder?: boolean
|
reverseStackOrder?: boolean
|
||||||
hideYAxis?: boolean
|
hideYAxis?: boolean
|
||||||
// logRender?: boolean
|
filter?: string
|
||||||
}) {
|
truncate?: boolean
|
||||||
|
chartProps?: Omit<React.ComponentProps<typeof AreaChart>, "data" | "margin">
|
||||||
|
}) {
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||||
|
const { isIntersecting, ref } = useIntersectionObserver({ freeze: false })
|
||||||
|
const sourceData = customData ?? chartData.systemStats
|
||||||
|
const [displayData, setDisplayData] = useState(sourceData)
|
||||||
|
const [displayMaxToggled, setDisplayMaxToggled] = useState(maxToggled)
|
||||||
|
|
||||||
|
// Reduce chart redraws by only updating while visible or when chart time changes
|
||||||
|
useEffect(() => {
|
||||||
|
const shouldPrimeData = sourceData.length && !displayData.length
|
||||||
|
const sourceChanged = sourceData !== displayData
|
||||||
|
const shouldUpdate = shouldPrimeData || (sourceChanged && isIntersecting)
|
||||||
|
if (shouldUpdate) {
|
||||||
|
setDisplayData(sourceData)
|
||||||
|
}
|
||||||
|
if (isIntersecting && maxToggled !== displayMaxToggled) {
|
||||||
|
setDisplayMaxToggled(maxToggled)
|
||||||
|
}
|
||||||
|
}, [displayData, displayMaxToggled, isIntersecting, maxToggled, sourceData])
|
||||||
|
|
||||||
|
// Use a stable key derived from data point identities and visual properties
|
||||||
|
const areasKey = dataPoints?.map((d) => `${d.label}:${d.opacity}`).join("\0")
|
||||||
|
|
||||||
|
const Areas = useMemo(() => {
|
||||||
|
return dataPoints?.map((dataPoint, i) => {
|
||||||
|
let { color } = dataPoint
|
||||||
|
if (typeof color === "number") {
|
||||||
|
color = `var(--chart-${color})`
|
||||||
|
}
|
||||||
|
return (
|
||||||
|
<Area
|
||||||
|
key={dataPoint.label}
|
||||||
|
dataKey={dataPoint.dataKey}
|
||||||
|
name={dataPoint.label}
|
||||||
|
type="monotoneX"
|
||||||
|
fill={color}
|
||||||
|
fillOpacity={dataPoint.opacity}
|
||||||
|
stroke={color}
|
||||||
|
strokeOpacity={dataPoint.strokeOpacity}
|
||||||
|
isAnimationActive={false}
|
||||||
|
stackId={dataPoint.stackId}
|
||||||
|
order={dataPoint.order || i}
|
||||||
|
activeDot={dataPoint.activeDot ?? true}
|
||||||
|
/>
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}, [areasKey, displayMaxToggled])
|
||||||
|
|
||||||
// biome-ignore lint/correctness/useExhaustiveDependencies: ignore
|
|
||||||
return useMemo(() => {
|
return useMemo(() => {
|
||||||
if (chartData.systemStats.length === 0) {
|
if (displayData.length === 0) {
|
||||||
return null
|
return null
|
||||||
}
|
}
|
||||||
// if (logRender) {
|
// if (logRender) {
|
||||||
// console.log("Rendered at", new Date())
|
// console.log("Rendered", dataPoints?.map((d) => d.label).join(", "), new Date())
|
||||||
// }
|
// }
|
||||||
return (
|
return (
|
||||||
<div>
|
|
||||||
<ChartContainer
|
<ChartContainer
|
||||||
|
ref={ref}
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
||||||
"opacity-100": yAxisWidth || hideYAxis,
|
"opacity-100": yAxisWidth || hideYAxis,
|
||||||
"ps-4": hideYAxis,
|
"ps-4": hideYAxis,
|
||||||
@@ -71,8 +129,9 @@ export default function AreaChartDefault({
|
|||||||
<AreaChart
|
<AreaChart
|
||||||
reverseStackOrder={reverseStackOrder}
|
reverseStackOrder={reverseStackOrder}
|
||||||
accessibilityLayer
|
accessibilityLayer
|
||||||
data={chartData.systemStats}
|
data={displayData}
|
||||||
margin={hideYAxis ? { ...chartMargin, left: 5 } : chartMargin}
|
margin={hideYAxis ? { ...chartMargin, left: 5 } : chartMargin}
|
||||||
|
{...chartProps}
|
||||||
>
|
>
|
||||||
<CartesianGrid vertical={false} />
|
<CartesianGrid vertical={false} />
|
||||||
{!hideYAxis && (
|
{!hideYAxis && (
|
||||||
@@ -98,32 +157,15 @@ export default function AreaChartDefault({
|
|||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
||||||
contentFormatter={contentFormatter}
|
contentFormatter={contentFormatter}
|
||||||
showTotal={showTotal}
|
showTotal={showTotal}
|
||||||
|
filter={filter}
|
||||||
|
truncate={truncate}
|
||||||
/>
|
/>
|
||||||
}
|
}
|
||||||
/>
|
/>
|
||||||
{dataPoints?.map((dataPoint) => {
|
{Areas}
|
||||||
let { color } = dataPoint
|
{legend && <ChartLegend content={<ChartLegendContent />} />}
|
||||||
if (typeof color === "number") {
|
|
||||||
color = `var(--chart-${color})`
|
|
||||||
}
|
|
||||||
return (
|
|
||||||
<Area
|
|
||||||
key={dataPoint.label}
|
|
||||||
dataKey={dataPoint.dataKey}
|
|
||||||
name={dataPoint.label}
|
|
||||||
type="monotoneX"
|
|
||||||
fill={color}
|
|
||||||
fillOpacity={dataPoint.opacity}
|
|
||||||
stroke={color}
|
|
||||||
isAnimationActive={false}
|
|
||||||
stackId={dataPoint.stackId}
|
|
||||||
/>
|
|
||||||
)
|
|
||||||
})}
|
|
||||||
{legend && <ChartLegend content={<ChartLegendContent reverse={reverseStackOrder} />} />}
|
|
||||||
</AreaChart>
|
</AreaChart>
|
||||||
</ChartContainer>
|
</ChartContainer>
|
||||||
</div>
|
|
||||||
)
|
)
|
||||||
}, [chartData.systemStats.at(-1), yAxisWidth, maxToggled, showTotal])
|
}, [displayData, yAxisWidth, filter, Areas])
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,215 +0,0 @@
|
|||||||
// import Spinner from '../spinner'
|
|
||||||
import { useStore } from "@nanostores/react"
|
|
||||||
import { memo, useMemo } from "react"
|
|
||||||
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
|
||||||
import {
|
|
||||||
type ChartConfig,
|
|
||||||
ChartContainer,
|
|
||||||
ChartTooltip,
|
|
||||||
ChartTooltipContent,
|
|
||||||
pinnedAxisDomain,
|
|
||||||
xAxis,
|
|
||||||
} from "@/components/ui/chart"
|
|
||||||
import { ChartType, Unit } from "@/lib/enums"
|
|
||||||
import { $containerFilter, $userSettings } from "@/lib/stores"
|
|
||||||
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData } from "@/types"
|
|
||||||
import { Separator } from "../ui/separator"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function ContainerChart({
|
|
||||||
dataKey,
|
|
||||||
chartData,
|
|
||||||
chartType,
|
|
||||||
chartConfig,
|
|
||||||
unit = "%",
|
|
||||||
}: {
|
|
||||||
dataKey: string
|
|
||||||
chartData: ChartData
|
|
||||||
chartType: ChartType
|
|
||||||
chartConfig: ChartConfig
|
|
||||||
unit?: string
|
|
||||||
}) {
|
|
||||||
const filter = useStore($containerFilter)
|
|
||||||
const userSettings = useStore($userSettings)
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
|
|
||||||
const { containerData } = chartData
|
|
||||||
|
|
||||||
const isNetChart = chartType === ChartType.Network
|
|
||||||
|
|
||||||
// Filter with set lookup
|
|
||||||
const filteredKeys = useMemo(() => {
|
|
||||||
if (!filter) {
|
|
||||||
return new Set<string>()
|
|
||||||
}
|
|
||||||
const filterTerms = filter
|
|
||||||
.toLowerCase()
|
|
||||||
.split(" ")
|
|
||||||
.filter((term) => term.length > 0)
|
|
||||||
return new Set(
|
|
||||||
Object.keys(chartConfig).filter((key) => {
|
|
||||||
const keyLower = key.toLowerCase()
|
|
||||||
return !filterTerms.some((term) => keyLower.includes(term))
|
|
||||||
})
|
|
||||||
)
|
|
||||||
}, [chartConfig, filter])
|
|
||||||
|
|
||||||
// biome-ignore lint/correctness/useExhaustiveDependencies: not necessary
|
|
||||||
const { toolTipFormatter, dataFunction, tickFormatter } = useMemo(() => {
|
|
||||||
const obj = {} as {
|
|
||||||
toolTipFormatter: (item: any, key: string) => React.ReactNode | string
|
|
||||||
dataFunction: (key: string, data: any) => number | null
|
|
||||||
tickFormatter: (value: any) => string
|
|
||||||
}
|
|
||||||
// tick formatter
|
|
||||||
if (chartType === ChartType.CPU) {
|
|
||||||
obj.tickFormatter = (value) => {
|
|
||||||
const val = `${toFixedFloat(value, 2)}%`
|
|
||||||
return updateYAxisWidth(val)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
const chartUnit = isNetChart ? userSettings.unitNet : Unit.Bytes
|
|
||||||
obj.tickFormatter = (val) => {
|
|
||||||
const { value, unit } = formatBytes(val, isNetChart, chartUnit, !isNetChart)
|
|
||||||
return updateYAxisWidth(`${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// tooltip formatter
|
|
||||||
if (isNetChart) {
|
|
||||||
const getRxTxBytes = (record?: { b?: [number, number]; ns?: number; nr?: number }) => {
|
|
||||||
if (record?.b?.length && record.b.length >= 2) {
|
|
||||||
return [Number(record.b[0]) || 0, Number(record.b[1]) || 0]
|
|
||||||
}
|
|
||||||
return [(record?.ns ?? 0) * 1024 * 1024, (record?.nr ?? 0) * 1024 * 1024]
|
|
||||||
}
|
|
||||||
const formatRxTx = (recv: number, sent: number) => {
|
|
||||||
const { value: receivedValue, unit: receivedUnit } = formatBytes(recv, true, userSettings.unitNet, false)
|
|
||||||
const { value: sentValue, unit: sentUnit } = formatBytes(sent, true, userSettings.unitNet, false)
|
|
||||||
return (
|
|
||||||
<span className="flex">
|
|
||||||
{decimalString(receivedValue)} {receivedUnit}
|
|
||||||
<span className="opacity-70 ms-0.5"> rx </span>
|
|
||||||
<Separator orientation="vertical" className="h-3 mx-1.5 bg-primary/40" />
|
|
||||||
{decimalString(sentValue)} {sentUnit}
|
|
||||||
<span className="opacity-70 ms-0.5"> tx</span>
|
|
||||||
</span>
|
|
||||||
)
|
|
||||||
}
|
|
||||||
obj.toolTipFormatter = (item: any, key: string) => {
|
|
||||||
try {
|
|
||||||
if (key === "__total__") {
|
|
||||||
let totalSent = 0
|
|
||||||
let totalRecv = 0
|
|
||||||
const payloadData = item?.payload && typeof item.payload === "object" ? item.payload : {}
|
|
||||||
for (const [containerKey, value] of Object.entries(payloadData)) {
|
|
||||||
if (!value || typeof value !== "object") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Skip filtered out containers
|
|
||||||
if (filteredKeys.has(containerKey)) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
const [sent, recv] = getRxTxBytes(value as { b?: [number, number]; ns?: number; nr?: number })
|
|
||||||
totalSent += sent
|
|
||||||
totalRecv += recv
|
|
||||||
}
|
|
||||||
return formatRxTx(totalRecv, totalSent)
|
|
||||||
}
|
|
||||||
const [sent, recv] = getRxTxBytes(item?.payload?.[key])
|
|
||||||
return formatRxTx(recv, sent)
|
|
||||||
} catch (e) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (chartType === ChartType.Memory) {
|
|
||||||
obj.toolTipFormatter = (item: any) => {
|
|
||||||
const { value, unit } = formatBytes(item.value, false, Unit.Bytes, true)
|
|
||||||
return `${decimalString(value)} ${unit}`
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
obj.toolTipFormatter = (item: any) => `${decimalString(item.value)}${unit}`
|
|
||||||
}
|
|
||||||
// data function
|
|
||||||
if (isNetChart) {
|
|
||||||
obj.dataFunction = (key: string, data: any) => {
|
|
||||||
const payload = data[key]
|
|
||||||
if (!payload) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
const sent = payload?.b?.[0] ?? (payload?.ns ?? 0) * 1024 * 1024
|
|
||||||
const recv = payload?.b?.[1] ?? (payload?.nr ?? 0) * 1024 * 1024
|
|
||||||
return sent + recv
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
obj.dataFunction = (key: string, data: any) => data[key]?.[dataKey] ?? null
|
|
||||||
}
|
|
||||||
return obj
|
|
||||||
}, [filteredKeys])
|
|
||||||
|
|
||||||
// console.log('rendered at', new Date())
|
|
||||||
|
|
||||||
if (containerData.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<AreaChart
|
|
||||||
accessibilityLayer
|
|
||||||
// syncId={'cpu'}
|
|
||||||
data={containerData}
|
|
||||||
margin={chartMargin}
|
|
||||||
reverseStackOrder={true}
|
|
||||||
>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
domain={pinnedAxisDomain()}
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickFormatter={tickFormatter}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
truncate={true}
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
// @ts-expect-error
|
|
||||||
itemSorter={(a, b) => b.value - a.value}
|
|
||||||
content={<ChartTooltipContent filter={filter} contentFormatter={toolTipFormatter} showTotal={true} />}
|
|
||||||
/>
|
|
||||||
{Object.keys(chartConfig).map((key) => {
|
|
||||||
const filtered = filteredKeys.has(key)
|
|
||||||
const fillOpacity = filtered ? 0.05 : 0.4
|
|
||||||
const strokeOpacity = filtered ? 0.1 : 1
|
|
||||||
return (
|
|
||||||
<Area
|
|
||||||
key={key}
|
|
||||||
isAnimationActive={false}
|
|
||||||
dataKey={dataFunction.bind(null, key)}
|
|
||||||
name={key}
|
|
||||||
type="monotoneX"
|
|
||||||
fill={chartConfig[key].color}
|
|
||||||
fillOpacity={fillOpacity}
|
|
||||||
stroke={chartConfig[key].color}
|
|
||||||
strokeOpacity={strokeOpacity}
|
|
||||||
activeDot={{ opacity: filtered ? 0 : 1 }}
|
|
||||||
stackId="a"
|
|
||||||
/>
|
|
||||||
)
|
|
||||||
})}
|
|
||||||
</AreaChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,83 +0,0 @@
|
|||||||
import { useLingui } from "@lingui/react/macro"
|
|
||||||
import { memo } from "react"
|
|
||||||
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
|
||||||
import { ChartContainer, ChartTooltip, ChartTooltipContent, xAxis } from "@/components/ui/chart"
|
|
||||||
import { Unit } from "@/lib/enums"
|
|
||||||
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function DiskChart({
|
|
||||||
dataKey,
|
|
||||||
diskSize,
|
|
||||||
chartData,
|
|
||||||
}: {
|
|
||||||
dataKey: string | ((data: SystemStatsRecord) => number | undefined)
|
|
||||||
diskSize: number
|
|
||||||
chartData: ChartData
|
|
||||||
}) {
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
const { t } = useLingui()
|
|
||||||
|
|
||||||
// round to nearest GB
|
|
||||||
if (diskSize >= 100) {
|
|
||||||
diskSize = Math.round(diskSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
if (chartData.systemStats.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<AreaChart accessibilityLayer data={chartData.systemStats} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
width={yAxisWidth}
|
|
||||||
domain={[0, diskSize]}
|
|
||||||
tickCount={9}
|
|
||||||
minTickGap={6}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
tickFormatter={(val) => {
|
|
||||||
const { value, unit } = formatBytes(val * 1024, false, Unit.Bytes, true)
|
|
||||||
return updateYAxisWidth(toFixedFloat(value, value >= 10 ? 0 : 1) + " " + unit)
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={({ value }) => {
|
|
||||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
|
||||||
return decimalString(convertedValue) + " " + unit
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
<Area
|
|
||||||
dataKey={dataKey}
|
|
||||||
name={t`Disk Usage`}
|
|
||||||
type="monotoneX"
|
|
||||||
fill="var(--chart-4)"
|
|
||||||
fillOpacity={0.4}
|
|
||||||
stroke="var(--chart-4)"
|
|
||||||
// animationDuration={1200}
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
</AreaChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,117 +0,0 @@
|
|||||||
import { memo, useMemo } from "react"
|
|
||||||
import { CartesianGrid, Line, LineChart, YAxis } from "recharts"
|
|
||||||
import {
|
|
||||||
ChartContainer,
|
|
||||||
ChartLegend,
|
|
||||||
ChartLegendContent,
|
|
||||||
ChartTooltip,
|
|
||||||
ChartTooltipContent,
|
|
||||||
xAxis,
|
|
||||||
} from "@/components/ui/chart"
|
|
||||||
import { chartMargin, cn, decimalString, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData, GPUData } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
import type { DataPoint } from "./line-chart"
|
|
||||||
|
|
||||||
export default memo(function GpuPowerChart({ chartData }: { chartData: ChartData }) {
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
const packageKey = " package"
|
|
||||||
|
|
||||||
const { gpuData, dataPoints } = useMemo(() => {
|
|
||||||
const dataPoints = [] as DataPoint[]
|
|
||||||
const gpuData = [] as Record<string, GPUData | string>[]
|
|
||||||
const addedKeys = new Map<string, number>()
|
|
||||||
|
|
||||||
const addKey = (key: string, value: number) => {
|
|
||||||
addedKeys.set(key, (addedKeys.get(key) ?? 0) + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const stats of chartData.systemStats) {
|
|
||||||
const gpus = stats.stats?.g ?? {}
|
|
||||||
const data = { created: stats.created } as Record<string, GPUData | string>
|
|
||||||
for (const id in gpus) {
|
|
||||||
const gpu = gpus[id] as GPUData
|
|
||||||
data[gpu.n] = gpu
|
|
||||||
addKey(gpu.n, gpu.p ?? 0)
|
|
||||||
if (gpu.pp) {
|
|
||||||
data[`${gpu.n}${packageKey}`] = gpu
|
|
||||||
addKey(`${gpu.n}${packageKey}`, gpu.pp ?? 0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
gpuData.push(data)
|
|
||||||
}
|
|
||||||
const sortedKeys = Array.from(addedKeys.entries())
|
|
||||||
.sort(([, a], [, b]) => b - a)
|
|
||||||
.map(([key]) => key)
|
|
||||||
|
|
||||||
for (let i = 0; i < sortedKeys.length; i++) {
|
|
||||||
const id = sortedKeys[i]
|
|
||||||
dataPoints.push({
|
|
||||||
label: id,
|
|
||||||
dataKey: (gpuData: Record<string, GPUData>) => {
|
|
||||||
return id.endsWith(packageKey) ? (gpuData[id]?.pp ?? 0) : (gpuData[id]?.p ?? 0)
|
|
||||||
},
|
|
||||||
color: `hsl(${226 + (((i * 360) / addedKeys.size) % 360)}, 65%, 52%)`,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return { gpuData, dataPoints }
|
|
||||||
}, [chartData])
|
|
||||||
|
|
||||||
if (chartData.systemStats.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<LineChart accessibilityLayer data={gpuData} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
domain={[0, "auto"]}
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickFormatter={(value) => {
|
|
||||||
const val = toFixedFloat(value, 2)
|
|
||||||
return updateYAxisWidth(`${val}W`)
|
|
||||||
}}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
// @ts-expect-error
|
|
||||||
itemSorter={(a, b) => b.value - a.value}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={(item) => `${decimalString(item.value)}W`}
|
|
||||||
// indicator="line"
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
{dataPoints.map((dataPoint) => (
|
|
||||||
<Line
|
|
||||||
key={dataPoint.label}
|
|
||||||
dataKey={dataPoint.dataKey}
|
|
||||||
name={dataPoint.label}
|
|
||||||
type="monotoneX"
|
|
||||||
dot={false}
|
|
||||||
strokeWidth={1.5}
|
|
||||||
stroke={dataPoint.color as string}
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
))}
|
|
||||||
{dataPoints.length > 1 && <ChartLegend content={<ChartLegendContent />} />}
|
|
||||||
</LineChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,6 +1,9 @@
|
|||||||
import { useMemo, useState } from "react"
|
import { useMemo, useState } from "react"
|
||||||
|
import { useStore } from "@nanostores/react"
|
||||||
import type { ChartConfig } from "@/components/ui/chart"
|
import type { ChartConfig } from "@/components/ui/chart"
|
||||||
import type { ChartData, SystemStats, SystemStatsRecord } from "@/types"
|
import type { ChartData, SystemStats, SystemStatsRecord } from "@/types"
|
||||||
|
import type { DataPoint } from "./area-chart"
|
||||||
|
import { $containerFilter } from "@/lib/stores"
|
||||||
|
|
||||||
/** Chart configurations for CPU, memory, and network usage charts */
|
/** Chart configurations for CPU, memory, and network usage charts */
|
||||||
export interface ContainerChartConfigs {
|
export interface ContainerChartConfigs {
|
||||||
@@ -96,9 +99,9 @@ export function useYAxisWidth() {
|
|||||||
clearTimeout(timeout)
|
clearTimeout(timeout)
|
||||||
timeout = setTimeout(() => {
|
timeout = setTimeout(() => {
|
||||||
document.body.appendChild(div)
|
document.body.appendChild(div)
|
||||||
const width = div.offsetWidth + 24
|
const width = div.offsetWidth + 20
|
||||||
if (width > yAxisWidth) {
|
if (width > yAxisWidth) {
|
||||||
setYAxisWidth(div.offsetWidth + 24)
|
setYAxisWidth(width)
|
||||||
}
|
}
|
||||||
document.body.removeChild(div)
|
document.body.removeChild(div)
|
||||||
})
|
})
|
||||||
@@ -108,6 +111,44 @@ export function useYAxisWidth() {
|
|||||||
return { yAxisWidth, updateYAxisWidth }
|
return { yAxisWidth, updateYAxisWidth }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Subscribes to the container filter store and returns filtered DataPoints for container charts */
|
||||||
|
export function useContainerDataPoints(
|
||||||
|
chartConfig: ChartConfig,
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: container data records have dynamic keys
|
||||||
|
dataFn: (key: string, data: Record<string, any>) => number | null
|
||||||
|
) {
|
||||||
|
const filter = useStore($containerFilter)
|
||||||
|
const { dataPoints, filteredKeys } = useMemo(() => {
|
||||||
|
const filterTerms = filter
|
||||||
|
? filter
|
||||||
|
.toLowerCase()
|
||||||
|
.split(" ")
|
||||||
|
.filter((term) => term.length > 0)
|
||||||
|
: []
|
||||||
|
const filtered = new Set<string>()
|
||||||
|
const points = Object.keys(chartConfig).map((key) => {
|
||||||
|
const isFiltered = filterTerms.length > 0 && !filterTerms.some((term) => key.toLowerCase().includes(term))
|
||||||
|
if (isFiltered) filtered.add(key)
|
||||||
|
return {
|
||||||
|
label: key,
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: container data records have dynamic keys
|
||||||
|
dataKey: (data: Record<string, any>) => dataFn(key, data),
|
||||||
|
color: chartConfig[key].color ?? "",
|
||||||
|
opacity: isFiltered ? 0.05 : 0.4,
|
||||||
|
strokeOpacity: isFiltered ? 0.1 : 1,
|
||||||
|
activeDot: !isFiltered,
|
||||||
|
stackId: "a",
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return {
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: container data records have dynamic keys
|
||||||
|
dataPoints: points as DataPoint<Record<string, any>>[],
|
||||||
|
filteredKeys: filtered,
|
||||||
|
}
|
||||||
|
}, [chartConfig, filter])
|
||||||
|
return { filter, dataPoints, filteredKeys }
|
||||||
|
}
|
||||||
|
|
||||||
// Assures consistent colors for network interfaces
|
// Assures consistent colors for network interfaces
|
||||||
export function useNetworkInterfaces(interfaces: SystemStats["ni"]) {
|
export function useNetworkInterfaces(interfaces: SystemStats["ni"]) {
|
||||||
const keys = Object.keys(interfaces ?? {})
|
const keys = Object.keys(interfaces ?? {})
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import { useMemo } from "react"
|
import { type ReactNode, useEffect, useMemo, useState } from "react"
|
||||||
import { CartesianGrid, Line, LineChart, YAxis } from "recharts"
|
import { CartesianGrid, Line, LineChart, YAxis } from "recharts"
|
||||||
import {
|
import {
|
||||||
ChartContainer,
|
ChartContainer,
|
||||||
@@ -11,15 +11,22 @@ import {
|
|||||||
import { chartMargin, cn, formatShortDate } from "@/lib/utils"
|
import { chartMargin, cn, formatShortDate } from "@/lib/utils"
|
||||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
import { useYAxisWidth } from "./hooks"
|
import { useYAxisWidth } from "./hooks"
|
||||||
|
import type { AxisDomain } from "recharts/types/util/types"
|
||||||
|
import { useIntersectionObserver } from "@/lib/use-intersection-observer"
|
||||||
|
|
||||||
export type DataPoint = {
|
export type DataPoint<T = SystemStatsRecord> = {
|
||||||
label: string
|
label: string
|
||||||
dataKey: (data: SystemStatsRecord) => number | undefined
|
dataKey: (data: T) => number | null | undefined
|
||||||
color: number | string
|
color: number | string
|
||||||
|
stackId?: string | number
|
||||||
|
order?: number
|
||||||
|
strokeOpacity?: number
|
||||||
|
activeDot?: boolean
|
||||||
}
|
}
|
||||||
|
|
||||||
export default function LineChartDefault({
|
export default function LineChartDefault({
|
||||||
chartData,
|
chartData,
|
||||||
|
customData,
|
||||||
max,
|
max,
|
||||||
maxToggled,
|
maxToggled,
|
||||||
tickFormatter,
|
tickFormatter,
|
||||||
@@ -28,62 +35,58 @@ export default function LineChartDefault({
|
|||||||
domain,
|
domain,
|
||||||
legend,
|
legend,
|
||||||
itemSorter,
|
itemSorter,
|
||||||
}: // logRender = false,
|
showTotal = false,
|
||||||
{
|
reverseStackOrder = false,
|
||||||
|
hideYAxis = false,
|
||||||
|
filter,
|
||||||
|
truncate = false,
|
||||||
|
chartProps,
|
||||||
|
}: {
|
||||||
chartData: ChartData
|
chartData: ChartData
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: accepts different data source types (systemStats or containerData)
|
||||||
|
customData?: any[]
|
||||||
max?: number
|
max?: number
|
||||||
maxToggled?: boolean
|
maxToggled?: boolean
|
||||||
tickFormatter: (value: number, index: number) => string
|
tickFormatter: (value: number, index: number) => string
|
||||||
contentFormatter: ({ value, payload }: { value: number; payload: SystemStatsRecord }) => string
|
// biome-ignore lint/suspicious/noExplicitAny: recharts tooltip item interop
|
||||||
dataPoints?: DataPoint[]
|
contentFormatter: (item: any, key: string) => ReactNode
|
||||||
domain?: [number, number]
|
// biome-ignore lint/suspicious/noExplicitAny: accepts DataPoint with different generic types
|
||||||
|
dataPoints?: DataPoint<any>[]
|
||||||
|
domain?: AxisDomain
|
||||||
legend?: boolean
|
legend?: boolean
|
||||||
|
showTotal?: boolean
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: recharts tooltip item interop
|
||||||
itemSorter?: (a: any, b: any) => number
|
itemSorter?: (a: any, b: any) => number
|
||||||
// logRender?: boolean
|
reverseStackOrder?: boolean
|
||||||
|
hideYAxis?: boolean
|
||||||
|
filter?: string
|
||||||
|
truncate?: boolean
|
||||||
|
chartProps?: Omit<React.ComponentProps<typeof LineChart>, "data" | "margin">
|
||||||
}) {
|
}) {
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||||
|
const { isIntersecting, ref } = useIntersectionObserver({ freeze: false })
|
||||||
|
const sourceData = customData ?? chartData.systemStats
|
||||||
|
const [displayData, setDisplayData] = useState(sourceData)
|
||||||
|
const [displayMaxToggled, setDisplayMaxToggled] = useState(maxToggled)
|
||||||
|
|
||||||
// biome-ignore lint/correctness/useExhaustiveDependencies: ignore
|
// Reduce chart redraws by only updating while visible or when chart time changes
|
||||||
return useMemo(() => {
|
useEffect(() => {
|
||||||
if (chartData.systemStats.length === 0) {
|
const shouldPrimeData = sourceData.length && !displayData.length
|
||||||
return null
|
const sourceChanged = sourceData !== displayData
|
||||||
|
const shouldUpdate = shouldPrimeData || (sourceChanged && isIntersecting)
|
||||||
|
if (shouldUpdate) {
|
||||||
|
setDisplayData(sourceData)
|
||||||
}
|
}
|
||||||
// if (logRender) {
|
if (isIntersecting && maxToggled !== displayMaxToggled) {
|
||||||
// console.log("Rendered at", new Date())
|
setDisplayMaxToggled(maxToggled)
|
||||||
// }
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<LineChart accessibilityLayer data={chartData.systemStats} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
width={yAxisWidth}
|
|
||||||
domain={domain ?? [0, max ?? "auto"]}
|
|
||||||
tickFormatter={(value, index) => updateYAxisWidth(tickFormatter(value, index))}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
// @ts-expect-error
|
|
||||||
itemSorter={itemSorter}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={contentFormatter}
|
|
||||||
/>
|
|
||||||
}
|
}
|
||||||
/>
|
}, [displayData, displayMaxToggled, isIntersecting, maxToggled, sourceData])
|
||||||
{dataPoints?.map((dataPoint) => {
|
|
||||||
|
// Use a stable key derived from data point identities and visual properties
|
||||||
|
const linesKey = dataPoints?.map((d) => `${d.label}:${d.strokeOpacity ?? ""}`).join("\0")
|
||||||
|
|
||||||
|
const Lines = useMemo(() => {
|
||||||
|
return dataPoints?.map((dataPoint, i) => {
|
||||||
let { color } = dataPoint
|
let { color } = dataPoint
|
||||||
if (typeof color === "number") {
|
if (typeof color === "number") {
|
||||||
color = `var(--chart-${color})`
|
color = `var(--chart-${color})`
|
||||||
@@ -97,14 +100,71 @@ export default function LineChartDefault({
|
|||||||
dot={false}
|
dot={false}
|
||||||
strokeWidth={1.5}
|
strokeWidth={1.5}
|
||||||
stroke={color}
|
stroke={color}
|
||||||
|
strokeOpacity={dataPoint.strokeOpacity}
|
||||||
isAnimationActive={false}
|
isAnimationActive={false}
|
||||||
|
// stackId={dataPoint.stackId}
|
||||||
|
order={dataPoint.order || i}
|
||||||
|
// activeDot={dataPoint.activeDot ?? true}
|
||||||
/>
|
/>
|
||||||
)
|
)
|
||||||
|
})
|
||||||
|
}, [linesKey, displayMaxToggled])
|
||||||
|
|
||||||
|
return useMemo(() => {
|
||||||
|
if (displayData.length === 0) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
// if (logRender) {
|
||||||
|
// console.log("Rendered", dataPoints?.map((d) => d.label).join(", "), new Date())
|
||||||
|
// }
|
||||||
|
return (
|
||||||
|
<ChartContainer
|
||||||
|
ref={ref}
|
||||||
|
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
||||||
|
"opacity-100": yAxisWidth || hideYAxis,
|
||||||
|
"ps-4": hideYAxis,
|
||||||
})}
|
})}
|
||||||
|
>
|
||||||
|
<LineChart
|
||||||
|
reverseStackOrder={reverseStackOrder}
|
||||||
|
accessibilityLayer
|
||||||
|
data={displayData}
|
||||||
|
margin={hideYAxis ? { ...chartMargin, left: 5 } : chartMargin}
|
||||||
|
{...chartProps}
|
||||||
|
>
|
||||||
|
<CartesianGrid vertical={false} />
|
||||||
|
{!hideYAxis && (
|
||||||
|
<YAxis
|
||||||
|
direction="ltr"
|
||||||
|
orientation={chartData.orientation}
|
||||||
|
className="tracking-tighter"
|
||||||
|
width={yAxisWidth}
|
||||||
|
domain={domain ?? [0, max ?? "auto"]}
|
||||||
|
tickFormatter={(value, index) => updateYAxisWidth(tickFormatter(value, index))}
|
||||||
|
tickLine={false}
|
||||||
|
axisLine={false}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
|
{xAxis(chartData)}
|
||||||
|
<ChartTooltip
|
||||||
|
animationEasing="ease-out"
|
||||||
|
animationDuration={150}
|
||||||
|
// @ts-expect-error
|
||||||
|
itemSorter={itemSorter}
|
||||||
|
content={
|
||||||
|
<ChartTooltipContent
|
||||||
|
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
||||||
|
contentFormatter={contentFormatter}
|
||||||
|
showTotal={showTotal}
|
||||||
|
filter={filter}
|
||||||
|
truncate={truncate}
|
||||||
|
/>
|
||||||
|
}
|
||||||
|
/>
|
||||||
|
{Lines}
|
||||||
{legend && <ChartLegend content={<ChartLegendContent />} />}
|
{legend && <ChartLegend content={<ChartLegendContent />} />}
|
||||||
</LineChart>
|
</LineChart>
|
||||||
</ChartContainer>
|
</ChartContainer>
|
||||||
</div>
|
|
||||||
)
|
)
|
||||||
}, [chartData.systemStats.at(-1), yAxisWidth, maxToggled])
|
}, [displayData, yAxisWidth, filter, Lines])
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,83 +0,0 @@
|
|||||||
import { t } from "@lingui/core/macro"
|
|
||||||
import { memo } from "react"
|
|
||||||
import { CartesianGrid, Line, LineChart, YAxis } from "recharts"
|
|
||||||
import {
|
|
||||||
ChartContainer,
|
|
||||||
ChartLegend,
|
|
||||||
ChartLegendContent,
|
|
||||||
ChartTooltip,
|
|
||||||
ChartTooltipContent,
|
|
||||||
xAxis,
|
|
||||||
} from "@/components/ui/chart"
|
|
||||||
import { chartMargin, cn, decimalString, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData, SystemStats } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function LoadAverageChart({ chartData }: { chartData: ChartData }) {
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
|
|
||||||
const keys: { color: string; label: string }[] = [
|
|
||||||
{
|
|
||||||
color: "hsl(271, 81%, 60%)", // Purple
|
|
||||||
label: t({ message: `1 min`, comment: "Load average" }),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
color: "hsl(217, 91%, 60%)", // Blue
|
|
||||||
label: t({ message: `5 min`, comment: "Load average" }),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
color: "hsl(25, 95%, 53%)", // Orange
|
|
||||||
label: t({ message: `15 min`, comment: "Load average" }),
|
|
||||||
},
|
|
||||||
]
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<LineChart accessibilityLayer data={chartData.systemStats} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
domain={[0, "auto"]}
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickFormatter={(value) => {
|
|
||||||
return updateYAxisWidth(String(toFixedFloat(value, 2)))
|
|
||||||
}}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={(item) => decimalString(item.value)}
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
{keys.map(({ color, label }, i) => (
|
|
||||||
<Line
|
|
||||||
key={label}
|
|
||||||
dataKey={(value: { stats: SystemStats }) => value.stats?.la?.[i]}
|
|
||||||
name={label}
|
|
||||||
type="monotoneX"
|
|
||||||
dot={false}
|
|
||||||
strokeWidth={1.5}
|
|
||||||
stroke={color}
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
))}
|
|
||||||
<ChartLegend content={<ChartLegendContent />} />
|
|
||||||
</LineChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,108 +0,0 @@
|
|||||||
import { useLingui } from "@lingui/react/macro"
|
|
||||||
import { memo } from "react"
|
|
||||||
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
|
||||||
import { ChartContainer, ChartTooltip, ChartTooltipContent, xAxis } from "@/components/ui/chart"
|
|
||||||
import { Unit } from "@/lib/enums"
|
|
||||||
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function MemChart({ chartData, showMax }: { chartData: ChartData; showMax: boolean }) {
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
const { t } = useLingui()
|
|
||||||
|
|
||||||
const totalMem = toFixedFloat(chartData.systemStats.at(-1)?.stats.m ?? 0, 1)
|
|
||||||
|
|
||||||
// console.log('rendered at', new Date())
|
|
||||||
|
|
||||||
if (chartData.systemStats.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
{/* {!yAxisSet && <Spinner />} */}
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<AreaChart accessibilityLayer data={chartData.systemStats} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
{totalMem && (
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
// use "ticks" instead of domain / tickcount if need more control
|
|
||||||
domain={[0, totalMem]}
|
|
||||||
tickCount={9}
|
|
||||||
className="tracking-tighter"
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
tickFormatter={(value) => {
|
|
||||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
|
||||||
return updateYAxisWidth(toFixedFloat(convertedValue, value >= 10 ? 0 : 1) + " " + unit)
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
)}
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
// cursor={false}
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
// @ts-expect-error
|
|
||||||
itemSorter={(a, b) => a.order - b.order}
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={({ value }) => {
|
|
||||||
// mem values are supplied as GB
|
|
||||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
|
||||||
return decimalString(convertedValue, convertedValue >= 100 ? 1 : 2) + " " + unit
|
|
||||||
}}
|
|
||||||
showTotal={true}
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
<Area
|
|
||||||
name={t`Used`}
|
|
||||||
order={3}
|
|
||||||
dataKey={({ stats }) => (showMax ? stats?.mm : stats?.mu)}
|
|
||||||
type="monotoneX"
|
|
||||||
fill="var(--chart-2)"
|
|
||||||
fillOpacity={0.4}
|
|
||||||
stroke="var(--chart-2)"
|
|
||||||
stackId="1"
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
{/* {chartData.systemStats.at(-1)?.stats.mz && ( */}
|
|
||||||
<Area
|
|
||||||
name="ZFS ARC"
|
|
||||||
order={2}
|
|
||||||
dataKey={({ stats }) => (showMax ? null : stats?.mz)}
|
|
||||||
type="monotoneX"
|
|
||||||
fill="hsla(175 60% 45% / 0.8)"
|
|
||||||
fillOpacity={0.5}
|
|
||||||
stroke="hsla(175 60% 45% / 0.8)"
|
|
||||||
stackId="1"
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
{/* )} */}
|
|
||||||
<Area
|
|
||||||
name={t`Cache / Buffers`}
|
|
||||||
order={1}
|
|
||||||
dataKey={({ stats }) => (showMax ? null : stats?.mb)}
|
|
||||||
type="monotoneX"
|
|
||||||
fill="hsla(160 60% 45% / 0.5)"
|
|
||||||
fillOpacity={0.4}
|
|
||||||
stroke="hsla(160 60% 45% / 0.5)"
|
|
||||||
stackId="1"
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
{/* <ChartLegend content={<ChartLegendContent />} /> */}
|
|
||||||
</AreaChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,70 +0,0 @@
|
|||||||
import { t } from "@lingui/core/macro"
|
|
||||||
import { useStore } from "@nanostores/react"
|
|
||||||
import { memo } from "react"
|
|
||||||
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
|
||||||
import { ChartContainer, ChartTooltip, ChartTooltipContent, xAxis } from "@/components/ui/chart"
|
|
||||||
import { $userSettings } from "@/lib/stores"
|
|
||||||
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function SwapChart({ chartData }: { chartData: ChartData }) {
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
const userSettings = useStore($userSettings)
|
|
||||||
|
|
||||||
if (chartData.systemStats.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<AreaChart accessibilityLayer data={chartData.systemStats} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
domain={[0, () => toFixedFloat(chartData.systemStats.at(-1)?.stats.s ?? 0.04, 2)]}
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
tickFormatter={(value) => {
|
|
||||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, userSettings.unitDisk, true)
|
|
||||||
return updateYAxisWidth(toFixedFloat(convertedValue, value >= 10 ? 0 : 1) + " " + unit)
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={({ value }) => {
|
|
||||||
// mem values are supplied as GB
|
|
||||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, userSettings.unitDisk, true)
|
|
||||||
return decimalString(convertedValue, convertedValue >= 100 ? 1 : 2) + " " + unit
|
|
||||||
}}
|
|
||||||
// indicator="line"
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
<Area
|
|
||||||
dataKey="stats.su"
|
|
||||||
name={t`Used`}
|
|
||||||
type="monotoneX"
|
|
||||||
fill="var(--chart-2)"
|
|
||||||
fillOpacity={0.4}
|
|
||||||
stroke="var(--chart-2)"
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
</AreaChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,117 +0,0 @@
|
|||||||
import { useStore } from "@nanostores/react"
|
|
||||||
import { memo, useMemo } from "react"
|
|
||||||
import { CartesianGrid, Line, LineChart, YAxis } from "recharts"
|
|
||||||
import {
|
|
||||||
ChartContainer,
|
|
||||||
ChartLegend,
|
|
||||||
ChartLegendContent,
|
|
||||||
ChartTooltip,
|
|
||||||
ChartTooltipContent,
|
|
||||||
xAxis,
|
|
||||||
} from "@/components/ui/chart"
|
|
||||||
import { $temperatureFilter, $userSettings } from "@/lib/stores"
|
|
||||||
import { chartMargin, cn, decimalString, formatShortDate, formatTemperature, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function TemperatureChart({ chartData }: { chartData: ChartData }) {
|
|
||||||
const filter = useStore($temperatureFilter)
|
|
||||||
const userSettings = useStore($userSettings)
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
|
|
||||||
if (chartData.systemStats.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Format temperature data for chart and assign colors */
|
|
||||||
const newChartData = useMemo(() => {
|
|
||||||
const newChartData = { data: [], colors: {} } as {
|
|
||||||
data: Record<string, number | string>[]
|
|
||||||
colors: Record<string, string>
|
|
||||||
}
|
|
||||||
const tempSums = {} as Record<string, number>
|
|
||||||
for (const data of chartData.systemStats) {
|
|
||||||
const newData = { created: data.created } as Record<string, number | string>
|
|
||||||
const keys = Object.keys(data.stats?.t ?? {})
|
|
||||||
for (let i = 0; i < keys.length; i++) {
|
|
||||||
const key = keys[i]
|
|
||||||
newData[key] = data.stats.t![key]
|
|
||||||
tempSums[key] = (tempSums[key] ?? 0) + newData[key]
|
|
||||||
}
|
|
||||||
newChartData.data.push(newData)
|
|
||||||
}
|
|
||||||
const keys = Object.keys(tempSums).sort((a, b) => tempSums[b] - tempSums[a])
|
|
||||||
for (const key of keys) {
|
|
||||||
newChartData.colors[key] = `hsl(${((keys.indexOf(key) * 360) / keys.length) % 360}, 60%, 55%)`
|
|
||||||
}
|
|
||||||
return newChartData
|
|
||||||
}, [chartData])
|
|
||||||
|
|
||||||
const colors = Object.keys(newChartData.colors)
|
|
||||||
|
|
||||||
// console.log('rendered at', new Date())
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<LineChart accessibilityLayer data={newChartData.data} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
domain={["auto", "auto"]}
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickFormatter={(val) => {
|
|
||||||
const { value, unit } = formatTemperature(val, userSettings.unitTemp)
|
|
||||||
return updateYAxisWidth(toFixedFloat(value, 2) + " " + unit)
|
|
||||||
}}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
// @ts-expect-error
|
|
||||||
itemSorter={(a, b) => b.value - a.value}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={(item) => {
|
|
||||||
const { value, unit } = formatTemperature(item.value, userSettings.unitTemp)
|
|
||||||
return decimalString(value) + " " + unit
|
|
||||||
}}
|
|
||||||
filter={filter}
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
{colors.map((key) => {
|
|
||||||
const filterTerms = filter ? filter.toLowerCase().split(" ").filter(term => term.length > 0) : []
|
|
||||||
const filtered = filterTerms.length > 0 && !filterTerms.some(term => key.toLowerCase().includes(term))
|
|
||||||
const strokeOpacity = filtered ? 0.1 : 1
|
|
||||||
return (
|
|
||||||
<Line
|
|
||||||
key={key}
|
|
||||||
dataKey={key}
|
|
||||||
name={key}
|
|
||||||
type="monotoneX"
|
|
||||||
dot={false}
|
|
||||||
strokeWidth={1.5}
|
|
||||||
stroke={newChartData.colors[key]}
|
|
||||||
strokeOpacity={strokeOpacity}
|
|
||||||
activeDot={{ opacity: filtered ? 0 : 1 }}
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
)
|
|
||||||
})}
|
|
||||||
{colors.length < 12 && <ChartLegend content={<ChartLegendContent />} />}
|
|
||||||
</LineChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -163,9 +163,9 @@ export default function ContainersTable({ systemId }: { systemId?: string }) {
|
|||||||
const visibleColumns = table.getVisibleLeafColumns()
|
const visibleColumns = table.getVisibleLeafColumns()
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Card className="p-6 @container w-full">
|
<Card className="@container w-full px-3 py-5 sm:py-6 sm:px-6">
|
||||||
<CardHeader className="p-0 mb-4">
|
<CardHeader className="p-0 mb-3 sm:mb-4">
|
||||||
<div className="grid md:flex gap-5 w-full items-end">
|
<div className="grid md:flex gap-x-5 gap-y-3 w-full items-end">
|
||||||
<div className="px-2 sm:px-1">
|
<div className="px-2 sm:px-1">
|
||||||
<CardTitle className="mb-2">
|
<CardTitle className="mb-2">
|
||||||
<Trans>All Containers</Trans>
|
<Trans>All Containers</Trans>
|
||||||
@@ -462,7 +462,6 @@ function ContainerSheet({
|
|||||||
function ContainersTableHead({ table }: { table: TableType<ContainerRecord> }) {
|
function ContainersTableHead({ table }: { table: TableType<ContainerRecord> }) {
|
||||||
return (
|
return (
|
||||||
<TableHeader className="sticky top-0 z-50 w-full border-b-2">
|
<TableHeader className="sticky top-0 z-50 w-full border-b-2">
|
||||||
<div className="absolute -top-2 left-0 w-full h-4 bg-table-header z-50"></div>
|
|
||||||
{table.getHeaderGroups().map((headerGroup) => (
|
{table.getHeaderGroups().map((headerGroup) => (
|
||||||
<tr key={headerGroup.id}>
|
<tr key={headerGroup.id}>
|
||||||
{headerGroup.headers.map((header) => {
|
{headerGroup.headers.map((header) => {
|
||||||
|
|||||||
@@ -1,7 +1,11 @@
|
|||||||
|
import { useStore } from "@nanostores/react"
|
||||||
import { GithubIcon } from "lucide-react"
|
import { GithubIcon } from "lucide-react"
|
||||||
|
import { $newVersion } from "@/lib/stores"
|
||||||
import { Separator } from "./ui/separator"
|
import { Separator } from "./ui/separator"
|
||||||
|
import { Trans } from "@lingui/react/macro"
|
||||||
|
|
||||||
export function FooterRepoLink() {
|
export function FooterRepoLink() {
|
||||||
|
const newVersion = useStore($newVersion)
|
||||||
return (
|
return (
|
||||||
<div className="flex gap-1.5 justify-end items-center pe-3 sm:pe-6 mt-3.5 mb-4 text-xs opacity-80">
|
<div className="flex gap-1.5 justify-end items-center pe-3 sm:pe-6 mt-3.5 mb-4 text-xs opacity-80">
|
||||||
<a
|
<a
|
||||||
@@ -21,6 +25,19 @@ export function FooterRepoLink() {
|
|||||||
>
|
>
|
||||||
Beszel {globalThis.BESZEL.HUB_VERSION}
|
Beszel {globalThis.BESZEL.HUB_VERSION}
|
||||||
</a>
|
</a>
|
||||||
|
{newVersion?.v && (
|
||||||
|
<>
|
||||||
|
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
|
||||||
|
<a
|
||||||
|
href={newVersion.url}
|
||||||
|
target="_blank"
|
||||||
|
className="text-yellow-500 hover:text-yellow-400 duration-75"
|
||||||
|
rel="noopener"
|
||||||
|
>
|
||||||
|
<Trans context="New version available">{newVersion.v} available</Trans>
|
||||||
|
</a>
|
||||||
|
</>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
import { Trans, useLingui } from "@lingui/react/macro"
|
import { Trans, useLingui } from "@lingui/react/macro"
|
||||||
import { LanguagesIcon } from "lucide-react"
|
import { LanguagesIcon } from "lucide-react"
|
||||||
import { Button } from "@/components/ui/button"
|
import { buttonVariants } from "@/components/ui/button"
|
||||||
import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu"
|
import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu"
|
||||||
import { dynamicActivate } from "@/lib/i18n"
|
import { dynamicActivate } from "@/lib/i18n"
|
||||||
import languages from "@/lib/languages"
|
import languages from "@/lib/languages"
|
||||||
@@ -14,17 +14,14 @@ export function LangToggle() {
|
|||||||
|
|
||||||
return (
|
return (
|
||||||
<DropdownMenu>
|
<DropdownMenu>
|
||||||
<DropdownMenuTrigger>
|
|
||||||
<Tooltip>
|
<Tooltip>
|
||||||
<TooltipTrigger asChild>
|
<TooltipTrigger asChild>
|
||||||
<Button variant={"ghost"} size="icon" className="hidden sm:flex">
|
<DropdownMenuTrigger className={cn(buttonVariants({ variant: "ghost", size: "icon" }))}>
|
||||||
<LanguagesIcon className="absolute h-[1.2rem] w-[1.2rem] light:opacity-85" />
|
<LanguagesIcon className="absolute h-[1.2rem] w-[1.2rem] light:opacity-85" />
|
||||||
<span className="sr-only">{LangTrans}</span>
|
<span className="sr-only">{LangTrans}</span>
|
||||||
</Button>
|
|
||||||
</TooltipTrigger>
|
|
||||||
<TooltipContent>{LangTrans}</TooltipContent>
|
<TooltipContent>{LangTrans}</TooltipContent>
|
||||||
</Tooltip>
|
|
||||||
</DropdownMenuTrigger>
|
</DropdownMenuTrigger>
|
||||||
|
</TooltipTrigger>
|
||||||
<DropdownMenuContent className="grid grid-cols-3">
|
<DropdownMenuContent className="grid grid-cols-3">
|
||||||
{languages.map(([lang, label, e]) => (
|
{languages.map(([lang, label, e]) => (
|
||||||
<DropdownMenuItem
|
<DropdownMenuItem
|
||||||
@@ -39,6 +36,7 @@ export function LangToggle() {
|
|||||||
</DropdownMenuItem>
|
</DropdownMenuItem>
|
||||||
))}
|
))}
|
||||||
</DropdownMenuContent>
|
</DropdownMenuContent>
|
||||||
|
</Tooltip>
|
||||||
</DropdownMenu>
|
</DropdownMenu>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ import { Label } from "@/components/ui/label"
|
|||||||
import { pb } from "@/lib/api"
|
import { pb } from "@/lib/api"
|
||||||
import { $authenticated } from "@/lib/stores"
|
import { $authenticated } from "@/lib/stores"
|
||||||
import { cn } from "@/lib/utils"
|
import { cn } from "@/lib/utils"
|
||||||
import { $router, Link, prependBasePath } from "../router"
|
import { $router, Link, basePath, prependBasePath } from "../router"
|
||||||
import { toast } from "../ui/use-toast"
|
import { toast } from "../ui/use-toast"
|
||||||
import { OtpInputForm } from "./otp-forms"
|
import { OtpInputForm } from "./otp-forms"
|
||||||
|
|
||||||
@@ -37,8 +37,7 @@ const RegisterSchema = v.looseObject({
|
|||||||
passwordConfirm: passwordSchema,
|
passwordConfirm: passwordSchema,
|
||||||
})
|
})
|
||||||
|
|
||||||
export const showLoginFaliedToast = (description?: string) => {
|
export const showLoginFaliedToast = (description = t`Please check your credentials and try again`) => {
|
||||||
description ||= t`Please check your credentials and try again`
|
|
||||||
toast({
|
toast({
|
||||||
title: t`Login attempt failed`,
|
title: t`Login attempt failed`,
|
||||||
description,
|
description,
|
||||||
@@ -130,10 +129,6 @@ export function UserAuthForm({
|
|||||||
[isFirstRun]
|
[isFirstRun]
|
||||||
)
|
)
|
||||||
|
|
||||||
if (!authMethods) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
const authProviders = authMethods.oauth2.providers ?? []
|
const authProviders = authMethods.oauth2.providers ?? []
|
||||||
const oauthEnabled = authMethods.oauth2.enabled && authProviders.length > 0
|
const oauthEnabled = authMethods.oauth2.enabled && authProviders.length > 0
|
||||||
const passwordEnabled = authMethods.password.enabled
|
const passwordEnabled = authMethods.password.enabled
|
||||||
@@ -142,6 +137,12 @@ export function UserAuthForm({
|
|||||||
|
|
||||||
function loginWithOauth(provider: AuthProviderInfo, forcePopup = false) {
|
function loginWithOauth(provider: AuthProviderInfo, forcePopup = false) {
|
||||||
setIsOauthLoading(true)
|
setIsOauthLoading(true)
|
||||||
|
|
||||||
|
if (globalThis.BESZEL.OAUTH_DISABLE_POPUP) {
|
||||||
|
redirectToOauthProvider(provider)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
const oAuthOpts: OAuth2AuthConfig = {
|
const oAuthOpts: OAuth2AuthConfig = {
|
||||||
provider: provider.name,
|
provider: provider.name,
|
||||||
}
|
}
|
||||||
@@ -150,10 +151,7 @@ export function UserAuthForm({
|
|||||||
const authWindow = window.open()
|
const authWindow = window.open()
|
||||||
if (!authWindow) {
|
if (!authWindow) {
|
||||||
setIsOauthLoading(false)
|
setIsOauthLoading(false)
|
||||||
toast({
|
showLoginFaliedToast(t`Please enable pop-ups for this site`)
|
||||||
title: t`Error`,
|
|
||||||
description: t`Please enable pop-ups for this site`,
|
|
||||||
})
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
oAuthOpts.urlCallback = (url) => {
|
oAuthOpts.urlCallback = (url) => {
|
||||||
@@ -171,16 +169,57 @@ export function UserAuthForm({
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
useEffect(() => {
|
/**
|
||||||
// auto login if password disabled and only one auth provider
|
* Redirects the user to the OAuth provider's authentication page in the same window.
|
||||||
if (!passwordEnabled && authProviders.length === 1 && !sessionStorage.getItem("lo")) {
|
* Requires the app's base URL to be registered as a redirect URI with the OAuth provider.
|
||||||
// Add a small timeout to ensure browser is ready to handle popups
|
*/
|
||||||
setTimeout(() => {
|
function redirectToOauthProvider(provider: AuthProviderInfo) {
|
||||||
loginWithOauth(authProviders[0], true)
|
const url = new URL(provider.authURL)
|
||||||
}, 300)
|
// url.searchParams.set("redirect_uri", `${window.location.origin}${basePath}`)
|
||||||
|
sessionStorage.setItem("provider", JSON.stringify(provider))
|
||||||
|
window.location.href = url.toString()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
// handle redirect-based OAuth callback if we have a code
|
||||||
|
const params = new URLSearchParams(window.location.search)
|
||||||
|
const code = params.get("code")
|
||||||
|
if (code) {
|
||||||
|
const state = params.get("state")
|
||||||
|
const provider: AuthProviderInfo = JSON.parse(sessionStorage.getItem("provider") ?? "{}")
|
||||||
|
if (!state || provider.state !== state) {
|
||||||
|
showLoginFaliedToast()
|
||||||
|
} else {
|
||||||
|
setIsOauthLoading(true)
|
||||||
|
window.history.replaceState({}, "", window.location.pathname)
|
||||||
|
pb.collection("users")
|
||||||
|
.authWithOAuth2Code(provider.name, code, provider.codeVerifier, `${window.location.origin}${basePath}`)
|
||||||
|
.then(() => $authenticated.set(pb.authStore.isValid))
|
||||||
|
.catch((e: unknown) => showLoginFaliedToast((e as Error).message))
|
||||||
|
.finally(() => setIsOauthLoading(false))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// auto login if password disabled and only one auth provider
|
||||||
|
if (!code && !passwordEnabled && authProviders.length === 1 && !sessionStorage.getItem("lo")) {
|
||||||
|
// Add a small timeout to ensure browser is ready to handle popups
|
||||||
|
setTimeout(() => loginWithOauth(authProviders[0], false), 300)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// refresh auth if not in above states (required for trusted auth header)
|
||||||
|
pb.collection("users")
|
||||||
|
.authRefresh()
|
||||||
|
.then((res) => {
|
||||||
|
pb.authStore.save(res.token, res.record)
|
||||||
|
$authenticated.set(!!pb.authStore.isValid)
|
||||||
|
})
|
||||||
}, [])
|
}, [])
|
||||||
|
|
||||||
|
if (!authMethods) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
if (otpId && mfaId) {
|
if (otpId && mfaId) {
|
||||||
return <OtpInputForm otpId={otpId} mfaId={mfaId} />
|
return <OtpInputForm otpId={otpId} mfaId={mfaId} />
|
||||||
}
|
}
|
||||||
@@ -248,7 +287,7 @@ export function UserAuthForm({
|
|||||||
)}
|
)}
|
||||||
<div className="sr-only">
|
<div className="sr-only">
|
||||||
{/* honeypot */}
|
{/* honeypot */}
|
||||||
<label htmlFor="website"></label>
|
<label htmlFor="website">Website</label>
|
||||||
<input
|
<input
|
||||||
id="website"
|
id="website"
|
||||||
type="text"
|
type="text"
|
||||||
|
|||||||
@@ -1,28 +1,39 @@
|
|||||||
import { t } from "@lingui/core/macro"
|
import { t } from "@lingui/core/macro"
|
||||||
import { MoonStarIcon, SunIcon } from "lucide-react"
|
import { MoonStarIcon, SunIcon, SunMoonIcon } from "lucide-react"
|
||||||
import { useTheme } from "@/components/theme-provider"
|
import { useTheme } from "@/components/theme-provider"
|
||||||
import { Button } from "@/components/ui/button"
|
import { Button } from "@/components/ui/button"
|
||||||
import { Tooltip, TooltipContent, TooltipTrigger } from "./ui/tooltip"
|
import { Tooltip, TooltipContent, TooltipTrigger } from "./ui/tooltip"
|
||||||
import { Trans } from "@lingui/react/macro"
|
import { Trans } from "@lingui/react/macro"
|
||||||
|
import { cn } from "@/lib/utils"
|
||||||
|
|
||||||
|
const themes = ["light", "dark", "system"] as const
|
||||||
|
const icons = [SunIcon, MoonStarIcon, SunMoonIcon] as const
|
||||||
|
|
||||||
export function ModeToggle() {
|
export function ModeToggle() {
|
||||||
const { theme, setTheme } = useTheme()
|
const { theme, setTheme } = useTheme()
|
||||||
|
|
||||||
|
const currentIndex = themes.indexOf(theme)
|
||||||
|
const Icon = icons[currentIndex]
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Tooltip>
|
<Tooltip>
|
||||||
<TooltipTrigger>
|
<TooltipTrigger asChild>
|
||||||
<Button
|
<Button
|
||||||
variant={"ghost"}
|
variant={"ghost"}
|
||||||
size="icon"
|
size="icon"
|
||||||
aria-label={t`Toggle theme`}
|
aria-label={t`Switch theme`}
|
||||||
onClick={() => setTheme(theme === "dark" ? "light" : "dark")}
|
onClick={() => setTheme(themes[(currentIndex + 1) % themes.length])}
|
||||||
>
|
>
|
||||||
<SunIcon className="h-[1.2rem] w-[1.2rem] transition-all -rotate-90 dark:opacity-0 dark:rotate-0" />
|
<Icon
|
||||||
<MoonStarIcon className="absolute h-[1.2rem] w-[1.2rem] transition-all opacity-0 -rotate-90 dark:opacity-100 dark:rotate-0" />
|
className={cn(
|
||||||
|
"animate-in fade-in spin-in-[-30deg] duration-200",
|
||||||
|
currentIndex === 2 ? "size-[1.35rem]" : "size-[1.2rem]"
|
||||||
|
)}
|
||||||
|
/>
|
||||||
</Button>
|
</Button>
|
||||||
</TooltipTrigger>
|
</TooltipTrigger>
|
||||||
<TooltipContent>
|
<TooltipContent>
|
||||||
<Trans>Toggle theme</Trans>
|
<Trans>Switch theme</Trans>
|
||||||
</TooltipContent>
|
</TooltipContent>
|
||||||
</Tooltip>
|
</Tooltip>
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
import { Trans } from "@lingui/react/macro"
|
import { Trans } from "@lingui/react/macro"
|
||||||
import { getPagePath } from "@nanostores/router"
|
import { getPagePath } from "@nanostores/router"
|
||||||
import {
|
import {
|
||||||
@@ -6,6 +7,8 @@ import {
|
|||||||
HardDriveIcon,
|
HardDriveIcon,
|
||||||
LogOutIcon,
|
LogOutIcon,
|
||||||
LogsIcon,
|
LogsIcon,
|
||||||
|
MenuIcon,
|
||||||
|
PlusIcon,
|
||||||
SearchIcon,
|
SearchIcon,
|
||||||
ServerIcon,
|
ServerIcon,
|
||||||
SettingsIcon,
|
SettingsIcon,
|
||||||
@@ -21,15 +24,18 @@ import {
|
|||||||
DropdownMenuItem,
|
DropdownMenuItem,
|
||||||
DropdownMenuLabel,
|
DropdownMenuLabel,
|
||||||
DropdownMenuSeparator,
|
DropdownMenuSeparator,
|
||||||
|
DropdownMenuSub,
|
||||||
|
DropdownMenuSubContent,
|
||||||
|
DropdownMenuSubTrigger,
|
||||||
DropdownMenuTrigger,
|
DropdownMenuTrigger,
|
||||||
} from "@/components/ui/dropdown-menu"
|
} from "@/components/ui/dropdown-menu"
|
||||||
import { isAdmin, isReadOnlyUser, logOut, pb } from "@/lib/api"
|
import { isAdmin, isReadOnlyUser, logOut, pb } from "@/lib/api"
|
||||||
import { cn, runOnce } from "@/lib/utils"
|
import { cn, runOnce } from "@/lib/utils"
|
||||||
import { AddSystemButton } from "./add-system"
|
import { AddSystemDialog } from "./add-system"
|
||||||
import { LangToggle } from "./lang-toggle"
|
import { LangToggle } from "./lang-toggle"
|
||||||
import { Logo } from "./logo"
|
import { Logo } from "./logo"
|
||||||
import { ModeToggle } from "./mode-toggle"
|
import { ModeToggle } from "./mode-toggle"
|
||||||
import { $router, basePath, Link, prependBasePath } from "./router"
|
import { $router, basePath, Link, navigate, prependBasePath } from "./router"
|
||||||
import { Tooltip, TooltipContent, TooltipTrigger } from "./ui/tooltip"
|
import { Tooltip, TooltipContent, TooltipTrigger } from "./ui/tooltip"
|
||||||
|
|
||||||
const CommandPalette = lazy(() => import("./command-palette"))
|
const CommandPalette = lazy(() => import("./command-palette"))
|
||||||
@@ -37,20 +43,117 @@ const CommandPalette = lazy(() => import("./command-palette"))
|
|||||||
const isMac = navigator.platform.toUpperCase().indexOf("MAC") >= 0
|
const isMac = navigator.platform.toUpperCase().indexOf("MAC") >= 0
|
||||||
|
|
||||||
export default function Navbar() {
|
export default function Navbar() {
|
||||||
|
const [addSystemDialogOpen, setAddSystemDialogOpen] = useState(false)
|
||||||
|
const [commandPaletteOpen, setCommandPaletteOpen] = useState(false)
|
||||||
|
|
||||||
|
const AdminLinks = AdminDropdownGroup()
|
||||||
|
|
||||||
|
const systemTranslation = t`System`
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="flex items-center h-14 md:h-16 bg-card px-4 pe-3 sm:px-6 border border-border/60 bt-0 rounded-md my-4">
|
<div className="flex items-center h-14 md:h-16 bg-card px-4 pe-3 sm:px-6 border border-border/60 bt-0 rounded-md my-4">
|
||||||
|
<Suspense>
|
||||||
|
<CommandPalette open={commandPaletteOpen} setOpen={setCommandPaletteOpen} />
|
||||||
|
</Suspense>
|
||||||
|
<AddSystemDialog open={addSystemDialogOpen} setOpen={setAddSystemDialogOpen} />
|
||||||
|
|
||||||
<Link
|
<Link
|
||||||
href={basePath}
|
href={basePath}
|
||||||
aria-label="Home"
|
aria-label="Home"
|
||||||
className="p-2 ps-0 me-3 group"
|
className="p-2 ps-0 me-3 group"
|
||||||
onMouseEnter={runOnce(() => import("@/components/routes/home"))}
|
onMouseEnter={runOnce(() => import("@/components/routes/home"))}
|
||||||
>
|
>
|
||||||
<Logo className="h-[1.1rem] md:h-5 fill-foreground" />
|
<Logo className="h-[1.2rem] md:h-5 fill-foreground" />
|
||||||
</Link>
|
</Link>
|
||||||
<SearchButton />
|
<Button
|
||||||
|
variant="outline"
|
||||||
|
className="hidden md:block text-sm text-muted-foreground px-4"
|
||||||
|
onClick={() => setCommandPaletteOpen(true)}
|
||||||
|
>
|
||||||
|
<span className="flex items-center">
|
||||||
|
<SearchIcon className="me-1.5 h-4 w-4" />
|
||||||
|
<Trans>Search</Trans>
|
||||||
|
<span className="flex items-center ms-3.5">
|
||||||
|
<Kbd>{isMac ? "⌘" : "Ctrl"}</Kbd>
|
||||||
|
<Kbd>K</Kbd>
|
||||||
|
</span>
|
||||||
|
</span>
|
||||||
|
</Button>
|
||||||
|
|
||||||
|
{/* mobile menu */}
|
||||||
|
<div className="ms-auto flex items-center text-xl md:hidden">
|
||||||
|
<ModeToggle />
|
||||||
|
<Button variant="ghost" size="icon" onClick={() => setCommandPaletteOpen(true)}>
|
||||||
|
<SearchIcon className="h-[1.2rem] w-[1.2rem]" />
|
||||||
|
</Button>
|
||||||
|
<DropdownMenu>
|
||||||
|
<DropdownMenuTrigger
|
||||||
|
onMouseEnter={() => import("@/components/routes/settings/general")}
|
||||||
|
className="ms-3"
|
||||||
|
aria-label="Open Menu"
|
||||||
|
>
|
||||||
|
<MenuIcon />
|
||||||
|
</DropdownMenuTrigger>
|
||||||
|
<DropdownMenuContent align="end">
|
||||||
|
<DropdownMenuLabel className="max-w-40 truncate">{pb.authStore.record?.email}</DropdownMenuLabel>
|
||||||
|
<DropdownMenuSeparator />
|
||||||
|
<DropdownMenuGroup>
|
||||||
|
<DropdownMenuItem
|
||||||
|
onClick={() => navigate(getPagePath($router, "containers"))}
|
||||||
|
className="flex items-center"
|
||||||
|
>
|
||||||
|
<ContainerIcon className="h-4 w-4 me-2.5" strokeWidth={1.5} />
|
||||||
|
<Trans>All Containers</Trans>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
<DropdownMenuItem onClick={() => navigate(getPagePath($router, "smart"))} className="flex items-center">
|
||||||
|
<HardDriveIcon className="h-4 w-4 me-2.5" strokeWidth={1.5} />
|
||||||
|
<span>S.M.A.R.T.</span>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
<DropdownMenuItem
|
||||||
|
onClick={() => navigate(getPagePath($router, "settings", { name: "general" }))}
|
||||||
|
className="flex items-center"
|
||||||
|
>
|
||||||
|
<SettingsIcon className="h-4 w-4 me-2.5" />
|
||||||
|
<Trans>Settings</Trans>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
{isAdmin() && (
|
||||||
|
<DropdownMenuSub>
|
||||||
|
<DropdownMenuSubTrigger>
|
||||||
|
<UserIcon className="h-4 w-4 me-2.5" />
|
||||||
|
<Trans>Admin</Trans>
|
||||||
|
</DropdownMenuSubTrigger>
|
||||||
|
<DropdownMenuSubContent>{AdminLinks}</DropdownMenuSubContent>
|
||||||
|
</DropdownMenuSub>
|
||||||
|
)}
|
||||||
|
{!isReadOnlyUser() && (
|
||||||
|
<DropdownMenuItem
|
||||||
|
className="flex items-center"
|
||||||
|
onSelect={() => {
|
||||||
|
setAddSystemDialogOpen(true)
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<PlusIcon className="h-4 w-4 me-2.5" />
|
||||||
|
<Trans>Add {{ foo: systemTranslation }}</Trans>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
)}
|
||||||
|
</DropdownMenuGroup>
|
||||||
|
<DropdownMenuSeparator />
|
||||||
|
<DropdownMenuGroup>
|
||||||
|
<DropdownMenuItem onSelect={logOut} className="flex items-center">
|
||||||
|
<LogOutIcon className="h-4 w-4 me-2.5" />
|
||||||
|
<Trans>Log Out</Trans>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
</DropdownMenuGroup>
|
||||||
|
</DropdownMenuContent>
|
||||||
|
</DropdownMenu>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* desktop nav */}
|
||||||
{/** biome-ignore lint/a11y/noStaticElementInteractions: ignore */}
|
{/** biome-ignore lint/a11y/noStaticElementInteractions: ignore */}
|
||||||
<div className="flex items-center ms-auto" onMouseEnter={() => import("@/components/routes/settings/general")}>
|
<div
|
||||||
|
className="hidden md:flex items-center ms-auto"
|
||||||
|
onMouseEnter={() => import("@/components/routes/settings/general")}
|
||||||
|
>
|
||||||
<Tooltip>
|
<Tooltip>
|
||||||
<TooltipTrigger asChild>
|
<TooltipTrigger asChild>
|
||||||
<Link
|
<Link
|
||||||
@@ -102,9 +205,40 @@ export default function Navbar() {
|
|||||||
<DropdownMenuContent align={isReadOnlyUser() ? "end" : "center"} className="min-w-44">
|
<DropdownMenuContent align={isReadOnlyUser() ? "end" : "center"} className="min-w-44">
|
||||||
<DropdownMenuLabel>{pb.authStore.record?.email}</DropdownMenuLabel>
|
<DropdownMenuLabel>{pb.authStore.record?.email}</DropdownMenuLabel>
|
||||||
<DropdownMenuSeparator />
|
<DropdownMenuSeparator />
|
||||||
<DropdownMenuGroup>
|
|
||||||
{isAdmin() && (
|
{isAdmin() && (
|
||||||
<>
|
<>
|
||||||
|
{AdminLinks}
|
||||||
|
<DropdownMenuSeparator />
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
<DropdownMenuItem onSelect={logOut}>
|
||||||
|
<LogOutIcon className="me-2.5 h-4 w-4" />
|
||||||
|
<span>
|
||||||
|
<Trans>Log Out</Trans>
|
||||||
|
</span>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
</DropdownMenuContent>
|
||||||
|
</DropdownMenu>
|
||||||
|
{!isReadOnlyUser() && (
|
||||||
|
<Button variant="outline" className="flex gap-1 ms-2" onClick={() => setAddSystemDialogOpen(true)}>
|
||||||
|
<PlusIcon className="h-4 w-4 -ms-1" />
|
||||||
|
<Trans>Add {{ foo: systemTranslation }}</Trans>
|
||||||
|
</Button>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
const Kbd = ({ children }: { children: React.ReactNode }) => (
|
||||||
|
<kbd className="pointer-events-none inline-flex h-5 select-none items-center gap-1 rounded border bg-muted px-1.5 font-mono text-[10px] font-medium text-muted-foreground opacity-100">
|
||||||
|
{children}
|
||||||
|
</kbd>
|
||||||
|
)
|
||||||
|
|
||||||
|
function AdminDropdownGroup() {
|
||||||
|
return (
|
||||||
|
<DropdownMenuGroup>
|
||||||
<DropdownMenuItem asChild>
|
<DropdownMenuItem asChild>
|
||||||
<a href={prependBasePath("/_/")} target="_blank">
|
<a href={prependBasePath("/_/")} target="_blank">
|
||||||
<UsersIcon className="me-2.5 h-4 w-4" />
|
<UsersIcon className="me-2.5 h-4 w-4" />
|
||||||
@@ -137,52 +271,6 @@ export default function Navbar() {
|
|||||||
</span>
|
</span>
|
||||||
</a>
|
</a>
|
||||||
</DropdownMenuItem>
|
</DropdownMenuItem>
|
||||||
<DropdownMenuSeparator />
|
|
||||||
</>
|
|
||||||
)}
|
|
||||||
</DropdownMenuGroup>
|
</DropdownMenuGroup>
|
||||||
<DropdownMenuItem onSelect={logOut}>
|
|
||||||
<LogOutIcon className="me-2.5 h-4 w-4" />
|
|
||||||
<span>
|
|
||||||
<Trans>Log Out</Trans>
|
|
||||||
</span>
|
|
||||||
</DropdownMenuItem>
|
|
||||||
</DropdownMenuContent>
|
|
||||||
</DropdownMenu>
|
|
||||||
<AddSystemButton className="ms-2" />
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
const Kbd = ({ children }: { children: React.ReactNode }) => (
|
|
||||||
<kbd className="pointer-events-none inline-flex h-5 select-none items-center gap-1 rounded border bg-muted px-1.5 font-mono text-[10px] font-medium text-muted-foreground opacity-100">
|
|
||||||
{children}
|
|
||||||
</kbd>
|
|
||||||
)
|
|
||||||
|
|
||||||
function SearchButton() {
|
|
||||||
const [open, setOpen] = useState(false)
|
|
||||||
|
|
||||||
return (
|
|
||||||
<>
|
|
||||||
<Button
|
|
||||||
variant="outline"
|
|
||||||
className="hidden md:block text-sm text-muted-foreground px-4"
|
|
||||||
onClick={() => setOpen(true)}
|
|
||||||
>
|
|
||||||
<span className="flex items-center">
|
|
||||||
<SearchIcon className="me-1.5 h-4 w-4" />
|
|
||||||
<Trans>Search</Trans>
|
|
||||||
<span className="flex items-center ms-3.5">
|
|
||||||
<Kbd>{isMac ? "⌘" : "Ctrl"}</Kbd>
|
|
||||||
<Kbd>K</Kbd>
|
|
||||||
</span>
|
|
||||||
</span>
|
|
||||||
</Button>
|
|
||||||
<Suspense>
|
|
||||||
<CommandPalette open={open} setOpen={setOpen} />
|
|
||||||
</Suspense>
|
|
||||||
</>
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,6 +15,7 @@ import { isAdmin, pb } from "@/lib/api"
|
|||||||
import type { UserSettings } from "@/types"
|
import type { UserSettings } from "@/types"
|
||||||
import { saveSettings } from "./layout"
|
import { saveSettings } from "./layout"
|
||||||
import { QuietHours } from "./quiet-hours"
|
import { QuietHours } from "./quiet-hours"
|
||||||
|
import type { ClientResponseError } from "pocketbase"
|
||||||
|
|
||||||
interface ShoutrrrUrlCardProps {
|
interface ShoutrrrUrlCardProps {
|
||||||
url: string
|
url: string
|
||||||
@@ -59,10 +60,10 @@ const SettingsNotificationsPage = ({ userSettings }: { userSettings: UserSetting
|
|||||||
try {
|
try {
|
||||||
const parsedData = v.parse(NotificationSchema, { emails, webhooks })
|
const parsedData = v.parse(NotificationSchema, { emails, webhooks })
|
||||||
await saveSettings(parsedData)
|
await saveSettings(parsedData)
|
||||||
} catch (e: any) {
|
} catch (e: unknown) {
|
||||||
toast({
|
toast({
|
||||||
title: t`Failed to save settings`,
|
title: t`Failed to save settings`,
|
||||||
description: e.message,
|
description: (e as Error).message,
|
||||||
variant: "destructive",
|
variant: "destructive",
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -136,12 +137,7 @@ const SettingsNotificationsPage = ({ userSettings }: { userSettings: UserSetting
|
|||||||
</Trans>
|
</Trans>
|
||||||
</p>
|
</p>
|
||||||
</div>
|
</div>
|
||||||
<Button
|
<Button type="button" variant="outline" className="h-10 shrink-0" onClick={addWebhook}>
|
||||||
type="button"
|
|
||||||
variant="outline"
|
|
||||||
className="h-10 shrink-0"
|
|
||||||
onClick={addWebhook}
|
|
||||||
>
|
|
||||||
<PlusIcon className="size-4" />
|
<PlusIcon className="size-4" />
|
||||||
<span className="ms-1">
|
<span className="ms-1">
|
||||||
<Trans>Add URL</Trans>
|
<Trans>Add URL</Trans>
|
||||||
@@ -180,11 +176,20 @@ const SettingsNotificationsPage = ({ userSettings }: { userSettings: UserSetting
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function showTestNotificationError(msg: string) {
|
||||||
|
toast({
|
||||||
|
title: t`Error`,
|
||||||
|
description: msg ?? t`Failed to send test notification`,
|
||||||
|
variant: "destructive",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
const ShoutrrrUrlCard = ({ url, onUrlChange, onRemove }: ShoutrrrUrlCardProps) => {
|
const ShoutrrrUrlCard = ({ url, onUrlChange, onRemove }: ShoutrrrUrlCardProps) => {
|
||||||
const [isLoading, setIsLoading] = useState(false)
|
const [isLoading, setIsLoading] = useState(false)
|
||||||
|
|
||||||
const sendTestNotification = async () => {
|
const sendTestNotification = async () => {
|
||||||
setIsLoading(true)
|
setIsLoading(true)
|
||||||
|
try {
|
||||||
const res = await pb.send("/api/beszel/test-notification", { method: "POST", body: { url } })
|
const res = await pb.send("/api/beszel/test-notification", { method: "POST", body: { url } })
|
||||||
if ("err" in res && !res.err) {
|
if ("err" in res && !res.err) {
|
||||||
toast({
|
toast({
|
||||||
@@ -192,14 +197,14 @@ const ShoutrrrUrlCard = ({ url, onUrlChange, onRemove }: ShoutrrrUrlCardProps) =
|
|||||||
description: t`Check your notification service`,
|
description: t`Check your notification service`,
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
toast({
|
showTestNotificationError(res.err)
|
||||||
title: t`Error`,
|
|
||||||
description: res.err ?? t`Failed to send test notification`,
|
|
||||||
variant: "destructive",
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
} catch (e: unknown) {
|
||||||
|
showTestNotificationError((e as ClientResponseError).data?.message)
|
||||||
|
} finally {
|
||||||
setIsLoading(false)
|
setIsLoading(false)
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Card className="bg-table-header p-2 md:p-3">
|
<Card className="bg-table-header p-2 md:p-3">
|
||||||
|
|||||||
@@ -134,10 +134,10 @@ export function QuietHours() {
|
|||||||
const startMinutes = startDate.getUTCHours() * 60 + startDate.getUTCMinutes()
|
const startMinutes = startDate.getUTCHours() * 60 + startDate.getUTCMinutes()
|
||||||
const endMinutes = endDate.getUTCHours() * 60 + endDate.getUTCMinutes()
|
const endMinutes = endDate.getUTCHours() * 60 + endDate.getUTCMinutes()
|
||||||
|
|
||||||
// Convert UTC to local time offset
|
// Convert UTC to local time using the stored date's offset, not the current date's offset
|
||||||
const offset = now.getTimezoneOffset()
|
// This avoids DST mismatch when records were saved in a different DST period
|
||||||
const localStartMinutes = (startMinutes - offset + 1440) % 1440
|
const localStartMinutes = (startMinutes - startDate.getTimezoneOffset() + 1440) % 1440
|
||||||
const localEndMinutes = (endMinutes - offset + 1440) % 1440
|
const localEndMinutes = (endMinutes - endDate.getTimezoneOffset() + 1440) % 1440
|
||||||
|
|
||||||
// Handle cases where window spans midnight
|
// Handle cases where window spans midnight
|
||||||
if (localStartMinutes <= localEndMinutes) {
|
if (localStartMinutes <= localEndMinutes) {
|
||||||
@@ -347,12 +347,13 @@ function QuietHoursDialog({
|
|||||||
|
|
||||||
if (windowType === "daily") {
|
if (windowType === "daily") {
|
||||||
// For daily windows, convert local time to UTC
|
// For daily windows, convert local time to UTC
|
||||||
// Create a date with the time in local timezone, then convert to UTC
|
// Use today's date so the current DST offset is applied (not a fixed historical date)
|
||||||
const startDate = new Date(`2000-01-01T${startTime}:00`)
|
const today = new Date().toISOString().split("T")[0]
|
||||||
|
const startDate = new Date(`${today}T${startTime}:00`)
|
||||||
startValue = startDate.toISOString()
|
startValue = startDate.toISOString()
|
||||||
|
|
||||||
if (endTime) {
|
if (endTime) {
|
||||||
const endDate = new Date(`2000-01-01T${endTime}:00`)
|
const endDate = new Date(`${today}T${endTime}:00`)
|
||||||
endValue = endDate.toISOString()
|
endValue = endDate.toISOString()
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
133
internal/site/src/components/routes/system/chart-card.tsx
Normal file
133
internal/site/src/components/routes/system/chart-card.tsx
Normal file
@@ -0,0 +1,133 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import { Trans, useLingui } from "@lingui/react/macro"
|
||||||
|
import { useStore } from "@nanostores/react"
|
||||||
|
import { XIcon } from "lucide-react"
|
||||||
|
import React, { type JSX, memo, useCallback, useEffect, useState } from "react"
|
||||||
|
import { $containerFilter, $maxValues } from "@/lib/stores"
|
||||||
|
import { useIntersectionObserver } from "@/lib/use-intersection-observer"
|
||||||
|
import { cn } from "@/lib/utils"
|
||||||
|
import Spinner from "../../spinner"
|
||||||
|
import { Button } from "../../ui/button"
|
||||||
|
import { Card, CardDescription, CardHeader, CardTitle } from "../../ui/card"
|
||||||
|
import { ChartAverage, ChartMax } from "../../ui/icons"
|
||||||
|
import { Input } from "../../ui/input"
|
||||||
|
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "../../ui/select"
|
||||||
|
|
||||||
|
export function FilterBar({ store = $containerFilter }: { store?: typeof $containerFilter }) {
|
||||||
|
const storeValue = useStore(store)
|
||||||
|
const [inputValue, setInputValue] = useState(storeValue)
|
||||||
|
const { t } = useLingui()
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
setInputValue(storeValue)
|
||||||
|
}, [storeValue])
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (inputValue === storeValue) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
const handle = window.setTimeout(() => store.set(inputValue), 80)
|
||||||
|
return () => clearTimeout(handle)
|
||||||
|
}, [inputValue, storeValue, store])
|
||||||
|
|
||||||
|
const handleChange = useCallback((e: React.ChangeEvent<HTMLInputElement>) => {
|
||||||
|
const value = e.target.value
|
||||||
|
setInputValue(value)
|
||||||
|
}, [])
|
||||||
|
|
||||||
|
const handleClear = useCallback(() => {
|
||||||
|
setInputValue("")
|
||||||
|
store.set("")
|
||||||
|
}, [store])
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<Input
|
||||||
|
placeholder={t`Filter...`}
|
||||||
|
className="ps-4 pe-8 w-full sm:w-44"
|
||||||
|
onChange={handleChange}
|
||||||
|
value={inputValue}
|
||||||
|
/>
|
||||||
|
{inputValue && (
|
||||||
|
<Button
|
||||||
|
type="button"
|
||||||
|
variant="ghost"
|
||||||
|
size="icon"
|
||||||
|
aria-label="Clear"
|
||||||
|
className="absolute right-1 top-1/2 -translate-y-1/2 h-7 w-7 text-gray-500 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-100"
|
||||||
|
onClick={handleClear}
|
||||||
|
>
|
||||||
|
<XIcon className="h-4 w-4" />
|
||||||
|
</Button>
|
||||||
|
)}
|
||||||
|
</>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Dropdown toggling charts between average values and 1-minute max values (writes $maxValues). */
export const SelectAvgMax = memo(({ max }: { max: boolean }) => {
	const TriggerIcon = max ? ChartMax : ChartAverage
	const current = max ? "max" : "avg"
	return (
		<Select value={current} onValueChange={(v) => $maxValues.set(v === "max")}>
			<SelectTrigger className="relative ps-10 pe-5 w-full sm:w-44">
				<TriggerIcon className="h-4 w-4 absolute start-4 top-1/2 -translate-y-1/2 opacity-85" />
				<SelectValue />
			</SelectTrigger>
			<SelectContent>
				<SelectItem key="avg" value="avg">
					<Trans>Average</Trans>
				</SelectItem>
				<SelectItem key="max" value="max">
					<Trans comment="Chart select field. Please try to keep this short.">Max 1 min</Trans>
				</SelectItem>
			</SelectContent>
		</Select>
	)
})
|
||||||
|
|
||||||
|
export function ChartCard({
|
||||||
|
title,
|
||||||
|
description,
|
||||||
|
children,
|
||||||
|
grid,
|
||||||
|
empty,
|
||||||
|
cornerEl,
|
||||||
|
legend,
|
||||||
|
className,
|
||||||
|
}: {
|
||||||
|
title: string
|
||||||
|
description: string
|
||||||
|
children: React.ReactNode
|
||||||
|
grid?: boolean
|
||||||
|
empty?: boolean
|
||||||
|
cornerEl?: JSX.Element | null
|
||||||
|
legend?: boolean
|
||||||
|
className?: string
|
||||||
|
}) {
|
||||||
|
const { isIntersecting, ref } = useIntersectionObserver()
|
||||||
|
|
||||||
|
return (
|
||||||
|
<Card
|
||||||
|
className={cn(
|
||||||
|
"px-3 py-5 sm:py-6 sm:px-6 odd:last-of-type:col-span-full min-h-full",
|
||||||
|
{ "col-span-full": !grid },
|
||||||
|
className
|
||||||
|
)}
|
||||||
|
ref={ref}
|
||||||
|
>
|
||||||
|
<CardHeader className="gap-1.5 relative p-0 mb-3 sm:mb-4">
|
||||||
|
<CardTitle>{title}</CardTitle>
|
||||||
|
<CardDescription>{description}</CardDescription>
|
||||||
|
{cornerEl && <div className="grid sm:justify-end sm:absolute sm:top-0 sm:end-0 my-1 sm:my-0">{cornerEl}</div>}
|
||||||
|
</CardHeader>
|
||||||
|
<div className={cn("ps-0 -me-1 -ms-3.5 relative group", legend ? "h-54 md:h-56" : "h-48 md:h-52")}>
|
||||||
|
{
|
||||||
|
<Spinner
|
||||||
|
msg={empty ? t`Waiting for enough records to display` : undefined}
|
||||||
|
className="group-has-[.opacity-100]:invisible duration-100"
|
||||||
|
/>
|
||||||
|
}
|
||||||
|
{isIntersecting && children}
|
||||||
|
</div>
|
||||||
|
</Card>
|
||||||
|
)
|
||||||
|
}
|
||||||
116
internal/site/src/components/routes/system/chart-data.ts
Normal file
116
internal/site/src/components/routes/system/chart-data.ts
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
import { timeTicks } from "d3-time"
|
||||||
|
import { getPbTimestamp, pb } from "@/lib/api"
|
||||||
|
import { chartTimeData } from "@/lib/utils"
|
||||||
|
import type { ChartData, ChartTimes, ContainerStatsRecord, SystemStatsRecord } from "@/types"
|
||||||
|
|
||||||
|
// Cached tick/domain payload for the currently selected chart time range.
type ChartTimeData = {
	// ms timestamp when this entry was computed; compared against the newest record
	time: number
	data: {
		ticks: number[]
		domain: number[]
	}
	chartTime: ChartTimes
}

// Module-level cache shared by the chart data helpers.
// Keys: "td" for tick/domain data; `${systemId}_${chartTime}_${collection}`
// for fetched stats records (see getStats) — presumably written by callers.
export const cache = new Map<
	string,
	ChartTimeData | SystemStatsRecord[] | ContainerStatsRecord[] | ChartData["containerData"]
>()
|
||||||
|
|
||||||
|
// create ticks and domain for charts
|
||||||
|
export function getTimeData(chartTime: ChartTimes, lastCreated: number) {
|
||||||
|
const cached = cache.get("td") as ChartTimeData | undefined
|
||||||
|
if (cached && cached.chartTime === chartTime) {
|
||||||
|
if (!lastCreated || cached.time >= lastCreated) {
|
||||||
|
return cached.data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// const buffer = chartTime === "1m" ? 400 : 20_000
|
||||||
|
const now = new Date(Date.now())
|
||||||
|
const startTime = chartTimeData[chartTime].getOffset(now)
|
||||||
|
const ticks = timeTicks(startTime, now, chartTimeData[chartTime].ticks ?? 12).map((date) => date.getTime())
|
||||||
|
const data = {
|
||||||
|
ticks,
|
||||||
|
domain: [chartTimeData[chartTime].getOffset(now).getTime(), now.getTime()],
|
||||||
|
}
|
||||||
|
cache.set("td", { time: now.getTime(), data, chartTime })
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Append new records onto prev with gap detection. Converts string `created` values to ms timestamps in place.
|
||||||
|
* Pass `maxLen` to cap the result length in one copy instead of slicing again after the call. */
|
||||||
|
export function appendData<T extends { created: string | number | null }>(
|
||||||
|
prev: T[],
|
||||||
|
newRecords: T[],
|
||||||
|
expectedInterval: number,
|
||||||
|
maxLen?: number
|
||||||
|
): T[] {
|
||||||
|
if (!newRecords.length) return prev
|
||||||
|
// Pre-trim prev so the single slice() below is the only copy we make
|
||||||
|
const trimmed = maxLen && prev.length >= maxLen ? prev.slice(-(maxLen - newRecords.length)) : prev
|
||||||
|
const result = trimmed.slice()
|
||||||
|
let prevTime = (trimmed.at(-1)?.created as number) ?? 0
|
||||||
|
for (const record of newRecords) {
|
||||||
|
if (record.created !== null) {
|
||||||
|
if (typeof record.created === "string") {
|
||||||
|
record.created = new Date(record.created).getTime()
|
||||||
|
}
|
||||||
|
if (prevTime && (record.created as number) - prevTime > expectedInterval * 1.5) {
|
||||||
|
result.push({ created: null, ...("stats" in record ? { stats: null } : {}) } as T)
|
||||||
|
}
|
||||||
|
prevTime = record.created as number
|
||||||
|
}
|
||||||
|
result.push(record)
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Fetch stats records for a system from PocketBase, requesting only rows newer
 * than the latest record cached under `${systemId}_${chartTime}_${collection}`.
 * Reads the cache but does not write it — callers merge and store results. */
export async function getStats<T extends SystemStatsRecord | ContainerStatsRecord>(
	collection: string,
	systemId: string,
	chartTime: ChartTimes
): Promise<T[]> {
	const cachedStats = cache.get(`${systemId}_${chartTime}_${collection}`) as T[] | undefined
	// Epoch-ms timestamp of the newest cached record (undefined when cache is empty).
	const lastCached = cachedStats?.at(-1)?.created as number
	return await pb.collection<T>(collection).getFullList({
		// +1000ms so the newest already-cached record is not fetched again.
		filter: pb.filter("system={:id} && created > {:created} && type={:type}", {
			id: systemId,
			created: getPbTimestamp(chartTime, lastCached ? new Date(lastCached + 1000) : undefined),
			type: chartTimeData[chartTime].type,
		}),
		fields: "created,stats",
		sort: "created",
	})
}
|
||||||
|
|
||||||
|
export function makeContainerData(containers: ContainerStatsRecord[]): ChartData["containerData"] {
|
||||||
|
const result = [] as ChartData["containerData"]
|
||||||
|
for (const { created, stats } of containers) {
|
||||||
|
if (!created) {
|
||||||
|
result.push({ created: null } as ChartData["containerData"][0])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
result.push(makeContainerPoint(new Date(created).getTime(), stats))
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Transform a single realtime container stats message into a ChartDataContainer point. */
|
||||||
|
export function makeContainerPoint(
|
||||||
|
created: number,
|
||||||
|
stats: ContainerStatsRecord["stats"]
|
||||||
|
): ChartData["containerData"][0] {
|
||||||
|
const point: ChartData["containerData"][0] = { created } as ChartData["containerData"][0]
|
||||||
|
for (const container of stats) {
|
||||||
|
;(point as Record<string, unknown>)[container.n] = container
|
||||||
|
}
|
||||||
|
return point
|
||||||
|
}
|
||||||
|
|
||||||
|
export function dockerOrPodman(str: string, isPodman: boolean): string {
|
||||||
|
if (isPodman) {
|
||||||
|
return str.replace("docker", "podman").replace("Docker", "Podman")
|
||||||
|
}
|
||||||
|
return str
|
||||||
|
}
|
||||||
@@ -0,0 +1,99 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import AreaChartDefault from "@/components/charts/area-chart"
|
||||||
|
import { useContainerDataPoints } from "@/components/charts/hooks"
|
||||||
|
import { decimalString, toFixedFloat } from "@/lib/utils"
|
||||||
|
import type { ChartConfig } from "@/components/ui/chart"
|
||||||
|
import type { ChartData } from "@/types"
|
||||||
|
import { pinnedAxisDomain } from "@/components/ui/chart"
|
||||||
|
import CpuCoresSheet from "../cpu-sheet"
|
||||||
|
import { ChartCard, FilterBar, SelectAvgMax } from "../chart-card"
|
||||||
|
import { dockerOrPodman } from "../chart-data"
|
||||||
|
|
||||||
|
/** System-wide CPU usage area chart for the main grid.
 * Shows average or 1-minute-max values depending on `showMax`; the avg/max
 * selector is only rendered for longer time ranges (aggregated records). */
export function CpuChart({
	chartData,
	grid,
	dataEmpty,
	showMax,
	isLongerChart,
	maxValues,
}: {
	chartData: ChartData
	grid: boolean
	dataEmpty: boolean
	showMax: boolean
	isLongerChart: boolean
	maxValues: boolean
}) {
	const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null

	return (
		<ChartCard
			empty={dataEmpty}
			grid={grid}
			title={t`CPU Usage`}
			description={t`Average system-wide CPU utilization`}
			cornerEl={
				<div className="flex gap-2">
					{maxValSelect}
					<CpuCoresSheet chartData={chartData} dataEmpty={dataEmpty} grid={grid} maxValues={maxValues} />
				</div>
			}
		>
			<AreaChartDefault
				chartData={chartData}
				maxToggled={showMax}
				dataPoints={[
					{
						label: t`CPU Usage`,
						// cpum is the 1-min max series, cpu the average series.
						dataKey: ({ stats }) => (showMax ? stats?.cpum : stats?.cpu),
						color: 1,
						opacity: 0.4,
					},
				]}
				tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
				contentFormatter={({ value }) => `${decimalString(value)}%`}
				domain={pinnedAxisDomain()}
			/>
		</ChartCard>
	)
}
|
||||||
|
|
||||||
|
/** Per-container CPU usage stacked area chart (title adapts for Docker/Podman). */
export function ContainerCpuChart({
	chartData,
	grid,
	dataEmpty,
	isPodman,
	cpuConfig,
}: {
	chartData: ChartData
	grid: boolean
	dataEmpty: boolean
	isPodman: boolean
	cpuConfig: ChartConfig
}) {
	// `c` appears to be each container's CPU value in the stats payload — confirm against agent.
	const { filter, dataPoints } = useContainerDataPoints(cpuConfig, (key, data) => data[key]?.c ?? null)

	return (
		<ChartCard
			empty={dataEmpty}
			grid={grid}
			title={dockerOrPodman(t`Docker CPU Usage`, isPodman)}
			description={t`Average CPU utilization of containers`}
			cornerEl={<FilterBar />}
		>
			<AreaChartDefault
				chartData={chartData}
				customData={chartData.containerData}
				dataPoints={dataPoints}
				tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
				contentFormatter={({ value }) => `${decimalString(value)}%`}
				domain={pinnedAxisDomain()}
				showTotal={true}
				reverseStackOrder={true}
				filter={filter}
				truncate={true}
				itemSorter={(a, b) => b.value - a.value}
			/>
		</ChartCard>
	)
}
|
||||||
@@ -0,0 +1,283 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import AreaChartDefault from "@/components/charts/area-chart"
|
||||||
|
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||||
|
import type { SystemStatsRecord } from "@/types"
|
||||||
|
import { ChartCard, SelectAvgMax } from "../chart-card"
|
||||||
|
import { Unit } from "@/lib/enums"
|
||||||
|
import { pinnedAxisDomain } from "@/components/ui/chart"
|
||||||
|
import DiskIoSheet from "../disk-io-sheet"
|
||||||
|
import type { SystemData } from "../use-system-data"
|
||||||
|
import { useStore } from "@nanostores/react"
|
||||||
|
import { $userSettings } from "@/lib/stores"
|
||||||
|
|
||||||
|
// Helpers for indexed dios/diosm access
|
||||||
|
const dios =
|
||||||
|
(i: number) =>
|
||||||
|
({ stats }: SystemStatsRecord) =>
|
||||||
|
stats?.dios?.[i] ?? 0
|
||||||
|
const diosMax =
|
||||||
|
(i: number) =>
|
||||||
|
({ stats }: SystemStatsRecord) =>
|
||||||
|
stats?.diosm?.[i] ?? 0
|
||||||
|
const extraDios =
|
||||||
|
(name: string, i: number) =>
|
||||||
|
({ stats }: SystemStatsRecord) =>
|
||||||
|
stats?.efs?.[name]?.dios?.[i] ?? 0
|
||||||
|
const extraDiosMax =
|
||||||
|
(name: string, i: number) =>
|
||||||
|
({ stats }: SystemStatsRecord) =>
|
||||||
|
stats?.efs?.[name]?.diosm?.[i] ?? 0
|
||||||
|
|
||||||
|
/** Accessors mapping system stats records to disk chart values.
 * `dio`/`diom` (and `rb`/`wb` for extra filesystems) hold newer byte-based
 * values; legacy fields (dr/dw/drm/dwm, r/w/rm/wm) are scaled by 1024*1024 —
 * presumably MB → bytes; confirm against agent record format. */
export const diskDataFns = {
	// usage
	usage: ({ stats }: SystemStatsRecord) => stats?.du ?? 0,
	extraUsage:
		(name: string) =>
		({ stats }: SystemStatsRecord) =>
			stats?.efs?.[name]?.du ?? 0,
	// throughput (newer byte fields with legacy fallback)
	read: ({ stats }: SystemStatsRecord) => stats?.dio?.[0] ?? (stats?.dr ?? 0) * 1024 * 1024,
	readMax: ({ stats }: SystemStatsRecord) => stats?.diom?.[0] ?? (stats?.drm ?? 0) * 1024 * 1024,
	write: ({ stats }: SystemStatsRecord) => stats?.dio?.[1] ?? (stats?.dw ?? 0) * 1024 * 1024,
	writeMax: ({ stats }: SystemStatsRecord) => stats?.diom?.[1] ?? (stats?.dwm ?? 0) * 1024 * 1024,
	// extra fs throughput
	extraRead:
		(name: string) =>
		({ stats }: SystemStatsRecord) =>
			stats?.efs?.[name]?.rb ?? (stats?.efs?.[name]?.r ?? 0) * 1024 * 1024,
	extraReadMax:
		(name: string) =>
		({ stats }: SystemStatsRecord) =>
			stats?.efs?.[name]?.rbm ?? (stats?.efs?.[name]?.rm ?? 0) * 1024 * 1024,
	extraWrite:
		(name: string) =>
		({ stats }: SystemStatsRecord) =>
			stats?.efs?.[name]?.wb ?? (stats?.efs?.[name]?.w ?? 0) * 1024 * 1024,
	extraWriteMax:
		(name: string) =>
		({ stats }: SystemStatsRecord) =>
			stats?.efs?.[name]?.wbm ?? (stats?.efs?.[name]?.wm ?? 0) * 1024 * 1024,
	// read/write time (dios/diosm slots 0 and 1)
	readTime: dios(0),
	readTimeMax: diosMax(0),
	extraReadTime: (name: string) => extraDios(name, 0),
	extraReadTimeMax: (name: string) => extraDiosMax(name, 0),
	writeTime: dios(1),
	writeTimeMax: diosMax(1),
	extraWriteTime: (name: string) => extraDios(name, 1),
	extraWriteTimeMax: (name: string) => extraDiosMax(name, 1),
	// utilization (IoTime-based, 0-100%) — slot 2
	util: dios(2),
	utilMax: diosMax(2),
	extraUtil: (name: string) => extraDios(name, 2),
	extraUtilMax: (name: string) => extraDiosMax(name, 2),
	// r_await / w_await: average service time per read/write operation (ms) — slots 3 and 4
	rAwait: dios(3),
	rAwaitMax: diosMax(3),
	extraRAwait: (name: string) => extraDios(name, 3),
	extraRAwaitMax: (name: string) => extraDiosMax(name, 3),
	wAwait: dios(4),
	wAwaitMax: diosMax(4),
	extraWAwait: (name: string) => extraDios(name, 4),
	extraWAwaitMax: (name: string) => extraDiosMax(name, 4),
	// average queue depth: stored as queue_depth * 100 in Go, divided here (slot 5)
	weightedIO: ({ stats }: SystemStatsRecord) => (stats?.dios?.[5] ?? 0) / 100,
	weightedIOMax: ({ stats }: SystemStatsRecord) => (stats?.diosm?.[5] ?? 0) / 100,
	extraWeightedIO:
		(name: string) =>
		({ stats }: SystemStatsRecord) =>
			(stats?.efs?.[name]?.dios?.[5] ?? 0) / 100,
	extraWeightedIOMax:
		(name: string) =>
		({ stats }: SystemStatsRecord) =>
			(stats?.efs?.[name]?.diosm?.[5] ?? 0) / 100,
}
|
||||||
|
|
||||||
|
export function RootDiskCharts({ systemData }: { systemData: SystemData }) {
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<DiskUsageChart systemData={systemData} />
|
||||||
|
<DiskIOChart systemData={systemData} />
|
||||||
|
</>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Disk usage area chart for the root partition, or a named extra filesystem
 * when `extraFsName` is set. */
export function DiskUsageChart({ systemData, extraFsName }: { systemData: SystemData; extraFsName?: string }) {
	const { chartData, grid, dataEmpty } = systemData

	// Total partition size from the latest record; NaN leaves the Y axis unpinned.
	let diskSize = chartData.systemStats?.at(-1)?.stats.d ?? NaN
	if (extraFsName) {
		diskSize = chartData.systemStats?.at(-1)?.stats.efs?.[extraFsName]?.d ?? NaN
	}
	// round to nearest GB
	if (diskSize >= 100) {
		diskSize = Math.round(diskSize)
	}

	const title = extraFsName ? `${extraFsName} ${t`Usage`}` : t`Disk Usage`
	const description = extraFsName ? t`Disk usage of ${extraFsName}` : t`Usage of root partition`

	return (
		<ChartCard empty={dataEmpty} grid={grid} title={title} description={description}>
			<AreaChartDefault
				chartData={chartData}
				domain={[0, diskSize]}
				tickFormatter={(val) => {
					// val is scaled by 1024 before byte formatting — source unit looks MB-based; confirm.
					const { value, unit } = formatBytes(val * 1024, false, Unit.Bytes, true)
					return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
				}}
				contentFormatter={({ value }) => {
					const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
					return `${decimalString(convertedValue)} ${unit}`
				}}
				dataPoints={[
					{
						label: t`Disk Usage`,
						color: 4,
						opacity: 0.4,
						dataKey: extraFsName ? diskDataFns.extraUsage(extraFsName) : diskDataFns.usage,
					},
				]}
			></AreaChartDefault>
		</ChartCard>
	)
}
|
||||||
|
|
||||||
|
/** Disk read/write throughput chart for the root filesystem or an extra filesystem. */
export function DiskIOChart({ systemData, extraFsName }: { systemData: SystemData; extraFsName?: string }) {
	const { chartData, grid, dataEmpty, showMax, isLongerChart, maxValues } = systemData
	const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
	// Hook must run before the conditional return below.
	const userSettings = useStore($userSettings)

	if (!chartData.systemStats?.length) {
		return null
	}

	const title = extraFsName ? `${extraFsName} I/O` : t`Disk I/O`
	const description = extraFsName ? t`Throughput of ${extraFsName}` : t`Throughput of root filesystem`

	// Extended I/O stats (dios) present => show the detail sheet button.
	const hasMoreIOMetrics = chartData.systemStats?.some((record) => record.stats?.dios?.at(0))

	let CornerEl = maxValSelect
	if (hasMoreIOMetrics) {
		CornerEl = (
			<div className="flex gap-2">
				{maxValSelect}
				<DiskIoSheet systemData={systemData} extraFsName={extraFsName} title={title} description={description} />
			</div>
		)
	}

	// Pick avg/max accessors for root or the named extra filesystem.
	let readFn = showMax ? diskDataFns.readMax : diskDataFns.read
	let writeFn = showMax ? diskDataFns.writeMax : diskDataFns.write
	if (extraFsName) {
		readFn = showMax ? diskDataFns.extraReadMax(extraFsName) : diskDataFns.extraRead(extraFsName)
		writeFn = showMax ? diskDataFns.extraWriteMax(extraFsName) : diskDataFns.extraWrite(extraFsName)
	}

	return (
		<ChartCard empty={dataEmpty} grid={grid} title={title} description={description} cornerEl={CornerEl}>
			<AreaChartDefault
				chartData={chartData}
				maxToggled={showMax}
				// domain={pinnedAxisDomain(true)}
				showTotal={true}
				dataPoints={[
					{
						label: t({ message: "Write", comment: "Disk write" }),
						dataKey: writeFn,
						color: 3,
						opacity: 0.3,
					},
					{
						label: t({ message: "Read", comment: "Disk read" }),
						dataKey: readFn,
						color: 1,
						opacity: 0.3,
					},
				]}
				tickFormatter={(val) => {
					const { value, unit } = formatBytes(val, true, userSettings.unitDisk, false)
					return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
				}}
				contentFormatter={({ value }) => {
					const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
					return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
				}}
			/>
		</ChartCard>
	)
}
|
||||||
|
|
||||||
|
/** Disk I/O utilization chart (% of time the disk is busy) for the root or an
 * extra filesystem. Cursor is synced with other charts via syncId "io". */
export function DiskUtilizationChart({ systemData, extraFsName }: { systemData: SystemData; extraFsName?: string }) {
	const { chartData, grid, dataEmpty, showMax, isLongerChart, maxValues } = systemData
	const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null

	if (!chartData.systemStats?.length) {
		return null
	}

	// Accessor for avg or max utilization, root or extra filesystem.
	let utilFn = showMax ? diskDataFns.utilMax : diskDataFns.util
	if (extraFsName) {
		utilFn = showMax ? diskDataFns.extraUtilMax(extraFsName) : diskDataFns.extraUtil(extraFsName)
	}
	return (
		<ChartCard
			cornerEl={maxValSelect}
			empty={dataEmpty}
			grid={grid}
			title={t({
				message: `I/O Utilization`,
				context: "Percent of time the disk is busy with I/O",
			})}
			description={t`Percent of time the disk is busy with I/O`}
			// legend={true}
			className="min-h-auto"
		>
			<AreaChartDefault
				chartData={chartData}
				domain={pinnedAxisDomain()}
				tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
				contentFormatter={({ value }) => `${decimalString(value)}%`}
				maxToggled={showMax}
				chartProps={{ syncId: "io" }}
				dataPoints={[
					{
						label: t({ message: "Utilization", context: "Disk I/O utilization" }),
						dataKey: utilFn,
						color: 1,
						opacity: 0.4,
					},
				]}
			/>
		</ChartCard>
	)
}
|
||||||
|
|
||||||
|
export function ExtraFsCharts({ systemData }: { systemData: SystemData }) {
|
||||||
|
const { systemStats } = systemData.chartData
|
||||||
|
|
||||||
|
const extraFs = systemStats?.at(-1)?.stats.efs
|
||||||
|
|
||||||
|
if (!extraFs || Object.keys(extraFs).length === 0) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="grid xl:grid-cols-2 gap-4">
|
||||||
|
{Object.keys(extraFs).map((extraFsName) => {
|
||||||
|
let diskSize = systemStats.at(-1)?.stats.efs?.[extraFsName].d ?? NaN
|
||||||
|
// round to nearest GB
|
||||||
|
if (diskSize >= 100) {
|
||||||
|
diskSize = Math.round(diskSize)
|
||||||
|
}
|
||||||
|
return (
|
||||||
|
<div key={extraFsName} className="contents">
|
||||||
|
<DiskUsageChart systemData={systemData} extraFsName={extraFsName} />
|
||||||
|
|
||||||
|
<DiskIOChart systemData={systemData} extraFsName={extraFsName} />
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
232
internal/site/src/components/routes/system/charts/gpu-charts.tsx
Normal file
232
internal/site/src/components/routes/system/charts/gpu-charts.tsx
Normal file
@@ -0,0 +1,232 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import { useRef, useMemo } from "react"
|
||||||
|
import AreaChartDefault, { type DataPoint } from "@/components/charts/area-chart"
|
||||||
|
import LineChartDefault from "@/components/charts/line-chart"
|
||||||
|
import { Unit } from "@/lib/enums"
|
||||||
|
import { cn, decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||||
|
import type { ChartData, GPUData, SystemStatsRecord } from "@/types"
|
||||||
|
import { ChartCard } from "../chart-card"
|
||||||
|
|
||||||
|
/** GPU power draw chart for the main grid */
export function GpuPowerChart({
	chartData,
	grid,
	dataEmpty,
}: {
	chartData: ChartData
	grid: boolean
	dataEmpty: boolean
}) {
	// Suffix appended to a GPU name for its package-power series.
	const packageKey = " package"
	// Ref lets the useMemo below read fresh stats without declaring them as a
	// dependency; dataPoints only recompute when the set of series changes.
	const statsRef = useRef(chartData.systemStats)
	statsRef.current = chartData.systemStats

	// Derive GPU power config key (cheap per render)
	// Built from the most recent record that has GPU data; one entry per GPU
	// power series (plus one per package-power series), sorted for stability.
	let gpuPowerKey = ""
	for (let i = chartData.systemStats.length - 1; i >= 0; i--) {
		const gpus = chartData.systemStats[i].stats?.g
		if (gpus) {
			const parts: string[] = []
			for (const id in gpus) {
				const gpu = gpus[id] as GPUData
				if (gpu.p !== undefined) parts.push(`${id}:${gpu.n}`)
				if (gpu.pp !== undefined) parts.push(`${id}:${gpu.n}${packageKey}`)
			}
			gpuPowerKey = parts.sort().join("\0")
			break
		}
	}

	const dataPoints = useMemo((): DataPoint[] => {
		if (!gpuPowerKey) return []
		// Sum each series across all records so lines can be ordered by total draw.
		const totals = new Map<string, { label: string; gpuId: string; isPackage: boolean; total: number }>()
		for (const record of statsRef.current) {
			const gpus = record.stats?.g
			if (!gpus) continue
			for (const id in gpus) {
				const gpu = gpus[id] as GPUData
				const key = gpu.n
				const existing = totals.get(key)
				if (existing) {
					existing.total += gpu.p ?? 0
				} else {
					totals.set(key, { label: gpu.n, gpuId: id, isPackage: false, total: gpu.p ?? 0 })
				}
				// Track a separate series for package power when reported.
				if (gpu.pp !== undefined) {
					const pkgKey = `${gpu.n}${packageKey}`
					const existingPkg = totals.get(pkgKey)
					if (existingPkg) {
						existingPkg.total += gpu.pp
					} else {
						totals.set(pkgKey, { label: pkgKey, gpuId: id, isPackage: true, total: gpu.pp })
					}
				}
			}
		}
		// Largest total draw first; hue spread evenly per series index.
		const sorted = Array.from(totals.values()).sort((a, b) => b.total - a.total)
		return sorted.map(
			(entry, i): DataPoint => ({
				label: entry.label,
				dataKey: (data: SystemStatsRecord) => {
					const gpu = data.stats?.g?.[entry.gpuId]
					return entry.isPackage ? (gpu?.pp ?? 0) : (gpu?.p ?? 0)
				},
				color: `hsl(${226 + (((i * 360) / sorted.length) % 360)}, 65%, 52%)`,
				opacity: 1,
			})
		)
	}, [gpuPowerKey])

	return (
		<ChartCard
			empty={dataEmpty}
			grid={grid}
			title={t`GPU Power Draw`}
			description={t`Average power consumption of GPUs`}
		>
			<LineChartDefault
				legend={dataPoints.length > 1}
				chartData={chartData}
				dataPoints={dataPoints}
				itemSorter={(a: { value: number }, b: { value: number }) => b.value - a.value}
				tickFormatter={(val) => `${toFixedFloat(val, 2)}W`}
				contentFormatter={({ value }) => `${decimalString(value)}W`}
			/>
		</ChartCard>
	)
}
|
||||||
|
|
||||||
|
/** GPU detail grid (engines + per-GPU usage/VRAM) — rendered outside the main 2-col grid */
export function GpuDetailCharts({
	chartData,
	grid,
	dataEmpty,
	lastGpus,
	hasGpuEnginesData,
}: {
	chartData: ChartData
	grid: boolean
	dataEmpty: boolean
	// GPUs from the most recent record, keyed by GPU id.
	lastGpus: Record<string, GPUData>
	hasGpuEnginesData: boolean
}) {
	return (
		<div className="grid xl:grid-cols-2 gap-4">
			{hasGpuEnginesData && (
				<ChartCard
					legend={true}
					empty={dataEmpty}
					grid={grid}
					title={t`GPU Engines`}
					description={t`Average utilization of GPU engines`}
				>
					<GpuEnginesChart chartData={chartData} />
				</ChartCard>
			)}
			{Object.keys(lastGpus).map((id) => {
				const gpu = lastGpus[id] as GPUData
				return (
					<div key={id} className="contents">
						<ChartCard
							className={cn(grid && "!col-span-1")}
							empty={dataEmpty}
							grid={grid}
							title={`${gpu.n} ${t`Usage`}`}
							description={t`Average utilization of ${gpu.n}`}
						>
							<AreaChartDefault
								chartData={chartData}
								dataPoints={[
									{
										label: t`Usage`,
										dataKey: ({ stats }) => stats?.g?.[id]?.u ?? 0,
										color: 1,
										opacity: 0.35,
									},
								]}
								tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
								contentFormatter={({ value }) => `${decimalString(value)}%`}
							/>
						</ChartCard>

						{/* VRAM chart only when the GPU reports total memory (mt). */}
						{(gpu.mt ?? 0) > 0 && (
							<ChartCard
								empty={dataEmpty}
								grid={grid}
								title={`${gpu.n} VRAM`}
								description={t`Precise utilization at the recorded time`}
							>
								<AreaChartDefault
									chartData={chartData}
									dataPoints={[
										{
											label: t`Usage`,
											dataKey: ({ stats }) => stats?.g?.[id]?.mu ?? 0,
											color: 2,
											opacity: 0.25,
										},
									]}
									max={gpu.mt}
									tickFormatter={(val) => {
										const { value, unit } = formatBytes(val, false, Unit.Bytes, true)
										return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
									}}
									contentFormatter={({ value }) => {
										const { value: convertedValue, unit } = formatBytes(value, false, Unit.Bytes, true)
										return `${decimalString(convertedValue)} ${unit}`
									}}
								/>
							</ChartCard>
						)}
					</div>
				)
			})}
		</div>
	)
}
|
||||||
|
|
||||||
|
/** Line chart of per-engine utilization for the first GPU that reports engine data. */
function GpuEnginesChart({ chartData }: { chartData: ChartData }) {
	// Derive stable engine config key (cheap per render)
	// Key encodes the GPU id plus its sorted engine names, NUL-separated; the
	// useMemo below only recomputes when that key changes.
	let enginesKey = ""
	for (let i = chartData.systemStats.length - 1; i >= 0; i--) {
		const gpus = chartData.systemStats[i].stats?.g
		if (!gpus) continue
		for (const id in gpus) {
			if (gpus[id].e) {
				enginesKey = id + "\0" + Object.keys(gpus[id].e).sort().join("\0")
				break
			}
		}
		if (enginesKey) break
	}

	const { gpuId, dataPoints } = useMemo((): { gpuId: string | null; dataPoints: DataPoint[] } => {
		if (!enginesKey) return { gpuId: null, dataPoints: [] }
		const parts = enginesKey.split("\0")
		const gId = parts[0]
		const engineNames = parts.slice(1)
		return {
			gpuId: gId,
			dataPoints: engineNames.map((engine, i) => ({
				label: engine,
				dataKey: ({ stats }: SystemStatsRecord) => stats?.g?.[gId]?.e?.[engine] ?? 0,
				// Evenly spaced hues per engine index.
				color: `hsl(${140 + (((i * 360) / engineNames.length) % 360)}, 65%, 52%)`,
				opacity: 0.35,
			})),
		}
	}, [enginesKey])

	if (!gpuId) {
		return null
	}

	return (
		<LineChartDefault
			legend={true}
			chartData={chartData}
			dataPoints={dataPoints}
			tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
			contentFormatter={({ value }) => `${decimalString(value)}%`}
		/>
	)
}
|
||||||
@@ -0,0 +1,55 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import type { ChartData } from "@/types"
|
||||||
|
import { ChartCard } from "../chart-card"
|
||||||
|
import LineChartDefault from "@/components/charts/line-chart"
|
||||||
|
import { decimalString, toFixedFloat } from "@/lib/utils"
|
||||||
|
|
||||||
|
export function LoadAverageChart({
	chartData,
	grid,
	dataEmpty,
}: {
	chartData: ChartData
	grid: boolean
	dataEmpty: boolean
}) {
	// Agents at v0.12 or older never report load averages, so render
	// nothing rather than an empty card for them.
	const { major, minor } = chartData.agentVersion
	const versionTooOld = major === 0 && minor <= 12
	if (versionTooOld) {
		return null
	}

	return (
		<ChartCard
			title={t`Load Average`}
			description={t`System load averages over time`}
			empty={dataEmpty}
			grid={grid}
			legend={true}
		>
			<LineChartDefault
				chartData={chartData}
				legend={true}
				tickFormatter={(value) => String(toFixedFloat(value, 2))}
				contentFormatter={(item) => decimalString(item.value)}
				dataPoints={[
					{
						label: t({ message: `1 min`, comment: "Load average" }),
						color: "hsl(271, 81%, 60%)", // Purple
						dataKey: ({ stats }) => stats?.la?.[0],
					},
					{
						label: t({ message: `5 min`, comment: "Load average" }),
						color: "hsl(217, 91%, 60%)", // Blue
						dataKey: ({ stats }) => stats?.la?.[1],
					},
					{
						label: t({ message: `15 min`, comment: "Load average" }),
						color: "hsl(25, 95%, 53%)", // Orange
						dataKey: ({ stats }) => stats?.la?.[2],
					},
				]}
			/>
		</ChartCard>
	)
}
|
||||||
@@ -0,0 +1,170 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import AreaChartDefault from "@/components/charts/area-chart"
|
||||||
|
import { useContainerDataPoints } from "@/components/charts/hooks"
|
||||||
|
import { Unit } from "@/lib/enums"
|
||||||
|
import type { ChartConfig } from "@/components/ui/chart"
|
||||||
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
|
import { ChartCard, FilterBar, SelectAvgMax } from "../chart-card"
|
||||||
|
import { dockerOrPodman } from "../chart-data"
|
||||||
|
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||||
|
import { pinnedAxisDomain } from "@/components/ui/chart"
|
||||||
|
|
||||||
|
export function MemoryChart({
	chartData,
	grid,
	dataEmpty,
	showMax,
	isLongerChart,
	maxValues,
}: {
	chartData: ChartData
	grid: boolean
	dataEmpty: boolean
	showMax: boolean
	isLongerChart: boolean
	maxValues: boolean
}) {
	// System memory card: "Used" and "Cache / Buffers" share stackId "1",
	// with ZFS ARC drawn as its own unstacked series.
	// The Avg/Max selector is only offered for longer ranges (isLongerChart),
	// where records aggregate multiple samples.
	const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
	// Pin the Y-axis top to the latest reported total memory (stats.m).
	// NOTE(review): values appear to be in GB — they are scaled by 1024 before
	// byte formatting below; confirm against the agent's reported units.
	const totalMem = toFixedFloat(chartData.systemStats.at(-1)?.stats.m ?? 0, 1)

	return (
		<ChartCard
			empty={dataEmpty}
			grid={grid}
			title={t`Memory Usage`}
			description={t`Precise utilization at the recorded time`}
			cornerEl={maxValSelect}
		>
			<AreaChartDefault
				chartData={chartData}
				domain={[0, totalMem]}
				itemSorter={(a, b) => a.order - b.order}
				maxToggled={showMax}
				showTotal={true}
				tickFormatter={(value) => {
					// Scale to bytes, then drop decimals once the raw value reaches 10.
					const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
					return `${toFixedFloat(convertedValue, value >= 10 ? 0 : 1)} ${unit}`
				}}
				contentFormatter={({ value }) => {
					// Tooltip: more precision for small values, less for >= 100.
					const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
					return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
				}}
				dataPoints={[
					{
						// Used memory; per-interval max (stats.mm) when max mode is on.
						label: t`Used`,
						dataKey: ({ stats }) => (showMax ? stats?.mm : stats?.mu),
						color: 2,
						opacity: 0.4,
						stackId: "1",
						order: 3,
					},
					{
						// ZFS ARC cache — hidden in max mode (dataKey returns null).
						label: "ZFS ARC",
						dataKey: ({ stats }) => (showMax ? null : stats?.mz),
						color: "hsla(175 60% 45% / 0.8)",
						opacity: 0.5,
						order: 2,
					},
					{
						// Cache/buffers — also hidden in max mode.
						label: t`Cache / Buffers`,
						dataKey: ({ stats }) => (showMax ? null : stats?.mb),
						color: "hsla(160 60% 45% / 0.5)",
						opacity: 0.4,
						stackId: "1",
						order: 1,
					},
				]}
			/>
		</ChartCard>
	)
}
|
||||||
|
|
||||||
|
export function ContainerMemoryChart({
	chartData,
	grid,
	dataEmpty,
	isPodman,
	memoryConfig,
}: {
	chartData: ChartData
	grid: boolean
	dataEmpty: boolean
	isPodman: boolean
	memoryConfig: ChartConfig
}) {
	// One series per container; each point reads that container's memory
	// field (m), or null when the container is absent from the record.
	const { filter, dataPoints } = useContainerDataPoints(memoryConfig, (key, data) => data[key]?.m ?? null)

	return (
		<ChartCard
			title={dockerOrPodman(t`Docker Memory Usage`, isPodman)}
			description={dockerOrPodman(t`Memory usage of docker containers`, isPodman)}
			empty={dataEmpty}
			grid={grid}
			cornerEl={<FilterBar />}
		>
			<AreaChartDefault
				chartData={chartData}
				customData={chartData.containerData}
				dataPoints={dataPoints}
				filter={filter}
				truncate={true}
				showTotal={true}
				reverseStackOrder={true}
				domain={pinnedAxisDomain()}
				itemSorter={(a, b) => b.value - a.value}
				tickFormatter={(val) => {
					const formatted = formatBytes(val, false, Unit.Bytes, true)
					return `${toFixedFloat(formatted.value, val >= 10 ? 0 : 1)} ${formatted.unit}`
				}}
				contentFormatter={(item) => {
					const formatted = formatBytes(item.value, false, Unit.Bytes, true)
					return `${decimalString(formatted.value)} ${formatted.unit}`
				}}
			/>
		</ChartCard>
	)
}
|
||||||
|
|
||||||
|
export function SwapChart({
	chartData,
	grid,
	dataEmpty,
	systemStats,
}: {
	chartData: ChartData
	grid: boolean
	dataEmpty: boolean
	systemStats: SystemStatsRecord[]
}) {
	// Hide the card entirely when the latest sample reports no swap in use.
	const latestSwapUsed = systemStats.at(-1)?.stats.su ?? 0
	if (latestSwapUsed <= 0) {
		return null
	}

	// Axis top is pinned to total swap (stats.s); the tiny fallback keeps a
	// visible scale when the total is missing.
	return (
		<ChartCard title={t`Swap Usage`} description={t`Swap space used by the system`} empty={dataEmpty} grid={grid}>
			<AreaChartDefault
				chartData={chartData}
				domain={[0, () => toFixedFloat(chartData.systemStats.at(-1)?.stats.s ?? 0.04, 2)]}
				tickFormatter={(value) => {
					// swap values are supplied as GB; scale to bytes for formatting
					const tick = formatBytes(value * 1024, false, Unit.Bytes, true)
					return `${toFixedFloat(tick.value, value >= 10 ? 0 : 1)} ${tick.unit}`
				}}
				contentFormatter={({ value }) => {
					const tooltip = formatBytes(value * 1024, false, Unit.Bytes, true)
					return `${decimalString(tooltip.value, tooltip.value >= 100 ? 1 : 2)} ${tooltip.unit}`
				}}
				dataPoints={[
					{
						label: t`Used`,
						dataKey: ({ stats }) => stats?.su,
						color: 2,
						opacity: 0.4,
					},
				]}
			/>
		</ChartCard>
	)
}
|
||||||
@@ -0,0 +1,183 @@
|
|||||||
|
import { useMemo } from "react"
|
||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import AreaChartDefault from "@/components/charts/area-chart"
|
||||||
|
import { useContainerDataPoints } from "@/components/charts/hooks"
|
||||||
|
import { $userSettings } from "@/lib/stores"
|
||||||
|
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||||
|
import type { ChartConfig } from "@/components/ui/chart"
|
||||||
|
import { pinnedAxisDomain } from "@/components/ui/chart"
|
||||||
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
|
import { Separator } from "@/components/ui/separator"
|
||||||
|
import NetworkSheet from "../network-sheet"
|
||||||
|
import { ChartCard, FilterBar, SelectAvgMax } from "../chart-card"
|
||||||
|
import { dockerOrPodman } from "../chart-data"
|
||||||
|
|
||||||
|
export function BandwidthChart({
	chartData,
	grid,
	dataEmpty,
	showMax,
	isLongerChart,
	maxValues,
	systemStats,
}: {
	chartData: ChartData
	grid: boolean
	dataEmpty: boolean
	showMax: boolean
	isLongerChart: boolean
	maxValues: boolean
	systemStats: SystemStatsRecord[]
}) {
	// Network bandwidth card for public interfaces (sent / received).
	// The Avg/Max selector is only offered for longer ranges, where each
	// record aggregates multiple samples.
	const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
	const userSettings = $userSettings.get()

	const dataPoints = [
		{
			label: t`Sent`,
			dataKey(data: SystemStatsRecord) {
				// Prefer byte counters (b/bm); fall back to legacy MB fields (ns/nsm).
				if (showMax) {
					return data?.stats?.bm?.[0] ?? (data?.stats?.nsm ?? 0) * 1024 * 1024
				}
				return data?.stats?.b?.[0] ?? (data?.stats?.ns ?? 0) * 1024 * 1024
			},
			color: 5,
			opacity: 0.2,
		},
		{
			label: t`Received`,
			dataKey(data: SystemStatsRecord) {
				if (showMax) {
					return data?.stats?.bm?.[1] ?? (data?.stats?.nrm ?? 0) * 1024 * 1024
				}
				return data?.stats?.b?.[1] ?? (data?.stats?.nr ?? 0) * 1024 * 1024
			},
			color: 2,
			opacity: 0.2,
		},
	]
	// Try to place the lesser series in front for better visibility.
	// (Previously done with .sort() and a comparator that ignored its
	// arguments and returned a constant — that violates the comparator
	// contract and relies on unspecified engine behavior. An explicit
	// conditional reverse is deterministic and equivalent.)
	const latest = systemStats.at(-1)?.stats
	if ((latest?.b?.[1] ?? 0) < (latest?.b?.[0] ?? 0)) {
		dataPoints.reverse()
	}

	return (
		<ChartCard
			empty={dataEmpty}
			grid={grid}
			title={t`Bandwidth`}
			description={t`Network traffic of public interfaces`}
			cornerEl={
				<div className="flex gap-2">
					{maxValSelect}
					<NetworkSheet chartData={chartData} dataEmpty={dataEmpty} grid={grid} maxValues={maxValues} />
				</div>
			}
		>
			<AreaChartDefault
				chartData={chartData}
				maxToggled={showMax}
				dataPoints={dataPoints}
				tickFormatter={(val) => {
					// Format in the user's preferred network unit.
					const { value, unit } = formatBytes(val, true, userSettings.unitNet, false)
					return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
				}}
				contentFormatter={(data) => {
					const { value, unit } = formatBytes(data.value, true, userSettings.unitNet, false)
					return `${decimalString(value, value >= 100 ? 1 : 2)} ${unit}`
				}}
				showTotal={true}
			/>
		</ChartCard>
	)
}
|
||||||
|
|
||||||
|
export function ContainerNetworkChart({
	chartData,
	grid,
	dataEmpty,
	isPodman,
	networkConfig,
}: {
	chartData: ChartData
	grid: boolean
	dataEmpty: boolean
	isPodman: boolean
	networkConfig: ChartConfig
}) {
	// Per-container network I/O card. Each series plots combined (sent +
	// received) bytes; the tooltip breaks a value back into rx/tx.
	const userSettings = $userSettings.get()
	// Series value per container: prefer byte counters (b: [sent, recv]),
	// falling back to legacy MB fields (ns/nr) scaled to bytes.
	const { filter, dataPoints, filteredKeys } = useContainerDataPoints(networkConfig, (key, data) => {
		const payload = data[key]
		if (!payload) return null
		const sent = payload?.b?.[0] ?? (payload?.ns ?? 0) * 1024 * 1024
		const recv = payload?.b?.[1] ?? (payload?.nr ?? 0) * 1024 * 1024
		return sent + recv
	})

	// Tooltip formatter, memoized on the filter set and the display unit.
	const contentFormatter = useMemo(() => {
		// Extract [sentBytes, recvBytes] from a container record, using the
		// same b -> ns/nr fallback as the series above.
		const getRxTxBytes = (record?: { b?: [number, number]; ns?: number; nr?: number }) => {
			if (record?.b?.length && record.b.length >= 2) {
				// Number(...) || 0 guards against non-numeric / NaN entries.
				return [Number(record.b[0]) || 0, Number(record.b[1]) || 0]
			}
			return [(record?.ns ?? 0) * 1024 * 1024, (record?.nr ?? 0) * 1024 * 1024]
		}
		// Render "<recv> rx | <sent> tx" in the user's network unit.
		const formatRxTx = (recv: number, sent: number) => {
			const { value: receivedValue, unit: receivedUnit } = formatBytes(recv, true, userSettings.unitNet, false)
			const { value: sentValue, unit: sentUnit } = formatBytes(sent, true, userSettings.unitNet, false)
			return (
				<span className="flex">
					{decimalString(receivedValue)} {receivedUnit}
					<span className="opacity-70 ms-0.5"> rx </span>
					<Separator orientation="vertical" className="h-3 mx-1.5 bg-primary/40" />
					{decimalString(sentValue)} {sentUnit}
					<span className="opacity-70 ms-0.5"> tx</span>
				</span>
			)
		}
		// biome-ignore lint/suspicious/noExplicitAny: recharts tooltip item
		return (item: any, key: string) => {
			try {
				// "__total__" is the synthetic totals row: sum rx/tx over every
				// container in the hovered record, skipping filtered-out keys.
				if (key === "__total__") {
					let totalSent = 0
					let totalRecv = 0
					const payloadData = item?.payload && typeof item.payload === "object" ? item.payload : {}
					for (const [containerKey, value] of Object.entries(payloadData)) {
						if (!value || typeof value !== "object") continue
						if (filteredKeys.has(containerKey)) continue
						const [sent, recv] = getRxTxBytes(value as { b?: [number, number]; ns?: number; nr?: number })
						totalSent += sent
						totalRecv += recv
					}
					return formatRxTx(totalRecv, totalSent)
				}
				// Single-container row.
				const [sent, recv] = getRxTxBytes(item?.payload?.[key])
				return formatRxTx(recv, sent)
			} catch {
				// Tooltip payloads are untyped; fail soft rather than crash the chart.
				return null
			}
		}
	}, [filteredKeys, userSettings.unitNet])

	return (
		<ChartCard
			empty={dataEmpty}
			grid={grid}
			title={dockerOrPodman(t`Docker Network I/O`, isPodman)}
			description={dockerOrPodman(t`Network traffic of docker containers`, isPodman)}
			cornerEl={<FilterBar />}
		>
			<AreaChartDefault
				chartData={chartData}
				customData={chartData.containerData}
				dataPoints={dataPoints}
				tickFormatter={(val) => {
					const { value, unit } = formatBytes(val, true, userSettings.unitNet, false)
					return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
				}}
				contentFormatter={contentFormatter}
				domain={pinnedAxisDomain()}
				showTotal={true}
				reverseStackOrder={true}
				filter={filter}
				truncate={true}
				itemSorter={(a, b) => b.value - a.value}
			/>
		</ChartCard>
	)
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user