mirror of
https://github.com/henrygd/beszel.git
synced 2026-05-06 19:01:48 +02:00
Compare commits
96 Commits
7f4f14b505
...
dev-probes
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
3534552d37 | ||
|
|
723401819f | ||
|
|
2ea576c989 | ||
|
|
526a2c6aab | ||
|
|
aaa8eb773f | ||
|
|
099935e78e | ||
|
|
d2eb3b259a | ||
|
|
b89314889d | ||
|
|
04e2b8b974 | ||
|
|
891b03426f | ||
|
|
b182b699d7 | ||
|
|
e65a4a515e | ||
|
|
df249b24f6 | ||
|
|
788483ac56 | ||
|
|
f830665984 | ||
|
|
af49ebf2df | ||
|
|
0378023b6f | ||
|
|
89ac8dc585 | ||
|
|
9896bcdf43 | ||
|
|
ddd47e67ac | ||
|
|
027159420c | ||
|
|
e154123511 | ||
|
|
9f7c1b22bb | ||
|
|
0d440e5fb9 | ||
|
|
5fc774666f | ||
|
|
8f03cbf11c | ||
|
|
1c5808f430 | ||
|
|
a35cc6ef39 | ||
|
|
16e0f6c4a2 | ||
|
|
6472af1ba4 | ||
|
|
e931165566 | ||
|
|
48fe407292 | ||
|
|
a95376b4a2 | ||
|
|
732983493a | ||
|
|
264b17f429 | ||
|
|
cef5ab10a5 | ||
|
|
3a881e1d5e | ||
|
|
209bb4ebb4 | ||
|
|
e71ffd4d2a | ||
|
|
ea19ef6334 | ||
|
|
40da2b4358 | ||
|
|
d0d5912d85 | ||
|
|
4162186ae0 | ||
|
|
a71617e058 | ||
|
|
578ba985e9 | ||
|
|
e5507fa106 | ||
|
|
a024c3cfd0 | ||
|
|
07466804e7 | ||
|
|
981c788d6f | ||
|
|
f5576759de | ||
|
|
be0b708064 | ||
|
|
485830452e | ||
|
|
2fd00cd0b5 | ||
|
|
853a294157 | ||
|
|
aa9ab49654 | ||
|
|
9a5959b57e | ||
|
|
50f8548479 | ||
|
|
bc0581ea61 | ||
|
|
ab3a3de46c | ||
|
|
1556e53926 | ||
|
|
e3ade3aeb8 | ||
|
|
b013f06956 | ||
|
|
fab5e8a656 | ||
|
|
3a0896e57e | ||
|
|
7fdc403470 | ||
|
|
e833d44c43 | ||
|
|
77dd4bdaf5 | ||
|
|
ecba63c4bb | ||
|
|
f9feaf5343 | ||
|
|
ddf5e925c8 | ||
|
|
865e6db90f | ||
|
|
a42d899e64 | ||
|
|
3eaf12a7d5 | ||
|
|
3793b27958 | ||
|
|
5b02158228 | ||
|
|
0ae8c42ae0 | ||
|
|
ea80f3c5a2 | ||
|
|
c3dffff5e4 | ||
|
|
06fdd0e7a8 | ||
|
|
6e3fd90834 | ||
|
|
5ab82183fa | ||
|
|
a68e02ca84 | ||
|
|
0f2e16c63c | ||
|
|
c4009f2b43 | ||
|
|
ef0c1420d1 | ||
|
|
eb9a8e1ef9 | ||
|
|
6b5e6ffa9a | ||
|
|
d656036d3b | ||
|
|
80b73c7faf | ||
|
|
afe9eb7a70 | ||
|
|
7f565a3086 | ||
|
|
77862d4cb1 | ||
|
|
e158a9001b | ||
|
|
f670e868e4 | ||
|
|
0fff699bf6 | ||
|
|
ba10da1b9f |
@@ -48,6 +48,7 @@ type Agent struct {
|
||||
keys []gossh.PublicKey // SSH public keys
|
||||
smartManager *SmartManager // Manages SMART data
|
||||
systemdManager *systemdManager // Manages systemd services
|
||||
probeManager *ProbeManager // Manages network probes
|
||||
}
|
||||
|
||||
// NewAgent creates a new agent with the given data directory for persisting data.
|
||||
@@ -121,6 +122,9 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
||||
// initialize handler registry
|
||||
agent.handlerRegistry = NewHandlerRegistry()
|
||||
|
||||
// initialize probe manager
|
||||
agent.probeManager = newProbeManager()
|
||||
|
||||
// initialize disk info
|
||||
agent.initializeDiskInfo()
|
||||
|
||||
@@ -178,6 +182,11 @@ func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedD
|
||||
}
|
||||
}
|
||||
|
||||
if a.probeManager != nil {
|
||||
data.Probes = a.probeManager.GetResults(cacheTimeMs)
|
||||
slog.Debug("Probes", "data", data.Probes)
|
||||
}
|
||||
|
||||
// skip updating systemd services if cache time is not the default 60sec interval
|
||||
if a.systemdManager != nil && cacheTimeMs == defaultDataCacheTimeMs {
|
||||
totalCount := uint16(a.systemdManager.getServiceStatsCount())
|
||||
|
||||
@@ -1,84 +1,11 @@
|
||||
//go:build !freebsd
|
||||
|
||||
// Package battery provides functions to check if the system has a battery and to get the battery stats.
|
||||
// Package battery provides functions to check if the system has a battery and return the charge state and percentage.
|
||||
package battery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"math"
|
||||
|
||||
"github.com/distatus/battery"
|
||||
const (
|
||||
stateUnknown uint8 = iota
|
||||
stateEmpty
|
||||
stateFull
|
||||
stateCharging
|
||||
stateDischarging
|
||||
stateIdle
|
||||
)
|
||||
|
||||
var (
|
||||
systemHasBattery = false
|
||||
haveCheckedBattery = false
|
||||
)
|
||||
|
||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||
func HasReadableBattery() bool {
|
||||
if haveCheckedBattery {
|
||||
return systemHasBattery
|
||||
}
|
||||
haveCheckedBattery = true
|
||||
batteries, err := battery.GetAll()
|
||||
for _, bat := range batteries {
|
||||
if bat != nil && (bat.Full > 0 || bat.Design > 0) {
|
||||
systemHasBattery = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !systemHasBattery {
|
||||
slog.Debug("No battery found", "err", err)
|
||||
}
|
||||
return systemHasBattery
|
||||
}
|
||||
|
||||
// GetBatteryStats returns the current battery percent and charge state
|
||||
// percent = (current charge of all batteries) / (sum of designed/full capacity of all batteries)
|
||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||
if !HasReadableBattery() {
|
||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||
}
|
||||
batteries, err := battery.GetAll()
|
||||
// we'll handle errors later by skipping batteries with errors, rather
|
||||
// than skipping everything because of the presence of some errors.
|
||||
if len(batteries) == 0 {
|
||||
return batteryPercent, batteryState, errors.New("no batteries")
|
||||
}
|
||||
|
||||
totalCapacity := float64(0)
|
||||
totalCharge := float64(0)
|
||||
errs, partialErrs := err.(battery.Errors)
|
||||
|
||||
batteryState = math.MaxUint8
|
||||
|
||||
for i, bat := range batteries {
|
||||
if partialErrs && errs[i] != nil {
|
||||
// if there were some errors, like missing data, skip it
|
||||
continue
|
||||
}
|
||||
if bat == nil || bat.Full == 0 {
|
||||
// skip batteries with no capacity. Charge is unlikely to ever be zero, but
|
||||
// we can't guarantee that, so don't skip based on charge.
|
||||
continue
|
||||
}
|
||||
totalCapacity += bat.Full
|
||||
totalCharge += min(bat.Current, bat.Full)
|
||||
if bat.State.Raw >= 0 {
|
||||
batteryState = uint8(bat.State.Raw)
|
||||
}
|
||||
}
|
||||
|
||||
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
||||
// for macs there's sometimes a ghost battery with 0 capacity
|
||||
// https://github.com/distatus/battery/issues/34
|
||||
// Instead of skipping over those batteries, we'll check for total 0 capacity
|
||||
// and return an error. This also prevents a divide by zero.
|
||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||
}
|
||||
|
||||
batteryPercent = uint8(totalCharge / totalCapacity * 100)
|
||||
return batteryPercent, batteryState, nil
|
||||
}
|
||||
|
||||
96
agent/battery/battery_darwin.go
Normal file
96
agent/battery/battery_darwin.go
Normal file
@@ -0,0 +1,96 @@
|
||||
//go:build darwin
|
||||
|
||||
package battery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"math"
|
||||
"os/exec"
|
||||
"sync"
|
||||
|
||||
"howett.net/plist"
|
||||
)
|
||||
|
||||
// macBattery mirrors the subset of AppleSmartBattery properties reported by
// ioreg that we need to compute charge percentage and charge state.
type macBattery struct {
	CurrentCapacity   int  `plist:"CurrentCapacity"`   // current charge, same unit as MaxCapacity
	MaxCapacity       int  `plist:"MaxCapacity"`       // full-charge capacity
	FullyCharged      bool `plist:"FullyCharged"`      // true once charge is complete
	IsCharging        bool `plist:"IsCharging"`        // true while actively charging
	ExternalConnected bool `plist:"ExternalConnected"` // true when on AC power
}
|
||||
|
||||
func readMacBatteries() ([]macBattery, error) {
|
||||
out, err := exec.Command("ioreg", "-n", "AppleSmartBattery", "-r", "-a").Output()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(out) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
var batteries []macBattery
|
||||
if _, err := plist.Unmarshal(out, &batteries); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return batteries, nil
|
||||
}
|
||||
|
||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||
var HasReadableBattery = sync.OnceValue(func() bool {
|
||||
systemHasBattery := false
|
||||
batteries, err := readMacBatteries()
|
||||
slog.Debug("Batteries", "batteries", batteries, "err", err)
|
||||
for _, bat := range batteries {
|
||||
if bat.MaxCapacity > 0 {
|
||||
systemHasBattery = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return systemHasBattery
|
||||
})
|
||||
|
||||
// GetBatteryStats returns the current battery percent and charge state.
|
||||
// Uses CurrentCapacity/MaxCapacity to match the value macOS displays.
|
||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||
if !HasReadableBattery() {
|
||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||
}
|
||||
batteries, err := readMacBatteries()
|
||||
if len(batteries) == 0 {
|
||||
return batteryPercent, batteryState, errors.New("no batteries")
|
||||
}
|
||||
|
||||
totalCapacity := 0
|
||||
totalCharge := 0
|
||||
batteryState = math.MaxUint8
|
||||
|
||||
for _, bat := range batteries {
|
||||
if bat.MaxCapacity == 0 {
|
||||
// skip ghost batteries with 0 capacity
|
||||
// https://github.com/distatus/battery/issues/34
|
||||
continue
|
||||
}
|
||||
totalCapacity += bat.MaxCapacity
|
||||
totalCharge += min(bat.CurrentCapacity, bat.MaxCapacity)
|
||||
|
||||
switch {
|
||||
case !bat.ExternalConnected:
|
||||
batteryState = stateDischarging
|
||||
case bat.IsCharging:
|
||||
batteryState = stateCharging
|
||||
case bat.CurrentCapacity == 0:
|
||||
batteryState = stateEmpty
|
||||
case !bat.FullyCharged:
|
||||
batteryState = stateIdle
|
||||
default:
|
||||
batteryState = stateFull
|
||||
}
|
||||
}
|
||||
|
||||
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||
}
|
||||
|
||||
batteryPercent = uint8(float64(totalCharge) / float64(totalCapacity) * 100)
|
||||
return batteryPercent, batteryState, nil
|
||||
}
|
||||
120
agent/battery/battery_linux.go
Normal file
120
agent/battery/battery_linux.go
Normal file
@@ -0,0 +1,120 @@
|
||||
//go:build linux
|
||||
|
||||
package battery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/henrygd/beszel/agent/utils"
|
||||
)
|
||||
|
||||
// getBatteryPaths returns the paths of all batteries in /sys/class/power_supply
|
||||
var getBatteryPaths func() ([]string, error)
|
||||
|
||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||
var HasReadableBattery func() bool
|
||||
|
||||
func init() {
|
||||
resetBatteryState("/sys/class/power_supply")
|
||||
}
|
||||
|
||||
// resetBatteryState resets the sync.Once functions to a fresh state.
|
||||
// Tests call this after swapping sysfsPowerSupply so the new path is picked up.
|
||||
func resetBatteryState(sysfsPowerSupplyPath string) {
|
||||
getBatteryPaths = sync.OnceValues(func() ([]string, error) {
|
||||
entries, err := os.ReadDir(sysfsPowerSupplyPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var paths []string
|
||||
for _, e := range entries {
|
||||
path := filepath.Join(sysfsPowerSupplyPath, e.Name())
|
||||
if utils.ReadStringFile(filepath.Join(path, "type")) == "Battery" {
|
||||
paths = append(paths, path)
|
||||
}
|
||||
}
|
||||
return paths, nil
|
||||
})
|
||||
HasReadableBattery = sync.OnceValue(func() bool {
|
||||
systemHasBattery := false
|
||||
paths, err := getBatteryPaths()
|
||||
for _, path := range paths {
|
||||
if _, ok := utils.ReadStringFileOK(filepath.Join(path, "capacity")); ok {
|
||||
systemHasBattery = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !systemHasBattery {
|
||||
slog.Debug("No battery found", "err", err)
|
||||
}
|
||||
return systemHasBattery
|
||||
})
|
||||
}
|
||||
|
||||
func parseSysfsState(status string) uint8 {
|
||||
switch status {
|
||||
case "Empty":
|
||||
return stateEmpty
|
||||
case "Full":
|
||||
return stateFull
|
||||
case "Charging":
|
||||
return stateCharging
|
||||
case "Discharging":
|
||||
return stateDischarging
|
||||
case "Not charging":
|
||||
return stateIdle
|
||||
default:
|
||||
return stateUnknown
|
||||
}
|
||||
}
|
||||
|
||||
// GetBatteryStats returns the current battery percent and charge state.
|
||||
// Reads /sys/class/power_supply/*/capacity directly so the kernel-reported
|
||||
// value is used, which is always 0-100 and matches what the OS displays.
|
||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||
if !HasReadableBattery() {
|
||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||
}
|
||||
paths, err := getBatteryPaths()
|
||||
if err != nil {
|
||||
return batteryPercent, batteryState, err
|
||||
}
|
||||
if len(paths) == 0 {
|
||||
return batteryPercent, batteryState, errors.New("no batteries")
|
||||
}
|
||||
|
||||
batteryState = math.MaxUint8
|
||||
totalPercent := 0
|
||||
count := 0
|
||||
|
||||
for _, path := range paths {
|
||||
capStr, ok := utils.ReadStringFileOK(filepath.Join(path, "capacity"))
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
cap, parseErr := strconv.Atoi(capStr)
|
||||
if parseErr != nil {
|
||||
continue
|
||||
}
|
||||
totalPercent += cap
|
||||
count++
|
||||
|
||||
state := parseSysfsState(utils.ReadStringFile(filepath.Join(path, "status")))
|
||||
if state != stateUnknown {
|
||||
batteryState = state
|
||||
}
|
||||
}
|
||||
|
||||
if count == 0 || batteryState == math.MaxUint8 {
|
||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||
}
|
||||
|
||||
batteryPercent = uint8(totalPercent / count)
|
||||
return batteryPercent, batteryState, nil
|
||||
}
|
||||
201
agent/battery/battery_linux_test.go
Normal file
201
agent/battery/battery_linux_test.go
Normal file
@@ -0,0 +1,201 @@
|
||||
//go:build testing && linux
|
||||
|
||||
package battery
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// setupFakeSysfs creates a temporary sysfs-like tree under t.TempDir(),
|
||||
// swaps sysfsPowerSupply, resets the sync.Once caches, and restores
|
||||
// everything on cleanup. Returns a helper to create battery directories.
|
||||
func setupFakeSysfs(t *testing.T) (tmpDir string, addBattery func(name, capacity, status string)) {
|
||||
t.Helper()
|
||||
|
||||
tmp := t.TempDir()
|
||||
resetBatteryState(tmp)
|
||||
|
||||
write := func(path, content string) {
|
||||
t.Helper()
|
||||
dir := filepath.Dir(path)
|
||||
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(path, []byte(content), 0o644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
addBattery = func(name, capacity, status string) {
|
||||
t.Helper()
|
||||
batDir := filepath.Join(tmp, name)
|
||||
write(filepath.Join(batDir, "type"), "Battery")
|
||||
write(filepath.Join(batDir, "capacity"), capacity)
|
||||
write(filepath.Join(batDir, "status"), status)
|
||||
}
|
||||
|
||||
return tmp, addBattery
|
||||
}
|
||||
|
||||
func TestParseSysfsState(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
want uint8
|
||||
}{
|
||||
{"Empty", stateEmpty},
|
||||
{"Full", stateFull},
|
||||
{"Charging", stateCharging},
|
||||
{"Discharging", stateDischarging},
|
||||
{"Not charging", stateIdle},
|
||||
{"", stateUnknown},
|
||||
{"SomethingElse", stateUnknown},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
assert.Equal(t, tt.want, parseSysfsState(tt.input), "parseSysfsState(%q)", tt.input)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBatteryStats_SingleBattery(t *testing.T) {
|
||||
_, addBattery := setupFakeSysfs(t)
|
||||
addBattery("BAT0", "72", "Discharging")
|
||||
|
||||
pct, state, err := GetBatteryStats()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint8(72), pct)
|
||||
assert.Equal(t, stateDischarging, state)
|
||||
}
|
||||
|
||||
func TestGetBatteryStats_MultipleBatteries(t *testing.T) {
|
||||
_, addBattery := setupFakeSysfs(t)
|
||||
addBattery("BAT0", "80", "Charging")
|
||||
addBattery("BAT1", "40", "Charging")
|
||||
|
||||
pct, state, err := GetBatteryStats()
|
||||
assert.NoError(t, err)
|
||||
// average of 80 and 40 = 60
|
||||
assert.EqualValues(t, 60, pct)
|
||||
assert.Equal(t, stateCharging, state)
|
||||
}
|
||||
|
||||
func TestGetBatteryStats_FullBattery(t *testing.T) {
|
||||
_, addBattery := setupFakeSysfs(t)
|
||||
addBattery("BAT0", "100", "Full")
|
||||
|
||||
pct, state, err := GetBatteryStats()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint8(100), pct)
|
||||
assert.Equal(t, stateFull, state)
|
||||
}
|
||||
|
||||
func TestGetBatteryStats_EmptyBattery(t *testing.T) {
|
||||
_, addBattery := setupFakeSysfs(t)
|
||||
addBattery("BAT0", "0", "Empty")
|
||||
|
||||
pct, state, err := GetBatteryStats()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint8(0), pct)
|
||||
assert.Equal(t, stateEmpty, state)
|
||||
}
|
||||
|
||||
func TestGetBatteryStats_NotCharging(t *testing.T) {
|
||||
_, addBattery := setupFakeSysfs(t)
|
||||
addBattery("BAT0", "80", "Not charging")
|
||||
|
||||
pct, state, err := GetBatteryStats()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint8(80), pct)
|
||||
assert.Equal(t, stateIdle, state)
|
||||
}
|
||||
|
||||
func TestGetBatteryStats_NoBatteries(t *testing.T) {
|
||||
setupFakeSysfs(t) // empty directory, no batteries
|
||||
|
||||
_, _, err := GetBatteryStats()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestGetBatteryStats_NonBatterySupplyIgnored(t *testing.T) {
|
||||
tmp, addBattery := setupFakeSysfs(t)
|
||||
|
||||
// Add a real battery
|
||||
addBattery("BAT0", "55", "Charging")
|
||||
|
||||
// Add an AC adapter (type != Battery) - should be ignored
|
||||
acDir := filepath.Join(tmp, "AC0")
|
||||
if err := os.MkdirAll(acDir, 0o755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(acDir, "type"), []byte("Mains"), 0o644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pct, state, err := GetBatteryStats()
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint8(55), pct)
|
||||
assert.Equal(t, stateCharging, state)
|
||||
}
|
||||
|
||||
func TestGetBatteryStats_InvalidCapacitySkipped(t *testing.T) {
|
||||
tmp, addBattery := setupFakeSysfs(t)
|
||||
|
||||
// One battery with valid capacity
|
||||
addBattery("BAT0", "90", "Discharging")
|
||||
|
||||
// Another with invalid capacity text
|
||||
badDir := filepath.Join(tmp, "BAT1")
|
||||
if err := os.MkdirAll(badDir, 0o755); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(badDir, "type"), []byte("Battery"), 0o644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(badDir, "capacity"), []byte("not-a-number"), 0o644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(badDir, "status"), []byte("Discharging"), 0o644); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
pct, _, err := GetBatteryStats()
|
||||
assert.NoError(t, err)
|
||||
// Only BAT0 counted
|
||||
assert.Equal(t, uint8(90), pct)
|
||||
}
|
||||
|
||||
func TestGetBatteryStats_UnknownStatusOnly(t *testing.T) {
|
||||
_, addBattery := setupFakeSysfs(t)
|
||||
addBattery("BAT0", "50", "SomethingWeird")
|
||||
|
||||
_, _, err := GetBatteryStats()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestHasReadableBattery_True(t *testing.T) {
|
||||
_, addBattery := setupFakeSysfs(t)
|
||||
addBattery("BAT0", "50", "Charging")
|
||||
|
||||
assert.True(t, HasReadableBattery())
|
||||
}
|
||||
|
||||
func TestHasReadableBattery_False(t *testing.T) {
|
||||
setupFakeSysfs(t) // no batteries
|
||||
|
||||
assert.False(t, HasReadableBattery())
|
||||
}
|
||||
|
||||
func TestHasReadableBattery_NoCapacityFile(t *testing.T) {
|
||||
tmp, _ := setupFakeSysfs(t)
|
||||
|
||||
// Battery dir with type file but no capacity file
|
||||
batDir := filepath.Join(tmp, "BAT0")
|
||||
err := os.MkdirAll(batDir, 0o755)
|
||||
assert.NoError(t, err)
|
||||
err = os.WriteFile(filepath.Join(batDir, "type"), []byte("Battery"), 0o644)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.False(t, HasReadableBattery())
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
//go:build freebsd
|
||||
//go:build !darwin && !linux && !windows
|
||||
|
||||
package battery
|
||||
|
||||
298
agent/battery/battery_windows.go
Normal file
298
agent/battery/battery_windows.go
Normal file
@@ -0,0 +1,298 @@
|
||||
//go:build windows
|
||||
|
||||
// Most of the Windows battery code is based on
|
||||
// distatus/battery by Karol 'Kenji Takahashi' Woźniak
|
||||
|
||||
package battery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/slog"
|
||||
"math"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
type batteryQueryInformation struct {
|
||||
BatteryTag uint32
|
||||
InformationLevel int32
|
||||
AtRate int32
|
||||
}
|
||||
|
||||
type batteryInformation struct {
|
||||
Capabilities uint32
|
||||
Technology uint8
|
||||
Reserved [3]uint8
|
||||
Chemistry [4]uint8
|
||||
DesignedCapacity uint32
|
||||
FullChargedCapacity uint32
|
||||
DefaultAlert1 uint32
|
||||
DefaultAlert2 uint32
|
||||
CriticalBias uint32
|
||||
CycleCount uint32
|
||||
}
|
||||
|
||||
type batteryWaitStatus struct {
|
||||
BatteryTag uint32
|
||||
Timeout uint32
|
||||
PowerState uint32
|
||||
LowCapacity uint32
|
||||
HighCapacity uint32
|
||||
}
|
||||
|
||||
type batteryStatus struct {
|
||||
PowerState uint32
|
||||
Capacity uint32
|
||||
Voltage uint32
|
||||
Rate int32
|
||||
}
|
||||
|
||||
type winGUID struct {
|
||||
Data1 uint32
|
||||
Data2 uint16
|
||||
Data3 uint16
|
||||
Data4 [8]byte
|
||||
}
|
||||
|
||||
type spDeviceInterfaceData struct {
|
||||
cbSize uint32
|
||||
InterfaceClassGuid winGUID
|
||||
Flags uint32
|
||||
Reserved uint
|
||||
}
|
||||
|
||||
var guidDeviceBattery = winGUID{
|
||||
0x72631e54,
|
||||
0x78A4,
|
||||
0x11d0,
|
||||
[8]byte{0xbc, 0xf7, 0x00, 0xaa, 0x00, 0xb7, 0xb3, 0x2a},
|
||||
}
|
||||
|
||||
var (
|
||||
setupapi = &windows.LazyDLL{Name: "setupapi.dll", System: true}
|
||||
setupDiGetClassDevsW = setupapi.NewProc("SetupDiGetClassDevsW")
|
||||
setupDiEnumDeviceInterfaces = setupapi.NewProc("SetupDiEnumDeviceInterfaces")
|
||||
setupDiGetDeviceInterfaceDetailW = setupapi.NewProc("SetupDiGetDeviceInterfaceDetailW")
|
||||
setupDiDestroyDeviceInfoList = setupapi.NewProc("SetupDiDestroyDeviceInfoList")
|
||||
)
|
||||
|
||||
// winBatteryGet reads one battery by index. Returns (fullCapacity, currentCapacity, state, error).
|
||||
// Returns error == errNotFound when there are no more batteries.
|
||||
var errNotFound = errors.New("no more batteries")
|
||||
|
||||
func setupDiSetup(proc *windows.LazyProc, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, error) {
|
||||
_ = nargs
|
||||
r1, _, errno := syscall.SyscallN(proc.Addr(), a1, a2, a3, a4, a5, a6)
|
||||
if windows.Handle(r1) == windows.InvalidHandle {
|
||||
if errno != 0 {
|
||||
return 0, error(errno)
|
||||
}
|
||||
return 0, syscall.EINVAL
|
||||
}
|
||||
return r1, nil
|
||||
}
|
||||
|
||||
func setupDiCall(proc *windows.LazyProc, nargs, a1, a2, a3, a4, a5, a6 uintptr) syscall.Errno {
|
||||
_ = nargs
|
||||
r1, _, errno := syscall.SyscallN(proc.Addr(), a1, a2, a3, a4, a5, a6)
|
||||
if r1 == 0 {
|
||||
if errno != 0 {
|
||||
return errno
|
||||
}
|
||||
return syscall.EINVAL
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func readWinBatteryState(powerState uint32) uint8 {
|
||||
switch {
|
||||
case powerState&0x00000004 != 0:
|
||||
return stateCharging
|
||||
case powerState&0x00000008 != 0:
|
||||
return stateEmpty
|
||||
case powerState&0x00000002 != 0:
|
||||
return stateDischarging
|
||||
case powerState&0x00000001 != 0:
|
||||
return stateFull
|
||||
default:
|
||||
return stateUnknown
|
||||
}
|
||||
}
|
||||
|
||||
func winBatteryGet(idx int) (full, current uint32, state uint8, err error) {
|
||||
hdev, err := setupDiSetup(
|
||||
setupDiGetClassDevsW,
|
||||
4,
|
||||
uintptr(unsafe.Pointer(&guidDeviceBattery)),
|
||||
0, 0,
|
||||
2|16, // DIGCF_PRESENT|DIGCF_DEVICEINTERFACE
|
||||
0, 0,
|
||||
)
|
||||
if err != nil {
|
||||
return 0, 0, stateUnknown, err
|
||||
}
|
||||
defer syscall.SyscallN(setupDiDestroyDeviceInfoList.Addr(), hdev)
|
||||
|
||||
var did spDeviceInterfaceData
|
||||
did.cbSize = uint32(unsafe.Sizeof(did))
|
||||
errno := setupDiCall(
|
||||
setupDiEnumDeviceInterfaces,
|
||||
5,
|
||||
hdev, 0,
|
||||
uintptr(unsafe.Pointer(&guidDeviceBattery)),
|
||||
uintptr(idx),
|
||||
uintptr(unsafe.Pointer(&did)),
|
||||
0,
|
||||
)
|
||||
if errno == 259 { // ERROR_NO_MORE_ITEMS
|
||||
return 0, 0, stateUnknown, errNotFound
|
||||
}
|
||||
if errno != 0 {
|
||||
return 0, 0, stateUnknown, errno
|
||||
}
|
||||
|
||||
var cbRequired uint32
|
||||
errno = setupDiCall(
|
||||
setupDiGetDeviceInterfaceDetailW,
|
||||
6,
|
||||
hdev,
|
||||
uintptr(unsafe.Pointer(&did)),
|
||||
0, 0,
|
||||
uintptr(unsafe.Pointer(&cbRequired)),
|
||||
0,
|
||||
)
|
||||
if errno != 0 && errno != 122 { // ERROR_INSUFFICIENT_BUFFER
|
||||
return 0, 0, stateUnknown, errno
|
||||
}
|
||||
didd := make([]uint16, cbRequired/2)
|
||||
cbSize := (*uint32)(unsafe.Pointer(&didd[0]))
|
||||
if unsafe.Sizeof(uint(0)) == 8 {
|
||||
*cbSize = 8
|
||||
} else {
|
||||
*cbSize = 6
|
||||
}
|
||||
errno = setupDiCall(
|
||||
setupDiGetDeviceInterfaceDetailW,
|
||||
6,
|
||||
hdev,
|
||||
uintptr(unsafe.Pointer(&did)),
|
||||
uintptr(unsafe.Pointer(&didd[0])),
|
||||
uintptr(cbRequired),
|
||||
uintptr(unsafe.Pointer(&cbRequired)),
|
||||
0,
|
||||
)
|
||||
if errno != 0 {
|
||||
return 0, 0, stateUnknown, errno
|
||||
}
|
||||
devicePath := &didd[2:][0]
|
||||
|
||||
handle, err := windows.CreateFile(
|
||||
devicePath,
|
||||
windows.GENERIC_READ|windows.GENERIC_WRITE,
|
||||
windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE,
|
||||
nil,
|
||||
windows.OPEN_EXISTING,
|
||||
windows.FILE_ATTRIBUTE_NORMAL,
|
||||
0,
|
||||
)
|
||||
if err != nil {
|
||||
return 0, 0, stateUnknown, err
|
||||
}
|
||||
defer windows.CloseHandle(handle)
|
||||
|
||||
var dwOut uint32
|
||||
var dwWait uint32
|
||||
var bqi batteryQueryInformation
|
||||
err = windows.DeviceIoControl(
|
||||
handle,
|
||||
2703424, // IOCTL_BATTERY_QUERY_TAG
|
||||
(*byte)(unsafe.Pointer(&dwWait)),
|
||||
uint32(unsafe.Sizeof(dwWait)),
|
||||
(*byte)(unsafe.Pointer(&bqi.BatteryTag)),
|
||||
uint32(unsafe.Sizeof(bqi.BatteryTag)),
|
||||
&dwOut, nil,
|
||||
)
|
||||
if err != nil || bqi.BatteryTag == 0 {
|
||||
return 0, 0, stateUnknown, errors.New("battery tag not returned")
|
||||
}
|
||||
|
||||
var bi batteryInformation
|
||||
if err = windows.DeviceIoControl(
|
||||
handle,
|
||||
2703428, // IOCTL_BATTERY_QUERY_INFORMATION
|
||||
(*byte)(unsafe.Pointer(&bqi)),
|
||||
uint32(unsafe.Sizeof(bqi)),
|
||||
(*byte)(unsafe.Pointer(&bi)),
|
||||
uint32(unsafe.Sizeof(bi)),
|
||||
&dwOut, nil,
|
||||
); err != nil {
|
||||
return 0, 0, stateUnknown, err
|
||||
}
|
||||
|
||||
bws := batteryWaitStatus{BatteryTag: bqi.BatteryTag}
|
||||
var bs batteryStatus
|
||||
if err = windows.DeviceIoControl(
|
||||
handle,
|
||||
2703436, // IOCTL_BATTERY_QUERY_STATUS
|
||||
(*byte)(unsafe.Pointer(&bws)),
|
||||
uint32(unsafe.Sizeof(bws)),
|
||||
(*byte)(unsafe.Pointer(&bs)),
|
||||
uint32(unsafe.Sizeof(bs)),
|
||||
&dwOut, nil,
|
||||
); err != nil {
|
||||
return 0, 0, stateUnknown, err
|
||||
}
|
||||
|
||||
if bs.Capacity == 0xffffffff { // BATTERY_UNKNOWN_CAPACITY
|
||||
return 0, 0, stateUnknown, errors.New("battery capacity unknown")
|
||||
}
|
||||
|
||||
return bi.FullChargedCapacity, bs.Capacity, readWinBatteryState(bs.PowerState), nil
|
||||
}
|
||||
|
||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||
var HasReadableBattery = sync.OnceValue(func() bool {
|
||||
systemHasBattery := false
|
||||
full, _, _, err := winBatteryGet(0)
|
||||
if err == nil && full > 0 {
|
||||
systemHasBattery = true
|
||||
}
|
||||
if !systemHasBattery {
|
||||
slog.Debug("No battery found", "err", err)
|
||||
}
|
||||
return systemHasBattery
|
||||
})
|
||||
|
||||
// GetBatteryStats returns the current battery percent and charge state.
|
||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||
if !HasReadableBattery() {
|
||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||
}
|
||||
|
||||
totalFull := uint32(0)
|
||||
totalCurrent := uint32(0)
|
||||
batteryState = math.MaxUint8
|
||||
|
||||
for i := 0; ; i++ {
|
||||
full, current, state, bErr := winBatteryGet(i)
|
||||
if errors.Is(bErr, errNotFound) {
|
||||
break
|
||||
}
|
||||
if bErr != nil || full == 0 {
|
||||
continue
|
||||
}
|
||||
totalFull += full
|
||||
totalCurrent += min(current, full)
|
||||
batteryState = state
|
||||
}
|
||||
|
||||
if totalFull == 0 || batteryState == math.MaxUint8 {
|
||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||
}
|
||||
|
||||
batteryPercent = uint8(float64(totalCurrent) / float64(totalFull) * 100)
|
||||
return batteryPercent, batteryState, nil
|
||||
}
|
||||
@@ -20,6 +20,7 @@ import (
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/lxzan/gws"
|
||||
"golang.org/x/crypto/ssh"
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -104,6 +105,11 @@ func (client *WebSocketClient) getOptions() *gws.ClientOption {
|
||||
}
|
||||
client.hubURL.Path = path.Join(client.hubURL.Path, "api/beszel/agent-connect")
|
||||
|
||||
// make sure BESZEL_AGENT_ALL_PROXY works (GWS only checks ALL_PROXY)
|
||||
if val := os.Getenv("BESZEL_AGENT_ALL_PROXY"); val != "" {
|
||||
os.Setenv("ALL_PROXY", val)
|
||||
}
|
||||
|
||||
client.options = &gws.ClientOption{
|
||||
Addr: client.hubURL.String(),
|
||||
TlsConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
@@ -112,6 +118,9 @@ func (client *WebSocketClient) getOptions() *gws.ClientOption {
|
||||
"X-Token": []string{client.token},
|
||||
"X-Beszel": []string{beszel.Version},
|
||||
},
|
||||
NewDialer: func() (gws.Dialer, error) {
|
||||
return proxy.FromEnvironment(), nil
|
||||
},
|
||||
}
|
||||
return client.options
|
||||
}
|
||||
|
||||
@@ -4,11 +4,15 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"log/slog"
|
||||
"net"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/agent/health"
|
||||
"github.com/henrygd/beszel/agent/utils"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
)
|
||||
|
||||
@@ -111,13 +115,37 @@ func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
||||
_ = health.Update()
|
||||
case <-sigCtx.Done():
|
||||
slog.Info("Shutting down", "cause", context.Cause(sigCtx))
|
||||
_ = c.agent.StopServer()
|
||||
c.closeWebSocket()
|
||||
return health.CleanUp()
|
||||
return c.stop()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// stop does not stop the connection manager itself, just any active connections. The manager will attempt to reconnect after stopping, so this should only be called immediately before shutting down the entire agent.
|
||||
//
|
||||
// If we need or want to expose a graceful Stop method in the future, do something like this to actually stop the manager:
|
||||
//
|
||||
// func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// c.cancel = cancel
|
||||
//
|
||||
// for {
|
||||
// select {
|
||||
// case <-ctx.Done():
|
||||
// return c.stop()
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// func (c *ConnectionManager) Stop() {
|
||||
// c.cancel()
|
||||
// }
|
||||
func (c *ConnectionManager) stop() error {
|
||||
_ = c.agent.StopServer()
|
||||
c.agent.probeManager.Stop()
|
||||
c.closeWebSocket()
|
||||
return health.CleanUp()
|
||||
}
|
||||
|
||||
// handleEvent processes connection events and updates the connection state accordingly.
|
||||
func (c *ConnectionManager) handleEvent(event ConnectionEvent) {
|
||||
switch event {
|
||||
@@ -185,9 +213,16 @@ func (c *ConnectionManager) connect() {
|
||||
|
||||
// Try WebSocket first, if it fails, start SSH server
|
||||
err := c.startWebSocketConnection()
|
||||
if err != nil && c.State == Disconnected {
|
||||
c.startSSHServer()
|
||||
c.startWsTicker()
|
||||
if err != nil {
|
||||
if shouldExitOnErr(err) {
|
||||
time.Sleep(2 * time.Second) // prevent tight restart loop
|
||||
_ = c.stop()
|
||||
os.Exit(1)
|
||||
}
|
||||
if c.State == Disconnected {
|
||||
c.startSSHServer()
|
||||
c.startWsTicker()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -224,3 +259,14 @@ func (c *ConnectionManager) closeWebSocket() {
|
||||
c.wsClient.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// shouldExitOnErr checks if the error is a DNS resolution failure and if the
|
||||
// EXIT_ON_DNS_ERROR env var is set. https://github.com/henrygd/beszel/issues/1924.
|
||||
func shouldExitOnErr(err error) bool {
|
||||
if val, _ := utils.GetEnv("EXIT_ON_DNS_ERROR"); val == "true" {
|
||||
if opErr, ok := errors.AsType[*net.OpError](err); ok {
|
||||
return strings.Contains(opErr.Err.Error(), "lookup")
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ package agent
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
@@ -298,3 +299,65 @@ func TestConnectionManager_ConnectFlow(t *testing.T) {
|
||||
cm.connect()
|
||||
}, "Connect should not panic without WebSocket client")
|
||||
}
|
||||
|
||||
func TestShouldExitOnErr(t *testing.T) {
|
||||
createDialErr := func(msg string) error {
|
||||
return &net.OpError{
|
||||
Op: "dial",
|
||||
Net: "tcp",
|
||||
Err: errors.New(msg),
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
err error
|
||||
envValue string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "no env var",
|
||||
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||
envValue: "",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "env var false",
|
||||
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||
envValue: "false",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "env var true, matching error",
|
||||
err: createDialErr("lookup lkahsdfasdf: no such host"),
|
||||
envValue: "true",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "env var true, matching error with extra context",
|
||||
err: createDialErr("lookup beszel.server.lan on [::1]:53: read udp [::1]:44557->[::1]:53: read: connection refused"),
|
||||
envValue: "true",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "env var true, non-matching error",
|
||||
err: errors.New("connection refused"),
|
||||
envValue: "true",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "env var true, dial but not lookup",
|
||||
err: createDialErr("connection timeout"),
|
||||
envValue: "true",
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Setenv("EXIT_ON_DNS_ERROR", tt.envValue)
|
||||
result := shouldExitOnErr(tt.err)
|
||||
assert.Equal(t, tt.expected, result)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,6 +34,34 @@ type diskDiscovery struct {
|
||||
ctx fsRegistrationContext
|
||||
}
|
||||
|
||||
// prevDisk stores previous per-device disk counters for a given cache interval
|
||||
type prevDisk struct {
|
||||
readBytes uint64
|
||||
writeBytes uint64
|
||||
readTime uint64 // cumulative ms spent on reads (from ReadTime)
|
||||
writeTime uint64 // cumulative ms spent on writes (from WriteTime)
|
||||
ioTime uint64 // cumulative ms spent doing I/O (from IoTime)
|
||||
weightedIO uint64 // cumulative weighted ms (queue-depth × ms, from WeightedIO)
|
||||
readCount uint64 // cumulative read operation count
|
||||
writeCount uint64 // cumulative write operation count
|
||||
at time.Time
|
||||
}
|
||||
|
||||
// prevDiskFromCounter creates a prevDisk snapshot from a disk.IOCountersStat at time t.
|
||||
func prevDiskFromCounter(d disk.IOCountersStat, t time.Time) prevDisk {
|
||||
return prevDisk{
|
||||
readBytes: d.ReadBytes,
|
||||
writeBytes: d.WriteBytes,
|
||||
readTime: d.ReadTime,
|
||||
writeTime: d.WriteTime,
|
||||
ioTime: d.IoTime,
|
||||
weightedIO: d.WeightedIO,
|
||||
readCount: d.ReadCount,
|
||||
writeCount: d.WriteCount,
|
||||
at: t,
|
||||
}
|
||||
}
|
||||
|
||||
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
||||
// Returns the device/filesystem part and the custom name part
|
||||
func parseFilesystemEntry(entry string) (device, customName string) {
|
||||
@@ -581,16 +609,29 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
||||
prev, hasPrev := a.diskPrev[cacheTimeMs][name]
|
||||
if !hasPrev {
|
||||
// Seed from agent-level fsStats if present, else seed from current
|
||||
prev = prevDisk{readBytes: stats.TotalRead, writeBytes: stats.TotalWrite, at: stats.Time}
|
||||
prev = prevDisk{
|
||||
readBytes: stats.TotalRead,
|
||||
writeBytes: stats.TotalWrite,
|
||||
readTime: d.ReadTime,
|
||||
writeTime: d.WriteTime,
|
||||
ioTime: d.IoTime,
|
||||
weightedIO: d.WeightedIO,
|
||||
readCount: d.ReadCount,
|
||||
writeCount: d.WriteCount,
|
||||
at: stats.Time,
|
||||
}
|
||||
if prev.at.IsZero() {
|
||||
prev = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
prev = prevDiskFromCounter(d, now)
|
||||
}
|
||||
}
|
||||
|
||||
msElapsed := uint64(now.Sub(prev.at).Milliseconds())
|
||||
|
||||
// Update per-interval snapshot
|
||||
a.diskPrev[cacheTimeMs][name] = prevDiskFromCounter(d, now)
|
||||
|
||||
// Avoid division by zero or clock issues
|
||||
if msElapsed < 100 {
|
||||
// Avoid division by zero or clock issues; update snapshot and continue
|
||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -602,15 +643,38 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
||||
// validate values
|
||||
if readMbPerSecond > 50_000 || writeMbPerSecond > 50_000 {
|
||||
slog.Warn("Invalid disk I/O. Resetting.", "name", d.Name, "read", readMbPerSecond, "write", writeMbPerSecond)
|
||||
// Reset interval snapshot and seed from current
|
||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
// also refresh agent baseline to avoid future negatives
|
||||
a.initializeDiskIoStats(ioCounters)
|
||||
continue
|
||||
}
|
||||
|
||||
// Update per-interval snapshot
|
||||
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||
// These properties are calculated differently on different platforms,
|
||||
// but generally represent cumulative time spent doing reads/writes on the device.
|
||||
// This can surpass 100% if there are multiple concurrent I/O operations.
|
||||
// Linux kernel docs:
|
||||
// This is the total number of milliseconds spent by all reads (as
|
||||
// measured from __make_request() to end_that_request_last()).
|
||||
// https://www.kernel.org/doc/Documentation/iostats.txt (fields 4, 8)
|
||||
diskReadTime := utils.TwoDecimals(float64(d.ReadTime-prev.readTime) / float64(msElapsed) * 100)
|
||||
diskWriteTime := utils.TwoDecimals(float64(d.WriteTime-prev.writeTime) / float64(msElapsed) * 100)
|
||||
|
||||
// I/O utilization %: fraction of wall time the device had any I/O in progress (0-100).
|
||||
diskIoUtilPct := utils.TwoDecimals(float64(d.IoTime-prev.ioTime) / float64(msElapsed) * 100)
|
||||
|
||||
// Weighted I/O: queue-depth weighted I/O time, normalized to interval (can exceed 100%).
|
||||
// Linux kernel field 11: incremented by iops_in_progress × ms_since_last_update.
|
||||
// Used to display queue depth. Multipled by 100 to increase accuracy of digit truncation (divided by 100 in UI).
|
||||
diskWeightedIO := utils.TwoDecimals(float64(d.WeightedIO-prev.weightedIO) / float64(msElapsed) * 100)
|
||||
|
||||
// r_await / w_await: average time per read/write operation in milliseconds.
|
||||
// Equivalent to r_await and w_await in iostat.
|
||||
var rAwait, wAwait float64
|
||||
if deltaReadCount := d.ReadCount - prev.readCount; deltaReadCount > 0 {
|
||||
rAwait = utils.TwoDecimals(float64(d.ReadTime-prev.readTime) / float64(deltaReadCount))
|
||||
}
|
||||
if deltaWriteCount := d.WriteCount - prev.writeCount; deltaWriteCount > 0 {
|
||||
wAwait = utils.TwoDecimals(float64(d.WriteTime-prev.writeTime) / float64(deltaWriteCount))
|
||||
}
|
||||
|
||||
// Update global fsStats baseline for cross-interval correctness
|
||||
stats.Time = now
|
||||
@@ -620,12 +684,24 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
||||
stats.DiskWritePs = writeMbPerSecond
|
||||
stats.DiskReadBytes = diskIORead
|
||||
stats.DiskWriteBytes = diskIOWrite
|
||||
stats.DiskIoStats[0] = diskReadTime
|
||||
stats.DiskIoStats[1] = diskWriteTime
|
||||
stats.DiskIoStats[2] = diskIoUtilPct
|
||||
stats.DiskIoStats[3] = rAwait
|
||||
stats.DiskIoStats[4] = wAwait
|
||||
stats.DiskIoStats[5] = diskWeightedIO
|
||||
|
||||
if stats.Root {
|
||||
systemStats.DiskReadPs = stats.DiskReadPs
|
||||
systemStats.DiskWritePs = stats.DiskWritePs
|
||||
systemStats.DiskIO[0] = diskIORead
|
||||
systemStats.DiskIO[1] = diskIOWrite
|
||||
systemStats.DiskIoStats[0] = diskReadTime
|
||||
systemStats.DiskIoStats[1] = diskWriteTime
|
||||
systemStats.DiskIoStats[2] = diskIoUtilPct
|
||||
systemStats.DiskIoStats[3] = rAwait
|
||||
systemStats.DiskIoStats[4] = wAwait
|
||||
systemStats.DiskIoStats[5] = diskWeightedIO
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -156,6 +156,7 @@ func (gm *GPUManager) updateAmdGpuData(cardPath string) bool {
|
||||
func readSysfsFloat(path string) (float64, error) {
|
||||
val, err := utils.ReadStringFileLimited(path, 64)
|
||||
if err != nil {
|
||||
slog.Debug("Failed to read sysfs value", "path", path, "error", err)
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseFloat(val, 64)
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
|
||||
"log/slog"
|
||||
@@ -51,6 +52,7 @@ func NewHandlerRegistry() *HandlerRegistry {
|
||||
registry.Register(common.GetContainerInfo, &GetContainerInfoHandler{})
|
||||
registry.Register(common.GetSmartData, &GetSmartDataHandler{})
|
||||
registry.Register(common.GetSystemdInfo, &GetSystemdInfoHandler{})
|
||||
registry.Register(common.SyncNetworkProbes, &SyncNetworkProbesHandler{})
|
||||
|
||||
return registry
|
||||
}
|
||||
@@ -203,3 +205,21 @@ func (h *GetSystemdInfoHandler) Handle(hctx *HandlerContext) error {
|
||||
|
||||
return hctx.SendResponse(details, hctx.RequestID)
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// SyncNetworkProbesHandler handles probe configuration sync from hub
|
||||
type SyncNetworkProbesHandler struct{}
|
||||
|
||||
func (h *SyncNetworkProbesHandler) Handle(hctx *HandlerContext) error {
|
||||
var req probe.SyncRequest
|
||||
if err := cbor.Unmarshal(hctx.Request.Data, &req); err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := hctx.Agent.probeManager.HandleSyncRequest(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return hctx.SendResponse(resp, hctx.RequestID)
|
||||
}
|
||||
|
||||
538
agent/probe.go
Normal file
538
agent/probe.go
Normal file
@@ -0,0 +1,538 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
// "strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"log/slog"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
)
|
||||
|
||||
// Probes run at user-defined intervals (e.g., every 10s).
|
||||
// To keep memory usage low and constant, data is stored in two layers:
|
||||
// 1. Raw samples: The most recent individual results (kept for probeRawRetention).
|
||||
// 2. Minute buckets: A ring buffer of 61 buckets, each representing one
|
||||
// wall-clock minute. Samples collected within the same minute are aggregated
|
||||
// (sum, min, max, count) into a single bucket.
|
||||
//
|
||||
// Short-term requests (<= 70s) use raw samples.
|
||||
// Long-term requests (up to 1h) use the minute buckets to avoid storing thousands
|
||||
// of individual data points.
|
||||
|
||||
const (
|
||||
// probeRawRetention is the duration to keep individual samples
|
||||
probeRawRetention = 61 * time.Second
|
||||
// probeMinuteBucketLen is the number of 1-minute buckets to keep (1 hour + 1 for partials)
|
||||
probeMinuteBucketLen int32 = 61
|
||||
)
|
||||
|
||||
// ProbeManager manages network probe tasks.
|
||||
type ProbeManager struct {
|
||||
mu sync.RWMutex
|
||||
probes map[string]*probeTask // key = probe.Config.Key()
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// probeTask owns retention buffers and cancellation for a single probe config.
|
||||
type probeTask struct {
|
||||
config probe.Config
|
||||
cancel chan struct{}
|
||||
mu sync.Mutex
|
||||
samples []probeSample
|
||||
buckets [probeMinuteBucketLen]probeBucket
|
||||
}
|
||||
|
||||
// probeSample stores one probe attempt and its collection time.
|
||||
type probeSample struct {
|
||||
responseUs int64 // -1 means loss
|
||||
timestamp time.Time
|
||||
}
|
||||
|
||||
// probeBucket stores one minute of aggregated probe data.
|
||||
type probeBucket struct {
|
||||
minute int32
|
||||
filled bool
|
||||
stats probeAggregate
|
||||
}
|
||||
|
||||
// probeAggregate accumulates successful response stats and total sample counts.
|
||||
type probeAggregate struct {
|
||||
sumUs int64
|
||||
minUs int64
|
||||
maxUs int64
|
||||
totalCount int64
|
||||
successCount int64
|
||||
}
|
||||
|
||||
func newProbeManager() *ProbeManager {
|
||||
return &ProbeManager{
|
||||
probes: make(map[string]*probeTask),
|
||||
httpClient: &http.Client{Timeout: 10 * time.Second},
|
||||
}
|
||||
}
|
||||
|
||||
func newProbeTask(config probe.Config) *probeTask {
|
||||
return &probeTask{
|
||||
config: config,
|
||||
cancel: make(chan struct{}),
|
||||
samples: make([]probeSample, 0, 64),
|
||||
}
|
||||
}
|
||||
|
||||
func newProbeTaskFromExisting(config probe.Config, existing *probeTask) *probeTask {
|
||||
task := newProbeTask(config)
|
||||
if existing == nil {
|
||||
return task
|
||||
}
|
||||
|
||||
existing.mu.Lock()
|
||||
defer existing.mu.Unlock()
|
||||
task.samples = append(task.samples, existing.samples...)
|
||||
task.buckets = existing.buckets
|
||||
return task
|
||||
}
|
||||
|
||||
// newProbeAggregate initializes an aggregate with an unset minimum value.
|
||||
func newProbeAggregate() probeAggregate {
|
||||
return probeAggregate{minUs: math.MaxInt64}
|
||||
}
|
||||
|
||||
// addResponse folds a single probe sample into the aggregate.
|
||||
func (agg *probeAggregate) addResponse(responseUs int64) {
|
||||
agg.totalCount++
|
||||
if responseUs < 0 {
|
||||
return
|
||||
}
|
||||
agg.successCount++
|
||||
agg.sumUs += responseUs
|
||||
if responseUs < agg.minUs {
|
||||
agg.minUs = responseUs
|
||||
}
|
||||
if responseUs > agg.maxUs {
|
||||
agg.maxUs = responseUs
|
||||
}
|
||||
}
|
||||
|
||||
// addAggregate merges another aggregate into this one.
|
||||
func (agg *probeAggregate) addAggregate(other probeAggregate) {
|
||||
if other.totalCount == 0 {
|
||||
return
|
||||
}
|
||||
agg.totalCount += other.totalCount
|
||||
agg.successCount += other.successCount
|
||||
agg.sumUs += other.sumUs
|
||||
if other.successCount == 0 {
|
||||
return
|
||||
}
|
||||
if agg.minUs == math.MaxInt64 || other.minUs < agg.minUs {
|
||||
agg.minUs = other.minUs
|
||||
}
|
||||
if other.maxUs > agg.maxUs {
|
||||
agg.maxUs = other.maxUs
|
||||
}
|
||||
}
|
||||
|
||||
// hasData reports whether the aggregate contains any samples.
|
||||
func (agg probeAggregate) hasData() bool {
|
||||
return agg.totalCount > 0
|
||||
}
|
||||
|
||||
// result converts the aggregate into the probe result format.
|
||||
func (agg probeAggregate) result() probe.Result {
|
||||
avg := agg.avgResponse()
|
||||
result := probe.Result{
|
||||
AvgResponse: avg,
|
||||
MinResponse: agg.minUs,
|
||||
MaxResponse: agg.maxUs,
|
||||
PacketLoss: agg.lossPercentage(),
|
||||
}
|
||||
if agg.successCount == 0 {
|
||||
result.MinResponse, result.MaxResponse = 0, 0
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// avgResponse returns the rounded average of successful samples.
|
||||
func (agg probeAggregate) avgResponse() int64 {
|
||||
if agg.successCount == 0 {
|
||||
return 0
|
||||
}
|
||||
return agg.sumUs / agg.successCount
|
||||
|
||||
}
|
||||
|
||||
// lossPercentage returns the rounded failure rate for the aggregate.
|
||||
func (agg probeAggregate) lossPercentage() float64 {
|
||||
if agg.totalCount == 0 {
|
||||
return 0
|
||||
}
|
||||
return math.Round(float64(agg.totalCount-agg.successCount)/float64(agg.totalCount)*10000) / 100
|
||||
}
|
||||
|
||||
// SyncProbes replaces all probe tasks with the given configs.
|
||||
func (pm *ProbeManager) SyncProbes(configs []probe.Config) {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
|
||||
// Build set of new keys
|
||||
newKeys := make(map[string]probe.Config, len(configs))
|
||||
for _, cfg := range configs {
|
||||
if cfg.ID == "" {
|
||||
continue
|
||||
}
|
||||
newKeys[cfg.ID] = cfg
|
||||
}
|
||||
|
||||
// Stop removed probes
|
||||
for key, task := range pm.probes {
|
||||
if _, exists := newKeys[key]; !exists {
|
||||
close(task.cancel)
|
||||
delete(pm.probes, key)
|
||||
}
|
||||
}
|
||||
|
||||
// Start new probes and restart tasks whose config changed.
|
||||
for key, cfg := range newKeys {
|
||||
task, exists := pm.probes[key]
|
||||
if exists && task.config == cfg {
|
||||
continue
|
||||
}
|
||||
if exists {
|
||||
close(task.cancel)
|
||||
}
|
||||
task = newProbeTaskFromExisting(cfg, task)
|
||||
pm.probes[key] = task
|
||||
go pm.runProbe(task, false)
|
||||
}
|
||||
}
|
||||
|
||||
// HandleSyncRequest applies a full or incremental probe sync request.
|
||||
func (pm *ProbeManager) HandleSyncRequest(req probe.SyncRequest) (probe.SyncResponse, error) {
|
||||
switch req.Action {
|
||||
case probe.SyncActionReplace:
|
||||
pm.SyncProbes(req.Configs)
|
||||
return probe.SyncResponse{}, nil
|
||||
case probe.SyncActionUpsert:
|
||||
result, err := pm.UpsertProbe(req.Config, req.RunNow)
|
||||
if err != nil {
|
||||
return probe.SyncResponse{}, err
|
||||
}
|
||||
if result == nil {
|
||||
return probe.SyncResponse{}, nil
|
||||
}
|
||||
return probe.SyncResponse{Result: *result}, nil
|
||||
case probe.SyncActionDelete:
|
||||
if req.Config.ID == "" {
|
||||
return probe.SyncResponse{}, errors.New("missing probe ID for delete")
|
||||
}
|
||||
pm.DeleteProbe(req.Config.ID)
|
||||
return probe.SyncResponse{}, nil
|
||||
default:
|
||||
return probe.SyncResponse{}, fmt.Errorf("unknown probe sync action: %d", req.Action)
|
||||
}
|
||||
}
|
||||
|
||||
// UpsertProbe creates or replaces a single probe task.
|
||||
func (pm *ProbeManager) UpsertProbe(config probe.Config, runNow bool) (*probe.Result, error) {
|
||||
if config.ID == "" {
|
||||
return nil, errors.New("missing probe ID")
|
||||
}
|
||||
|
||||
pm.mu.Lock()
|
||||
task, exists := pm.probes[config.ID]
|
||||
startTask := false
|
||||
if exists && task.config == config {
|
||||
pm.mu.Unlock()
|
||||
if !runNow {
|
||||
return nil, nil
|
||||
}
|
||||
return pm.runProbeNow(task), nil
|
||||
}
|
||||
if exists {
|
||||
close(task.cancel)
|
||||
}
|
||||
task = newProbeTaskFromExisting(config, task)
|
||||
pm.probes[config.ID] = task
|
||||
startTask = true
|
||||
pm.mu.Unlock()
|
||||
|
||||
if runNow {
|
||||
result := pm.runProbeNow(task)
|
||||
if startTask {
|
||||
go pm.runProbe(task, false)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
if startTask {
|
||||
go pm.runProbe(task, false)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// DeleteProbe stops and removes a single probe task.
|
||||
func (pm *ProbeManager) DeleteProbe(id string) {
|
||||
if id == "" {
|
||||
return
|
||||
}
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
if task, exists := pm.probes[id]; exists {
|
||||
close(task.cancel)
|
||||
delete(pm.probes, id)
|
||||
}
|
||||
}
|
||||
|
||||
// GetResults returns aggregated results for all probes over the last supplied duration in ms.
|
||||
func (pm *ProbeManager) GetResults(durationMs uint16) map[string]probe.Result {
|
||||
pm.mu.RLock()
|
||||
defer pm.mu.RUnlock()
|
||||
|
||||
results := make(map[string]probe.Result, len(pm.probes))
|
||||
now := time.Now()
|
||||
duration := time.Duration(durationMs) * time.Millisecond
|
||||
|
||||
for _, task := range pm.probes {
|
||||
task.mu.Lock()
|
||||
result, ok := task.resultLocked(duration, now)
|
||||
task.mu.Unlock()
|
||||
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
results[task.config.ID] = result
|
||||
}
|
||||
|
||||
return results
|
||||
}
|
||||
|
||||
// Stop stops all probe tasks.
|
||||
func (pm *ProbeManager) Stop() {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
for key, task := range pm.probes {
|
||||
close(task.cancel)
|
||||
delete(pm.probes, key)
|
||||
}
|
||||
}
|
||||
|
||||
// runProbe executes a single probe task in a loop.
|
||||
func (pm *ProbeManager) runProbe(task *probeTask, runNow bool) {
|
||||
interval := time.Duration(task.config.Interval) * time.Second
|
||||
if interval < time.Second {
|
||||
interval = 30 * time.Second
|
||||
}
|
||||
|
||||
stagger := getStagger(interval.Milliseconds())
|
||||
|
||||
slog.Debug("starting probe task", "target", task.config.Target, "delay", stagger.String(), "interval", interval.String())
|
||||
|
||||
if runNow {
|
||||
pm.executeProbe(task)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-task.cancel:
|
||||
// slog.Info("removed probe", "target", task.config.Target)
|
||||
return
|
||||
case <-time.After(stagger):
|
||||
pm.executeProbe(task)
|
||||
}
|
||||
|
||||
ticker := time.Tick(interval)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-task.cancel:
|
||||
// slog.Info("removed probe", "target", task.config.Target)
|
||||
return
|
||||
case <-ticker:
|
||||
pm.executeProbe(task)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// getStagger returns a random duration between intervalSeconds/2 and intervalSeconds to stagger initial probe executions
|
||||
func getStagger(intervalMilli int64) time.Duration {
|
||||
intervalMilliInt := int(intervalMilli)
|
||||
randomDelayInt := rand.Intn(intervalMilliInt)
|
||||
if randomDelayInt < intervalMilliInt/2 {
|
||||
randomDelayInt += intervalMilliInt / 2
|
||||
}
|
||||
return time.Duration(randomDelayInt) * time.Millisecond
|
||||
}
|
||||
|
||||
func (pm *ProbeManager) runProbeNow(task *probeTask) *probe.Result {
|
||||
pm.executeProbe(task)
|
||||
task.mu.Lock()
|
||||
defer task.mu.Unlock()
|
||||
result, ok := task.resultLocked(time.Minute, time.Now())
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return &result
|
||||
}
|
||||
|
||||
// resultLocked returns the aggregated probe result for the requested duration along with a bool indicating whether any data was available.
|
||||
func (task *probeTask) resultLocked(duration time.Duration, now time.Time) (probe.Result, bool) {
|
||||
agg := task.aggregateLocked(duration, now)
|
||||
hourAgg := task.aggregateLocked(time.Hour, now)
|
||||
if !agg.hasData() {
|
||||
return probe.Result{}, false
|
||||
}
|
||||
|
||||
result := agg.result()
|
||||
|
||||
result.AvgResponse1h = hourAgg.avgResponse()
|
||||
result.MinResponse1h = hourAgg.minUs
|
||||
result.MaxResponse1h = hourAgg.maxUs
|
||||
result.PacketLoss1h = hourAgg.lossPercentage()
|
||||
|
||||
if hourAgg.successCount == 0 {
|
||||
result.MinResponse1h, result.MaxResponse1h = 0, 0
|
||||
}
|
||||
return result, true
|
||||
}
|
||||
|
||||
// aggregateLocked collects probe data for the requested time window.
|
||||
func (task *probeTask) aggregateLocked(duration time.Duration, now time.Time) probeAggregate {
|
||||
cutoff := now.Add(-duration)
|
||||
// Keep short windows exact; longer windows read from minute buckets to avoid raw-sample retention.
|
||||
if duration <= probeRawRetention {
|
||||
return aggregateSamplesSince(task.samples, cutoff)
|
||||
}
|
||||
return aggregateBucketsSince(task.buckets[:], cutoff, now)
|
||||
}
|
||||
|
||||
// aggregateSamplesSince aggregates raw samples newer than the cutoff.
|
||||
func aggregateSamplesSince(samples []probeSample, cutoff time.Time) probeAggregate {
|
||||
agg := newProbeAggregate()
|
||||
for _, sample := range samples {
|
||||
if sample.timestamp.Before(cutoff) {
|
||||
continue
|
||||
}
|
||||
agg.addResponse(sample.responseUs)
|
||||
}
|
||||
return agg
|
||||
}
|
||||
|
||||
// aggregateBucketsSince aggregates minute buckets overlapping the requested window.
|
||||
func aggregateBucketsSince(buckets []probeBucket, cutoff, now time.Time) probeAggregate {
|
||||
agg := newProbeAggregate()
|
||||
startMinute := int32(cutoff.Unix() / 60)
|
||||
endMinute := int32(now.Unix() / 60)
|
||||
for _, bucket := range buckets {
|
||||
if !bucket.filled || bucket.minute < startMinute || bucket.minute > endMinute {
|
||||
continue
|
||||
}
|
||||
agg.addAggregate(bucket.stats)
|
||||
}
|
||||
return agg
|
||||
}
|
||||
|
||||
// addSampleLocked stores a fresh sample in both raw and per-minute retention buffers.
|
||||
func (task *probeTask) addSampleLocked(sample probeSample) {
|
||||
cutoff := sample.timestamp.Add(-probeRawRetention)
|
||||
start := 0
|
||||
for i := range task.samples {
|
||||
if !task.samples[i].timestamp.Before(cutoff) {
|
||||
start = i
|
||||
break
|
||||
}
|
||||
if i == len(task.samples)-1 {
|
||||
start = len(task.samples)
|
||||
}
|
||||
}
|
||||
if start > 0 {
|
||||
size := copy(task.samples, task.samples[start:])
|
||||
task.samples = task.samples[:size]
|
||||
}
|
||||
task.samples = append(task.samples, sample)
|
||||
|
||||
minute := int32(sample.timestamp.Unix() / 60)
|
||||
// Each slot stores one wall-clock minute, so the ring stays fixed-size at ~1h per probe.
|
||||
bucket := &task.buckets[minute%probeMinuteBucketLen]
|
||||
if !bucket.filled || bucket.minute != minute {
|
||||
bucket.minute = minute
|
||||
bucket.filled = true
|
||||
bucket.stats = newProbeAggregate()
|
||||
}
|
||||
bucket.stats.addResponse(sample.responseUs)
|
||||
}
|
||||
|
||||
// executeProbe runs the configured probe and records the sample.
|
||||
func (pm *ProbeManager) executeProbe(task *probeTask) {
|
||||
// slog.Info("running probe", "id", task.config.ID, "interval", task.config.Interval)
|
||||
var responseUs int64
|
||||
var err error
|
||||
|
||||
switch task.config.Protocol {
|
||||
case "icmp":
|
||||
responseUs, err = probeICMP(task.config.Target)
|
||||
case "tcp":
|
||||
responseUs, err = probeTCP(task.config.Target, task.config.Port)
|
||||
case "http":
|
||||
responseUs, err = probeHTTP(pm.httpClient, task.config.Target)
|
||||
default:
|
||||
slog.Warn("unknown probe protocol", "protocol", task.config.Protocol)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
slog.Warn("probe failed", "err", err, "target", task.config.Target, "protocol", task.config.Protocol)
|
||||
}
|
||||
|
||||
sample := probeSample{
|
||||
responseUs: responseUs,
|
||||
timestamp: time.Now(),
|
||||
}
|
||||
|
||||
task.mu.Lock()
|
||||
task.addSampleLocked(sample)
|
||||
task.mu.Unlock()
|
||||
}
|
||||
|
||||
// probeTCP measures pure TCP handshake response (excluding DNS resolution).
|
||||
// Returns -1 and an error on failure.
|
||||
func probeTCP(target string, port uint16) (int64, error) {
|
||||
// Resolve DNS first, outside the timing window
|
||||
ips, err := net.LookupHost(target)
|
||||
if err != nil || len(ips) == 0 {
|
||||
return -1, err
|
||||
}
|
||||
addr := net.JoinHostPort(ips[0], fmt.Sprintf("%d", port))
|
||||
|
||||
// Measure only the TCP handshake
|
||||
start := time.Now()
|
||||
conn, err := net.DialTimeout("tcp", addr, 3*time.Second)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
conn.Close()
|
||||
return time.Since(start).Microseconds(), nil
|
||||
}
|
||||
|
||||
// probeHTTP measures HTTP GET request response in microseconds. Returns -1 and an error on failure.
|
||||
func probeHTTP(client *http.Client, url string) (int64, error) {
|
||||
if client == nil {
|
||||
client = http.DefaultClient
|
||||
}
|
||||
start := time.Now()
|
||||
resp, err := client.Get(url)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode >= 400 {
|
||||
return -1, fmt.Errorf("HTTP error: %s", resp.Status)
|
||||
}
|
||||
return time.Since(start).Microseconds(), nil
|
||||
}
|
||||
241
agent/probe_ping.go
Normal file
241
agent/probe_ping.go
Normal file
@@ -0,0 +1,241 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/icmp"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
var pingTimeRegex = regexp.MustCompile(`time[=<]([\d.]+)\s*ms`)
|
||||
|
||||
type icmpPacketConn interface {
|
||||
Close() error
|
||||
}
|
||||
|
||||
// icmpMethod tracks which ICMP approach to use. Once a method succeeds or
|
||||
// all native methods fail, the choice is cached so subsequent probes skip
|
||||
// the trial-and-error overhead.
|
||||
type icmpMethod uint8
|
||||
|
||||
const (
|
||||
icmpUntried icmpMethod = iota // haven't tried yet
|
||||
icmpRaw // privileged raw socket
|
||||
icmpDatagram // unprivileged datagram socket
|
||||
icmpExecFallback // shell out to system ping command
|
||||
)
|
||||
|
||||
// icmpFamily holds the network parameters and cached detection result for one address family.
|
||||
type icmpFamily struct {
|
||||
rawNetwork string // e.g. "ip4:icmp" or "ip6:ipv6-icmp"
|
||||
dgramNetwork string // e.g. "udp4" or "udp6"
|
||||
listenAddr string // "0.0.0.0" or "::"
|
||||
echoType icmp.Type // outgoing echo request type
|
||||
replyType icmp.Type // expected echo reply type
|
||||
proto int // IANA protocol number for parsing replies
|
||||
isIPv6 bool
|
||||
mode icmpMethod // cached detection result (guarded by icmpModeMu)
|
||||
}
|
||||
|
||||
var (
|
||||
icmpV4 = icmpFamily{
|
||||
rawNetwork: "ip4:icmp",
|
||||
dgramNetwork: "udp4",
|
||||
listenAddr: "0.0.0.0",
|
||||
echoType: ipv4.ICMPTypeEcho,
|
||||
replyType: ipv4.ICMPTypeEchoReply,
|
||||
proto: 1,
|
||||
}
|
||||
icmpV6 = icmpFamily{
|
||||
rawNetwork: "ip6:ipv6-icmp",
|
||||
dgramNetwork: "udp6",
|
||||
listenAddr: "::",
|
||||
echoType: ipv6.ICMPTypeEchoRequest,
|
||||
replyType: ipv6.ICMPTypeEchoReply,
|
||||
proto: 58,
|
||||
isIPv6: true,
|
||||
}
|
||||
icmpModeMu sync.Mutex
|
||||
icmpListen = func(network, listenAddr string) (icmpPacketConn, error) {
|
||||
return icmp.ListenPacket(network, listenAddr)
|
||||
}
|
||||
)
|
||||
|
||||
// probeICMP sends an ICMP echo request and measures round-trip response.
|
||||
// Supports both IPv4 and IPv6 targets. The ICMP method (raw socket,
|
||||
// unprivileged datagram, or exec fallback) is detected once per address
|
||||
// family and cached for subsequent probes.
|
||||
// Returns response in microseconds, or -1 and an error on failure.
|
||||
func probeICMP(target string) (int64, error) {
|
||||
family, ip, err := resolveICMPTarget(target)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
icmpModeMu.Lock()
|
||||
if family.mode == icmpUntried {
|
||||
family.mode = detectICMPMode(family, icmpListen)
|
||||
}
|
||||
mode := family.mode
|
||||
icmpModeMu.Unlock()
|
||||
|
||||
switch mode {
|
||||
case icmpRaw:
|
||||
return probeICMPNative(family.rawNetwork, family, &net.IPAddr{IP: ip})
|
||||
case icmpDatagram:
|
||||
return probeICMPNative(family.dgramNetwork, family, &net.UDPAddr{IP: ip})
|
||||
case icmpExecFallback:
|
||||
return probeICMPExec(target, family.isIPv6)
|
||||
default:
|
||||
return -1, errors.New("unsupported ICMP mode")
|
||||
}
|
||||
}
|
||||
|
||||
// resolveICMPTarget resolves a target hostname or IP to determine the address
|
||||
// family and concrete IP address. Prefers IPv4 for dual-stack hostnames.
|
||||
func resolveICMPTarget(target string) (*icmpFamily, net.IP, error) {
|
||||
if ip := net.ParseIP(target); ip != nil {
|
||||
if ip.To4() != nil {
|
||||
return &icmpV4, ip.To4(), nil
|
||||
}
|
||||
return &icmpV6, ip, nil
|
||||
}
|
||||
|
||||
ips, err := net.LookupIP(target)
|
||||
if err != nil || len(ips) == 0 {
|
||||
return nil, nil, err
|
||||
}
|
||||
for _, ip := range ips {
|
||||
if v4 := ip.To4(); v4 != nil {
|
||||
return &icmpV4, v4, nil
|
||||
}
|
||||
}
|
||||
return &icmpV6, ips[0], nil
|
||||
}
|
||||
|
||||
func detectICMPMode(family *icmpFamily, listen func(network, listenAddr string) (icmpPacketConn, error)) icmpMethod {
|
||||
label := "IPv4"
|
||||
if family.isIPv6 {
|
||||
label = "IPv6"
|
||||
}
|
||||
|
||||
conn, err := listen(family.rawNetwork, family.listenAddr)
|
||||
slog.Debug("ICMP raw socket test", "family", label, "err", err)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
return icmpRaw
|
||||
}
|
||||
|
||||
conn, err = listen(family.dgramNetwork, family.listenAddr)
|
||||
slog.Debug("ICMP datagram socket test", "family", label, "err", err)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
return icmpDatagram
|
||||
}
|
||||
|
||||
return icmpExecFallback
|
||||
}
|
||||
|
||||
// probeICMPNative sends an ICMP echo request using Go's x/net/icmp package.
|
||||
func probeICMPNative(network string, family *icmpFamily, dst net.Addr) (int64, error) {
|
||||
conn, err := icmp.ListenPacket(network, family.listenAddr)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Build ICMP echo request
|
||||
msg := &icmp.Message{
|
||||
Type: family.echoType,
|
||||
Code: 0,
|
||||
Body: &icmp.Echo{
|
||||
ID: os.Getpid() & 0xffff,
|
||||
Seq: 1,
|
||||
Data: []byte("beszel-probe"),
|
||||
},
|
||||
}
|
||||
msgBytes, err := msg.Marshal(nil)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
// Set deadline before sending
|
||||
conn.SetDeadline(time.Now().Add(3 * time.Second))
|
||||
|
||||
start := time.Now()
|
||||
if _, err := conn.WriteTo(msgBytes, dst); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
// Read reply
|
||||
buf := make([]byte, 1500)
|
||||
for {
|
||||
n, _, err := conn.ReadFrom(buf)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
reply, err := icmp.ParseMessage(family.proto, buf[:n])
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
if reply.Type == family.replyType {
|
||||
return time.Since(start).Microseconds(), nil
|
||||
}
|
||||
// Ignore non-echo-reply messages (e.g. destination unreachable) and keep reading
|
||||
}
|
||||
}
|
||||
|
||||
// probeICMPExec falls back to the system ping command. Returns -1 and an error on failure.
|
||||
func probeICMPExec(target string, isIPv6 bool) (int64, error) {
|
||||
var cmd *exec.Cmd
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
if isIPv6 {
|
||||
cmd = exec.Command("ping", "-6", "-n", "1", "-w", "3000", target)
|
||||
} else {
|
||||
cmd = exec.Command("ping", "-n", "1", "-w", "3000", target)
|
||||
}
|
||||
default:
|
||||
if isIPv6 {
|
||||
cmd = exec.Command("ping", "-6", "-c", "1", "-W", "3", target)
|
||||
} else {
|
||||
cmd = exec.Command("ping", "-c", "1", "-W", "3", target)
|
||||
}
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
// If ping fails but we got output, still try to parse
|
||||
if len(output) == 0 {
|
||||
return -1, err
|
||||
}
|
||||
}
|
||||
|
||||
matches := pingTimeRegex.FindSubmatch(output)
|
||||
if len(matches) >= 2 {
|
||||
if ms, err := strconv.ParseFloat(string(matches[1]), 64); err == nil {
|
||||
return int64(math.Round(ms * 1000)), nil
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: use wall clock time if ping succeeded but parsing failed
|
||||
if err == nil {
|
||||
return time.Since(start).Microseconds(), nil
|
||||
}
|
||||
return -1, err
|
||||
}
|
||||
121
agent/probe_ping_test.go
Normal file
121
agent/probe_ping_test.go
Normal file
@@ -0,0 +1,121 @@
|
||||
//go:build testing
|
||||
|
||||
package agent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// testICMPPacketConn is a no-op stand-in for the connection returned by the
// listen function; detectICMPMode only ever calls Close on a success.
type testICMPPacketConn struct{}

func (testICMPPacketConn) Close() error { return nil }
||||
|
||||
// TestDetectICMPMode verifies, for both address families, that detection
// prefers raw sockets, falls back to datagram sockets, then to exec, and that
// it attempts the listen networks in exactly that order.
func TestDetectICMPMode(t *testing.T) {
	tests := []struct {
		name         string
		family       *icmpFamily
		rawErr       error // error to return for the raw-socket attempt (nil = success)
		udpErr       error // error to return for the datagram attempt (nil = success)
		want         icmpMethod
		wantNetworks []string // expected listen networks, in call order
	}{
		{
			name:         "IPv4 prefers raw socket when available",
			family:       &icmpV4,
			want:         icmpRaw,
			wantNetworks: []string{"ip4:icmp"},
		},
		{
			name:         "IPv4 uses datagram when raw unavailable",
			family:       &icmpV4,
			rawErr:       errors.New("operation not permitted"),
			want:         icmpDatagram,
			wantNetworks: []string{"ip4:icmp", "udp4"},
		},
		{
			name:         "IPv4 falls back to exec when both unavailable",
			family:       &icmpV4,
			rawErr:       errors.New("operation not permitted"),
			udpErr:       errors.New("protocol not supported"),
			want:         icmpExecFallback,
			wantNetworks: []string{"ip4:icmp", "udp4"},
		},
		{
			name:         "IPv6 prefers raw socket when available",
			family:       &icmpV6,
			want:         icmpRaw,
			wantNetworks: []string{"ip6:ipv6-icmp"},
		},
		{
			name:         "IPv6 uses datagram when raw unavailable",
			family:       &icmpV6,
			rawErr:       errors.New("operation not permitted"),
			want:         icmpDatagram,
			wantNetworks: []string{"ip6:ipv6-icmp", "udp6"},
		},
		{
			name:         "IPv6 falls back to exec when both unavailable",
			family:       &icmpV6,
			rawErr:       errors.New("operation not permitted"),
			udpErr:       errors.New("protocol not supported"),
			want:         icmpExecFallback,
			wantNetworks: []string{"ip6:ipv6-icmp", "udp6"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			calls := make([]string, 0, 2)
			// Fake listener: records each attempted network and returns the
			// configured error (or a no-op connection on success).
			listen := func(network, listenAddr string) (icmpPacketConn, error) {
				require.Equal(t, tt.family.listenAddr, listenAddr)
				calls = append(calls, network)
				switch network {
				case tt.family.rawNetwork:
					if tt.rawErr != nil {
						return nil, tt.rawErr
					}
				case tt.family.dgramNetwork:
					if tt.udpErr != nil {
						return nil, tt.udpErr
					}
				default:
					t.Fatalf("unexpected network %q", network)
				}
				return testICMPPacketConn{}, nil
			}

			assert.Equal(t, tt.want, detectICMPMode(tt.family, listen))
			assert.Equal(t, tt.wantNetworks, calls)
		})
	}
}
|
||||
|
||||
// TestResolveICMPTarget checks family selection for IP literals (no DNS
// involved): plain IPv4, plain IPv6, and the IPv4-mapped IPv6 form which
// should be treated as IPv4.
func TestResolveICMPTarget(t *testing.T) {
	t.Run("IPv4 literal", func(t *testing.T) {
		family, ip, err := resolveICMPTarget("127.0.0.1")
		require.NoError(t, err)
		require.NotNil(t, family)
		assert.False(t, family.isIPv6)
		assert.Equal(t, "127.0.0.1", ip.String())
	})

	t.Run("IPv6 literal", func(t *testing.T) {
		family, ip, err := resolveICMPTarget("::1")
		require.NoError(t, err)
		require.NotNil(t, family)
		assert.True(t, family.isIPv6)
		assert.Equal(t, "::1", ip.String())
	})

	t.Run("IPv4-mapped IPv6 resolves as IPv4", func(t *testing.T) {
		family, ip, err := resolveICMPTarget("::ffff:127.0.0.1")
		require.NoError(t, err)
		require.NotNil(t, family)
		assert.False(t, family.isIPv6)
		assert.Equal(t, "127.0.0.1", ip.String())
	})
}
|
||||
356
agent/probe_test.go
Normal file
356
agent/probe_test.go
Normal file
@@ -0,0 +1,356 @@
|
||||
package agent
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestProbeTaskAggregateLockedUsesRawSamplesForShortWindows checks that a
// sub-10-minute window aggregates only raw samples inside the window: the
// 90s-old sample is excluded, and the -1 sample counts as a lost packet.
func TestProbeTaskAggregateLockedUsesRawSamplesForShortWindows(t *testing.T) {
	now := time.Date(2026, time.April, 21, 12, 0, 0, 0, time.UTC)
	task := &probeTask{}

	task.addSampleLocked(probeSample{responseUs: 10, timestamp: now.Add(-90 * time.Second)})
	task.addSampleLocked(probeSample{responseUs: 20, timestamp: now.Add(-30 * time.Second)})
	task.addSampleLocked(probeSample{responseUs: -1, timestamp: now.Add(-10 * time.Second)})

	agg := task.aggregateLocked(time.Minute, now)
	require.True(t, agg.hasData())
	assert.Equal(t, int64(2), agg.totalCount)
	assert.Equal(t, int64(1), agg.successCount)
	result := agg.result()
	// Only the single in-window success (20µs) contributes to the stats.
	assert.Equal(t, int64(20), result.AvgResponse)
	assert.Equal(t, int64(20), result.MinResponse)
	assert.Equal(t, int64(20), result.MaxResponse)
	assert.Equal(t, 50.0, result.PacketLoss)
}
|
||||
|
||||
// TestProbeTaskAggregateLockedUsesMinuteBucketsForLongWindows checks that a
// 10-minute window aggregates from minute buckets: the 11-minute-old sample
// falls outside, while the remaining four (including one loss) are counted.
func TestProbeTaskAggregateLockedUsesMinuteBucketsForLongWindows(t *testing.T) {
	now := time.Date(2026, time.April, 21, 12, 0, 30, 0, time.UTC)
	task := &probeTask{}

	task.addSampleLocked(probeSample{responseUs: 10, timestamp: now.Add(-11 * time.Minute)})
	task.addSampleLocked(probeSample{responseUs: 20, timestamp: now.Add(-9 * time.Minute)})
	task.addSampleLocked(probeSample{responseUs: 40, timestamp: now.Add(-5 * time.Minute)})
	task.addSampleLocked(probeSample{responseUs: -1, timestamp: now.Add(-90 * time.Second)})
	task.addSampleLocked(probeSample{responseUs: 30, timestamp: now.Add(-30 * time.Second)})

	agg := task.aggregateLocked(10*time.Minute, now)
	require.True(t, agg.hasData())
	assert.Equal(t, int64(4), agg.totalCount)
	assert.Equal(t, int64(3), agg.successCount)
	result := agg.result()
	// Successes 20/40/30 → avg 30, min 20, max 40; 1 of 4 lost → 25%.
	assert.Equal(t, int64(30), result.AvgResponse)
	assert.Equal(t, int64(20), result.MinResponse)
	assert.Equal(t, int64(40), result.MaxResponse)
	assert.Equal(t, 25.0, result.PacketLoss)
}
|
||||
|
||||
// TestProbeTaskAddSampleLockedTrimsRawSamplesButKeepsBucketHistory checks that
// adding a new sample trims old raw samples from the slice while the old data
// remains reachable through bucket aggregation for longer windows.
func TestProbeTaskAddSampleLockedTrimsRawSamplesButKeepsBucketHistory(t *testing.T) {
	now := time.Date(2026, time.April, 21, 12, 0, 0, 0, time.UTC)
	task := &probeTask{}

	task.addSampleLocked(probeSample{responseUs: 10, timestamp: now.Add(-10 * time.Minute)})
	task.addSampleLocked(probeSample{responseUs: 20, timestamp: now})

	// The 10-minute-old raw sample was trimmed; only the fresh one remains.
	require.Len(t, task.samples, 1)
	assert.Equal(t, int64(20), task.samples[0].responseUs)

	// Aggregating over 10 minutes still sees both samples via the buckets.
	agg := task.aggregateLocked(10*time.Minute, now)
	require.True(t, agg.hasData())
	assert.Equal(t, int64(2), agg.totalCount)
	assert.Equal(t, int64(2), agg.successCount)
	result := agg.result()
	assert.Equal(t, int64(15), result.AvgResponse)
	assert.Equal(t, int64(10), result.MinResponse)
	assert.Equal(t, int64(20), result.MaxResponse)
	assert.Equal(t, 0.0, result.PacketLoss)
}
|
||||
|
||||
// TestProbeManagerGetResultsIncludesHourResponseRange checks that GetResults
// reports both the short-window stats (derived from the interval argument)
// and the parallel 1-hour stats (the *1h fields) for the same task.
func TestProbeManagerGetResultsIncludesHourResponseRange(t *testing.T) {
	now := time.Now().UTC()
	task := &probeTask{config: probe.Config{ID: "probe-1"}}
	task.addSampleLocked(probeSample{responseUs: 10, timestamp: now.Add(-30 * time.Minute)})
	task.addSampleLocked(probeSample{responseUs: 20, timestamp: now.Add(-9 * time.Minute)})
	task.addSampleLocked(probeSample{responseUs: 40, timestamp: now.Add(-5 * time.Minute)})
	task.addSampleLocked(probeSample{responseUs: 30, timestamp: now.Add(-50 * time.Second)})
	task.addSampleLocked(probeSample{responseUs: -1, timestamp: now.Add(-30 * time.Second)})

	pm := &ProbeManager{probes: map[string]*probeTask{"icmp:example.com": task}}

	// Interval argument is in milliseconds; one minute here.
	results := pm.GetResults(uint16(time.Minute / time.Millisecond))
	result, ok := results["probe-1"]
	require.True(t, ok)
	// Last minute: one success (30) and one loss → 50% loss.
	assert.Equal(t, int64(30), result.AvgResponse)
	// Last hour: successes 10/20/40/30 → avg 25, min 10, max 40; 1 of 5 lost.
	assert.Equal(t, int64(25), result.AvgResponse1h)
	assert.Equal(t, int64(30), result.MinResponse)
	assert.Equal(t, int64(10), result.MinResponse1h)
	assert.Equal(t, int64(30), result.MaxResponse)
	assert.Equal(t, int64(40), result.MaxResponse1h)
	assert.Equal(t, 50.0, result.PacketLoss)
	assert.Equal(t, 20.0, result.PacketLoss1h)
}
|
||||
|
||||
// TestProbeManagerGetResultsIncludesLossOnlyHourData checks that a task with
// only failed samples still produces a result: zeroed response stats and 100%
// packet loss in both the short window and the 1-hour window.
func TestProbeManagerGetResultsIncludesLossOnlyHourData(t *testing.T) {
	now := time.Now().UTC()
	task := &probeTask{config: probe.Config{ID: "probe-1"}}
	task.addSampleLocked(probeSample{responseUs: -1, timestamp: now.Add(-30 * time.Second)})
	task.addSampleLocked(probeSample{responseUs: -1, timestamp: now.Add(-10 * time.Second)})

	pm := &ProbeManager{probes: map[string]*probeTask{"icmp:example.com": task}}

	results := pm.GetResults(uint16(time.Minute / time.Millisecond))
	result, ok := results["probe-1"]
	require.True(t, ok)
	assert.Equal(t, int64(0), result.AvgResponse)
	assert.Equal(t, int64(0), result.AvgResponse1h)
	assert.Equal(t, int64(0), result.MinResponse)
	assert.Equal(t, int64(0), result.MinResponse1h)
	assert.Equal(t, int64(0), result.MaxResponse)
	assert.Equal(t, int64(0), result.MaxResponse1h)
	assert.Equal(t, 100.0, result.PacketLoss)
	assert.Equal(t, 100.0, result.PacketLoss1h)
}
|
||||
|
||||
// TestProbeConfigResultKeyUsesSyncedID documents that the hub-assigned ID is
// carried verbatim on the config and serves as the result key.
func TestProbeConfigResultKeyUsesSyncedID(t *testing.T) {
	cfg := probe.Config{ID: "probe-1", Target: "1.1.1.1", Protocol: "icmp", Interval: 10}
	assert.Equal(t, "probe-1", cfg.ID)
}
|
||||
|
||||
// TestProbeManagerSyncProbesSkipsConfigsWithoutStableID checks that SyncProbes
// registers configs carrying an ID and silently drops those without one.
func TestProbeManagerSyncProbesSkipsConfigsWithoutStableID(t *testing.T) {
	validCfg := probe.Config{ID: "probe-1", Target: "ignored", Protocol: "noop", Interval: 10}
	invalidCfg := probe.Config{Target: "ignored", Protocol: "noop", Interval: 10} // no ID
	
	pm := newProbeManager()
	pm.SyncProbes([]probe.Config{validCfg, invalidCfg})
	defer pm.Stop()

	_, validExists := pm.probes[validCfg.ID]
	_, invalidExists := pm.probes[invalidCfg.ID]
	assert.True(t, validExists)
	assert.False(t, invalidExists)
}
|
||||
|
||||
// TestProbeManagerSyncProbesStopsRemovedTasksButKeepsExisting checks that
// SyncProbes cancels tasks whose config disappeared while leaving unchanged
// tasks running with their original (same-identity) task objects.
func TestProbeManagerSyncProbesStopsRemovedTasksButKeepsExisting(t *testing.T) {
	keepCfg := probe.Config{ID: "probe-1", Target: "ignored", Protocol: "noop", Interval: 10}
	removeCfg := probe.Config{ID: "probe-2", Target: "ignored", Protocol: "noop", Interval: 10}

	keptTask := &probeTask{config: keepCfg, cancel: make(chan struct{})}
	removedTask := &probeTask{config: removeCfg, cancel: make(chan struct{})}
	pm := &ProbeManager{
		probes: map[string]*probeTask{
			keepCfg.ID:   keptTask,
			removeCfg.ID: removedTask,
		},
	}

	// Sync with only the kept config: probe-2 should be cancelled and removed.
	pm.SyncProbes([]probe.Config{keepCfg})

	assert.Same(t, keptTask, pm.probes[keepCfg.ID])
	_, exists := pm.probes[removeCfg.ID]
	assert.False(t, exists)

	// Removed task's cancel channel must be closed (non-blocking receive succeeds).
	select {
	case <-removedTask.cancel:
	default:
		t.Fatal("expected removed probe task to be cancelled")
	}

	// Kept task's cancel channel must still be open.
	select {
	case <-keptTask.cancel:
		t.Fatal("expected existing probe task to remain active")
	default:
	}
}
|
||||
|
||||
// TestProbeManagerSyncProbesRestartsChangedConfig checks that when a synced
// config differs from the running one (same ID, new target), the old task is
// cancelled and replaced by a fresh task carrying the updated config.
func TestProbeManagerSyncProbesRestartsChangedConfig(t *testing.T) {
	originalCfg := probe.Config{ID: "probe-1", Target: "ignored-a", Protocol: "noop", Interval: 10}
	updatedCfg := probe.Config{ID: "probe-1", Target: "ignored-b", Protocol: "noop", Interval: 10}
	originalTask := &probeTask{config: originalCfg, cancel: make(chan struct{})}
	pm := &ProbeManager{
		probes: map[string]*probeTask{
			originalCfg.ID: originalTask,
		},
	}

	pm.SyncProbes([]probe.Config{updatedCfg})
	defer pm.Stop()

	restartedTask := pm.probes[updatedCfg.ID]
	assert.NotSame(t, originalTask, restartedTask)
	assert.Equal(t, updatedCfg, restartedTask.config)

	// The replaced task's cancel channel must be closed.
	select {
	case <-originalTask.cancel:
	default:
		t.Fatal("expected changed probe task to be cancelled")
	}
}
|
||||
|
||||
// TestProbeManagerApplySyncUpsertRunsImmediatelyAndReturnsResult checks that
// an upsert sync request with RunNow executes the probe synchronously (HTTP
// probe against a local test server), returns its result, and records the
// sample on the new task.
func TestProbeManagerApplySyncUpsertRunsImmediatelyAndReturnsResult(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	}))
	defer server.Close()

	pm := &ProbeManager{
		probes:     make(map[string]*probeTask),
		httpClient: server.Client(),
	}

	resp, err := pm.HandleSyncRequest(probe.SyncRequest{
		Action: probe.SyncActionUpsert,
		Config: probe.Config{ID: "probe-1", Target: server.URL, Protocol: "http", Interval: 10},
		RunNow: true,
	})
	defer pm.Stop()

	require.NoError(t, err)
	assert.GreaterOrEqual(t, resp.Result.AvgResponse, int64(0))
	assert.Equal(t, 0.0, resp.Result.PacketLoss)
	assert.Equal(t, 0.0, resp.Result.PacketLoss1h)

	// The immediate run must have left exactly one sample on the task.
	task := pm.probes["probe-1"]
	require.NotNil(t, task)
	task.mu.Lock()
	defer task.mu.Unlock()
	require.Len(t, task.samples, 1)
}
|
||||
|
||||
// TestProbeManagerUpsertProbeKeepsHistoryWhenOnlyIntervalChanges checks that
// upserting a config whose only change is the interval replaces the task
// object (old one cancelled) but carries the sample/bucket history over.
func TestProbeManagerUpsertProbeKeepsHistoryWhenOnlyIntervalChanges(t *testing.T) {
	originalCfg := probe.Config{ID: "probe-1", Target: "1.1.1.1", Protocol: "icmp", Interval: 10}
	updatedCfg := probe.Config{ID: "probe-1", Target: "1.1.1.1", Protocol: "icmp", Interval: 30}
	now := time.Now().UTC()

	existingTask := &probeTask{config: originalCfg, cancel: make(chan struct{})}
	existingTask.addSampleLocked(probeSample{responseUs: 12, timestamp: now.Add(-50 * time.Minute)})
	existingTask.addSampleLocked(probeSample{responseUs: 24, timestamp: now.Add(-30 * time.Second)})

	pm := &ProbeManager{
		probes: map[string]*probeTask{originalCfg.ID: existingTask},
	}

	// RunNow=false → no immediate probe, so no result is returned.
	result, err := pm.UpsertProbe(updatedCfg, false)
	defer pm.Stop()

	require.NoError(t, err)
	assert.Nil(t, result)

	updatedTask := pm.probes[updatedCfg.ID]
	require.NotNil(t, updatedTask)
	assert.NotSame(t, existingTask, updatedTask)
	assert.Equal(t, updatedCfg, updatedTask.config)

	// Raw samples: only the recent one survives trimming...
	updatedTask.mu.Lock()
	defer updatedTask.mu.Unlock()
	require.Len(t, updatedTask.samples, 1)
	assert.Equal(t, int64(24), updatedTask.samples[0].responseUs)

	// ...but the hour aggregate still sees both carried-over samples.
	agg := updatedTask.aggregateLocked(time.Hour, now)
	require.True(t, agg.hasData())
	assert.Equal(t, int64(2), agg.totalCount)
	assert.Equal(t, int64(2), agg.successCount)
	assert.Equal(t, int64(18), agg.avgResponse())

	select {
	case <-existingTask.cancel:
	default:
		t.Fatal("expected original probe task to be cancelled")
	}
}
|
||||
|
||||
// TestProbeManagerApplySyncDeleteRemovesTask checks that a delete sync request
// removes the task from the manager and closes its cancel channel.
func TestProbeManagerApplySyncDeleteRemovesTask(t *testing.T) {
	config := probe.Config{ID: "probe-1", Target: "1.1.1.1", Protocol: "icmp", Interval: 10}
	task := &probeTask{config: config, cancel: make(chan struct{})}
	pm := &ProbeManager{
		probes: map[string]*probeTask{config.ID: task},
	}

	_, err := pm.HandleSyncRequest(probe.SyncRequest{
		Action: probe.SyncActionDelete,
		Config: probe.Config{ID: config.ID},
	})

	require.NoError(t, err)
	_, exists := pm.probes[config.ID]
	assert.False(t, exists)

	// Deleted task's cancel channel must be closed.
	select {
	case <-task.cancel:
	default:
		t.Fatal("expected deleted probe task to be cancelled")
	}
}
|
||||
|
||||
func TestProbeManagerGetRandomDelay(t *testing.T) {
|
||||
for i := 1000; i < 360_000; i += 1000 {
|
||||
delay := getStagger(int64(i))
|
||||
assert.GreaterOrEqual(t, delay, time.Duration(i/2)*time.Millisecond)
|
||||
assert.LessOrEqual(t, delay, time.Duration(i)*time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
// TestProbeHTTP exercises probeHTTP against a local httptest server: a 2xx
// status yields a non-negative latency, while a 5xx status yields -1 and an
// error.
func TestProbeHTTP(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNoContent)
		}))
		defer server.Close()

		responseUs, err := probeHTTP(server.Client(), server.URL)
		require.NoError(t, err)
		assert.GreaterOrEqual(t, responseUs, int64(0))
	})

	t.Run("server error", func(t *testing.T) {
		server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			http.Error(w, "boom", http.StatusInternalServerError)
		}))
		defer server.Close()

		responseUs, err := probeHTTP(server.Client(), server.URL)
		assert.Equal(t, int64(-1), responseUs)
		require.Error(t, err)
	})
}
|
||||
|
||||
// TestProbeTCP exercises probeTCP against a local listener: a live listener
// yields a non-negative latency, while a just-closed port yields -1 and an
// error.
func TestProbeTCP(t *testing.T) {
	t.Run("success", func(t *testing.T) {
		listener, err := net.Listen("tcp", "127.0.0.1:0")
		require.NoError(t, err)
		defer listener.Close()

		// Accept exactly one connection in the background so the probe's dial
		// completes; closing `accepted` signals the goroutine finished.
		accepted := make(chan struct{})
		go func() {
			defer close(accepted)
			conn, err := listener.Accept()
			if err == nil {
				_ = conn.Close()
			}
		}()

		port := uint16(listener.Addr().(*net.TCPAddr).Port)
		responseUs, err := probeTCP("127.0.0.1", port)
		require.NoError(t, err)
		assert.GreaterOrEqual(t, responseUs, int64(0))
		<-accepted
	})

	t.Run("connection failure", func(t *testing.T) {
		// Grab an ephemeral port, then close it so the subsequent dial fails.
		listener, err := net.Listen("tcp", "127.0.0.1:0")
		require.NoError(t, err)

		port := uint16(listener.Addr().(*net.TCPAddr).Port)
		require.NoError(t, listener.Close())

		responseUs, err := probeTCP("127.0.0.1", port)
		assert.Equal(t, int64(-1), responseUs)
		require.Error(t, err)
	})
}
|
||||
@@ -19,10 +19,16 @@ import (
|
||||
"github.com/shirou/gopsutil/v4/sensors"
|
||||
)
|
||||
|
||||
var errTemperatureFetchTimeout = errors.New("temperature collection timed out")
|
||||
|
||||
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
||||
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
||||
|
||||
type SensorConfig struct {
|
||||
context context.Context
|
||||
sensors map[string]struct{}
|
||||
primarySensor string
|
||||
timeout time.Duration
|
||||
isBlacklist bool
|
||||
hasWildcards bool
|
||||
skipCollection bool
|
||||
@@ -34,24 +40,27 @@ func (a *Agent) newSensorConfig() *SensorConfig {
|
||||
sysSensors, _ := utils.GetEnv("SYS_SENSORS")
|
||||
sensorsEnvVal, sensorsSet := utils.GetEnv("SENSORS")
|
||||
skipCollection := sensorsSet && sensorsEnvVal == ""
|
||||
sensorsTimeout, _ := utils.GetEnv("SENSORS_TIMEOUT")
|
||||
|
||||
return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, skipCollection)
|
||||
return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, sensorsTimeout, skipCollection)
|
||||
}
|
||||
|
||||
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
||||
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
||||
|
||||
var (
|
||||
errTemperatureFetchTimeout = errors.New("temperature collection timed out")
|
||||
temperatureFetchTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
// newSensorConfigWithEnv creates a SensorConfig with the provided environment variables
|
||||
// sensorsSet indicates if the SENSORS environment variable was explicitly set (even to empty string)
|
||||
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal string, skipCollection bool) *SensorConfig {
|
||||
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, sensorsTimeout string, skipCollection bool) *SensorConfig {
|
||||
timeout := 2 * time.Second
|
||||
if sensorsTimeout != "" {
|
||||
if d, err := time.ParseDuration(sensorsTimeout); err == nil {
|
||||
timeout = d
|
||||
} else {
|
||||
slog.Warn("Invalid SENSORS_TIMEOUT", "value", sensorsTimeout)
|
||||
}
|
||||
}
|
||||
|
||||
config := &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: primarySensor,
|
||||
timeout: timeout,
|
||||
skipCollection: skipCollection,
|
||||
firstRun: true,
|
||||
sensors: make(map[string]struct{}),
|
||||
@@ -171,7 +180,7 @@ func (a *Agent) getTempsWithTimeout(getTemps getTempsFn) ([]sensors.TemperatureS
|
||||
|
||||
// Use a longer timeout on the first run to allow for initialization
|
||||
// (e.g. Windows LHM subprocess startup)
|
||||
timeout := temperatureFetchTimeout
|
||||
timeout := a.sensorConfig.timeout
|
||||
if a.sensorConfig.firstRun {
|
||||
a.sensorConfig.firstRun = false
|
||||
timeout = 10 * time.Second
|
||||
|
||||
@@ -168,6 +168,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
primarySensor string
|
||||
sysSensors string
|
||||
sensors string
|
||||
sensorsTimeout string
|
||||
skipCollection bool
|
||||
expectedConfig *SensorConfig
|
||||
}{
|
||||
@@ -179,12 +180,37 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{},
|
||||
isBlacklist: false,
|
||||
hasWildcards: false,
|
||||
skipCollection: false,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Custom timeout",
|
||||
primarySensor: "",
|
||||
sysSensors: "",
|
||||
sensors: "",
|
||||
sensorsTimeout: "5s",
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
timeout: 5 * time.Second,
|
||||
sensors: map[string]struct{}{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Invalid timeout falls back to default",
|
||||
primarySensor: "",
|
||||
sysSensors: "",
|
||||
sensors: "",
|
||||
sensorsTimeout: "notaduration",
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Explicitly set to empty string",
|
||||
primarySensor: "",
|
||||
@@ -194,6 +220,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{},
|
||||
isBlacklist: false,
|
||||
hasWildcards: false,
|
||||
@@ -208,6 +235,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{},
|
||||
isBlacklist: false,
|
||||
hasWildcards: false,
|
||||
@@ -221,6 +249,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{
|
||||
"cpu_temp": {},
|
||||
"gpu_temp": {},
|
||||
@@ -237,6 +266,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{
|
||||
"cpu_temp": {},
|
||||
"gpu_temp": {},
|
||||
@@ -253,6 +283,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{
|
||||
"cpu_*": {},
|
||||
"gpu_temp": {},
|
||||
@@ -269,6 +300,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
expectedConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{
|
||||
"cpu_*": {},
|
||||
"gpu_temp": {},
|
||||
@@ -284,6 +316,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
sensors: "cpu_temp",
|
||||
expectedConfig: &SensorConfig{
|
||||
primarySensor: "cpu_temp",
|
||||
timeout: 2 * time.Second,
|
||||
sensors: map[string]struct{}{
|
||||
"cpu_temp": {},
|
||||
},
|
||||
@@ -295,7 +328,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := agent.newSensorConfigWithEnv(tt.primarySensor, tt.sysSensors, tt.sensors, tt.skipCollection)
|
||||
result := agent.newSensorConfigWithEnv(tt.primarySensor, tt.sysSensors, tt.sensors, tt.sensorsTimeout, tt.skipCollection)
|
||||
|
||||
// Check primary sensor
|
||||
assert.Equal(t, tt.expectedConfig.primarySensor, result.primarySensor)
|
||||
@@ -314,6 +347,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
||||
// Check flags
|
||||
assert.Equal(t, tt.expectedConfig.isBlacklist, result.isBlacklist)
|
||||
assert.Equal(t, tt.expectedConfig.hasWildcards, result.hasWildcards)
|
||||
assert.Equal(t, tt.expectedConfig.timeout, result.timeout)
|
||||
|
||||
// Check context
|
||||
if tt.sysSensors != "" {
|
||||
@@ -333,12 +367,14 @@ func TestNewSensorConfig(t *testing.T) {
|
||||
t.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", "test_primary")
|
||||
t.Setenv("BESZEL_AGENT_SYS_SENSORS", "/test/path")
|
||||
t.Setenv("BESZEL_AGENT_SENSORS", "test_sensor1,test_*,test_sensor3")
|
||||
t.Setenv("BESZEL_AGENT_SENSORS_TIMEOUT", "7s")
|
||||
|
||||
agent := &Agent{}
|
||||
result := agent.newSensorConfig()
|
||||
|
||||
// Verify results
|
||||
assert.Equal(t, "test_primary", result.primarySensor)
|
||||
assert.Equal(t, 7*time.Second, result.timeout)
|
||||
assert.NotNil(t, result.sensors)
|
||||
assert.Equal(t, 3, len(result.sensors))
|
||||
assert.True(t, result.hasWildcards)
|
||||
@@ -532,15 +568,10 @@ func TestGetTempsWithTimeout(t *testing.T) {
|
||||
agent := &Agent{
|
||||
sensorConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
timeout: 10 * time.Millisecond,
|
||||
},
|
||||
}
|
||||
|
||||
originalTimeout := temperatureFetchTimeout
|
||||
t.Cleanup(func() {
|
||||
temperatureFetchTimeout = originalTimeout
|
||||
})
|
||||
temperatureFetchTimeout = 10 * time.Millisecond
|
||||
|
||||
t.Run("returns temperatures before timeout", func(t *testing.T) {
|
||||
temps, err := agent.getTempsWithTimeout(func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||
return []sensors.TemperatureStat{{SensorKey: "cpu_temp", Temperature: 42}}, nil
|
||||
@@ -567,15 +598,13 @@ func TestUpdateTemperaturesSkipsOnTimeout(t *testing.T) {
|
||||
systemInfo: system.Info{DashboardTemp: 99},
|
||||
sensorConfig: &SensorConfig{
|
||||
context: context.Background(),
|
||||
timeout: 10 * time.Millisecond,
|
||||
},
|
||||
}
|
||||
|
||||
originalTimeout := temperatureFetchTimeout
|
||||
t.Cleanup(func() {
|
||||
temperatureFetchTimeout = originalTimeout
|
||||
getSensorTemps = sensors.TemperaturesWithContext
|
||||
})
|
||||
temperatureFetchTimeout = 10 * time.Millisecond
|
||||
getSensorTemps = func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
return nil, nil
|
||||
|
||||
@@ -25,12 +25,15 @@ import (
|
||||
// SmartManager manages data collection for SMART devices
|
||||
type SmartManager struct {
|
||||
sync.Mutex
|
||||
SmartDataMap map[string]*smart.SmartData
|
||||
SmartDevices []*DeviceInfo
|
||||
refreshMutex sync.Mutex
|
||||
lastScanTime time.Time
|
||||
smartctlPath string
|
||||
excludedDevices map[string]struct{}
|
||||
SmartDataMap map[string]*smart.SmartData
|
||||
SmartDevices []*DeviceInfo
|
||||
refreshMutex sync.Mutex
|
||||
lastScanTime time.Time
|
||||
smartctlPath string
|
||||
excludedDevices map[string]struct{}
|
||||
darwinNvmeOnce sync.Once
|
||||
darwinNvmeCapacity map[string]uint64 // serial → bytes cache, written once via darwinNvmeOnce
|
||||
darwinNvmeProvider func() ([]byte, error) // overridable for testing
|
||||
}
|
||||
|
||||
type scanOutput struct {
|
||||
@@ -1033,6 +1036,52 @@ func parseScsiGigabytesProcessed(value string) int64 {
|
||||
return parsed
|
||||
}
|
||||
|
||||
// lookupDarwinNvmeCapacity returns the capacity in bytes for a given NVMe serial number on Darwin.
|
||||
// It uses system_profiler SPNVMeDataType to get capacity since Apple SSDs don't report user_capacity
|
||||
// via smartctl. Results are cached after the first call via sync.Once.
|
||||
func (sm *SmartManager) lookupDarwinNvmeCapacity(serial string) uint64 {
|
||||
sm.darwinNvmeOnce.Do(func() {
|
||||
sm.darwinNvmeCapacity = make(map[string]uint64)
|
||||
|
||||
provider := sm.darwinNvmeProvider
|
||||
if provider == nil {
|
||||
provider = func() ([]byte, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
return exec.CommandContext(ctx, "system_profiler", "SPNVMeDataType", "-json").Output()
|
||||
}
|
||||
}
|
||||
|
||||
out, err := provider()
|
||||
if err != nil {
|
||||
slog.Debug("system_profiler NVMe lookup failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
var result struct {
|
||||
SPNVMeDataType []struct {
|
||||
Items []struct {
|
||||
DeviceSerial string `json:"device_serial"`
|
||||
SizeInBytes uint64 `json:"size_in_bytes"`
|
||||
} `json:"_items"`
|
||||
} `json:"SPNVMeDataType"`
|
||||
}
|
||||
if err := json.Unmarshal(out, &result); err != nil {
|
||||
slog.Debug("system_profiler NVMe parse failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, controller := range result.SPNVMeDataType {
|
||||
for _, item := range controller.Items {
|
||||
if item.DeviceSerial != "" && item.SizeInBytes > 0 {
|
||||
sm.darwinNvmeCapacity[item.DeviceSerial] = item.SizeInBytes
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
return sm.darwinNvmeCapacity[serial]
|
||||
}
|
||||
|
||||
// parseSmartForNvme parses the output of smartctl --all -j /dev/nvmeX and updates the SmartDataMap
|
||||
// Returns hasValidData and exitStatus
|
||||
func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
||||
@@ -1069,6 +1118,12 @@ func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
||||
smartData.SerialNumber = data.SerialNumber
|
||||
smartData.FirmwareVersion = data.FirmwareVersion
|
||||
smartData.Capacity = data.UserCapacity.Bytes
|
||||
if smartData.Capacity == 0 {
|
||||
smartData.Capacity = data.NVMeTotalCapacity
|
||||
}
|
||||
if smartData.Capacity == 0 && (runtime.GOOS == "darwin" || sm.darwinNvmeProvider != nil) {
|
||||
smartData.Capacity = sm.lookupDarwinNvmeCapacity(data.SerialNumber)
|
||||
}
|
||||
smartData.Temperature = data.NVMeSmartHealthInformationLog.Temperature
|
||||
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
||||
smartData.DiskName = data.Device.Name
|
||||
|
||||
@@ -1199,3 +1199,81 @@ func TestIsNvmeControllerPath(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseSmartForNvmeAppleSSD(t *testing.T) {
|
||||
// Apple SSDs don't report user_capacity via smartctl; capacity should be fetched
|
||||
// from system_profiler via the darwinNvmeProvider fallback.
|
||||
fixturePath := filepath.Join("test-data", "smart", "apple_nvme.json")
|
||||
data, err := os.ReadFile(fixturePath)
|
||||
require.NoError(t, err)
|
||||
|
||||
providerCalls := 0
|
||||
fakeProvider := func() ([]byte, error) {
|
||||
providerCalls++
|
||||
return []byte(`{
|
||||
"SPNVMeDataType": [{
|
||||
"_items": [{
|
||||
"device_serial": "0ba0147940253c15",
|
||||
"size_in_bytes": 251000193024
|
||||
}]
|
||||
}]
|
||||
}`), nil
|
||||
}
|
||||
|
||||
sm := &SmartManager{
|
||||
SmartDataMap: make(map[string]*smart.SmartData),
|
||||
darwinNvmeProvider: fakeProvider,
|
||||
}
|
||||
|
||||
hasData, _ := sm.parseSmartForNvme(data)
|
||||
require.True(t, hasData)
|
||||
|
||||
deviceData, ok := sm.SmartDataMap["0ba0147940253c15"]
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "APPLE SSD AP0256Q", deviceData.ModelName)
|
||||
assert.Equal(t, uint64(251000193024), deviceData.Capacity)
|
||||
assert.Equal(t, uint8(42), deviceData.Temperature)
|
||||
assert.Equal(t, "PASSED", deviceData.SmartStatus)
|
||||
assert.Equal(t, 1, providerCalls, "system_profiler should be called once")
|
||||
|
||||
// Second parse: provider should NOT be called again (cache hit)
|
||||
_, _ = sm.parseSmartForNvme(data)
|
||||
assert.Equal(t, 1, providerCalls, "system_profiler should not be called again after caching")
|
||||
}
|
||||
|
||||
func TestLookupDarwinNvmeCapacityMultipleDisks(t *testing.T) {
|
||||
fakeProvider := func() ([]byte, error) {
|
||||
return []byte(`{
|
||||
"SPNVMeDataType": [
|
||||
{
|
||||
"_items": [
|
||||
{"device_serial": "serial-disk0", "size_in_bytes": 251000193024},
|
||||
{"device_serial": "serial-disk1", "size_in_bytes": 1000204886016}
|
||||
]
|
||||
},
|
||||
{
|
||||
"_items": [
|
||||
{"device_serial": "serial-disk2", "size_in_bytes": 512110190592}
|
||||
]
|
||||
}
|
||||
]
|
||||
}`), nil
|
||||
}
|
||||
|
||||
sm := &SmartManager{darwinNvmeProvider: fakeProvider}
|
||||
assert.Equal(t, uint64(251000193024), sm.lookupDarwinNvmeCapacity("serial-disk0"))
|
||||
assert.Equal(t, uint64(1000204886016), sm.lookupDarwinNvmeCapacity("serial-disk1"))
|
||||
assert.Equal(t, uint64(512110190592), sm.lookupDarwinNvmeCapacity("serial-disk2"))
|
||||
assert.Equal(t, uint64(0), sm.lookupDarwinNvmeCapacity("unknown-serial"))
|
||||
}
|
||||
|
||||
func TestLookupDarwinNvmeCapacityProviderError(t *testing.T) {
|
||||
fakeProvider := func() ([]byte, error) {
|
||||
return nil, errors.New("system_profiler not found")
|
||||
}
|
||||
|
||||
sm := &SmartManager{darwinNvmeProvider: fakeProvider}
|
||||
assert.Equal(t, uint64(0), sm.lookupDarwinNvmeCapacity("any-serial"))
|
||||
// Cache should be initialized even on error so we don't retry (Once already fired)
|
||||
assert.NotNil(t, sm.darwinNvmeCapacity)
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
"github.com/henrygd/beszel/agent/battery"
|
||||
@@ -23,13 +22,6 @@ import (
|
||||
"github.com/shirou/gopsutil/v4/mem"
|
||||
)
|
||||
|
||||
// prevDisk stores previous per-device disk counters for a given cache interval.
// Deltas against these values yield read/write throughput for the interval.
type prevDisk struct {
	readBytes  uint64    // cumulative bytes read at the time of the last sample
	writeBytes uint64    // cumulative bytes written at the time of the last sample
	at         time.Time // when the counters were sampled
}
|
||||
|
||||
// Sets initial / non-changing values about the host system
|
||||
func (a *Agent) refreshSystemDetails() {
|
||||
a.systemInfo.AgentVersion = beszel.Version
|
||||
|
||||
51
agent/test-data/smart/apple_nvme.json
Normal file
51
agent/test-data/smart/apple_nvme.json
Normal file
@@ -0,0 +1,51 @@
|
||||
{
|
||||
"json_format_version": [1, 0],
|
||||
"smartctl": {
|
||||
"version": [7, 4],
|
||||
"argv": ["smartctl", "-aix", "-j", "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1"],
|
||||
"exit_status": 4
|
||||
},
|
||||
"device": {
|
||||
"name": "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1",
|
||||
"info_name": "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1",
|
||||
"type": "nvme",
|
||||
"protocol": "NVMe"
|
||||
},
|
||||
"model_name": "APPLE SSD AP0256Q",
|
||||
"serial_number": "0ba0147940253c15",
|
||||
"firmware_version": "555",
|
||||
"smart_support": {
|
||||
"available": true,
|
||||
"enabled": true
|
||||
},
|
||||
"smart_status": {
|
||||
"passed": true,
|
||||
"nvme": {
|
||||
"value": 0
|
||||
}
|
||||
},
|
||||
"nvme_smart_health_information_log": {
|
||||
"critical_warning": 0,
|
||||
"temperature": 42,
|
||||
"available_spare": 100,
|
||||
"available_spare_threshold": 99,
|
||||
"percentage_used": 1,
|
||||
"data_units_read": 270189386,
|
||||
"data_units_written": 166753862,
|
||||
"host_reads": 7543766995,
|
||||
"host_writes": 3761621926,
|
||||
"controller_busy_time": 0,
|
||||
"power_cycles": 366,
|
||||
"power_on_hours": 2850,
|
||||
"unsafe_shutdowns": 195,
|
||||
"media_errors": 0,
|
||||
"num_err_log_entries": 0
|
||||
},
|
||||
"temperature": {
|
||||
"current": 42
|
||||
},
|
||||
"power_cycle_count": 366,
|
||||
"power_on_time": {
|
||||
"hours": 2850
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,8 @@
|
||||
// Package utils provides utility functions for the agent.
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
@@ -68,6 +70,9 @@ func ReadStringFileLimited(path string, maxSize int) (string, error) {
|
||||
if err != nil && err != io.EOF {
|
||||
return "", err
|
||||
}
|
||||
if n < 0 {
|
||||
return "", fmt.Errorf("%s returned negative bytes: %d", path, n)
|
||||
}
|
||||
return strings.TrimSpace(string(buf[:n])), nil
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ import "github.com/blang/semver"
|
||||
|
||||
const (
|
||||
// Version is the current version of the application.
|
||||
Version = "0.18.6"
|
||||
Version = "0.18.7"
|
||||
// AppName is the name of the application.
|
||||
AppName = "beszel"
|
||||
)
|
||||
|
||||
13
go.mod
13
go.mod
@@ -5,24 +5,25 @@ go 1.26.1
|
||||
require (
|
||||
github.com/blang/semver v3.5.1+incompatible
|
||||
github.com/coreos/go-systemd/v22 v22.7.0
|
||||
github.com/distatus/battery v0.11.0
|
||||
github.com/ebitengine/purego v0.10.0
|
||||
github.com/fxamacker/cbor/v2 v2.9.0
|
||||
github.com/gliderlabs/ssh v0.3.8
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/lxzan/gws v1.9.1
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.1
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.3
|
||||
github.com/pocketbase/dbx v1.12.0
|
||||
github.com/pocketbase/pocketbase v0.36.7
|
||||
github.com/shirou/gopsutil/v4 v4.26.2
|
||||
github.com/pocketbase/pocketbase v0.36.8
|
||||
github.com/shirou/gopsutil/v4 v4.26.3
|
||||
github.com/spf13/cast v1.10.0
|
||||
github.com/spf13/cobra v1.10.2
|
||||
github.com/spf13/pflag v1.0.10
|
||||
github.com/stretchr/testify v1.11.1
|
||||
golang.org/x/crypto v0.49.0
|
||||
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90
|
||||
golang.org/x/net v0.52.0
|
||||
golang.org/x/sys v0.42.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
howett.net/plist v1.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -56,14 +57,12 @@ require (
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
golang.org/x/image v0.38.0 // indirect
|
||||
golang.org/x/net v0.52.0 // indirect
|
||||
golang.org/x/oauth2 v0.36.0 // indirect
|
||||
golang.org/x/sync v0.20.0 // indirect
|
||||
golang.org/x/term v0.41.0 // indirect
|
||||
golang.org/x/text v0.35.0 // indirect
|
||||
howett.net/plist v1.0.1 // indirect
|
||||
modernc.org/libc v1.70.0 // indirect
|
||||
modernc.org/mathutil v1.7.1 // indirect
|
||||
modernc.org/memory v1.11.0 // indirect
|
||||
modernc.org/sqlite v1.46.2 // indirect
|
||||
modernc.org/sqlite v1.48.0 // indirect
|
||||
)
|
||||
|
||||
18
go.sum
18
go.sum
@@ -17,8 +17,6 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
|
||||
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
||||
github.com/distatus/battery v0.11.0 h1:KJk89gz90Iq/wJtbjjM9yUzBXV+ASV/EG2WOOL7N8lc=
|
||||
github.com/distatus/battery v0.11.0/go.mod h1:KmVkE8A8hpIX4T78QRdMktYpEp35QfOL8A8dwZBxq2k=
|
||||
github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8=
|
||||
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
@@ -87,8 +85,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.1 h1:6sx4cJNfNuUtD6ygGlB0dqcCQ+abfsUh+b+6jgujf6A=
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.1/go.mod h1:U7IywBkLpBV7rgn8iLbQ9/LklJG1gm24bFv5cXXsDKs=
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.3 h1:aBX2iw9a7jl5wfHd3bi9LnS5ucoYIy6KcLH9XVF+gig=
|
||||
github.com/nicholas-fedor/shoutrrr v0.14.3/go.mod h1:U7IywBkLpBV7rgn8iLbQ9/LklJG1gm24bFv5cXXsDKs=
|
||||
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
|
||||
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
|
||||
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
|
||||
@@ -98,8 +96,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pocketbase/dbx v1.12.0 h1:/oLErM+A0b4xI0PWTGPqSDVjzix48PqI/bng2l0PzoA=
|
||||
github.com/pocketbase/dbx v1.12.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
||||
github.com/pocketbase/pocketbase v0.36.7 h1:MrViB7BptPYrf2Nt25pJEYBqUdFjuhRKu1p5GTrkvPA=
|
||||
github.com/pocketbase/pocketbase v0.36.7/go.mod h1:qX4HuVjoKXtEg41fSJVM0JLfGWXbBmHxVv/FaE446r4=
|
||||
github.com/pocketbase/pocketbase v0.36.8 h1:gCNqoesZ44saYOD3J7edhi5nDwUWKyQG7boM/kVwz2c=
|
||||
github.com/pocketbase/pocketbase v0.36.8/go.mod h1:OY4WaXbP0WnF/EXoBbboWJK+ZSZ1A85tiA0sjrTKxTA=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
@@ -107,8 +105,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
|
||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shirou/gopsutil/v4 v4.26.2 h1:X8i6sicvUFih4BmYIGT1m2wwgw2VG9YgrDTi7cIRGUI=
|
||||
github.com/shirou/gopsutil/v4 v4.26.2/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||
github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc=
|
||||
github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
@@ -199,8 +197,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.46.2 h1:gkXQ6R0+AjxFC/fTDaeIVLbNLNrRoOK7YYVz5BKhTcE=
|
||||
modernc.org/sqlite v1.46.2/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
|
||||
modernc.org/sqlite v1.48.0 h1:ElZyLop3Q2mHYk5IFPPXADejZrlHu7APbpB0sF78bq4=
|
||||
modernc.org/sqlite v1.48.0/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
|
||||
@@ -302,21 +302,6 @@ func (am *AlertManager) SendShoutrrrAlert(notificationUrl, title, message, link,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (am *AlertManager) SendTestNotification(e *core.RequestEvent) error {
|
||||
var data struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
err := e.BindBody(&data)
|
||||
if err != nil || data.URL == "" {
|
||||
return e.BadRequestError("URL is required", err)
|
||||
}
|
||||
err = am.SendShoutrrrAlert(data.URL, "Test Alert", "This is a notification from Beszel.", am.hub.Settings().Meta.AppURL, "View Beszel")
|
||||
if err != nil {
|
||||
return e.JSON(200, map[string]string{"err": err.Error()})
|
||||
}
|
||||
return e.JSON(200, map[string]bool{"err": false})
|
||||
}
|
||||
|
||||
// setAlertTriggered updates the "triggered" status of an alert record in the database
|
||||
func (am *AlertManager) setAlertTriggered(alert CachedAlertData, triggered bool) error {
|
||||
alertRecord, err := am.hub.FindRecordById("alerts", alert.Id)
|
||||
|
||||
@@ -3,7 +3,11 @@ package alerts
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
@@ -117,3 +121,72 @@ func DeleteUserAlerts(e *core.RequestEvent) error {
|
||||
|
||||
return e.JSON(http.StatusOK, map[string]any{"success": true, "count": numDeleted})
|
||||
}
|
||||
|
||||
// SendTestNotification handles API request to send a test notification to a specified Shoutrrr URL
|
||||
func (am *AlertManager) SendTestNotification(e *core.RequestEvent) error {
|
||||
var data struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
err := e.BindBody(&data)
|
||||
if err != nil || data.URL == "" {
|
||||
return e.BadRequestError("URL is required", err)
|
||||
}
|
||||
// Only allow admins to send test notifications to internal URLs
|
||||
if !e.Auth.IsSuperuser() && e.Auth.GetString("role") != "admin" {
|
||||
internalURL, err := isInternalURL(data.URL)
|
||||
if err != nil {
|
||||
return e.BadRequestError(err.Error(), nil)
|
||||
}
|
||||
if internalURL {
|
||||
return e.ForbiddenError("Only admins can send to internal destinations", nil)
|
||||
}
|
||||
}
|
||||
err = am.SendShoutrrrAlert(data.URL, "Test Alert", "This is a notification from Beszel.", am.hub.Settings().Meta.AppURL, "View Beszel")
|
||||
if err != nil {
|
||||
return e.JSON(200, map[string]string{"err": err.Error()})
|
||||
}
|
||||
return e.JSON(200, map[string]bool{"err": false})
|
||||
}
|
||||
|
||||
// isInternalURL checks if the given shoutrrr URL points to an internal destination (localhost or private IP)
|
||||
func isInternalURL(rawURL string) (bool, error) {
|
||||
parsedURL, err := url.Parse(rawURL)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
host := parsedURL.Hostname()
|
||||
if host == "" {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if strings.EqualFold(host, "localhost") {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
return isInternalIP(ip), nil
|
||||
}
|
||||
|
||||
// Some Shoutrrr URLs use the host position for service identifiers rather than a
|
||||
// network hostname (for example, discord://token@webhookid). Restrict DNS lookups
|
||||
// to names that look like actual hostnames so valid service URLs keep working.
|
||||
if !strings.Contains(host, ".") {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
ips, err := net.LookupIP(host)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if slices.ContainsFunc(ips, isInternalIP) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// isInternalIP reports whether ip is a non-public address:
// private range, loopback, or unspecified (0.0.0.0 / ::).
func isInternalIP(ip net.IP) bool {
	if ip.IsLoopback() {
		return true
	}
	return ip.IsPrivate() || ip.IsUnspecified()
}
|
||||
|
||||
501
internal/alerts/alerts_api_test.go
Normal file
501
internal/alerts/alerts_api_test.go
Normal file
@@ -0,0 +1,501 @@
|
||||
//go:build testing
|
||||
|
||||
package alerts_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/alerts"
|
||||
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// jsonReader marshals v to JSON and returns it as an io.Reader
// (for use in ApiScenario.Body). Panics if v cannot be marshaled,
// which in tests surfaces a broken fixture immediately.
func jsonReader(v any) io.Reader {
	encoded, err := json.Marshal(v)
	if err != nil {
		panic(err)
	}
	return bytes.NewReader(encoded)
}
||||
|
||||
func TestIsInternalURL(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
url string
|
||||
internal bool
|
||||
}{
|
||||
{name: "loopback ipv4", url: "generic://127.0.0.1", internal: true},
|
||||
{name: "localhost hostname", url: "generic://localhost", internal: true},
|
||||
{name: "localhost hostname", url: "generic+http://localhost/api/v1/postStuff", internal: true},
|
||||
{name: "localhost hostname", url: "generic+http://127.0.0.1:8080/api/v1/postStuff", internal: true},
|
||||
{name: "localhost hostname", url: "generic+https://beszel.dev/api/v1/postStuff", internal: false},
|
||||
{name: "public ipv4", url: "generic://8.8.8.8", internal: false},
|
||||
{name: "token style service url", url: "discord://abc123@123456789", internal: false},
|
||||
{name: "single label service url", url: "slack://token@team/channel", internal: false},
|
||||
}
|
||||
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
internal, err := alerts.IsInternalURL(testCase.url)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, testCase.internal, internal)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestUserAlertsApi exercises the /api/beszel/user-alerts endpoint end to end:
// auth requirements, body validation, bulk create with and without overwrite,
// and bulk delete scoped to the requesting user.
//
// NOTE(review): scenarios run sequentially against one shared hub, so alert
// counts carry over between cases unless a BeforeTestFunc clears the
// collection (e.g. the "single system" case expects 3 alerts because the
// previous case created 2).
func TestUserAlertsApi(t *testing.T) {
	hub, _ := beszelTests.NewTestHub(t.TempDir())
	defer hub.Cleanup()

	hub.StartHub()

	// Fixtures: user1 owns system1; system2 is shared by user1 and user2.
	user1, _ := beszelTests.CreateUser(hub, "alertstest@example.com", "password")
	user1Token, _ := user1.NewAuthToken()

	user2, _ := beszelTests.CreateUser(hub, "alertstest2@example.com", "password")
	user2Token, _ := user2.NewAuthToken()

	system1, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
		"name":  "system1",
		"users": []string{user1.Id},
		"host":  "127.0.0.1",
	})

	system2, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
		"name":  "system2",
		"users": []string{user1.Id, user2.Id},
		"host":  "127.0.0.2",
	})

	// Sanity-check fixture creation before running any scenarios.
	userRecords, _ := hub.CountRecords("users")
	assert.EqualValues(t, 2, userRecords, "all users should be created")

	systemRecords, _ := hub.CountRecords("systems")
	assert.EqualValues(t, 2, systemRecords, "all systems should be created")

	// Every scenario runs against the same hub-backed test app.
	testAppFactory := func(t testing.TB) *pbTests.TestApp {
		return hub.TestApp
	}

	scenarios := []beszelTests.ApiScenario{
		// {
		// 	Name:            "GET not implemented - returns index",
		// 	Method:          http.MethodGet,
		// 	URL:             "/api/beszel/user-alerts",
		// 	ExpectedStatus:  200,
		// 	ExpectedContent: []string{"<html ", "globalThis.BESZEL"},
		// 	TestAppFactory:  testAppFactory,
		// },
		{
			Name:            "POST no auth",
			Method:          http.MethodPost,
			URL:             "/api/beszel/user-alerts",
			ExpectedStatus:  401,
			ExpectedContent: []string{"requires valid"},
			TestAppFactory:  testAppFactory,
		},
		{
			Name:   "POST no body",
			Method: http.MethodPost,
			URL:    "/api/beszel/user-alerts",
			Headers: map[string]string{
				"Authorization": user1Token,
			},
			ExpectedStatus:  400,
			ExpectedContent: []string{"Bad data"},
			TestAppFactory:  testAppFactory,
		},
		{
			Name:   "POST bad data",
			Method: http.MethodPost,
			URL:    "/api/beszel/user-alerts",
			Headers: map[string]string{
				"Authorization": user1Token,
			},
			ExpectedStatus:  400,
			ExpectedContent: []string{"Bad data"},
			TestAppFactory:  testAppFactory,
			Body: jsonReader(map[string]any{
				"invalidField": "this should cause validation error",
				"threshold":    "not a number",
			}),
		},
		{
			Name:   "POST malformed JSON",
			Method: http.MethodPost,
			URL:    "/api/beszel/user-alerts",
			Headers: map[string]string{
				"Authorization": user1Token,
			},
			ExpectedStatus:  400,
			ExpectedContent: []string{"Bad data"},
			TestAppFactory:  testAppFactory,
			// Trailing comma makes the JSON invalid.
			Body: strings.NewReader(`{"alertType": "cpu", "threshold": 80, "enabled": true,}`),
		},
		{
			Name:   "POST valid alert data multiple systems",
			Method: http.MethodPost,
			URL:    "/api/beszel/user-alerts",
			Headers: map[string]string{
				"Authorization": user1Token,
			},
			ExpectedStatus:  200,
			ExpectedContent: []string{"\"success\":true"},
			TestAppFactory:  testAppFactory,
			Body: jsonReader(map[string]any{
				"name":      "CPU",
				"value":     69,
				"min":       9,
				"systems":   []string{system1.Id, system2.Id},
				"overwrite": false,
			}),
			AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
				// check total alerts
				alerts, _ := app.CountRecords("alerts")
				assert.EqualValues(t, 2, alerts, "should have 2 alerts")
				// check alert has correct values
				matchingAlerts, _ := app.CountRecords("alerts", dbx.HashExp{"name": "CPU", "user": user1.Id, "system": system1.Id, "value": 69, "min": 9})
				assert.EqualValues(t, 1, matchingAlerts, "should have 1 alert")
			},
		},
		{
			Name:   "POST valid alert data single system",
			Method: http.MethodPost,
			URL:    "/api/beszel/user-alerts",
			Headers: map[string]string{
				"Authorization": user1Token,
			},
			ExpectedStatus:  200,
			ExpectedContent: []string{"\"success\":true"},
			TestAppFactory:  testAppFactory,
			Body: jsonReader(map[string]any{
				"name":    "Memory",
				"systems": []string{system1.Id},
				"value":   90,
				"min":     10,
			}),
			AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
				// 2 alerts from the previous scenario + 1 created here.
				user1Alerts, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
				assert.EqualValues(t, 3, user1Alerts, "should have 3 alerts")
			},
		},
		{
			Name:   "Overwrite: false, should not overwrite existing alert",
			Method: http.MethodPost,
			URL:    "/api/beszel/user-alerts",
			Headers: map[string]string{
				"Authorization": user1Token,
			},
			ExpectedStatus:  200,
			ExpectedContent: []string{"\"success\":true"},
			TestAppFactory:  testAppFactory,
			Body: jsonReader(map[string]any{
				"name":      "CPU",
				"value":     45,
				"min":       5,
				"systems":   []string{system1.Id},
				"overwrite": false,
			}),
			BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
				beszelTests.ClearCollection(t, app, "alerts")
				beszelTests.CreateRecord(app, "alerts", map[string]any{
					"name":   "CPU",
					"system": system1.Id,
					"user":   user1.Id,
					"value":  80,
					"min":    10,
				})
			},
			AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
				// The pre-existing alert must keep its original value.
				alerts, _ := app.CountRecords("alerts")
				assert.EqualValues(t, 1, alerts, "should have 1 alert")
				alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user1.Id})
				assert.EqualValues(t, 80, alert.Get("value"), "should have 80 as value")
			},
		},
		{
			Name:   "Overwrite: true, should overwrite existing alert",
			Method: http.MethodPost,
			URL:    "/api/beszel/user-alerts",
			Headers: map[string]string{
				"Authorization": user2Token,
			},
			ExpectedStatus:  200,
			ExpectedContent: []string{"\"success\":true"},
			TestAppFactory:  testAppFactory,
			Body: jsonReader(map[string]any{
				"name":      "CPU",
				"value":     45,
				"min":       5,
				"systems":   []string{system2.Id},
				"overwrite": true,
			}),
			BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
				beszelTests.ClearCollection(t, app, "alerts")
				beszelTests.CreateRecord(app, "alerts", map[string]any{
					"name":   "CPU",
					"system": system2.Id,
					"user":   user2.Id,
					"value":  80,
					"min":    10,
				})
			},
			AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
				// With overwrite the existing alert takes the new value.
				alerts, _ := app.CountRecords("alerts")
				assert.EqualValues(t, 1, alerts, "should have 1 alert")
				alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user2.Id})
				assert.EqualValues(t, 45, alert.Get("value"), "should have 45 as value")
			},
		},
		{
			Name:            "DELETE no auth",
			Method:          http.MethodDelete,
			URL:             "/api/beszel/user-alerts",
			ExpectedStatus:  401,
			ExpectedContent: []string{"requires valid"},
			TestAppFactory:  testAppFactory,
			Body: jsonReader(map[string]any{
				"name":    "CPU",
				"systems": []string{system1.Id},
			}),
			BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
				beszelTests.ClearCollection(t, app, "alerts")
				beszelTests.CreateRecord(app, "alerts", map[string]any{
					"name":   "CPU",
					"system": system1.Id,
					"user":   user1.Id,
					"value":  80,
					"min":    10,
				})
			},
			AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
				// Unauthenticated delete must leave the alert untouched.
				alerts, _ := app.CountRecords("alerts")
				assert.EqualValues(t, 1, alerts, "should have 1 alert")
			},
		},
		{
			Name:   "DELETE alert",
			Method: http.MethodDelete,
			URL:    "/api/beszel/user-alerts",
			Headers: map[string]string{
				"Authorization": user1Token,
			},
			ExpectedStatus:  200,
			ExpectedContent: []string{"\"count\":1", "\"success\":true"},
			TestAppFactory:  testAppFactory,
			Body: jsonReader(map[string]any{
				"name":    "CPU",
				"systems": []string{system1.Id},
			}),
			BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
				beszelTests.ClearCollection(t, app, "alerts")
				beszelTests.CreateRecord(app, "alerts", map[string]any{
					"name":   "CPU",
					"system": system1.Id,
					"user":   user1.Id,
					"value":  80,
					"min":    10,
				})
			},
			AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
				alerts, _ := app.CountRecords("alerts")
				assert.Zero(t, alerts, "should have 0 alerts")
			},
		},
		{
			Name:   "DELETE alert multiple systems",
			Method: http.MethodDelete,
			URL:    "/api/beszel/user-alerts",
			Headers: map[string]string{
				"Authorization": user1Token,
			},
			ExpectedStatus:  200,
			ExpectedContent: []string{"\"count\":2", "\"success\":true"},
			TestAppFactory:  testAppFactory,
			Body: jsonReader(map[string]any{
				"name":    "Memory",
				"systems": []string{system1.Id, system2.Id},
			}),
			BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
				beszelTests.ClearCollection(t, app, "alerts")
				// One "Memory" alert per system, both owned by user1.
				for _, systemId := range []string{system1.Id, system2.Id} {
					_, err := beszelTests.CreateRecord(app, "alerts", map[string]any{
						"name":   "Memory",
						"system": systemId,
						"user":   user1.Id,
						"value":  90,
						"min":    10,
					})
					assert.NoError(t, err, "should create alert")
				}
				alerts, _ := app.CountRecords("alerts")
				assert.EqualValues(t, 2, alerts, "should have 2 alerts")
			},
			AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
				alerts, _ := app.CountRecords("alerts")
				assert.Zero(t, alerts, "should have 0 alerts")
			},
		},
		{
			Name:   "User 2 should not be able to delete alert of user 1",
			Method: http.MethodDelete,
			URL:    "/api/beszel/user-alerts",
			Headers: map[string]string{
				"Authorization": user2Token,
			},
			ExpectedStatus:  200,
			ExpectedContent: []string{"\"count\":1", "\"success\":true"},
			TestAppFactory:  testAppFactory,
			Body: jsonReader(map[string]any{
				"name":    "CPU",
				"systems": []string{system2.Id},
			}),
			BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
				beszelTests.ClearCollection(t, app, "alerts")
				// Matching alerts for both users on the shared system.
				for _, user := range []string{user1.Id, user2.Id} {
					beszelTests.CreateRecord(app, "alerts", map[string]any{
						"name":   "CPU",
						"system": system2.Id,
						"user":   user,
						"value":  80,
						"min":    10,
					})
				}
				alerts, _ := app.CountRecords("alerts")
				assert.EqualValues(t, 2, alerts, "should have 2 alerts")
				user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
				assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
				user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
				assert.EqualValues(t, 1, user2AlertCount, "should have 1 alert")
			},
			AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
				// Only user2's own alert is deleted; user1's survives.
				user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
				assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
				user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
				assert.Zero(t, user2AlertCount, "should have 0 alerts")
			},
		},
	}

	for _, scenario := range scenarios {
		scenario.Test(t)
	}
}
|
||||
func TestSendTestNotification(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
userToken, err := user.NewAuthToken()
|
||||
|
||||
adminUser, err := beszelTests.CreateUserWithRole(hub, "admin@example.com", "password123", "admin")
|
||||
assert.NoError(t, err, "Failed to create admin user")
|
||||
adminUserToken, err := adminUser.NewAuthToken()
|
||||
|
||||
superuser, err := beszelTests.CreateSuperuser(hub, "superuser@example.com", "password123")
|
||||
assert.NoError(t, err, "Failed to create superuser")
|
||||
superuserToken, err := superuser.NewAuthToken()
|
||||
assert.NoError(t, err, "Failed to create superuser auth token")
|
||||
|
||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||
return hub.TestApp
|
||||
}
|
||||
|
||||
scenarios := []beszelTests.ApiScenario{
|
||||
{
|
||||
Name: "POST /test-notification - no auth should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://127.0.0.1",
|
||||
}),
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - with external auth should succeed",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://8.8.8.8",
|
||||
}),
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"err\":"},
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - local url with user auth should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://localhost:8010",
|
||||
}),
|
||||
ExpectedStatus: 403,
|
||||
ExpectedContent: []string{"Only admins"},
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - internal url with user auth should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic+http://192.168.0.5",
|
||||
}),
|
||||
ExpectedStatus: 403,
|
||||
ExpectedContent: []string{"Only admins"},
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - internal url with admin auth should succeed",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": adminUserToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://127.0.0.1",
|
||||
}),
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"err\":"},
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - internal url with superuser auth should succeed",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": superuserToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://127.0.0.1",
|
||||
}),
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"err\":"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
scenario.Test(t)
|
||||
}
|
||||
}
|
||||
@@ -3,11 +3,6 @@
|
||||
package alerts_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/synctest"
|
||||
"time"
|
||||
@@ -16,359 +11,9 @@ import (
|
||||
|
||||
"github.com/henrygd/beszel/internal/alerts"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
||||
func jsonReader(v any) io.Reader {
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bytes.NewReader(data)
|
||||
}
|
||||
|
||||
func TestUserAlertsApi(t *testing.T) {
|
||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||
defer hub.Cleanup()
|
||||
|
||||
hub.StartHub()
|
||||
|
||||
user1, _ := beszelTests.CreateUser(hub, "alertstest@example.com", "password")
|
||||
user1Token, _ := user1.NewAuthToken()
|
||||
|
||||
user2, _ := beszelTests.CreateUser(hub, "alertstest2@example.com", "password")
|
||||
user2Token, _ := user2.NewAuthToken()
|
||||
|
||||
system1, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "system1",
|
||||
"users": []string{user1.Id},
|
||||
"host": "127.0.0.1",
|
||||
})
|
||||
|
||||
system2, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "system2",
|
||||
"users": []string{user1.Id, user2.Id},
|
||||
"host": "127.0.0.2",
|
||||
})
|
||||
|
||||
userRecords, _ := hub.CountRecords("users")
|
||||
assert.EqualValues(t, 2, userRecords, "all users should be created")
|
||||
|
||||
systemRecords, _ := hub.CountRecords("systems")
|
||||
assert.EqualValues(t, 2, systemRecords, "all systems should be created")
|
||||
|
||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||
return hub.TestApp
|
||||
}
|
||||
|
||||
scenarios := []beszelTests.ApiScenario{
|
||||
// {
|
||||
// Name: "GET not implemented - returns index",
|
||||
// Method: http.MethodGet,
|
||||
// URL: "/api/beszel/user-alerts",
|
||||
// ExpectedStatus: 200,
|
||||
// ExpectedContent: []string{"<html ", "globalThis.BESZEL"},
|
||||
// TestAppFactory: testAppFactory,
|
||||
// },
|
||||
{
|
||||
Name: "POST no auth",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST no body",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Bad data"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST bad data",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Bad data"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"invalidField": "this should cause validation error",
|
||||
"threshold": "not a number",
|
||||
}),
|
||||
},
|
||||
{
|
||||
Name: "POST malformed JSON",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Bad data"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: strings.NewReader(`{"alertType": "cpu", "threshold": 80, "enabled": true,}`),
|
||||
},
|
||||
{
|
||||
Name: "POST valid alert data multiple systems",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"value": 69,
|
||||
"min": 9,
|
||||
"systems": []string{system1.Id, system2.Id},
|
||||
"overwrite": false,
|
||||
}),
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
// check total alerts
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||
// check alert has correct values
|
||||
matchingAlerts, _ := app.CountRecords("alerts", dbx.HashExp{"name": "CPU", "user": user1.Id, "system": system1.Id, "value": 69, "min": 9})
|
||||
assert.EqualValues(t, 1, matchingAlerts, "should have 1 alert")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "POST valid alert data single system",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "Memory",
|
||||
"systems": []string{system1.Id},
|
||||
"value": 90,
|
||||
"min": 10,
|
||||
}),
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
user1Alerts, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||
assert.EqualValues(t, 3, user1Alerts, "should have 3 alerts")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "Overwrite: false, should not overwrite existing alert",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"value": 45,
|
||||
"min": 5,
|
||||
"systems": []string{system1.Id},
|
||||
"overwrite": false,
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system1.Id,
|
||||
"user": user1.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user1.Id})
|
||||
assert.EqualValues(t, 80, alert.Get("value"), "should have 80 as value")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "Overwrite: true, should overwrite existing alert",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"value": 45,
|
||||
"min": 5,
|
||||
"systems": []string{system2.Id},
|
||||
"overwrite": true,
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system2.Id,
|
||||
"user": user2.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||
alert, _ := app.FindFirstRecordByFilter("alerts", "name = 'CPU' && user = {:user}", dbx.Params{"user": user2.Id})
|
||||
assert.EqualValues(t, 45, alert.Get("value"), "should have 45 as value")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "DELETE no auth",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"systems": []string{system1.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system1.Id,
|
||||
"user": user1.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 1, alerts, "should have 1 alert")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "DELETE alert",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"systems": []string{system1.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system1.Id,
|
||||
"user": user1.Id,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.Zero(t, alerts, "should have 0 alerts")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "DELETE alert multiple systems",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user1Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"count\":2", "\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "Memory",
|
||||
"systems": []string{system1.Id, system2.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
for _, systemId := range []string{system1.Id, system2.Id} {
|
||||
_, err := beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "Memory",
|
||||
"system": systemId,
|
||||
"user": user1.Id,
|
||||
"value": 90,
|
||||
"min": 10,
|
||||
})
|
||||
assert.NoError(t, err, "should create alert")
|
||||
}
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.Zero(t, alerts, "should have 0 alerts")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "User 2 should not be able to delete alert of user 1",
|
||||
Method: http.MethodDelete,
|
||||
URL: "/api/beszel/user-alerts",
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"\"count\":1", "\"success\":true"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"name": "CPU",
|
||||
"systems": []string{system2.Id},
|
||||
}),
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.ClearCollection(t, app, "alerts")
|
||||
for _, user := range []string{user1.Id, user2.Id} {
|
||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||
"name": "CPU",
|
||||
"system": system2.Id,
|
||||
"user": user,
|
||||
"value": 80,
|
||||
"min": 10,
|
||||
})
|
||||
}
|
||||
alerts, _ := app.CountRecords("alerts")
|
||||
assert.EqualValues(t, 2, alerts, "should have 2 alerts")
|
||||
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
||||
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
||||
assert.EqualValues(t, 1, user2AlertCount, "should have 1 alert")
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
user1AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user1.Id})
|
||||
assert.EqualValues(t, 1, user1AlertCount, "should have 1 alert")
|
||||
user2AlertCount, _ := app.CountRecords("alerts", dbx.HashExp{"user": user2.Id})
|
||||
assert.Zero(t, user2AlertCount, "should have 0 alerts")
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
scenario.Test(t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAlertsHistory(t *testing.T) {
|
||||
synctest.Test(t, func(t *testing.T) {
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
|
||||
@@ -95,3 +95,7 @@ func (am *AlertManager) RestorePendingStatusAlerts() error {
|
||||
func (am *AlertManager) SetAlertTriggered(alert CachedAlertData, triggered bool) error {
|
||||
return am.setAlertTriggered(alert, triggered)
|
||||
}
|
||||
|
||||
func IsInternalURL(rawURL string) (bool, error) {
|
||||
return isInternalURL(rawURL)
|
||||
}
|
||||
|
||||
@@ -195,6 +195,6 @@ func main() {
|
||||
}
|
||||
|
||||
if err := a.Start(serverConfig); err != nil {
|
||||
log.Fatal("Failed to start server: ", err)
|
||||
log.Fatal("Failed to start: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,6 +22,8 @@ const (
|
||||
GetSmartData
|
||||
// Request detailed systemd service info from agent
|
||||
GetSystemdInfo
|
||||
// Sync network probe configuration to agent
|
||||
SyncNetworkProbes
|
||||
// Add new actions here...
|
||||
)
|
||||
|
||||
|
||||
83
internal/entities/probe/probe.go
Normal file
83
internal/entities/probe/probe.go
Normal file
@@ -0,0 +1,83 @@
|
||||
package probe
|
||||
|
||||
type SyncAction uint8
|
||||
|
||||
const (
|
||||
// SyncActionReplace indicates a full sync where the provided configs should replace all existing probes for the system.
|
||||
SyncActionReplace SyncAction = iota
|
||||
// SyncActionUpsert indicates an incremental sync where the provided config should be added or updated.
|
||||
SyncActionUpsert
|
||||
// SyncActionDelete indicates an incremental sync where the provided config should be removed.
|
||||
SyncActionDelete
|
||||
)
|
||||
|
||||
// Config defines a network probe task sent from hub to agent.
|
||||
type Config struct {
|
||||
// ID is the stable network_probes record ID generated by the hub.
|
||||
ID string `cbor:"0,keyasint"`
|
||||
Target string `cbor:"1,keyasint"`
|
||||
Protocol string `cbor:"2,keyasint"` // "icmp", "tcp", or "http"
|
||||
Port uint16 `cbor:"3,keyasint,omitempty"`
|
||||
Interval uint16 `cbor:"4,keyasint"` // seconds
|
||||
}
|
||||
|
||||
// SyncRequest defines an incremental or full probe sync request sent to the agent.
|
||||
type SyncRequest struct {
|
||||
Action SyncAction `cbor:"0,keyasint"`
|
||||
Config Config `cbor:"1,keyasint,omitempty"`
|
||||
Configs []Config `cbor:"2,keyasint,omitempty"`
|
||||
RunNow bool `cbor:"3,keyasint,omitempty"`
|
||||
}
|
||||
|
||||
// SyncResponse returns the immediate result for an upsert when requested.
|
||||
type SyncResponse struct {
|
||||
Result Result `cbor:"0,keyasint,omitempty"`
|
||||
}
|
||||
|
||||
// Result holds aggregated probe results for a single target.
|
||||
//
|
||||
// 0: avg response in microseconds
|
||||
//
|
||||
// 1: 1h average response in microseconds
|
||||
//
|
||||
// 2: min response in microseconds
|
||||
//
|
||||
// 3: 1h min response in microseconds
|
||||
//
|
||||
// 4: max response in microseconds
|
||||
//
|
||||
// 5: 1h max response in microseconds
|
||||
//
|
||||
// 6: packet loss percentage (0-100)
|
||||
//
|
||||
// 7: 1h packet loss percentage (0-100)
|
||||
type Result struct {
|
||||
AvgResponse int64 `cbor:"0,keyasint,omitempty"`
|
||||
AvgResponse1h int64 `cbor:"1,keyasint,omitempty"`
|
||||
MinResponse int64 `cbor:"2,keyasint,omitempty"`
|
||||
MinResponse1h int64 `cbor:"3,keyasint,omitempty"`
|
||||
MaxResponse int64 `cbor:"4,keyasint,omitempty"`
|
||||
MaxResponse1h int64 `cbor:"5,keyasint,omitempty"`
|
||||
PacketLoss float64 `cbor:"6,keyasint,omitempty"`
|
||||
PacketLoss1h float64 `cbor:"7,keyasint,omitempty"`
|
||||
}
|
||||
|
||||
// Stats holds only 1m values for a single target, which are used for charts.
|
||||
//
|
||||
// 0: avg response in microseconds
|
||||
//
|
||||
// 1: min response in microseconds
|
||||
//
|
||||
// 2: max response in microseconds
|
||||
//
|
||||
// 3: packet loss percentage (0-100)
|
||||
type Stats []float64
|
||||
|
||||
func (s Stats) FromResult(result Result) Stats {
|
||||
return Stats{
|
||||
float64(result.AvgResponse),
|
||||
float64(result.MinResponse),
|
||||
float64(result.MaxResponse),
|
||||
result.PacketLoss,
|
||||
}
|
||||
}
|
||||
@@ -494,7 +494,7 @@ type SmartInfoForNvme struct {
|
||||
FirmwareVersion string `json:"firmware_version"`
|
||||
// NVMePCIVendor NVMePCIVendor `json:"nvme_pci_vendor"`
|
||||
// NVMeIEEEOUIIdentifier uint32 `json:"nvme_ieee_oui_identifier"`
|
||||
// NVMeTotalCapacity uint64 `json:"nvme_total_capacity"`
|
||||
NVMeTotalCapacity uint64 `json:"nvme_total_capacity"`
|
||||
// NVMeUnallocatedCapacity uint64 `json:"nvme_unallocated_capacity"`
|
||||
// NVMeControllerID uint16 `json:"nvme_controller_id"`
|
||||
// NVMeVersion VersionStringInfo `json:"nvme_version"`
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
)
|
||||
|
||||
@@ -48,6 +49,8 @@ type Stats struct {
|
||||
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
|
||||
CpuBreakdown []float64 `json:"cpub,omitempty" cbor:"33,keyasint,omitempty"` // [user, system, iowait, steal, idle]
|
||||
CpuCoresUsage Uint8Slice `json:"cpus,omitempty" cbor:"34,keyasint,omitempty"` // per-core busy usage [CPU0..]
|
||||
DiskIoStats [6]float64 `json:"dios,omitzero" cbor:"35,keyasint,omitzero"` // [read time %, write time %, io utilization %, r_await ms, w_await ms, weighted io %]
|
||||
MaxDiskIoStats [6]float64 `json:"diosm,omitzero" cbor:"-"` // max values for DiskIoStats
|
||||
}
|
||||
|
||||
// Uint8Slice wraps []uint8 to customize JSON encoding while keeping CBOR efficient.
|
||||
@@ -93,10 +96,12 @@ type FsStats struct {
|
||||
MaxDiskReadPS float64 `json:"rm,omitempty" cbor:"-"`
|
||||
MaxDiskWritePS float64 `json:"wm,omitempty" cbor:"-"`
|
||||
// TODO: remove DiskReadPs and DiskWritePs in future release in favor of DiskReadBytes and DiskWriteBytes
|
||||
DiskReadBytes uint64 `json:"rb" cbor:"6,keyasint,omitempty"`
|
||||
DiskWriteBytes uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
|
||||
MaxDiskReadBytes uint64 `json:"rbm,omitempty" cbor:"-"`
|
||||
MaxDiskWriteBytes uint64 `json:"wbm,omitempty" cbor:"-"`
|
||||
DiskReadBytes uint64 `json:"rb" cbor:"6,keyasint,omitempty"`
|
||||
DiskWriteBytes uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
|
||||
MaxDiskReadBytes uint64 `json:"rbm,omitempty" cbor:"-"`
|
||||
MaxDiskWriteBytes uint64 `json:"wbm,omitempty" cbor:"-"`
|
||||
DiskIoStats [6]float64 `json:"dios,omitzero" cbor:"8,keyasint,omitzero"` // [read time %, write time %, io utilization %, r_await ms, w_await ms, weighted io %]
|
||||
MaxDiskIoStats [6]float64 `json:"diosm,omitzero" cbor:"-"` // max values for DiskIoStats
|
||||
}
|
||||
|
||||
type NetIoStats struct {
|
||||
@@ -170,9 +175,10 @@ type Details struct {
|
||||
|
||||
// Final data structure to return to the hub
|
||||
type CombinedData struct {
|
||||
Stats Stats `json:"stats" cbor:"0,keyasint"`
|
||||
Info Info `json:"info" cbor:"1,keyasint"`
|
||||
Containers []*container.Stats `json:"container" cbor:"2,keyasint"`
|
||||
SystemdServices []*systemd.Service `json:"systemd,omitempty" cbor:"3,keyasint,omitempty"`
|
||||
Details *Details `cbor:"4,keyasint,omitempty"`
|
||||
Stats Stats `json:"stats" cbor:"0,keyasint"`
|
||||
Info Info `json:"info" cbor:"1,keyasint"`
|
||||
Containers []*container.Stats `json:"container" cbor:"2,keyasint"`
|
||||
SystemdServices []*systemd.Service `json:"systemd,omitempty" cbor:"3,keyasint,omitempty"`
|
||||
Details *Details `cbor:"4,keyasint,omitempty"`
|
||||
Probes map[string]probe.Result `cbor:"5,keyasint,omitempty"`
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package hub
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -13,6 +14,7 @@ import (
|
||||
"github.com/henrygd/beszel/internal/ghupdate"
|
||||
"github.com/henrygd/beszel/internal/hub/config"
|
||||
"github.com/henrygd/beszel/internal/hub/systems"
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/apis"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
@@ -25,6 +27,8 @@ type UpdateInfo struct {
|
||||
Url string `json:"url"`
|
||||
}
|
||||
|
||||
var containerIDPattern = regexp.MustCompile(`^[a-fA-F0-9]{12,64}$`)
|
||||
|
||||
// Middleware to allow only admin role users
|
||||
var requireAdminRole = customAuthMiddleware(func(e *core.RequestEvent) bool {
|
||||
return e.Auth.GetString("role") == "admin"
|
||||
@@ -67,13 +71,13 @@ func (h *Hub) registerMiddlewares(se *core.ServeEvent) {
|
||||
return e.Next()
|
||||
}
|
||||
// authenticate with trusted header
|
||||
if autoLogin, _ := GetEnv("AUTO_LOGIN"); autoLogin != "" {
|
||||
if autoLogin, _ := utils.GetEnv("AUTO_LOGIN"); autoLogin != "" {
|
||||
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
||||
return authorizeRequestWithEmail(e, autoLogin)
|
||||
})
|
||||
}
|
||||
// authenticate with trusted header
|
||||
if trustedHeader, _ := GetEnv("TRUSTED_AUTH_HEADER"); trustedHeader != "" {
|
||||
if trustedHeader, _ := utils.GetEnv("TRUSTED_AUTH_HEADER"); trustedHeader != "" {
|
||||
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
||||
return authorizeRequestWithEmail(e, e.Request.Header.Get(trustedHeader))
|
||||
})
|
||||
@@ -101,7 +105,7 @@ func (h *Hub) registerApiRoutes(se *core.ServeEvent) error {
|
||||
apiAuth.GET("/info", h.getInfo)
|
||||
apiAuth.GET("/getkey", h.getInfo) // deprecated - keep for compatibility w/ integrations
|
||||
// check for updates
|
||||
if optIn, _ := GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||
if optIn, _ := utils.GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||
var updateInfo UpdateInfo
|
||||
apiAuth.GET("/update", updateInfo.getUpdate)
|
||||
}
|
||||
@@ -124,7 +128,7 @@ func (h *Hub) registerApiRoutes(se *core.ServeEvent) error {
|
||||
// get systemd service details
|
||||
apiAuth.GET("/systemd/info", h.getSystemdInfo)
|
||||
// /containers routes
|
||||
if enabled, _ := GetEnv("CONTAINER_DETAILS"); enabled != "false" {
|
||||
if enabled, _ := utils.GetEnv("CONTAINER_DETAILS"); enabled != "false" {
|
||||
// get container logs
|
||||
apiAuth.GET("/containers/logs", h.getContainerLogs)
|
||||
// get container info
|
||||
@@ -144,7 +148,7 @@ func (h *Hub) getInfo(e *core.RequestEvent) error {
|
||||
Key: h.pubKey,
|
||||
Version: beszel.Version,
|
||||
}
|
||||
if optIn, _ := GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||
if optIn, _ := utils.GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||
info.CheckUpdate = true
|
||||
}
|
||||
return e.JSON(http.StatusOK, info)
|
||||
@@ -177,6 +181,10 @@ func (info *UpdateInfo) getUpdate(e *core.RequestEvent) error {
|
||||
|
||||
// GetUniversalToken handles the universal token API endpoint (create, read, delete)
|
||||
func (h *Hub) getUniversalToken(e *core.RequestEvent) error {
|
||||
if e.Auth.IsSuperuser() {
|
||||
return e.ForbiddenError("Superusers cannot use universal tokens", nil)
|
||||
}
|
||||
|
||||
tokenMap := universalTokenMap.GetMap()
|
||||
userID := e.Auth.Id
|
||||
query := e.Request.URL.Query()
|
||||
@@ -303,21 +311,18 @@ func (h *Hub) containerRequestHandler(e *core.RequestEvent, fetchFunc func(*syst
|
||||
systemID := e.Request.URL.Query().Get("system")
|
||||
containerID := e.Request.URL.Query().Get("container")
|
||||
|
||||
if systemID == "" || containerID == "" {
|
||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and container parameters are required"})
|
||||
}
|
||||
if !containerIDPattern.MatchString(containerID) {
|
||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "invalid container parameter"})
|
||||
if systemID == "" || containerID == "" || !containerIDPattern.MatchString(containerID) {
|
||||
return e.BadRequestError("Invalid system or container parameter", nil)
|
||||
}
|
||||
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||
return e.NotFoundError("", nil)
|
||||
}
|
||||
|
||||
data, err := fetchFunc(system, containerID)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
||||
return e.InternalServerError("", err)
|
||||
}
|
||||
|
||||
return e.JSON(http.StatusOK, map[string]string{responseKey: data})
|
||||
@@ -343,15 +348,23 @@ func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
||||
serviceName := query.Get("service")
|
||||
|
||||
if systemID == "" || serviceName == "" {
|
||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and service parameters are required"})
|
||||
return e.BadRequestError("Invalid system or service parameter", nil)
|
||||
}
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||
return e.NotFoundError("", nil)
|
||||
}
|
||||
// verify service exists before fetching details
|
||||
_, err = e.App.FindFirstRecordByFilter("systemd_services", "system = {:system} && name = {:name}", dbx.Params{
|
||||
"system": systemID,
|
||||
"name": serviceName,
|
||||
})
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||
return e.NotFoundError("", err)
|
||||
}
|
||||
details, err := system.FetchSystemdInfoFromAgent(serviceName)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
||||
return e.InternalServerError("", err)
|
||||
}
|
||||
e.Response.Header().Set("Cache-Control", "public, max-age=60")
|
||||
return e.JSON(http.StatusOK, map[string]any{"details": details})
|
||||
@@ -362,17 +375,16 @@ func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
||||
func (h *Hub) refreshSmartData(e *core.RequestEvent) error {
|
||||
systemID := e.Request.URL.Query().Get("system")
|
||||
if systemID == "" {
|
||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system parameter is required"})
|
||||
return e.BadRequestError("Invalid system parameter", nil)
|
||||
}
|
||||
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil {
|
||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||
if err != nil || !system.HasUser(e.App, e.Auth) {
|
||||
return e.NotFoundError("", nil)
|
||||
}
|
||||
|
||||
// Fetch and save SMART devices
|
||||
if err := system.FetchAndSaveSmartDevices(); err != nil {
|
||||
return e.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
||||
return e.InternalServerError("", err)
|
||||
}
|
||||
|
||||
return e.JSON(http.StatusOK, map[string]string{"status": "ok"})
|
||||
|
||||
@@ -3,6 +3,7 @@ package hub_test
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"testing"
|
||||
@@ -25,14 +26,17 @@ func jsonReader(v any) io.Reader {
|
||||
}
|
||||
|
||||
func TestApiRoutesAuthentication(t *testing.T) {
|
||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||
hub, user := beszelTests.GetHubWithUser(t)
|
||||
defer hub.Cleanup()
|
||||
|
||||
hub.StartHub()
|
||||
userToken, err := user.NewAuthToken()
|
||||
require.NoError(t, err, "Failed to create auth token")
|
||||
|
||||
// Create test user and get auth token
|
||||
user, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
||||
user2, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
||||
require.NoError(t, err, "Failed to create test user")
|
||||
user2Token, err := user2.NewAuthToken()
|
||||
require.NoError(t, err, "Failed to create user2 auth token")
|
||||
|
||||
adminUser, err := beszelTests.CreateUserWithRole(hub, "admin@example.com", "password123", "admin")
|
||||
require.NoError(t, err, "Failed to create admin user")
|
||||
@@ -41,11 +45,14 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
readOnlyUser, err := beszelTests.CreateUserWithRole(hub, "readonly@example.com", "password123", "readonly")
|
||||
require.NoError(t, err, "Failed to create readonly user")
|
||||
readOnlyUserToken, err := readOnlyUser.NewAuthToken()
|
||||
require.NoError(t, err, "Failed to create readonly user auth token")
|
||||
|
||||
userToken, err := user.NewAuthToken()
|
||||
require.NoError(t, err, "Failed to create auth token")
|
||||
superuser, err := beszelTests.CreateSuperuser(hub, "superuser@example.com", "password123")
|
||||
require.NoError(t, err, "Failed to create superuser")
|
||||
superuserToken, err := superuser.NewAuthToken()
|
||||
require.NoError(t, err, "Failed to create superuser auth token")
|
||||
|
||||
// Create test system for user-alerts endpoints
|
||||
// Create test system
|
||||
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"users": []string{user.Id},
|
||||
@@ -59,31 +66,6 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
|
||||
scenarios := []beszelTests.ApiScenario{
|
||||
// Auth Protected Routes - Should require authentication
|
||||
{
|
||||
Name: "POST /test-notification - no auth should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://127.0.0.1",
|
||||
}),
|
||||
},
|
||||
{
|
||||
Name: "POST /test-notification - with auth should succeed",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/test-notification",
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
Body: jsonReader(map[string]any{
|
||||
"url": "generic://127.0.0.1",
|
||||
}),
|
||||
ExpectedStatus: 200,
|
||||
ExpectedContent: []string{"sending message"},
|
||||
},
|
||||
{
|
||||
Name: "GET /config-yaml - no auth should fail",
|
||||
Method: http.MethodGet,
|
||||
@@ -196,6 +178,19 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
ExpectedContent: []string{"\"permanent\":true", "permanent-token-123"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /universal-token - superuser should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/universal-token",
|
||||
Headers: map[string]string{
|
||||
"Authorization": superuserToken,
|
||||
},
|
||||
ExpectedStatus: 403,
|
||||
ExpectedContent: []string{"Superusers cannot use universal tokens"},
|
||||
TestAppFactory: func(t testing.TB) *pbTests.TestApp {
|
||||
return hub.TestApp
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "GET /universal-token - with readonly auth should fail",
|
||||
Method: http.MethodGet,
|
||||
@@ -215,13 +210,13 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"system parameter is required"},
|
||||
ExpectedContent: []string{"Invalid", "system", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST /smart/refresh - with readonly auth should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: "/api/beszel/smart/refresh",
|
||||
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": readOnlyUserToken,
|
||||
},
|
||||
@@ -229,6 +224,28 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
ExpectedContent: []string{"The authorized record is not allowed to perform this action."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST /smart/refresh - non-user system should fail",
|
||||
Method: http.MethodPost,
|
||||
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST /smart/refresh - good user should pass validation",
|
||||
Method: http.MethodPost,
|
||||
URL: fmt.Sprintf("/api/beszel/smart/refresh?system=%s", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 500,
|
||||
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "POST /user-alerts - no auth should fail",
|
||||
Method: http.MethodPost,
|
||||
@@ -300,20 +317,59 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
{
|
||||
Name: "GET /containers/logs - no auth should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/containers/logs?system=test-system&container=test-container",
|
||||
URL: "/api/beszel/containers/logs?system=test-system&container=abababababab",
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/logs - request for valid non-user system should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/containers/logs?system=%s&container=abababababab", system.Id),
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/info - request for valid non-user system should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/containers/info?system=%s&container=abababababab", system.Id),
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/info - SHARE_ALL_SYSTEMS allows non-member user",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/containers/info?system=%s&container=abababababab", system.Id),
|
||||
ExpectedStatus: 500,
|
||||
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||
},
|
||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||
t.Setenv("SHARE_ALL_SYSTEMS", "")
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/logs - with auth but missing system param should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/containers/logs?container=test-container",
|
||||
URL: "/api/beszel/containers/logs?container=abababababab",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"system and container parameters are required"},
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -324,7 +380,7 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"system and container parameters are required"},
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -335,7 +391,7 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"system not found"},
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -346,7 +402,7 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"invalid container parameter"},
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -357,7 +413,7 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"invalid container parameter"},
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
@@ -368,9 +424,114 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"invalid container parameter"},
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/logs - good user should pass validation",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/containers/logs?system=" + system.Id + "&container=0123456789ab",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 500,
|
||||
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /containers/info - good user should pass validation",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=0123456789ab",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 500,
|
||||
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
// /systemd routes
|
||||
{
|
||||
Name: "GET /systemd/info - no auth should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||
ExpectedStatus: 401,
|
||||
ExpectedContent: []string{"requires valid"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - request for valid non-user system should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
Headers: map[string]string{
|
||||
"Authorization": user2Token,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - with auth but missing system param should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/systemd/info?service=nginx.service",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - with auth but missing service param should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 400,
|
||||
ExpectedContent: []string{"Invalid", "parameter"},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - with auth but invalid system should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/systemd/info?system=invalid-system&service=nginx.service",
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - service not in systemd_services collection should fail",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=notregistered.service", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 404,
|
||||
ExpectedContent: []string{"The requested resource wasn't found."},
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
{
|
||||
Name: "GET /systemd/info - with auth and existing service record should pass validation",
|
||||
Method: http.MethodGet,
|
||||
URL: fmt.Sprintf("/api/beszel/systemd/info?system=%s&service=nginx.service", system.Id),
|
||||
Headers: map[string]string{
|
||||
"Authorization": userToken,
|
||||
},
|
||||
ExpectedStatus: 500,
|
||||
ExpectedContent: []string{"Something went wrong while processing your request."},
|
||||
TestAppFactory: testAppFactory,
|
||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||
beszelTests.CreateRecord(app, "systemd_services", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "nginx.service",
|
||||
"state": 0,
|
||||
"sub": 1,
|
||||
})
|
||||
},
|
||||
},
|
||||
|
||||
// Auth Optional Routes - Should work without authentication
|
||||
{
|
||||
@@ -461,13 +622,17 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
||||
"systems": []string{system.Id},
|
||||
}),
|
||||
},
|
||||
{
|
||||
Name: "GET /update - shouldn't exist without CHECK_UPDATES env var",
|
||||
Method: http.MethodGet,
|
||||
URL: "/api/beszel/update",
|
||||
ExpectedStatus: 502,
|
||||
TestAppFactory: testAppFactory,
|
||||
},
|
||||
// this works but diff behavior on prod vs dev.
|
||||
// dev returns 502; prod returns 200 with static html page 404
|
||||
// TODO: align dev and prod behavior and re-enable this test
|
||||
// {
|
||||
// Name: "GET /update - shouldn't exist without CHECK_UPDATES env var",
|
||||
// Method: http.MethodGet,
|
||||
// URL: "/api/beszel/update",
|
||||
// NotExpectedContent: []string{"v:", "\"v\":"},
|
||||
// ExpectedStatus: 502,
|
||||
// TestAppFactory: testAppFactory,
|
||||
// },
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
package hub
|
||||
|
||||
import "github.com/pocketbase/pocketbase/core"
|
||||
import (
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
)
|
||||
|
||||
type collectionRules struct {
|
||||
list *string
|
||||
@@ -22,11 +25,11 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
}
|
||||
|
||||
// disable email auth if DISABLE_PASSWORD_AUTH env var is set
|
||||
disablePasswordAuth, _ := GetEnv("DISABLE_PASSWORD_AUTH")
|
||||
disablePasswordAuth, _ := utils.GetEnv("DISABLE_PASSWORD_AUTH")
|
||||
usersCollection.PasswordAuth.Enabled = disablePasswordAuth != "true"
|
||||
usersCollection.PasswordAuth.IdentityFields = []string{"email"}
|
||||
// allow oauth user creation if USER_CREATION is set
|
||||
if userCreation, _ := GetEnv("USER_CREATION"); userCreation == "true" {
|
||||
if userCreation, _ := utils.GetEnv("USER_CREATION"); userCreation == "true" {
|
||||
cr := "@request.context = 'oauth2'"
|
||||
usersCollection.CreateRule = &cr
|
||||
} else {
|
||||
@@ -34,7 +37,7 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
}
|
||||
|
||||
// enable mfaOtp mfa if MFA_OTP env var is set
|
||||
mfaOtp, _ := GetEnv("MFA_OTP")
|
||||
mfaOtp, _ := utils.GetEnv("MFA_OTP")
|
||||
usersCollection.OTP.Length = 6
|
||||
superusersCollection.OTP.Length = 6
|
||||
usersCollection.OTP.Enabled = mfaOtp == "true"
|
||||
@@ -50,7 +53,7 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
|
||||
// When SHARE_ALL_SYSTEMS is enabled, any authenticated user can read
|
||||
// system-scoped data. Write rules continue to block readonly users.
|
||||
shareAllSystems, _ := GetEnv("SHARE_ALL_SYSTEMS")
|
||||
shareAllSystems, _ := utils.GetEnv("SHARE_ALL_SYSTEMS")
|
||||
|
||||
authenticatedRule := "@request.auth.id != \"\""
|
||||
systemsMemberRule := authenticatedRule + " && users.id ?= @request.auth.id"
|
||||
@@ -75,7 +78,7 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := applyCollectionRules(app, []string{"containers", "container_stats", "system_stats", "systemd_services"}, collectionRules{
|
||||
if err := applyCollectionRules(app, []string{"containers", "container_stats", "system_stats", "systemd_services", "network_probe_stats"}, collectionRules{
|
||||
list: &systemScopedReadRule,
|
||||
}); err != nil {
|
||||
return err
|
||||
@@ -89,7 +92,7 @@ func setCollectionAuthSettings(app core.App) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := applyCollectionRules(app, []string{"fingerprints"}, collectionRules{
|
||||
if err := applyCollectionRules(app, []string{"fingerprints", "network_probes"}, collectionRules{
|
||||
list: &systemScopedReadRule,
|
||||
view: &systemScopedReadRule,
|
||||
create: &systemScopedWriteRule,
|
||||
|
||||
@@ -9,13 +9,13 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/henrygd/beszel/internal/alerts"
|
||||
"github.com/henrygd/beszel/internal/hub/config"
|
||||
"github.com/henrygd/beszel/internal/hub/heartbeat"
|
||||
"github.com/henrygd/beszel/internal/hub/systems"
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
"github.com/henrygd/beszel/internal/records"
|
||||
"github.com/henrygd/beszel/internal/users"
|
||||
|
||||
@@ -38,8 +38,6 @@ type Hub struct {
|
||||
appURL string
|
||||
}
|
||||
|
||||
var containerIDPattern = regexp.MustCompile(`^[a-fA-F0-9]{12,64}$`)
|
||||
|
||||
// NewHub creates a new Hub instance with default configuration
|
||||
func NewHub(app core.App) *Hub {
|
||||
hub := &Hub{App: app}
|
||||
@@ -47,7 +45,7 @@ func NewHub(app core.App) *Hub {
|
||||
hub.um = users.NewUserManager(hub)
|
||||
hub.rm = records.NewRecordManager(hub)
|
||||
hub.sm = systems.NewSystemManager(hub)
|
||||
hub.hb = heartbeat.New(app, GetEnv)
|
||||
hub.hb = heartbeat.New(app, utils.GetEnv)
|
||||
if hub.hb != nil {
|
||||
hub.hbStop = make(chan struct{})
|
||||
}
|
||||
@@ -55,15 +53,6 @@ func NewHub(app core.App) *Hub {
|
||||
return hub
|
||||
}
|
||||
|
||||
// GetEnv retrieves an environment variable with a "BESZEL_HUB_" prefix, or falls back to the unprefixed key.
|
||||
func GetEnv(key string) (value string, exists bool) {
|
||||
if value, exists = os.LookupEnv("BESZEL_HUB_" + key); exists {
|
||||
return value, exists
|
||||
}
|
||||
// Fallback to the old unprefixed key
|
||||
return os.LookupEnv(key)
|
||||
}
|
||||
|
||||
// onAfterBootstrapAndMigrations ensures the provided function runs after the database is set up and migrations are applied.
|
||||
// This is a workaround for behavior in PocketBase where onBootstrap runs before migrations, forcing use of onServe for this purpose.
|
||||
// However, PB's tests.TestApp is already bootstrapped, generally doesn't serve, but does handle migrations.
|
||||
@@ -92,6 +81,7 @@ func (h *Hub) StartHub() error {
|
||||
}
|
||||
// register middlewares
|
||||
h.registerMiddlewares(e)
|
||||
// bind events that aren't set up in different
|
||||
// register api routes
|
||||
if err := h.registerApiRoutes(e); err != nil {
|
||||
return err
|
||||
@@ -120,6 +110,8 @@ func (h *Hub) StartHub() error {
|
||||
h.App.OnRecordCreate("users").BindFunc(h.um.InitializeUserRole)
|
||||
h.App.OnRecordCreate("user_settings").BindFunc(h.um.InitializeUserSettings)
|
||||
|
||||
bindNetworkProbesEvents(h)
|
||||
|
||||
pb, ok := h.App.(*pocketbase.PocketBase)
|
||||
if !ok {
|
||||
return errors.New("not a pocketbase app")
|
||||
@@ -134,7 +126,7 @@ func (h *Hub) initialize(app core.App) error {
|
||||
// batch requests (for alerts)
|
||||
settings.Batch.Enabled = true
|
||||
// set URL if APP_URL env is set
|
||||
if appURL, isSet := GetEnv("APP_URL"); isSet {
|
||||
if appURL, isSet := utils.GetEnv("APP_URL"); isSet {
|
||||
h.appURL = appURL
|
||||
settings.Meta.AppURL = appURL
|
||||
}
|
||||
|
||||
155
internal/hub/probes.go
Normal file
155
internal/hub/probes.go
Normal file
@@ -0,0 +1,155 @@
|
||||
package hub
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/hub/systems"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
)
|
||||
|
||||
// generateProbeID creates a stable hash ID for a probe based on its configuration and the system it belongs to.
|
||||
func generateProbeID(systemId string, config probe.Config) string {
|
||||
args := []string{systemId, config.Target, config.Protocol}
|
||||
// only use port for TCP probes, since for other protocols it's not relevant as standalone value
|
||||
if config.Protocol == "tcp" {
|
||||
args = append(args, strconv.FormatUint(uint64(config.Port), 10))
|
||||
}
|
||||
return systems.MakeStableHashId(args...)
|
||||
}
|
||||
|
||||
// bindNetworkProbesEvents keeps probe records and agent probe state in sync.
|
||||
func bindNetworkProbesEvents(hub *Hub) {
|
||||
// on create, make sure the id is set to a stable hash
|
||||
hub.OnRecordCreate("network_probes").BindFunc(func(e *core.RecordEvent) error {
|
||||
systemID := e.Record.GetString("system")
|
||||
config := probeConfigFromRecord(e.Record)
|
||||
id := generateProbeID(systemID, *config)
|
||||
e.Record.Set("id", id)
|
||||
return e.Next()
|
||||
})
|
||||
|
||||
// sync probe to agent on creation and persist the first result immediately when available
|
||||
hub.OnRecordAfterCreateSuccess("network_probes").BindFunc(func(e *core.RecordEvent) error {
|
||||
err := e.Next()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !e.Record.GetBool("enabled") {
|
||||
return nil
|
||||
}
|
||||
// if system connected, run the probe immediately
|
||||
// if not, return and wait for the system to connect and sync probes on reg schedule
|
||||
system, err := hub.sm.GetSystem(e.Record.GetString("system"))
|
||||
if err == nil && system.Status == "up" {
|
||||
go hub.upsertNetworkProbe(e.Record, true)
|
||||
}
|
||||
return err
|
||||
})
|
||||
|
||||
// On API update requests, if the probe config changed in a way that requires a new ID, create a new
|
||||
// record with the new ID and delete the old one. Otherwise, just update the existing probe on the agent.
|
||||
hub.OnRecordUpdateRequest("network_probes").BindFunc(func(e *core.RecordRequestEvent) error {
|
||||
systemID := e.Record.GetString("system")
|
||||
// only tcp uses port - set other protocols port to zero
|
||||
if e.Record.GetString("protocol") != "tcp" {
|
||||
e.Record.Set("port", 0)
|
||||
}
|
||||
ID := generateProbeID(systemID, *probeConfigFromRecord(e.Record))
|
||||
if ID != e.Record.Id {
|
||||
newRecord := copyProbeToNewRecord(e.Record, ID)
|
||||
if err := e.App.Save(newRecord); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := e.App.Delete(e.Record); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err := e.Next()
|
||||
if e.Record.GetBool("enabled") {
|
||||
// if the probe is enabled, sync the updated config to the agent now
|
||||
runNow := !e.Record.Original().GetBool("enabled")
|
||||
err = hub.upsertNetworkProbe(e.Record, runNow)
|
||||
} else {
|
||||
// if the probe is paused, remove it from the agent
|
||||
err = hub.deleteNetworkProbe(e.Record)
|
||||
}
|
||||
if err != nil {
|
||||
hub.Logger().Warn("failed to sync updated probe", "system", systemID, "probe", e.Record.Id, "err", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
// sync probe to agent on delete
|
||||
hub.OnRecordAfterDeleteSuccess("network_probes").BindFunc(func(e *core.RecordEvent) error {
|
||||
if err := hub.deleteNetworkProbe(e.Record); err != nil {
|
||||
hub.Logger().Warn("failed to delete probe on agent", "system", e.Record.GetString("system"), "probe", e.Record.Id, "err", err)
|
||||
}
|
||||
return e.Next()
|
||||
})
|
||||
}
|
||||
|
||||
// probeConfigFromRecord builds a probe config from a network_probes record.
|
||||
func probeConfigFromRecord(record *core.Record) *probe.Config {
|
||||
return &probe.Config{
|
||||
ID: record.Id,
|
||||
Target: record.GetString("target"),
|
||||
Protocol: record.GetString("protocol"),
|
||||
Port: uint16(record.GetInt("port")),
|
||||
Interval: uint16(record.GetInt("interval")),
|
||||
}
|
||||
}
|
||||
|
||||
// setProbeResultFields stores the latest probe result values on the record.
|
||||
func setProbeResultFields(record *core.Record, result probe.Result) {
|
||||
nowString := time.Now().UTC().Format(types.DefaultDateLayout)
|
||||
record.Set("res", result.AvgResponse)
|
||||
record.Set("resAvg1h", result.AvgResponse1h)
|
||||
record.Set("resMin1h", result.MinResponse1h)
|
||||
record.Set("resMax1h", result.MaxResponse1h)
|
||||
record.Set("loss1h", result.PacketLoss1h)
|
||||
record.Set("updated", nowString)
|
||||
}
|
||||
|
||||
// copyProbeToNewRecord creates a new record with the same field values as the old one.
|
||||
// This is used when the probe config changes in a way that requires a new ID, so we need
|
||||
// to create a new record with the new ID and delete the old one.
|
||||
func copyProbeToNewRecord(oldRecord *core.Record, newID string) *core.Record {
|
||||
collection := oldRecord.Collection()
|
||||
newRecord := core.NewRecord(collection)
|
||||
newRecord.Id = newID
|
||||
fields := []string{"system", "name", "target", "protocol", "port", "interval", "enabled"}
|
||||
for _, field := range fields {
|
||||
newRecord.Set(field, oldRecord.Get(field))
|
||||
}
|
||||
return newRecord
|
||||
}
|
||||
|
||||
// upsertNetworkProbe creates or updates the record's probe on the target system. If runNow
|
||||
// is true, it will also trigger an immediate probe run and update the record with the result.
|
||||
func (h *Hub) upsertNetworkProbe(record *core.Record, runNow bool) error {
|
||||
systemID := record.GetString("system")
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result, err := system.UpsertNetworkProbe(*probeConfigFromRecord(record), runNow)
|
||||
if err != nil || result == nil {
|
||||
return err
|
||||
}
|
||||
setProbeResultFields(record, *result)
|
||||
return h.App.SaveNoValidate(record)
|
||||
}
|
||||
|
||||
// deleteNetworkProbe removes the record's probe from the target system.
|
||||
func (h *Hub) deleteNetworkProbe(record *core.Record) error {
|
||||
systemID := record.GetString("system")
|
||||
system, err := h.sm.GetSystem(systemID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return system.DeleteNetworkProbe(record.Id)
|
||||
}
|
||||
155
internal/hub/probes_test.go
Normal file
155
internal/hub/probes_test.go
Normal file
@@ -0,0 +1,155 @@
|
||||
package hub
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGenerateProbeID(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
systemID string
|
||||
config probe.Config
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "HTTP probe on example.com",
|
||||
systemID: "sys123",
|
||||
config: probe.Config{
|
||||
Protocol: "http",
|
||||
Target: "example.com",
|
||||
Port: 0,
|
||||
Interval: 60,
|
||||
},
|
||||
expected: "a20a5827",
|
||||
},
|
||||
{
|
||||
name: "HTTP probe on example.com with different port",
|
||||
systemID: "sys123",
|
||||
config: probe.Config{
|
||||
Protocol: "http",
|
||||
Target: "example.com",
|
||||
Port: 8080,
|
||||
Interval: 60,
|
||||
},
|
||||
expected: "a20a5827",
|
||||
},
|
||||
{
|
||||
name: "HTTP probe on example.com with different system ID",
|
||||
systemID: "sys1234",
|
||||
config: probe.Config{
|
||||
Protocol: "http",
|
||||
Target: "example.com",
|
||||
Port: 80,
|
||||
Interval: 60,
|
||||
},
|
||||
expected: "ab602ae7",
|
||||
},
|
||||
{
|
||||
name: "Same probe, different interval",
|
||||
systemID: "sys1234",
|
||||
config: probe.Config{
|
||||
Protocol: "http",
|
||||
Target: "example.com",
|
||||
Port: 80,
|
||||
Interval: 120,
|
||||
},
|
||||
expected: "ab602ae7",
|
||||
},
|
||||
{
|
||||
name: "ICMP probe on 1.1.1.1",
|
||||
systemID: "sys456",
|
||||
config: probe.Config{
|
||||
Protocol: "icmp",
|
||||
Target: "1.1.1.1",
|
||||
Port: 0,
|
||||
Interval: 10,
|
||||
},
|
||||
expected: "6d13a4a4",
|
||||
}, {
|
||||
name: "ICMP probe on 1.1.1.1 with different system ID",
|
||||
systemID: "sys4567",
|
||||
config: probe.Config{
|
||||
Protocol: "icmp",
|
||||
Target: "1.1.1.1",
|
||||
Port: 0,
|
||||
Interval: 10,
|
||||
},
|
||||
expected: "ddd6c81",
|
||||
},
|
||||
{
|
||||
name: "TCP probe on example.com with port 443",
|
||||
systemID: "sys789",
|
||||
config: probe.Config{
|
||||
Protocol: "tcp",
|
||||
Target: "example.com",
|
||||
Port: 443,
|
||||
Interval: 30,
|
||||
},
|
||||
expected: "677b991",
|
||||
},
|
||||
{
|
||||
name: "TCP probe on example.com with port 8443",
|
||||
systemID: "sys789",
|
||||
config: probe.Config{
|
||||
Protocol: "tcp",
|
||||
Target: "example.com",
|
||||
Port: 8443,
|
||||
Interval: 30,
|
||||
},
|
||||
expected: "84167969",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := generateProbeID(tt.systemID, tt.config)
|
||||
assert.Equal(t, tt.expected, got, "generateProbeID() = %v, want %v", got, tt.expected)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyProbeToNewRecordDropsResultFields(t *testing.T) {
|
||||
hub, testApp, err := createTestHub(t)
|
||||
require.NoError(t, err)
|
||||
defer cleanupTestHub(hub, testApp)
|
||||
|
||||
collection, err := hub.FindCachedCollectionByNameOrId("network_probes")
|
||||
require.NoError(t, err)
|
||||
|
||||
oldRecord := core.NewRecord(collection)
|
||||
oldRecord.Load(map[string]any{
|
||||
"system": "sys123",
|
||||
"name": "Example",
|
||||
"target": "https://example.com",
|
||||
"protocol": "http",
|
||||
"port": 443,
|
||||
"interval": 60,
|
||||
"enabled": true,
|
||||
"res": 1200,
|
||||
"resAvg1h": 1300,
|
||||
"resMin1h": 900,
|
||||
"resMax1h": 1600,
|
||||
"loss1h": 5,
|
||||
"updated": "2026-04-29 12:00:00.000Z",
|
||||
})
|
||||
|
||||
newRecord := copyProbeToNewRecord(oldRecord, "next12345")
|
||||
|
||||
assert.Equal(t, "next12345", newRecord.Id)
|
||||
assert.Equal(t, "Example", newRecord.GetString("name"))
|
||||
assert.Equal(t, "https://example.com", newRecord.GetString("target"))
|
||||
assert.Equal(t, "http", newRecord.GetString("protocol"))
|
||||
assert.Equal(t, 443, newRecord.GetInt("port"))
|
||||
assert.True(t, newRecord.GetBool("enabled"))
|
||||
assert.Zero(t, newRecord.GetFloat("res"))
|
||||
assert.Zero(t, newRecord.GetFloat("resAvg1h"))
|
||||
assert.Zero(t, newRecord.GetFloat("resMin1h"))
|
||||
assert.Zero(t, newRecord.GetFloat("resMax1h"))
|
||||
assert.Zero(t, newRecord.GetFloat("loss1h"))
|
||||
assert.Equal(t, "", newRecord.GetString("updated"))
|
||||
}
|
||||
42
internal/hub/server.go
Normal file
42
internal/hub/server.go
Normal file
@@ -0,0 +1,42 @@
|
||||
package hub
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
)
|
||||
|
||||
// PublicAppInfo defines the structure of the public app information that will be injected into the HTML
|
||||
type PublicAppInfo struct {
|
||||
BASE_PATH string
|
||||
HUB_VERSION string
|
||||
HUB_URL string
|
||||
OAUTH_DISABLE_POPUP bool `json:"OAUTH_DISABLE_POPUP,omitempty"`
|
||||
}
|
||||
|
||||
// modifyIndexHTML injects the public app information into the index.html content
|
||||
func modifyIndexHTML(hub *Hub, html []byte) string {
|
||||
info := getPublicAppInfo(hub)
|
||||
content, err := json.Marshal(info)
|
||||
if err != nil {
|
||||
return string(html)
|
||||
}
|
||||
htmlContent := strings.ReplaceAll(string(html), "./", info.BASE_PATH)
|
||||
return strings.Replace(htmlContent, "\"{info}\"", string(content), 1)
|
||||
}
|
||||
|
||||
func getPublicAppInfo(hub *Hub) PublicAppInfo {
|
||||
parsedURL, _ := url.Parse(hub.appURL)
|
||||
info := PublicAppInfo{
|
||||
BASE_PATH: strings.TrimSuffix(parsedURL.Path, "/") + "/",
|
||||
HUB_VERSION: beszel.Version,
|
||||
HUB_URL: hub.appURL,
|
||||
}
|
||||
if val, _ := utils.GetEnv("OAUTH_DISABLE_POPUP"); val == "true" {
|
||||
info.OAUTH_DISABLE_POPUP = true
|
||||
}
|
||||
return info
|
||||
}
|
||||
@@ -5,14 +5,11 @@ package hub
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/osutils"
|
||||
)
|
||||
@@ -39,7 +36,7 @@ func (rm *responseModifier) RoundTrip(req *http.Request) (*http.Response, error)
|
||||
}
|
||||
resp.Body.Close()
|
||||
// Create a new response with the modified body
|
||||
modifiedBody := rm.modifyHTML(string(body))
|
||||
modifiedBody := modifyIndexHTML(rm.hub, body)
|
||||
resp.Body = io.NopCloser(strings.NewReader(modifiedBody))
|
||||
resp.ContentLength = int64(len(modifiedBody))
|
||||
resp.Header.Set("Content-Length", fmt.Sprintf("%d", len(modifiedBody)))
|
||||
@@ -47,22 +44,8 @@ func (rm *responseModifier) RoundTrip(req *http.Request) (*http.Response, error)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (rm *responseModifier) modifyHTML(html string) string {
|
||||
parsedURL, err := url.Parse(rm.hub.appURL)
|
||||
if err != nil {
|
||||
return html
|
||||
}
|
||||
// fix base paths in html if using subpath
|
||||
basePath := strings.TrimSuffix(parsedURL.Path, "/") + "/"
|
||||
html = strings.ReplaceAll(html, "./", basePath)
|
||||
html = strings.Replace(html, "{{V}}", beszel.Version, 1)
|
||||
html = strings.Replace(html, "{{HUB_URL}}", rm.hub.appURL, 1)
|
||||
return html
|
||||
}
|
||||
|
||||
// startServer sets up the development server for Beszel
|
||||
func (h *Hub) startServer(se *core.ServeEvent) error {
|
||||
slog.Info("starting server", "appURL", h.appURL)
|
||||
proxy := httputil.NewSingleHostReverseProxy(&url.URL{
|
||||
Scheme: "http",
|
||||
Host: "localhost:5173",
|
||||
|
||||
@@ -5,10 +5,9 @@ package hub
|
||||
import (
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/henrygd/beszel"
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
"github.com/henrygd/beszel/internal/site"
|
||||
|
||||
"github.com/pocketbase/pocketbase/apis"
|
||||
@@ -17,22 +16,13 @@ import (
|
||||
|
||||
// startServer sets up the production server for Beszel
|
||||
func (h *Hub) startServer(se *core.ServeEvent) error {
|
||||
// parse app url
|
||||
parsedURL, err := url.Parse(h.appURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// fix base paths in html if using subpath
|
||||
basePath := strings.TrimSuffix(parsedURL.Path, "/") + "/"
|
||||
indexFile, _ := fs.ReadFile(site.DistDirFS, "index.html")
|
||||
html := strings.ReplaceAll(string(indexFile), "./", basePath)
|
||||
html = strings.Replace(html, "{{V}}", beszel.Version, 1)
|
||||
html = strings.Replace(html, "{{HUB_URL}}", h.appURL, 1)
|
||||
html := modifyIndexHTML(h, indexFile)
|
||||
// set up static asset serving
|
||||
staticPaths := [2]string{"/static/", "/assets/"}
|
||||
serveStatic := apis.Static(site.DistDirFS, false)
|
||||
// get CSP configuration
|
||||
csp, cspExists := GetEnv("CSP")
|
||||
csp, cspExists := utils.GetEnv("CSP")
|
||||
// add route
|
||||
se.Router.GET("/{path...}", func(e *core.RequestEvent) error {
|
||||
// serve static assets if path is in staticPaths
|
||||
|
||||
@@ -14,9 +14,11 @@ import (
|
||||
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/hub/transport"
|
||||
"github.com/henrygd/beszel/internal/hub/utils"
|
||||
"github.com/henrygd/beszel/internal/hub/ws"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/entities/smart"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||
@@ -28,6 +30,8 @@ import (
|
||||
"github.com/lxzan/gws"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/security"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
@@ -184,7 +188,7 @@ func (sys *System) handlePaused() {
|
||||
|
||||
// createRecords updates the system record and adds system_stats and container_stats records
|
||||
func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error) {
|
||||
systemRecord, err := sys.getRecord()
|
||||
systemRecord, err := sys.getRecord(sys.manager.hub)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -237,6 +241,12 @@ func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error
|
||||
}
|
||||
}
|
||||
|
||||
if data.Probes != nil {
|
||||
if err := updateNetworkProbesRecords(txApp, data.Probes, sys.Id); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// update system record (do this last because it triggers alerts and we need above records to be inserted first)
|
||||
systemRecord.Set("status", up)
|
||||
systemRecord.Set("info", data.Info)
|
||||
@@ -288,7 +298,7 @@ func createSystemdStatsRecords(app core.App, data []*systemd.Service, systemId s
|
||||
for i, service := range data {
|
||||
suffix := fmt.Sprintf("%d", i)
|
||||
valueStrings = append(valueStrings, fmt.Sprintf("({:id%[1]s}, {:system}, {:name%[1]s}, {:state%[1]s}, {:sub%[1]s}, {:cpu%[1]s}, {:cpuPeak%[1]s}, {:memory%[1]s}, {:memPeak%[1]s}, {:updated})", suffix))
|
||||
params["id"+suffix] = makeStableHashId(systemId, service.Name)
|
||||
params["id"+suffix] = MakeStableHashId(systemId, service.Name)
|
||||
params["name"+suffix] = service.Name
|
||||
params["state"+suffix] = service.State
|
||||
params["sub"+suffix] = service.Sub
|
||||
@@ -305,6 +315,97 @@ func createSystemdStatsRecords(app core.App, data []*systemd.Service, systemId s
|
||||
return err
|
||||
}
|
||||
|
||||
func updateNetworkProbesRecords(app core.App, probeResults map[string]probe.Result, systemId string) error {
|
||||
if len(probeResults) == 0 {
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
const probeCollectionName = "network_probes"
|
||||
|
||||
// If realtime updates are active, we save via PocketBase records to trigger realtime events.
|
||||
// Otherwise we can do a more efficient direct update via SQL
|
||||
realtimeActive := utils.RealtimeActiveForCollection(app, probeCollectionName, func(filterQuery string) bool {
|
||||
return !strings.Contains(filterQuery, "system") || strings.Contains(filterQuery, systemId)
|
||||
})
|
||||
|
||||
now := time.Now().UTC()
|
||||
nowMilli := now.UnixMilli()
|
||||
nowString := now.Format(types.DefaultDateLayout)
|
||||
var db dbx.Builder
|
||||
var updateQuery *dbx.Query
|
||||
if !realtimeActive {
|
||||
db = app.DB()
|
||||
probeFields := []string{"res", "resMin1h", "resMax1h", "resAvg1h", "loss1h", "updated"}
|
||||
setClauses := make([]string, len(probeFields))
|
||||
for i, f := range probeFields {
|
||||
setClauses[i] = fmt.Sprintf("%s={:%s}", f, f)
|
||||
}
|
||||
queryString := fmt.Sprintf("UPDATE %s SET %s WHERE id={:id}", probeCollectionName, strings.Join(setClauses, ", "))
|
||||
updateQuery = db.NewQuery(queryString)
|
||||
}
|
||||
|
||||
// update network_probes records
|
||||
for id, result := range probeResults {
|
||||
probeData := map[string]any{
|
||||
"id": id,
|
||||
"res": result.AvgResponse,
|
||||
"resAvg1h": result.AvgResponse1h,
|
||||
"resMin1h": result.MinResponse1h,
|
||||
"resMax1h": result.MaxResponse1h,
|
||||
"loss1h": result.PacketLoss1h,
|
||||
"updated": nowString,
|
||||
}
|
||||
switch realtimeActive {
|
||||
case true:
|
||||
var record *core.Record
|
||||
record, err = app.FindRecordById(probeCollectionName, id)
|
||||
if err == nil {
|
||||
record.Load(probeData)
|
||||
err = app.SaveNoValidate(record)
|
||||
}
|
||||
default:
|
||||
_, err = updateQuery.Bind(dbx.Params(probeData)).Execute()
|
||||
}
|
||||
if err != nil {
|
||||
app.Logger().Warn("Failed to update probe", "system", systemId, "probe", id, "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// handle stats collection as well
|
||||
const statsCollectionName = "network_probe_stats"
|
||||
|
||||
// we don't need the hour values for the stats collection
|
||||
stats := make(map[string]probe.Stats, len(probeResults))
|
||||
for key, result := range probeResults {
|
||||
stats[key] = probe.Stats{}.FromResult(result)
|
||||
}
|
||||
|
||||
statsRecordData := map[string]any{
|
||||
"system": systemId,
|
||||
"type": "1m",
|
||||
"created": nowMilli,
|
||||
}
|
||||
var statsJson types.JSONRaw
|
||||
if err = statsJson.Scan(stats); err == nil {
|
||||
statsRecordData["stats"] = statsJson
|
||||
switch realtimeActive {
|
||||
case true:
|
||||
collection, _ := app.FindCachedCollectionByNameOrId(statsCollectionName)
|
||||
record := core.NewRecord(collection)
|
||||
record.Load(statsRecordData)
|
||||
err = app.SaveNoValidate(record)
|
||||
default:
|
||||
statsRecordData["id"] = security.PseudorandomStringWithAlphabet(10, core.DefaultIdAlphabet)
|
||||
_, err = db.Insert(statsCollectionName, dbx.Params(statsRecordData)).Execute()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
app.Logger().Error("Failed to update probe stats", "system", systemId, "err", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createContainerRecords creates container records
|
||||
func createContainerRecords(app core.App, data []*container.Stats, systemId string) error {
|
||||
if len(data) == 0 {
|
||||
@@ -343,8 +444,8 @@ func createContainerRecords(app core.App, data []*container.Stats, systemId stri
|
||||
|
||||
// getRecord retrieves the system record from the database.
|
||||
// If the record is not found, it removes the system from the manager.
|
||||
func (sys *System) getRecord() (*core.Record, error) {
|
||||
record, err := sys.manager.hub.FindRecordById("systems", sys.Id)
|
||||
func (sys *System) getRecord(app core.App) (*core.Record, error) {
|
||||
record, err := app.FindRecordById("systems", sys.Id)
|
||||
if err != nil || record == nil {
|
||||
_ = sys.manager.RemoveSystem(sys.Id)
|
||||
return nil, err
|
||||
@@ -352,6 +453,27 @@ func (sys *System) getRecord() (*core.Record, error) {
|
||||
return record, nil
|
||||
}
|
||||
|
||||
// HasUser checks if the given user is in the system's users list.
|
||||
// Returns true if SHARE_ALL_SYSTEMS is enabled (any authenticated user can access any system).
|
||||
func (sys *System) HasUser(app core.App, user *core.Record) bool {
|
||||
if user == nil {
|
||||
return false
|
||||
}
|
||||
if v, _ := utils.GetEnv("SHARE_ALL_SYSTEMS"); v == "true" {
|
||||
return true
|
||||
}
|
||||
var recordData = struct {
|
||||
Users string
|
||||
}{}
|
||||
err := app.DB().NewQuery("SELECT users FROM systems WHERE id={:id}").
|
||||
Bind(dbx.Params{"id": sys.Id}).
|
||||
One(&recordData)
|
||||
if err != nil || recordData.Users == "" {
|
||||
return false
|
||||
}
|
||||
return strings.Contains(recordData.Users, user.Id)
|
||||
}
|
||||
|
||||
// setDown marks a system as down in the database.
|
||||
// It takes the original error that caused the system to go down and returns any error
|
||||
// encountered during the process of updating the system status.
|
||||
@@ -359,7 +481,7 @@ func (sys *System) setDown(originalError error) error {
|
||||
if sys.Status == down || sys.Status == paused {
|
||||
return nil
|
||||
}
|
||||
record, err := sys.getRecord()
|
||||
record, err := sys.getRecord(sys.manager.hub)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -518,7 +640,7 @@ func (sys *System) FetchSmartDataFromAgent() (map[string]smart.SmartData, error)
|
||||
return result, err
|
||||
}
|
||||
|
||||
func makeStableHashId(strings ...string) string {
|
||||
func MakeStableHashId(strings ...string) string {
|
||||
hash := fnv.New32a()
|
||||
for _, str := range strings {
|
||||
hash.Write([]byte(str))
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/henrygd/beszel/internal/hub/ws"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/henrygd/beszel/internal/hub/expirymap"
|
||||
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
"github.com/henrygd/beszel"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/store"
|
||||
"golang.org/x/crypto/ssh"
|
||||
@@ -317,6 +319,17 @@ func (sm *SystemManager) AddWebSocketSystem(systemId string, agentVersion semver
|
||||
if err := sm.AddRecord(systemRecord, system); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sync network probes to the newly connected agent
|
||||
go func() {
|
||||
configs := sm.GetProbeConfigsForSystem(systemId)
|
||||
if len(configs) > 0 {
|
||||
if err := system.SyncNetworkProbes(configs); err != nil {
|
||||
sm.hub.Logger().Warn("failed to sync probes to agent", "system", systemId, "err", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -329,6 +342,16 @@ func (sm *SystemManager) resetFailedSmartFetchState(systemID string) {
|
||||
}
|
||||
}
|
||||
|
||||
// GetProbeConfigsForSystem returns all enabled probe configs for a system.
|
||||
func (sm *SystemManager) GetProbeConfigsForSystem(systemID string) []probe.Config {
|
||||
var configs []probe.Config
|
||||
_ = sm.hub.DB().
|
||||
NewQuery("SELECT id, target, protocol, port, interval FROM network_probes WHERE system = {:system} AND enabled = true").
|
||||
Bind(dbx.Params{"system": systemID}).
|
||||
All(&configs)
|
||||
return configs
|
||||
}
|
||||
|
||||
// createSSHClientConfig initializes the SSH client configuration for connecting to an agent's server
|
||||
func (sm *SystemManager) createSSHClientConfig() error {
|
||||
privateKey, err := sm.hub.GetSSHKey("")
|
||||
|
||||
48
internal/hub/systems/system_probes.go
Normal file
48
internal/hub/systems/system_probes.go
Normal file
@@ -0,0 +1,48 @@
|
||||
package systems
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/common"
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
)
|
||||
|
||||
// SyncNetworkProbes sends probe configurations to the agent.
|
||||
func (sys *System) SyncNetworkProbes(configs []probe.Config) error {
|
||||
_, err := sys.syncNetworkProbes(probe.SyncRequest{Action: probe.SyncActionReplace, Configs: configs})
|
||||
return err
|
||||
}
|
||||
|
||||
// UpsertNetworkProbe sends a single probe configuration change to the agent.
|
||||
func (sys *System) UpsertNetworkProbe(config probe.Config, runNow bool) (*probe.Result, error) {
|
||||
resp, err := sys.syncNetworkProbes(probe.SyncRequest{
|
||||
Action: probe.SyncActionUpsert,
|
||||
Config: config,
|
||||
RunNow: runNow,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.Result == (probe.Result{}) {
|
||||
return nil, nil
|
||||
}
|
||||
result := resp.Result
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// DeleteNetworkProbe removes a single probe task from the agent.
|
||||
func (sys *System) DeleteNetworkProbe(id string) error {
|
||||
_, err := sys.syncNetworkProbes(probe.SyncRequest{
|
||||
Action: probe.SyncActionDelete,
|
||||
Config: probe.Config{ID: id},
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (sys *System) syncNetworkProbes(req probe.SyncRequest) (probe.SyncResponse, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
var result probe.SyncResponse
|
||||
return result, sys.request(ctx, common.SyncNetworkProbes, req, &result)
|
||||
}
|
||||
@@ -84,7 +84,7 @@ func (sys *System) saveSmartDevices(smartData map[string]smart.SmartData) error
|
||||
|
||||
func (sys *System) upsertSmartDeviceRecord(collection *core.Collection, deviceKey string, device smart.SmartData) error {
|
||||
hub := sys.manager.hub
|
||||
recordID := makeStableHashId(sys.Id, deviceKey)
|
||||
recordID := MakeStableHashId(sys.Id, deviceKey)
|
||||
|
||||
record, err := hub.FindRecordById(collection, recordID)
|
||||
if err != nil {
|
||||
|
||||
@@ -14,9 +14,9 @@ func TestGetSystemdServiceId(t *testing.T) {
|
||||
serviceName := "nginx.service"
|
||||
|
||||
// Call multiple times and ensure same result
|
||||
id1 := makeStableHashId(systemId, serviceName)
|
||||
id2 := makeStableHashId(systemId, serviceName)
|
||||
id3 := makeStableHashId(systemId, serviceName)
|
||||
id1 := MakeStableHashId(systemId, serviceName)
|
||||
id2 := MakeStableHashId(systemId, serviceName)
|
||||
id3 := MakeStableHashId(systemId, serviceName)
|
||||
|
||||
assert.Equal(t, id1, id2)
|
||||
assert.Equal(t, id2, id3)
|
||||
@@ -29,10 +29,10 @@ func TestGetSystemdServiceId(t *testing.T) {
|
||||
serviceName1 := "nginx.service"
|
||||
serviceName2 := "apache.service"
|
||||
|
||||
id1 := makeStableHashId(systemId1, serviceName1)
|
||||
id2 := makeStableHashId(systemId2, serviceName1)
|
||||
id3 := makeStableHashId(systemId1, serviceName2)
|
||||
id4 := makeStableHashId(systemId2, serviceName2)
|
||||
id1 := MakeStableHashId(systemId1, serviceName1)
|
||||
id2 := MakeStableHashId(systemId2, serviceName1)
|
||||
id3 := MakeStableHashId(systemId1, serviceName2)
|
||||
id4 := MakeStableHashId(systemId2, serviceName2)
|
||||
|
||||
// All IDs should be different
|
||||
assert.NotEqual(t, id1, id2)
|
||||
@@ -56,14 +56,14 @@ func TestGetSystemdServiceId(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
id := makeStableHashId(tc.systemId, tc.serviceName)
|
||||
id := MakeStableHashId(tc.systemId, tc.serviceName)
|
||||
// FNV-32 produces 8 hex characters
|
||||
assert.Len(t, id, 8, "ID should be 8 characters for systemId='%s', serviceName='%s'", tc.systemId, tc.serviceName)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("hexadecimal output", func(t *testing.T) {
|
||||
id := makeStableHashId("test-system", "test-service")
|
||||
id := MakeStableHashId("test-system", "test-service")
|
||||
assert.NotEmpty(t, id)
|
||||
|
||||
// Should only contain hexadecimal characters
|
||||
|
||||
@@ -421,3 +421,60 @@ func testOld(t *testing.T, hub *tests.TestHub) {
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestHasUser(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
sm := hub.GetSystemManager()
|
||||
err = sm.Initialize()
|
||||
require.NoError(t, err)
|
||||
|
||||
user1, err := tests.CreateUser(hub, "user1@test.com", "password123")
|
||||
require.NoError(t, err)
|
||||
user2, err := tests.CreateUser(hub, "user2@test.com", "password123")
|
||||
require.NoError(t, err)
|
||||
|
||||
systemRecord, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "has-user-test",
|
||||
"host": "127.0.0.1",
|
||||
"port": "33914",
|
||||
"users": []string{user1.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
sys, err := sm.GetSystemFromStore(systemRecord.Id)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Run("user in list returns true", func(t *testing.T) {
|
||||
assert.True(t, sys.HasUser(hub, user1))
|
||||
})
|
||||
|
||||
t.Run("user not in list returns false", func(t *testing.T) {
|
||||
assert.False(t, sys.HasUser(hub, user2))
|
||||
})
|
||||
|
||||
t.Run("unknown user ID returns false", func(t *testing.T) {
|
||||
assert.False(t, sys.HasUser(hub, nil))
|
||||
})
|
||||
|
||||
t.Run("SHARE_ALL_SYSTEMS=true grants access to non-member", func(t *testing.T) {
|
||||
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||
assert.True(t, sys.HasUser(hub, user2))
|
||||
})
|
||||
|
||||
t.Run("BESZEL_HUB_SHARE_ALL_SYSTEMS=true grants access to non-member", func(t *testing.T) {
|
||||
t.Setenv("BESZEL_HUB_SHARE_ALL_SYSTEMS", "true")
|
||||
assert.True(t, sys.HasUser(hub, user2))
|
||||
})
|
||||
|
||||
t.Run("additional user works", func(t *testing.T) {
|
||||
assert.False(t, sys.HasUser(hub, user2))
|
||||
systemRecord.Set("users", []string{user1.Id, user2.Id})
|
||||
err = hub.Save(systemRecord)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, sys.HasUser(hub, user1))
|
||||
assert.True(t, sys.HasUser(hub, user2))
|
||||
})
|
||||
}
|
||||
|
||||
39
internal/hub/utils/utils.go
Normal file
39
internal/hub/utils/utils.go
Normal file
@@ -0,0 +1,39 @@
|
||||
// Package utils provides utility functions for the hub.
|
||||
package utils
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
)
|
||||
|
||||
// GetEnv retrieves an environment variable with a "BESZEL_HUB_" prefix, or falls back to the unprefixed key.
|
||||
func GetEnv(key string) (value string, exists bool) {
|
||||
if value, exists = os.LookupEnv("BESZEL_HUB_" + key); exists {
|
||||
return value, exists
|
||||
}
|
||||
return os.LookupEnv(key)
|
||||
}
|
||||
|
||||
// realtimeActiveForCollection checks if there are active WebSocket subscriptions for the given collection.
|
||||
func RealtimeActiveForCollection(app core.App, collectionName string, validateFn func(filterQuery string) bool) bool {
|
||||
broker := app.SubscriptionsBroker()
|
||||
if broker.TotalClients() == 0 {
|
||||
return false
|
||||
}
|
||||
for _, client := range broker.Clients() {
|
||||
subs := client.Subscriptions(collectionName)
|
||||
if len(subs) > 0 {
|
||||
if validateFn == nil {
|
||||
return true
|
||||
}
|
||||
for k := range subs {
|
||||
filter := subs[k].Query["filter"]
|
||||
if validateFn(filter) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1699,6 +1699,288 @@ func init() {
|
||||
"type": "base",
|
||||
"updateRule": null,
|
||||
"viewRule": null
|
||||
},
|
||||
{
|
||||
"createRule": null,
|
||||
"deleteRule": null,
|
||||
"fields": [
|
||||
{
|
||||
"autogeneratePattern": "[a-z0-9]{10}",
|
||||
"hidden": false,
|
||||
"id": "text3208210256",
|
||||
"max": 10,
|
||||
"min": 6,
|
||||
"name": "id",
|
||||
"pattern": "^[a-z0-9]+$",
|
||||
"presentable": false,
|
||||
"primaryKey": true,
|
||||
"required": true,
|
||||
"system": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"cascadeDelete": true,
|
||||
"collectionId": "2hz5ncl8tizk5nx",
|
||||
"hidden": false,
|
||||
"id": "np_system",
|
||||
"maxSelect": 1,
|
||||
"minSelect": 0,
|
||||
"name": "system",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "relation"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "np_name",
|
||||
"max": 200,
|
||||
"min": 0,
|
||||
"name": "name",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"autogeneratePattern": "",
|
||||
"hidden": false,
|
||||
"id": "np_target",
|
||||
"max": 500,
|
||||
"min": 1,
|
||||
"name": "target",
|
||||
"pattern": "",
|
||||
"presentable": false,
|
||||
"primaryKey": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "np_protocol",
|
||||
"maxSelect": 1,
|
||||
"name": "protocol",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "select",
|
||||
"values": [
|
||||
"icmp",
|
||||
"tcp",
|
||||
"http"
|
||||
]
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "np_port",
|
||||
"max": 65535,
|
||||
"min": 0,
|
||||
"name": "port",
|
||||
"onlyInt": true,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "np_interval",
|
||||
"max": 3600,
|
||||
"min": 1,
|
||||
"name": "interval",
|
||||
"onlyInt": true,
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number926446584",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "res",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number1006954605",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "resAvg1h",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number4267669802",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "resMin1h",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number591433223",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "resMax1h",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number3726709001",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "loss1h",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "np_enabled",
|
||||
"name": "enabled",
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "bool"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "autodate2990389176",
|
||||
"name": "created",
|
||||
"onCreate": true,
|
||||
"onUpdate": false,
|
||||
"presentable": false,
|
||||
"system": false,
|
||||
"type": "autodate"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "date3332085495",
|
||||
"max": "",
|
||||
"min": "",
|
||||
"name": "updated",
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "date"
|
||||
}
|
||||
],
|
||||
"id": "np_probes_001",
|
||||
"indexes": [
|
||||
"CREATE INDEX ` + "`" + `idx_np_system_enabled` + "`" + ` ON ` + "`" + `network_probes` + "`" + ` (` + "`" + `system` + "`" + `)"
|
||||
],
|
||||
"listRule": null,
|
||||
"name": "network_probes",
|
||||
"system": false,
|
||||
"type": "base",
|
||||
"updateRule": null,
|
||||
"viewRule": null
|
||||
},
|
||||
{
|
||||
"createRule": null,
|
||||
"deleteRule": null,
|
||||
"fields": [
|
||||
{
|
||||
"autogeneratePattern": "[a-z0-9]{10}",
|
||||
"hidden": false,
|
||||
"id": "text3208210256",
|
||||
"max": 10,
|
||||
"min": 10,
|
||||
"name": "id",
|
||||
"pattern": "^[a-z0-9]+$",
|
||||
"presentable": false,
|
||||
"primaryKey": true,
|
||||
"required": true,
|
||||
"system": true,
|
||||
"type": "text"
|
||||
},
|
||||
{
|
||||
"cascadeDelete": true,
|
||||
"collectionId": "2hz5ncl8tizk5nx",
|
||||
"hidden": false,
|
||||
"id": "nps_system",
|
||||
"maxSelect": 1,
|
||||
"minSelect": 0,
|
||||
"name": "system",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "relation"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "nps_stats",
|
||||
"maxSize": 2000000,
|
||||
"name": "stats",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "json"
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "nps_type",
|
||||
"maxSelect": 1,
|
||||
"name": "type",
|
||||
"presentable": false,
|
||||
"required": true,
|
||||
"system": false,
|
||||
"type": "select",
|
||||
"values": [
|
||||
"1m",
|
||||
"10m",
|
||||
"20m",
|
||||
"120m",
|
||||
"480m"
|
||||
]
|
||||
},
|
||||
{
|
||||
"hidden": false,
|
||||
"id": "number2990389176",
|
||||
"max": null,
|
||||
"min": null,
|
||||
"name": "created",
|
||||
"onlyInt": false,
|
||||
"presentable": false,
|
||||
"required": false,
|
||||
"system": false,
|
||||
"type": "number"
|
||||
}
|
||||
],
|
||||
"id": "np_stats_001",
|
||||
"indexes": [
|
||||
"CREATE INDEX ` + "`" + `idx_nps_system_type_created` + "`" + ` ON ` + "`" + `network_probe_stats` + "`" + ` (\n ` + "`" + `system` + "`" + `,\n ` + "`" + `type` + "`" + `,\n ` + "`" + `created` + "`" + `\n)"
|
||||
],
|
||||
"listRule": null,
|
||||
"name": "network_probe_stats",
|
||||
"system": false,
|
||||
"type": "base",
|
||||
"updateRule": null,
|
||||
"viewRule": null
|
||||
}
|
||||
]`
|
||||
|
||||
57
internal/records/probe_averaging_test.go
Normal file
57
internal/records/probe_averaging_test.go
Normal file
@@ -0,0 +1,57 @@
|
||||
//go:build testing
|
||||
|
||||
package records_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/records"
|
||||
"github.com/henrygd/beszel/internal/tests"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAverageProbeStats(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
rm := records.NewRecordManager(hub)
|
||||
user, err := tests.CreateUser(hub, "probe-avg@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "probe-avg-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
recordA, err := tests.CreateRecord(hub, "network_probe_stats", map[string]any{
|
||||
"system": system.Id,
|
||||
"type": "1m",
|
||||
"stats": `{"icmp:1.1.1.1":[10,5,20,1.5]}`,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
recordB, err := tests.CreateRecord(hub, "network_probe_stats", map[string]any{
|
||||
"system": system.Id,
|
||||
"type": "1m",
|
||||
"stats": `{"icmp:1.1.1.1":[22.5,10,60,0]}`,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
result := rm.AverageProbeStats(hub.DB(), records.RecordIds{
|
||||
{Id: recordA.Id},
|
||||
{Id: recordB.Id},
|
||||
})
|
||||
|
||||
stats, ok := result["icmp:1.1.1.1"]
|
||||
require.True(t, ok)
|
||||
require.Len(t, stats, 4)
|
||||
assert.InDelta(t, 16.25, stats[0], 0.001) // avg of avg
|
||||
assert.InDelta(t, 5, stats[1], 0.001) // min of mins
|
||||
assert.InDelta(t, 60, stats[2], 0.001) // max of maxes
|
||||
assert.InDelta(t, 0.75, stats[3], 0.001) // avg of packet loss
|
||||
}
|
||||
@@ -3,17 +3,17 @@ package records
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/probe"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
)
|
||||
|
||||
type RecordManager struct {
|
||||
@@ -39,19 +39,9 @@ type StatsRecord struct {
|
||||
Stats []byte `db:"stats"`
|
||||
}
|
||||
|
||||
// global variables for reusing allocations
|
||||
var (
|
||||
statsRecord StatsRecord
|
||||
containerStats []container.Stats
|
||||
sumStats system.Stats
|
||||
tempStats system.Stats
|
||||
queryParams = make(dbx.Params, 1)
|
||||
containerSums = make(map[string]*container.Stats)
|
||||
)
|
||||
|
||||
// Create longer records by averaging shorter records
|
||||
func (rm *RecordManager) CreateLongerRecords() {
|
||||
// start := time.Now()
|
||||
now := time.Now().UTC()
|
||||
longerRecordData := []LongerRecordData{
|
||||
{
|
||||
shorterType: "1m",
|
||||
@@ -82,7 +72,8 @@ func (rm *RecordManager) CreateLongerRecords() {
|
||||
// wrap the operations in a transaction
|
||||
rm.app.RunInTransaction(func(txApp core.App) error {
|
||||
var err error
|
||||
collections := [2]*core.Collection{}
|
||||
|
||||
collections := [3]*core.Collection{}
|
||||
collections[0], err = txApp.FindCachedCollectionByNameOrId("system_stats")
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -91,6 +82,10 @@ func (rm *RecordManager) CreateLongerRecords() {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
collections[2], err = txApp.FindCachedCollectionByNameOrId("network_probe_stats")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var systems RecordIds
|
||||
db := txApp.DB()
|
||||
|
||||
@@ -103,55 +98,71 @@ func (rm *RecordManager) CreateLongerRecords() {
|
||||
recordData := longerRecordData[i]
|
||||
// log.Println("processing longer record type", recordData.longerType)
|
||||
// add one minute padding for longer records because they are created slightly later than the job start time
|
||||
longerRecordPeriod := time.Now().UTC().Add(recordData.longerTimeDuration + time.Minute)
|
||||
longerRecordPeriod := now.Add(recordData.longerTimeDuration + time.Minute)
|
||||
// shorter records are created independently of longer records, so we shouldn't need to add padding
|
||||
shorterRecordPeriod := time.Now().UTC().Add(recordData.longerTimeDuration)
|
||||
shorterRecordPeriod := now.Add(recordData.longerTimeDuration)
|
||||
// loop through both collections
|
||||
for _, collection := range collections {
|
||||
// check creation time of last longer record if not 10m, since 10m is created every run
|
||||
if recordData.longerType != "10m" {
|
||||
count, err := txApp.CountRecords(
|
||||
collection.Id,
|
||||
dbx.NewExp(
|
||||
"system = {:system} AND type = {:type} AND created > {:created}",
|
||||
dbx.Params{"type": recordData.longerType, "system": system.Id, "created": longerRecordPeriod},
|
||||
),
|
||||
)
|
||||
var existingRecord struct {
|
||||
Id string
|
||||
}
|
||||
|
||||
params := dbx.Params{
|
||||
"type": recordData.longerType,
|
||||
"system": system.Id,
|
||||
"created": getCreatedTimeField(collection.Name, longerRecordPeriod),
|
||||
}
|
||||
|
||||
_ = db.Select("id").
|
||||
From(collection.Name).
|
||||
Where(dbx.NewExp("system = {:system} AND type = {:type} AND created > {:created}", params)).
|
||||
Limit(1).
|
||||
One(&existingRecord)
|
||||
|
||||
// continue if longer record exists
|
||||
if err != nil || count > 0 {
|
||||
if existingRecord.Id != "" {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// get shorter records from the past x minutes
|
||||
var recordIds RecordIds
|
||||
|
||||
err := txApp.DB().
|
||||
params := dbx.Params{
|
||||
"type": recordData.shorterType,
|
||||
"system": system.Id,
|
||||
"created": getCreatedTimeField(collection.Name, shorterRecordPeriod),
|
||||
}
|
||||
|
||||
_ = txApp.DB().
|
||||
Select("id").
|
||||
From(collection.Name).
|
||||
AndWhere(dbx.NewExp(
|
||||
Where(dbx.NewExp(
|
||||
"system={:system} AND type={:type} AND created > {:created}",
|
||||
dbx.Params{
|
||||
"type": recordData.shorterType,
|
||||
"system": system.Id,
|
||||
"created": shorterRecordPeriod,
|
||||
},
|
||||
params,
|
||||
)).
|
||||
All(&recordIds)
|
||||
|
||||
// continue if not enough shorter records
|
||||
if err != nil || len(recordIds) < recordData.minShorterRecords {
|
||||
if len(recordIds) < recordData.minShorterRecords {
|
||||
continue
|
||||
}
|
||||
// average the shorter records and create longer record
|
||||
longerRecord := core.NewRecord(collection)
|
||||
longerRecord.Set("system", system.Id)
|
||||
longerRecord.Set("type", recordData.longerType)
|
||||
// network_probe_stats uses created as unix timestamp in milliseconds, so we need to set it manually here instead of relying on the default created field
|
||||
if collection.Name == "network_probe_stats" {
|
||||
longerRecord.Set("created", now.UnixMilli())
|
||||
}
|
||||
switch collection.Name {
|
||||
case "system_stats":
|
||||
longerRecord.Set("stats", rm.AverageSystemStats(db, recordIds))
|
||||
case "container_stats":
|
||||
|
||||
longerRecord.Set("stats", rm.AverageContainerStats(db, recordIds))
|
||||
case "network_probe_stats":
|
||||
longerRecord.Set("stats", rm.AverageProbeStats(db, recordIds))
|
||||
}
|
||||
if err := txApp.SaveNoValidate(longerRecord); err != nil {
|
||||
log.Println("failed to save longer record", "err", err)
|
||||
@@ -163,41 +174,54 @@ func (rm *RecordManager) CreateLongerRecords() {
|
||||
return nil
|
||||
})
|
||||
|
||||
statsRecord.Stats = statsRecord.Stats[:0]
|
||||
// slog.Info("finished creating longer records", "time (ms)", time.Since(now).Milliseconds())
|
||||
}
|
||||
|
||||
// log.Println("finished creating longer records", "time (ms)", time.Since(start).Milliseconds())
|
||||
func getCreatedTimeField(collectionName string, period time.Time) any {
|
||||
if collectionName == "network_probe_stats" {
|
||||
return period.UnixMilli()
|
||||
}
|
||||
return period.Format(types.DefaultDateLayout)
|
||||
}
|
||||
|
||||
// Calculate the average stats of a list of system_stats records without reflect
|
||||
func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *system.Stats {
|
||||
// Clear/reset global structs for reuse
|
||||
sumStats = system.Stats{}
|
||||
tempStats = system.Stats{}
|
||||
sum := &sumStats
|
||||
stats := &tempStats
|
||||
stats := make([]system.Stats, 0, len(records))
|
||||
var row StatsRecord
|
||||
params := make(dbx.Params, 1)
|
||||
for _, rec := range records {
|
||||
row.Stats = row.Stats[:0]
|
||||
params["id"] = rec.Id
|
||||
db.NewQuery("SELECT stats FROM system_stats WHERE id = {:id}").Bind(params).One(&row)
|
||||
var s system.Stats
|
||||
if err := json.Unmarshal(row.Stats, &s); err != nil {
|
||||
continue
|
||||
}
|
||||
stats = append(stats, s)
|
||||
}
|
||||
result := AverageSystemStatsSlice(stats)
|
||||
return &result
|
||||
}
|
||||
|
||||
// AverageSystemStatsSlice computes the average of a slice of system stats.
|
||||
func AverageSystemStatsSlice(records []system.Stats) system.Stats {
|
||||
var sum system.Stats
|
||||
count := float64(len(records))
|
||||
if count == 0 {
|
||||
return sum
|
||||
}
|
||||
|
||||
// necessary because uint8 is not big enough for the sum
|
||||
batterySum := 0
|
||||
// accumulate per-core usage across records
|
||||
var cpuCoresSums []uint64
|
||||
// accumulate cpu breakdown [user, system, iowait, steal, idle]
|
||||
var cpuBreakdownSums []float64
|
||||
|
||||
count := float64(len(records))
|
||||
tempCount := float64(0)
|
||||
|
||||
// Accumulate totals
|
||||
for _, record := range records {
|
||||
id := record.Id
|
||||
// clear global statsRecord for reuse
|
||||
statsRecord.Stats = statsRecord.Stats[:0]
|
||||
// reset tempStats each iteration to avoid omitzero fields retaining stale values
|
||||
*stats = system.Stats{}
|
||||
|
||||
queryParams["id"] = id
|
||||
db.NewQuery("SELECT stats FROM system_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
|
||||
if err := json.Unmarshal(statsRecord.Stats, stats); err != nil {
|
||||
continue
|
||||
}
|
||||
for i := range records {
|
||||
stats := &records[i]
|
||||
|
||||
sum.Cpu += stats.Cpu
|
||||
// accumulate cpu time breakdowns if present
|
||||
@@ -205,8 +229,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
if len(cpuBreakdownSums) < len(stats.CpuBreakdown) {
|
||||
cpuBreakdownSums = append(cpuBreakdownSums, make([]float64, len(stats.CpuBreakdown)-len(cpuBreakdownSums))...)
|
||||
}
|
||||
for i, v := range stats.CpuBreakdown {
|
||||
cpuBreakdownSums[i] += v
|
||||
for j, v := range stats.CpuBreakdown {
|
||||
cpuBreakdownSums[j] += v
|
||||
}
|
||||
}
|
||||
sum.Mem += stats.Mem
|
||||
@@ -230,6 +254,9 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
sum.Bandwidth[1] += stats.Bandwidth[1]
|
||||
sum.DiskIO[0] += stats.DiskIO[0]
|
||||
sum.DiskIO[1] += stats.DiskIO[1]
|
||||
for i := range stats.DiskIoStats {
|
||||
sum.DiskIoStats[i] += stats.DiskIoStats[i]
|
||||
}
|
||||
batterySum += int(stats.Battery[0])
|
||||
sum.Battery[1] = stats.Battery[1]
|
||||
|
||||
@@ -239,8 +266,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
// extend slices to accommodate core count
|
||||
cpuCoresSums = append(cpuCoresSums, make([]uint64, len(stats.CpuCoresUsage)-len(cpuCoresSums))...)
|
||||
}
|
||||
for i, v := range stats.CpuCoresUsage {
|
||||
cpuCoresSums[i] += uint64(v)
|
||||
for j, v := range stats.CpuCoresUsage {
|
||||
cpuCoresSums[j] += uint64(v)
|
||||
}
|
||||
}
|
||||
// Set peak values
|
||||
@@ -254,6 +281,9 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
sum.MaxBandwidth[1] = max(sum.MaxBandwidth[1], stats.MaxBandwidth[1], stats.Bandwidth[1])
|
||||
sum.MaxDiskIO[0] = max(sum.MaxDiskIO[0], stats.MaxDiskIO[0], stats.DiskIO[0])
|
||||
sum.MaxDiskIO[1] = max(sum.MaxDiskIO[1], stats.MaxDiskIO[1], stats.DiskIO[1])
|
||||
for i := range stats.DiskIoStats {
|
||||
sum.MaxDiskIoStats[i] = max(sum.MaxDiskIoStats[i], stats.MaxDiskIoStats[i], stats.DiskIoStats[i])
|
||||
}
|
||||
|
||||
// Accumulate network interfaces
|
||||
if sum.NetworkInterfaces == nil {
|
||||
@@ -299,6 +329,10 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
fs.DiskWriteBytes += value.DiskWriteBytes
|
||||
fs.MaxDiskReadBytes = max(fs.MaxDiskReadBytes, value.MaxDiskReadBytes, value.DiskReadBytes)
|
||||
fs.MaxDiskWriteBytes = max(fs.MaxDiskWriteBytes, value.MaxDiskWriteBytes, value.DiskWriteBytes)
|
||||
for i := range value.DiskIoStats {
|
||||
fs.DiskIoStats[i] += value.DiskIoStats[i]
|
||||
fs.MaxDiskIoStats[i] = max(fs.MaxDiskIoStats[i], value.MaxDiskIoStats[i], value.DiskIoStats[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -333,103 +367,107 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
}
|
||||
}
|
||||
|
||||
// Compute averages in place
|
||||
if count > 0 {
|
||||
sum.Cpu = twoDecimals(sum.Cpu / count)
|
||||
sum.Mem = twoDecimals(sum.Mem / count)
|
||||
sum.MemUsed = twoDecimals(sum.MemUsed / count)
|
||||
sum.MemPct = twoDecimals(sum.MemPct / count)
|
||||
sum.MemBuffCache = twoDecimals(sum.MemBuffCache / count)
|
||||
sum.MemZfsArc = twoDecimals(sum.MemZfsArc / count)
|
||||
sum.Swap = twoDecimals(sum.Swap / count)
|
||||
sum.SwapUsed = twoDecimals(sum.SwapUsed / count)
|
||||
sum.DiskTotal = twoDecimals(sum.DiskTotal / count)
|
||||
sum.DiskUsed = twoDecimals(sum.DiskUsed / count)
|
||||
sum.DiskPct = twoDecimals(sum.DiskPct / count)
|
||||
sum.DiskReadPs = twoDecimals(sum.DiskReadPs / count)
|
||||
sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
|
||||
sum.DiskIO[0] = sum.DiskIO[0] / uint64(count)
|
||||
sum.DiskIO[1] = sum.DiskIO[1] / uint64(count)
|
||||
sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
|
||||
sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
|
||||
sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
|
||||
sum.LoadAvg[1] = twoDecimals(sum.LoadAvg[1] / count)
|
||||
sum.LoadAvg[2] = twoDecimals(sum.LoadAvg[2] / count)
|
||||
sum.Bandwidth[0] = sum.Bandwidth[0] / uint64(count)
|
||||
sum.Bandwidth[1] = sum.Bandwidth[1] / uint64(count)
|
||||
sum.Battery[0] = uint8(batterySum / int(count))
|
||||
// Compute averages
|
||||
sum.Cpu = twoDecimals(sum.Cpu / count)
|
||||
sum.Mem = twoDecimals(sum.Mem / count)
|
||||
sum.MemUsed = twoDecimals(sum.MemUsed / count)
|
||||
sum.MemPct = twoDecimals(sum.MemPct / count)
|
||||
sum.MemBuffCache = twoDecimals(sum.MemBuffCache / count)
|
||||
sum.MemZfsArc = twoDecimals(sum.MemZfsArc / count)
|
||||
sum.Swap = twoDecimals(sum.Swap / count)
|
||||
sum.SwapUsed = twoDecimals(sum.SwapUsed / count)
|
||||
sum.DiskTotal = twoDecimals(sum.DiskTotal / count)
|
||||
sum.DiskUsed = twoDecimals(sum.DiskUsed / count)
|
||||
sum.DiskPct = twoDecimals(sum.DiskPct / count)
|
||||
sum.DiskReadPs = twoDecimals(sum.DiskReadPs / count)
|
||||
sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
|
||||
sum.DiskIO[0] = sum.DiskIO[0] / uint64(count)
|
||||
sum.DiskIO[1] = sum.DiskIO[1] / uint64(count)
|
||||
for i := range sum.DiskIoStats {
|
||||
sum.DiskIoStats[i] = twoDecimals(sum.DiskIoStats[i] / count)
|
||||
}
|
||||
sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
|
||||
sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
|
||||
sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
|
||||
sum.LoadAvg[1] = twoDecimals(sum.LoadAvg[1] / count)
|
||||
sum.LoadAvg[2] = twoDecimals(sum.LoadAvg[2] / count)
|
||||
sum.Bandwidth[0] = sum.Bandwidth[0] / uint64(count)
|
||||
sum.Bandwidth[1] = sum.Bandwidth[1] / uint64(count)
|
||||
sum.Battery[0] = uint8(batterySum / int(count))
|
||||
|
||||
// Average network interfaces
|
||||
if sum.NetworkInterfaces != nil {
|
||||
for key := range sum.NetworkInterfaces {
|
||||
sum.NetworkInterfaces[key] = [4]uint64{
|
||||
sum.NetworkInterfaces[key][0] / uint64(count),
|
||||
sum.NetworkInterfaces[key][1] / uint64(count),
|
||||
sum.NetworkInterfaces[key][2],
|
||||
sum.NetworkInterfaces[key][3],
|
||||
// Average network interfaces
|
||||
if sum.NetworkInterfaces != nil {
|
||||
for key := range sum.NetworkInterfaces {
|
||||
sum.NetworkInterfaces[key] = [4]uint64{
|
||||
sum.NetworkInterfaces[key][0] / uint64(count),
|
||||
sum.NetworkInterfaces[key][1] / uint64(count),
|
||||
sum.NetworkInterfaces[key][2],
|
||||
sum.NetworkInterfaces[key][3],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Average temperatures
|
||||
if sum.Temperatures != nil && tempCount > 0 {
|
||||
for key := range sum.Temperatures {
|
||||
sum.Temperatures[key] = twoDecimals(sum.Temperatures[key] / tempCount)
|
||||
}
|
||||
}
|
||||
|
||||
// Average extra filesystem stats
|
||||
if sum.ExtraFs != nil {
|
||||
for key := range sum.ExtraFs {
|
||||
fs := sum.ExtraFs[key]
|
||||
fs.DiskTotal = twoDecimals(fs.DiskTotal / count)
|
||||
fs.DiskUsed = twoDecimals(fs.DiskUsed / count)
|
||||
fs.DiskWritePs = twoDecimals(fs.DiskWritePs / count)
|
||||
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
|
||||
fs.DiskReadBytes = fs.DiskReadBytes / uint64(count)
|
||||
fs.DiskWriteBytes = fs.DiskWriteBytes / uint64(count)
|
||||
for i := range fs.DiskIoStats {
|
||||
fs.DiskIoStats[i] = twoDecimals(fs.DiskIoStats[i] / count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Average GPU data
|
||||
if sum.GPUData != nil {
|
||||
for id := range sum.GPUData {
|
||||
gpu := sum.GPUData[id]
|
||||
gpu.Temperature = twoDecimals(gpu.Temperature / count)
|
||||
gpu.MemoryUsed = twoDecimals(gpu.MemoryUsed / count)
|
||||
gpu.MemoryTotal = twoDecimals(gpu.MemoryTotal / count)
|
||||
gpu.Usage = twoDecimals(gpu.Usage / count)
|
||||
gpu.Power = twoDecimals(gpu.Power / count)
|
||||
gpu.Count = twoDecimals(gpu.Count / count)
|
||||
|
||||
if gpu.Engines != nil {
|
||||
for engineKey := range gpu.Engines {
|
||||
gpu.Engines[engineKey] = twoDecimals(gpu.Engines[engineKey] / count)
|
||||
}
|
||||
}
|
||||
|
||||
sum.GPUData[id] = gpu
|
||||
}
|
||||
}
|
||||
|
||||
// Average temperatures
|
||||
if sum.Temperatures != nil && tempCount > 0 {
|
||||
for key := range sum.Temperatures {
|
||||
sum.Temperatures[key] = twoDecimals(sum.Temperatures[key] / tempCount)
|
||||
}
|
||||
// Average per-core usage
|
||||
if len(cpuCoresSums) > 0 {
|
||||
avg := make(system.Uint8Slice, len(cpuCoresSums))
|
||||
for i := range cpuCoresSums {
|
||||
v := math.Round(float64(cpuCoresSums[i]) / count)
|
||||
avg[i] = uint8(v)
|
||||
}
|
||||
sum.CpuCoresUsage = avg
|
||||
}
|
||||
|
||||
// Average extra filesystem stats
|
||||
if sum.ExtraFs != nil {
|
||||
for key := range sum.ExtraFs {
|
||||
fs := sum.ExtraFs[key]
|
||||
fs.DiskTotal = twoDecimals(fs.DiskTotal / count)
|
||||
fs.DiskUsed = twoDecimals(fs.DiskUsed / count)
|
||||
fs.DiskWritePs = twoDecimals(fs.DiskWritePs / count)
|
||||
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
|
||||
fs.DiskReadBytes = fs.DiskReadBytes / uint64(count)
|
||||
fs.DiskWriteBytes = fs.DiskWriteBytes / uint64(count)
|
||||
}
|
||||
}
|
||||
|
||||
// Average GPU data
|
||||
if sum.GPUData != nil {
|
||||
for id := range sum.GPUData {
|
||||
gpu := sum.GPUData[id]
|
||||
gpu.Temperature = twoDecimals(gpu.Temperature / count)
|
||||
gpu.MemoryUsed = twoDecimals(gpu.MemoryUsed / count)
|
||||
gpu.MemoryTotal = twoDecimals(gpu.MemoryTotal / count)
|
||||
gpu.Usage = twoDecimals(gpu.Usage / count)
|
||||
gpu.Power = twoDecimals(gpu.Power / count)
|
||||
gpu.Count = twoDecimals(gpu.Count / count)
|
||||
|
||||
if gpu.Engines != nil {
|
||||
for engineKey := range gpu.Engines {
|
||||
gpu.Engines[engineKey] = twoDecimals(gpu.Engines[engineKey] / count)
|
||||
}
|
||||
}
|
||||
|
||||
sum.GPUData[id] = gpu
|
||||
}
|
||||
}
|
||||
|
||||
// Average per-core usage
|
||||
if len(cpuCoresSums) > 0 {
|
||||
avg := make(system.Uint8Slice, len(cpuCoresSums))
|
||||
for i := range cpuCoresSums {
|
||||
v := math.Round(float64(cpuCoresSums[i]) / count)
|
||||
avg[i] = uint8(v)
|
||||
}
|
||||
sum.CpuCoresUsage = avg
|
||||
}
|
||||
|
||||
// Average CPU breakdown
|
||||
if len(cpuBreakdownSums) > 0 {
|
||||
avg := make([]float64, len(cpuBreakdownSums))
|
||||
for i := range cpuBreakdownSums {
|
||||
avg[i] = twoDecimals(cpuBreakdownSums[i] / count)
|
||||
}
|
||||
sum.CpuBreakdown = avg
|
||||
// Average CPU breakdown
|
||||
if len(cpuBreakdownSums) > 0 {
|
||||
avg := make([]float64, len(cpuBreakdownSums))
|
||||
for i := range cpuBreakdownSums {
|
||||
avg[i] = twoDecimals(cpuBreakdownSums[i] / count)
|
||||
}
|
||||
sum.CpuBreakdown = avg
|
||||
}
|
||||
|
||||
return sum
|
||||
@@ -437,29 +475,33 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
||||
|
||||
// Calculate the average stats of a list of container_stats records
|
||||
func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds) []container.Stats {
|
||||
// Clear global map for reuse
|
||||
for k := range containerSums {
|
||||
delete(containerSums, k)
|
||||
}
|
||||
sums := containerSums
|
||||
count := float64(len(records))
|
||||
|
||||
for i := range records {
|
||||
id := records[i].Id
|
||||
// clear global statsRecord for reuse
|
||||
statsRecord.Stats = statsRecord.Stats[:0]
|
||||
// must set to nil (not [:0]) to avoid json.Unmarshal reusing backing array
|
||||
// which causes omitzero fields to inherit stale values from previous iterations
|
||||
containerStats = nil
|
||||
|
||||
queryParams["id"] = id
|
||||
db.NewQuery("SELECT stats FROM container_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
|
||||
|
||||
if err := json.Unmarshal(statsRecord.Stats, &containerStats); err != nil {
|
||||
allStats := make([][]container.Stats, 0, len(records))
|
||||
var row StatsRecord
|
||||
params := make(dbx.Params, 1)
|
||||
for _, rec := range records {
|
||||
row.Stats = row.Stats[:0]
|
||||
params["id"] = rec.Id
|
||||
db.NewQuery("SELECT stats FROM container_stats WHERE id = {:id}").Bind(params).One(&row)
|
||||
var cs []container.Stats
|
||||
if err := json.Unmarshal(row.Stats, &cs); err != nil {
|
||||
return []container.Stats{}
|
||||
}
|
||||
allStats = append(allStats, cs)
|
||||
}
|
||||
return AverageContainerStatsSlice(allStats)
|
||||
}
|
||||
|
||||
// AverageContainerStatsSlice computes the average of container stats across multiple time periods.
|
||||
func AverageContainerStatsSlice(records [][]container.Stats) []container.Stats {
|
||||
if len(records) == 0 {
|
||||
return []container.Stats{}
|
||||
}
|
||||
sums := make(map[string]*container.Stats)
|
||||
count := float64(len(records))
|
||||
|
||||
for _, containerStats := range records {
|
||||
for i := range containerStats {
|
||||
stat := containerStats[i]
|
||||
stat := &containerStats[i]
|
||||
if _, ok := sums[stat.Name]; !ok {
|
||||
sums[stat.Name] = &container.Stats{Name: stat.Name}
|
||||
}
|
||||
@@ -488,131 +530,78 @@ func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds
|
||||
return result
|
||||
}
|
||||
|
||||
// Delete old records
|
||||
func (rm *RecordManager) DeleteOldRecords() {
|
||||
rm.app.RunInTransaction(func(txApp core.App) error {
|
||||
err := deleteOldSystemStats(txApp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = deleteOldContainerRecords(txApp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = deleteOldSystemdServiceRecords(txApp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = deleteOldAlertsHistory(txApp, 200, 250)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = deleteOldQuietHours(txApp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
// AverageProbeStats averages probe stats across multiple records.
|
||||
// For each probe key: avg of average fields, min of mins, and max of maxes.
|
||||
func (rm *RecordManager) AverageProbeStats(db dbx.Builder, records RecordIds) map[string]probe.Stats {
|
||||
type probeValues struct {
|
||||
sums probe.Stats
|
||||
counts []int
|
||||
}
|
||||
|
||||
// Delete old alerts history records
|
||||
func deleteOldAlertsHistory(app core.App, countToKeep, countBeforeDeletion int) error {
|
||||
db := app.DB()
|
||||
var users []struct {
|
||||
Id string `db:"user"`
|
||||
}
|
||||
err := db.NewQuery("SELECT user, COUNT(*) as count FROM alerts_history GROUP BY user HAVING count > {:countBeforeDeletion}").Bind(dbx.Params{"countBeforeDeletion": countBeforeDeletion}).All(&users)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, user := range users {
|
||||
_, err = db.NewQuery("DELETE FROM alerts_history WHERE user = {:user} AND id NOT IN (SELECT id FROM alerts_history WHERE user = {:user} ORDER BY created DESC LIMIT {:countToKeep})").Bind(dbx.Params{"user": user.Id, "countToKeep": countToKeep}).Execute()
|
||||
if err != nil {
|
||||
return err
|
||||
query := db.NewQuery("SELECT stats FROM network_probe_stats WHERE id = {:id}")
|
||||
|
||||
// accumulate sums for each probe key across records
|
||||
sums := make(map[string]*probeValues)
|
||||
var row StatsRecord
|
||||
for _, rec := range records {
|
||||
row.Stats = row.Stats[:0]
|
||||
query.Bind(dbx.Params{"id": rec.Id}).One(&row)
|
||||
var rawStats map[string]probe.Stats
|
||||
if err := json.Unmarshal(row.Stats, &rawStats); err != nil {
|
||||
continue
|
||||
}
|
||||
for key, vals := range rawStats {
|
||||
s, ok := sums[key]
|
||||
if !ok {
|
||||
s = &probeValues{sums: make(probe.Stats, len(vals)), counts: make([]int, len(vals))}
|
||||
sums[key] = s
|
||||
}
|
||||
if len(vals) > len(s.sums) {
|
||||
expandedSums := make(probe.Stats, len(vals))
|
||||
copy(expandedSums, s.sums)
|
||||
s.sums = expandedSums
|
||||
|
||||
expandedCounts := make([]int, len(vals))
|
||||
copy(expandedCounts, s.counts)
|
||||
s.counts = expandedCounts
|
||||
}
|
||||
for i := range vals {
|
||||
switch i {
|
||||
case 1: // min fields
|
||||
if s.counts[i] == 0 || vals[i] < s.sums[i] {
|
||||
s.sums[i] = vals[i]
|
||||
}
|
||||
case 2: // max fields
|
||||
if s.counts[i] == 0 || vals[i] > s.sums[i] {
|
||||
s.sums[i] = vals[i]
|
||||
}
|
||||
default: // average fields
|
||||
s.sums[i] += vals[i]
|
||||
}
|
||||
s.counts[i]++
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes system_stats records older than what is displayed in the UI
|
||||
func deleteOldSystemStats(app core.App) error {
|
||||
// Collections to process
|
||||
collections := [2]string{"system_stats", "container_stats"}
|
||||
|
||||
// Record types and their retention periods
|
||||
type RecordDeletionData struct {
|
||||
recordType string
|
||||
retention time.Duration
|
||||
}
|
||||
recordData := []RecordDeletionData{
|
||||
{recordType: "1m", retention: time.Hour}, // 1 hour
|
||||
{recordType: "10m", retention: 12 * time.Hour}, // 12 hours
|
||||
{recordType: "20m", retention: 24 * time.Hour}, // 1 day
|
||||
{recordType: "120m", retention: 7 * 24 * time.Hour}, // 7 days
|
||||
{recordType: "480m", retention: 30 * 24 * time.Hour}, // 30 days
|
||||
}
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
for _, collection := range collections {
|
||||
// Build the WHERE clause
|
||||
var conditionParts []string
|
||||
var params dbx.Params = make(map[string]any)
|
||||
for i := range recordData {
|
||||
rd := recordData[i]
|
||||
// Create parameterized condition for this record type
|
||||
dateParam := fmt.Sprintf("date%d", i)
|
||||
conditionParts = append(conditionParts, fmt.Sprintf("(type = '%s' AND created < {:%s})", rd.recordType, dateParam))
|
||||
params[dateParam] = now.Add(-rd.retention)
|
||||
// compute final averages
|
||||
result := make(map[string]probe.Stats, len(sums))
|
||||
for key, s := range sums {
|
||||
if len(s.counts) == 0 {
|
||||
continue
|
||||
}
|
||||
// Combine conditions with OR
|
||||
conditionStr := strings.Join(conditionParts, " OR ")
|
||||
// Construct and execute the full raw query
|
||||
rawQuery := fmt.Sprintf("DELETE FROM %s WHERE %s", collection, conditionStr)
|
||||
if _, err := app.DB().NewQuery(rawQuery).Bind(params).Execute(); err != nil {
|
||||
return fmt.Errorf("failed to delete from %s: %v", collection, err)
|
||||
for i := range s.sums {
|
||||
switch i {
|
||||
case 1, 2: // min and max fields should not be averaged
|
||||
continue
|
||||
default:
|
||||
if s.counts[i] > 0 {
|
||||
s.sums[i] = twoDecimals(s.sums[i] / float64(s.counts[i]))
|
||||
}
|
||||
}
|
||||
}
|
||||
result[key] = s.sums
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes systemd service records that haven't been updated in the last 20 minutes
|
||||
func deleteOldSystemdServiceRecords(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
twentyMinutesAgo := now.Add(-20 * time.Minute)
|
||||
|
||||
// Delete systemd service records where updated < twentyMinutesAgo
|
||||
_, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete old systemd service records: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes container records that haven't been updated in the last 10 minutes
|
||||
func deleteOldContainerRecords(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
tenMinutesAgo := now.Add(-10 * time.Minute)
|
||||
|
||||
// Delete container records where updated < tenMinutesAgo
|
||||
_, err := app.DB().NewQuery("DELETE FROM containers WHERE updated < {:updated}").Bind(dbx.Params{"updated": tenMinutesAgo.UnixMilli()}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete old container records: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes old quiet hours records where end date has passed
|
||||
func deleteOldQuietHours(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
_, err := app.DB().NewQuery("DELETE FROM quiet_hours WHERE type = 'one-time' AND end < {:now}").Bind(dbx.Params{"now": now}).Execute()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return result
|
||||
}
|
||||
|
||||
/* Round float to two decimals */
|
||||
|
||||
820
internal/records/records_averaging_test.go
Normal file
820
internal/records/records_averaging_test.go
Normal file
@@ -0,0 +1,820 @@
|
||||
//go:build testing
|
||||
|
||||
package records_test
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/henrygd/beszel/internal/entities/container"
|
||||
"github.com/henrygd/beszel/internal/entities/system"
|
||||
"github.com/henrygd/beszel/internal/records"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAverageSystemStatsSlice_Empty(t *testing.T) {
|
||||
result := records.AverageSystemStatsSlice(nil)
|
||||
assert.Equal(t, system.Stats{}, result)
|
||||
|
||||
result = records.AverageSystemStatsSlice([]system.Stats{})
|
||||
assert.Equal(t, system.Stats{}, result)
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_SingleRecord(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 45.67,
|
||||
Mem: 16.0,
|
||||
MemUsed: 8.5,
|
||||
MemPct: 53.12,
|
||||
MemBuffCache: 2.0,
|
||||
Swap: 4.0,
|
||||
SwapUsed: 1.0,
|
||||
DiskTotal: 500.0,
|
||||
DiskUsed: 250.0,
|
||||
DiskPct: 50.0,
|
||||
DiskReadPs: 100.5,
|
||||
DiskWritePs: 200.75,
|
||||
NetworkSent: 10.5,
|
||||
NetworkRecv: 20.25,
|
||||
LoadAvg: [3]float64{1.5, 2.0, 3.5},
|
||||
Bandwidth: [2]uint64{1000, 2000},
|
||||
DiskIO: [2]uint64{500, 600},
|
||||
Battery: [2]uint8{80, 1},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 45.67, result.Cpu)
|
||||
assert.Equal(t, 16.0, result.Mem)
|
||||
assert.Equal(t, 8.5, result.MemUsed)
|
||||
assert.Equal(t, 53.12, result.MemPct)
|
||||
assert.Equal(t, 2.0, result.MemBuffCache)
|
||||
assert.Equal(t, 4.0, result.Swap)
|
||||
assert.Equal(t, 1.0, result.SwapUsed)
|
||||
assert.Equal(t, 500.0, result.DiskTotal)
|
||||
assert.Equal(t, 250.0, result.DiskUsed)
|
||||
assert.Equal(t, 50.0, result.DiskPct)
|
||||
assert.Equal(t, 100.5, result.DiskReadPs)
|
||||
assert.Equal(t, 200.75, result.DiskWritePs)
|
||||
assert.Equal(t, 10.5, result.NetworkSent)
|
||||
assert.Equal(t, 20.25, result.NetworkRecv)
|
||||
assert.Equal(t, [3]float64{1.5, 2.0, 3.5}, result.LoadAvg)
|
||||
assert.Equal(t, [2]uint64{1000, 2000}, result.Bandwidth)
|
||||
assert.Equal(t, [2]uint64{500, 600}, result.DiskIO)
|
||||
assert.Equal(t, uint8(80), result.Battery[0])
|
||||
assert.Equal(t, uint8(1), result.Battery[1])
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_BasicAveraging(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{
|
||||
Cpu: 20.0,
|
||||
Mem: 16.0,
|
||||
MemUsed: 6.0,
|
||||
MemPct: 37.5,
|
||||
MemBuffCache: 1.0,
|
||||
MemZfsArc: 0.5,
|
||||
Swap: 4.0,
|
||||
SwapUsed: 1.0,
|
||||
DiskTotal: 500.0,
|
||||
DiskUsed: 200.0,
|
||||
DiskPct: 40.0,
|
||||
DiskReadPs: 100.0,
|
||||
DiskWritePs: 200.0,
|
||||
NetworkSent: 10.0,
|
||||
NetworkRecv: 20.0,
|
||||
LoadAvg: [3]float64{1.0, 2.0, 3.0},
|
||||
Bandwidth: [2]uint64{1000, 2000},
|
||||
DiskIO: [2]uint64{400, 600},
|
||||
Battery: [2]uint8{80, 1},
|
||||
},
|
||||
{
|
||||
Cpu: 40.0,
|
||||
Mem: 16.0,
|
||||
MemUsed: 10.0,
|
||||
MemPct: 62.5,
|
||||
MemBuffCache: 3.0,
|
||||
MemZfsArc: 1.5,
|
||||
Swap: 4.0,
|
||||
SwapUsed: 3.0,
|
||||
DiskTotal: 500.0,
|
||||
DiskUsed: 300.0,
|
||||
DiskPct: 60.0,
|
||||
DiskReadPs: 200.0,
|
||||
DiskWritePs: 400.0,
|
||||
NetworkSent: 30.0,
|
||||
NetworkRecv: 40.0,
|
||||
LoadAvg: [3]float64{3.0, 4.0, 5.0},
|
||||
Bandwidth: [2]uint64{3000, 4000},
|
||||
DiskIO: [2]uint64{600, 800},
|
||||
Battery: [2]uint8{60, 1},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 30.0, result.Cpu)
|
||||
assert.Equal(t, 16.0, result.Mem)
|
||||
assert.Equal(t, 8.0, result.MemUsed)
|
||||
assert.Equal(t, 50.0, result.MemPct)
|
||||
assert.Equal(t, 2.0, result.MemBuffCache)
|
||||
assert.Equal(t, 1.0, result.MemZfsArc)
|
||||
assert.Equal(t, 4.0, result.Swap)
|
||||
assert.Equal(t, 2.0, result.SwapUsed)
|
||||
assert.Equal(t, 500.0, result.DiskTotal)
|
||||
assert.Equal(t, 250.0, result.DiskUsed)
|
||||
assert.Equal(t, 50.0, result.DiskPct)
|
||||
assert.Equal(t, 150.0, result.DiskReadPs)
|
||||
assert.Equal(t, 300.0, result.DiskWritePs)
|
||||
assert.Equal(t, 20.0, result.NetworkSent)
|
||||
assert.Equal(t, 30.0, result.NetworkRecv)
|
||||
assert.Equal(t, [3]float64{2.0, 3.0, 4.0}, result.LoadAvg)
|
||||
assert.Equal(t, [2]uint64{2000, 3000}, result.Bandwidth)
|
||||
assert.Equal(t, [2]uint64{500, 700}, result.DiskIO)
|
||||
assert.Equal(t, uint8(70), result.Battery[0])
|
||||
assert.Equal(t, uint8(1), result.Battery[1])
|
||||
}
|
||||
|
||||
// Tests that Max* peak fields resolve to the maximum across all records,
// while the non-peak DiskIoStats values are averaged.
func TestAverageSystemStatsSlice_PeakValues(t *testing.T) {
	input := []system.Stats{
		{
			Cpu:            20.0,
			MaxCpu:         25.0,
			MemUsed:        6.0,
			MaxMem:         7.0,
			NetworkSent:    10.0,
			MaxNetworkSent: 15.0,
			NetworkRecv:    20.0,
			MaxNetworkRecv: 25.0,
			DiskReadPs:     100.0,
			MaxDiskReadPs:  120.0,
			DiskWritePs:    200.0,
			MaxDiskWritePs: 220.0,
			Bandwidth:      [2]uint64{1000, 2000},
			MaxBandwidth:   [2]uint64{1500, 2500},
			DiskIO:         [2]uint64{400, 600},
			MaxDiskIO:      [2]uint64{500, 700},
			DiskIoStats:    [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
			MaxDiskIoStats: [6]float64{15.0, 25.0, 35.0, 6.0, 9.0, 14.0},
		},
		{
			Cpu:            40.0,
			MaxCpu:         50.0,
			MemUsed:        10.0,
			MaxMem:         12.0,
			NetworkSent:    30.0,
			MaxNetworkSent: 35.0,
			NetworkRecv:    40.0,
			MaxNetworkRecv: 45.0,
			DiskReadPs:     200.0,
			MaxDiskReadPs:  210.0,
			DiskWritePs:    400.0,
			MaxDiskWritePs: 410.0,
			Bandwidth:      [2]uint64{3000, 4000},
			MaxBandwidth:   [2]uint64{3500, 4500},
			DiskIO:         [2]uint64{600, 800},
			MaxDiskIO:      [2]uint64{650, 850},
			DiskIoStats:    [6]float64{50.0, 60.0, 70.0, 15.0, 18.0, 22.0},
			MaxDiskIoStats: [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	// Peaks take the maximum value seen across the two records.
	assert.Equal(t, 50.0, result.MaxCpu)
	assert.Equal(t, 12.0, result.MaxMem)
	assert.Equal(t, 35.0, result.MaxNetworkSent)
	assert.Equal(t, 45.0, result.MaxNetworkRecv)
	assert.Equal(t, 210.0, result.MaxDiskReadPs)
	assert.Equal(t, 410.0, result.MaxDiskWritePs)
	assert.Equal(t, [2]uint64{3500, 4500}, result.MaxBandwidth)
	assert.Equal(t, [2]uint64{650, 850}, result.MaxDiskIO)
	// DiskIoStats is averaged element-wise; MaxDiskIoStats takes the per-element max.
	assert.Equal(t, [6]float64{30.0, 40.0, 50.0, 10.0, 13.0, 17.0}, result.DiskIoStats)
	assert.Equal(t, [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0}, result.MaxDiskIoStats)
}
|
||||
|
||||
// Tests element-wise averaging of DiskIoStats across three records, and
// that MaxDiskIoStats considers current values as well as stored peaks.
func TestAverageSystemStatsSlice_DiskIoStats(t *testing.T) {
	input := []system.Stats{
		{
			Cpu:            10.0,
			DiskIoStats:    [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
			MaxDiskIoStats: [6]float64{12.0, 22.0, 32.0, 6.0, 9.0, 13.0},
		},
		{
			Cpu:            20.0,
			DiskIoStats:    [6]float64{30.0, 40.0, 50.0, 15.0, 18.0, 22.0},
			MaxDiskIoStats: [6]float64{28.0, 38.0, 48.0, 14.0, 17.0, 21.0},
		},
		{
			Cpu:            30.0,
			DiskIoStats:    [6]float64{20.0, 30.0, 40.0, 10.0, 12.0, 16.0},
			MaxDiskIoStats: [6]float64{25.0, 35.0, 45.0, 11.0, 13.0, 17.0},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	// Average: (10+30+20)/3=20, (20+40+30)/3=30, (30+50+40)/3=40, (5+15+10)/3=10, (8+18+12)/3≈12.67, (12+22+16)/3≈16.67
	assert.Equal(t, 20.0, result.DiskIoStats[0])
	assert.Equal(t, 30.0, result.DiskIoStats[1])
	assert.Equal(t, 40.0, result.DiskIoStats[2])
	assert.Equal(t, 10.0, result.DiskIoStats[3])
	assert.Equal(t, 12.67, result.DiskIoStats[4])
	assert.Equal(t, 16.67, result.DiskIoStats[5])
	// Max: current DiskIoStats[0] wins for record 2 (30 > MaxDiskIoStats 28)
	assert.Equal(t, 30.0, result.MaxDiskIoStats[0])
	assert.Equal(t, 40.0, result.MaxDiskIoStats[1])
	assert.Equal(t, 50.0, result.MaxDiskIoStats[2])
	assert.Equal(t, 15.0, result.MaxDiskIoStats[3])
	assert.Equal(t, 18.0, result.MaxDiskIoStats[4])
	assert.Equal(t, 22.0, result.MaxDiskIoStats[5])
}
|
||||
|
||||
// Tests that current DiskIoStats values are considered when computing MaxDiskIoStats.
|
||||
func TestAverageSystemStatsSlice_DiskIoStatsPeakFromCurrentValues(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, DiskIoStats: [6]float64{95.0, 90.0, 85.0, 50.0, 60.0, 80.0}, MaxDiskIoStats: [6]float64{80.0, 80.0, 80.0, 40.0, 50.0, 70.0}},
|
||||
{Cpu: 20.0, DiskIoStats: [6]float64{10.0, 10.0, 10.0, 5.0, 6.0, 8.0}, MaxDiskIoStats: [6]float64{20.0, 20.0, 20.0, 10.0, 12.0, 16.0}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
// Current value from first record (95, 90, 85, 50, 60, 80) beats MaxDiskIoStats in both records
|
||||
assert.Equal(t, 95.0, result.MaxDiskIoStats[0])
|
||||
assert.Equal(t, 90.0, result.MaxDiskIoStats[1])
|
||||
assert.Equal(t, 85.0, result.MaxDiskIoStats[2])
|
||||
assert.Equal(t, 50.0, result.MaxDiskIoStats[3])
|
||||
assert.Equal(t, 60.0, result.MaxDiskIoStats[4])
|
||||
assert.Equal(t, 80.0, result.MaxDiskIoStats[5])
|
||||
}
|
||||
|
||||
// Tests that current values are considered when computing peaks
|
||||
// (i.e., current cpu > MaxCpu should still win).
|
||||
func TestAverageSystemStatsSlice_PeakFromCurrentValues(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 95.0, MaxCpu: 80.0, MemUsed: 15.0, MaxMem: 10.0},
|
||||
{Cpu: 10.0, MaxCpu: 20.0, MemUsed: 5.0, MaxMem: 8.0},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 95.0, result.MaxCpu)
|
||||
assert.Equal(t, 15.0, result.MaxMem)
|
||||
}
|
||||
|
||||
// Tests that records without temperature data are excluded from the temperature average.
func TestAverageSystemStatsSlice_Temperatures(t *testing.T) {
	input := []system.Stats{
		{
			Cpu:          10.0,
			Temperatures: map[string]float64{"cpu": 60.0, "gpu": 70.0},
		},
		{
			Cpu:          20.0,
			Temperatures: map[string]float64{"cpu": 80.0, "gpu": 90.0},
		},
		{
			// No temperatures - should not affect temp averaging
			Cpu: 30.0,
		},
	}

	result := records.AverageSystemStatsSlice(input)

	require.NotNil(t, result.Temperatures)
	// Average over 2 records that had temps, not 3
	// cpu: (60+80)/2, gpu: (70+90)/2.
	assert.Equal(t, 70.0, result.Temperatures["cpu"])
	assert.Equal(t, 80.0, result.Temperatures["gpu"])
}
|
||||
|
||||
// Tests per-interface aggregation: indices [0]/[1] (current up/down) are
// averaged across records, while indices [2]/[3] (peaks) take the maximum.
func TestAverageSystemStatsSlice_NetworkInterfaces(t *testing.T) {
	input := []system.Stats{
		{
			Cpu: 10.0,
			NetworkInterfaces: map[string][4]uint64{
				"eth0": {100, 200, 150, 250},
				"eth1": {50, 60, 70, 80},
			},
		},
		{
			Cpu: 20.0,
			NetworkInterfaces: map[string][4]uint64{
				"eth0": {200, 400, 300, 500},
				"eth1": {150, 160, 170, 180},
			},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	require.NotNil(t, result.NetworkInterfaces)
	// [0] and [1] are averaged, [2] and [3] are max
	assert.Equal(t, [4]uint64{150, 300, 300, 500}, result.NetworkInterfaces["eth0"])
	assert.Equal(t, [4]uint64{100, 110, 170, 180}, result.NetworkInterfaces["eth1"])
}
|
||||
|
||||
// Tests ExtraFs aggregation: current values (usage, rates, byte counters,
// DiskIoStats) are averaged per filesystem while Max* fields take the peak.
func TestAverageSystemStatsSlice_ExtraFs(t *testing.T) {
	input := []system.Stats{
		{
			Cpu: 10.0,
			ExtraFs: map[string]*system.FsStats{
				"/data": {
					DiskTotal:         1000.0,
					DiskUsed:          400.0,
					DiskReadPs:        50.0,
					DiskWritePs:       100.0,
					MaxDiskReadPS:     60.0,
					MaxDiskWritePS:    110.0,
					DiskReadBytes:     5000,
					DiskWriteBytes:    10000,
					MaxDiskReadBytes:  6000,
					MaxDiskWriteBytes: 11000,
					DiskIoStats:       [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
					MaxDiskIoStats:    [6]float64{12.0, 22.0, 32.0, 6.0, 9.0, 13.0},
				},
			},
		},
		{
			Cpu: 20.0,
			ExtraFs: map[string]*system.FsStats{
				"/data": {
					DiskTotal:         1000.0,
					DiskUsed:          600.0,
					DiskReadPs:        150.0,
					DiskWritePs:       200.0,
					MaxDiskReadPS:     160.0,
					MaxDiskWritePS:    210.0,
					DiskReadBytes:     15000,
					DiskWriteBytes:    20000,
					MaxDiskReadBytes:  16000,
					MaxDiskWriteBytes: 21000,
					DiskIoStats:       [6]float64{50.0, 60.0, 70.0, 15.0, 18.0, 22.0},
					MaxDiskIoStats:    [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0},
				},
			},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	require.NotNil(t, result.ExtraFs)
	require.NotNil(t, result.ExtraFs["/data"])
	fs := result.ExtraFs["/data"]
	// Averages of the two records' current values.
	assert.Equal(t, 1000.0, fs.DiskTotal)
	assert.Equal(t, 500.0, fs.DiskUsed)
	assert.Equal(t, 100.0, fs.DiskReadPs)
	assert.Equal(t, 150.0, fs.DiskWritePs)
	// Peaks across both records.
	assert.Equal(t, 160.0, fs.MaxDiskReadPS)
	assert.Equal(t, 210.0, fs.MaxDiskWritePS)
	assert.Equal(t, uint64(10000), fs.DiskReadBytes)
	assert.Equal(t, uint64(15000), fs.DiskWriteBytes)
	assert.Equal(t, uint64(16000), fs.MaxDiskReadBytes)
	assert.Equal(t, uint64(21000), fs.MaxDiskWriteBytes)
	assert.Equal(t, [6]float64{30.0, 40.0, 50.0, 10.0, 13.0, 17.0}, fs.DiskIoStats)
	assert.Equal(t, [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0}, fs.MaxDiskIoStats)
}
|
||||
|
||||
// Tests that ExtraFs DiskIoStats peak considers current values, not just previous peaks.
func TestAverageSystemStatsSlice_ExtraFsDiskIoStatsPeakFromCurrentValues(t *testing.T) {
	input := []system.Stats{
		{
			Cpu: 10.0,
			ExtraFs: map[string]*system.FsStats{
				"/data": {
					DiskIoStats:    [6]float64{95.0, 90.0, 85.0, 50.0, 60.0, 80.0}, // exceeds MaxDiskIoStats
					MaxDiskIoStats: [6]float64{80.0, 80.0, 80.0, 40.0, 50.0, 70.0},
				},
			},
		},
		{
			Cpu: 20.0,
			ExtraFs: map[string]*system.FsStats{
				"/data": {
					DiskIoStats:    [6]float64{10.0, 10.0, 10.0, 5.0, 6.0, 8.0},
					MaxDiskIoStats: [6]float64{20.0, 20.0, 20.0, 10.0, 12.0, 16.0},
				},
			},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	// The first record's current values dominate every stored peak.
	fs := result.ExtraFs["/data"]
	assert.Equal(t, 95.0, fs.MaxDiskIoStats[0])
	assert.Equal(t, 90.0, fs.MaxDiskIoStats[1])
	assert.Equal(t, 85.0, fs.MaxDiskIoStats[2])
	assert.Equal(t, 50.0, fs.MaxDiskIoStats[3])
	assert.Equal(t, 60.0, fs.MaxDiskIoStats[4])
	assert.Equal(t, 80.0, fs.MaxDiskIoStats[5])
}
|
||||
|
||||
// Tests that extra FS peak values consider current values, not just previous peaks.
func TestAverageSystemStatsSlice_ExtraFsPeaksFromCurrentValues(t *testing.T) {
	input := []system.Stats{
		{
			Cpu: 10.0,
			ExtraFs: map[string]*system.FsStats{
				"/data": {
					DiskReadPs:       500.0, // exceeds MaxDiskReadPS
					MaxDiskReadPS:    100.0,
					DiskReadBytes:    50000,
					MaxDiskReadBytes: 10000,
				},
			},
		},
		{
			Cpu: 20.0,
			ExtraFs: map[string]*system.FsStats{
				"/data": {
					DiskReadPs:       50.0,
					MaxDiskReadPS:    200.0,
					DiskReadBytes:    5000,
					MaxDiskReadBytes: 20000,
				},
			},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	// First record's current rate (500) and byte count (50000) win over all peaks.
	fs := result.ExtraFs["/data"]
	assert.Equal(t, 500.0, fs.MaxDiskReadPS)
	assert.Equal(t, uint64(50000), fs.MaxDiskReadBytes)
}
|
||||
|
||||
// Tests per-GPU aggregation: numeric fields and engine utilization maps are
// averaged, while identity fields (Name, Count) are preserved.
func TestAverageSystemStatsSlice_GPUData(t *testing.T) {
	input := []system.Stats{
		{
			Cpu: 10.0,
			GPUData: map[string]system.GPUData{
				"gpu0": {
					Name:        "RTX 4090",
					Temperature: 60.0,
					MemoryUsed:  4.0,
					MemoryTotal: 24.0,
					Usage:       30.0,
					Power:       200.0,
					Count:       1.0,
					Engines: map[string]float64{
						"3D":    50.0,
						"Video": 10.0,
					},
				},
			},
		},
		{
			Cpu: 20.0,
			GPUData: map[string]system.GPUData{
				"gpu0": {
					Name:        "RTX 4090",
					Temperature: 80.0,
					MemoryUsed:  8.0,
					MemoryTotal: 24.0,
					Usage:       70.0,
					Power:       300.0,
					Count:       1.0,
					Engines: map[string]float64{
						"3D":    90.0,
						"Video": 30.0,
					},
				},
			},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	require.NotNil(t, result.GPUData)
	gpu := result.GPUData["gpu0"]
	assert.Equal(t, "RTX 4090", gpu.Name)
	// Averages of the two records.
	assert.Equal(t, 70.0, gpu.Temperature)
	assert.Equal(t, 6.0, gpu.MemoryUsed)
	assert.Equal(t, 24.0, gpu.MemoryTotal)
	assert.Equal(t, 50.0, gpu.Usage)
	assert.Equal(t, 250.0, gpu.Power)
	assert.Equal(t, 1.0, gpu.Count)
	// Engine utilization is averaged per engine name.
	require.NotNil(t, gpu.Engines)
	assert.Equal(t, 70.0, gpu.Engines["3D"])
	assert.Equal(t, 20.0, gpu.Engines["Video"])
}
|
||||
|
||||
// Tests that averaging is applied independently to each GPU key.
func TestAverageSystemStatsSlice_MultipleGPUs(t *testing.T) {
	input := []system.Stats{
		{
			Cpu: 10.0,
			GPUData: map[string]system.GPUData{
				"gpu0": {Name: "GPU A", Usage: 20.0, Temperature: 50.0},
				"gpu1": {Name: "GPU B", Usage: 60.0, Temperature: 70.0},
			},
		},
		{
			Cpu: 20.0,
			GPUData: map[string]system.GPUData{
				"gpu0": {Name: "GPU A", Usage: 40.0, Temperature: 60.0},
				"gpu1": {Name: "GPU B", Usage: 80.0, Temperature: 80.0},
			},
		},
	}

	result := records.AverageSystemStatsSlice(input)

	require.NotNil(t, result.GPUData)
	// Each GPU averaged separately across records.
	assert.Equal(t, 30.0, result.GPUData["gpu0"].Usage)
	assert.Equal(t, 55.0, result.GPUData["gpu0"].Temperature)
	assert.Equal(t, 70.0, result.GPUData["gpu1"].Usage)
	assert.Equal(t, 75.0, result.GPUData["gpu1"].Temperature)
}
|
||||
|
||||
func TestAverageSystemStatsSlice_CpuCoresUsage(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, CpuCoresUsage: system.Uint8Slice{10, 20, 30, 40}},
|
||||
{Cpu: 20.0, CpuCoresUsage: system.Uint8Slice{30, 40, 50, 60}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.CpuCoresUsage)
|
||||
assert.Equal(t, system.Uint8Slice{20, 30, 40, 50}, result.CpuCoresUsage)
|
||||
}
|
||||
|
||||
// Tests that per-core usage rounds correctly (e.g., 15.5 -> 16 via math.Round).
|
||||
func TestAverageSystemStatsSlice_CpuCoresUsageRounding(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, CpuCoresUsage: system.Uint8Slice{11}},
|
||||
{Cpu: 20.0, CpuCoresUsage: system.Uint8Slice{20}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.CpuCoresUsage)
|
||||
// (11+20)/2 = 15.5, rounds to 16
|
||||
assert.Equal(t, uint8(16), result.CpuCoresUsage[0])
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_CpuBreakdown(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, CpuBreakdown: []float64{5.0, 3.0, 1.0, 0.5, 90.5}},
|
||||
{Cpu: 20.0, CpuBreakdown: []float64{15.0, 7.0, 3.0, 1.5, 73.5}},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
require.NotNil(t, result.CpuBreakdown)
|
||||
assert.Equal(t, []float64{10.0, 5.0, 2.0, 1.0, 82.0}, result.CpuBreakdown)
|
||||
}
|
||||
|
||||
// Tests that Battery[1] (charge state) uses the last record's value.
|
||||
func TestAverageSystemStatsSlice_BatteryLastChargeState(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, Battery: [2]uint8{100, 1}}, // charging
|
||||
{Cpu: 20.0, Battery: [2]uint8{90, 0}}, // not charging
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, uint8(95), result.Battery[0])
|
||||
assert.Equal(t, uint8(0), result.Battery[1]) // last record's charge state
|
||||
}
|
||||
|
||||
func TestAverageSystemStatsSlice_ThreeRecordsRounding(t *testing.T) {
|
||||
input := []system.Stats{
|
||||
{Cpu: 10.0, Mem: 8.0},
|
||||
{Cpu: 20.0, Mem: 8.0},
|
||||
{Cpu: 30.0, Mem: 8.0},
|
||||
}
|
||||
|
||||
result := records.AverageSystemStatsSlice(input)
|
||||
|
||||
assert.Equal(t, 20.0, result.Cpu)
|
||||
assert.Equal(t, 8.0, result.Mem)
|
||||
}
|
||||
|
||||
// Tests records where some have optional fields and others don't.
// Optional fields present in only one record are still divided by the
// total record count, not the count of records carrying the field.
func TestAverageSystemStatsSlice_MixedOptionalFields(t *testing.T) {
	input := []system.Stats{
		{
			Cpu:           10.0,
			CpuCoresUsage: system.Uint8Slice{50, 60},
			CpuBreakdown:  []float64{5.0, 3.0, 1.0, 0.5, 90.5},
			GPUData: map[string]system.GPUData{
				"gpu0": {Name: "GPU", Usage: 40.0},
			},
		},
		{
			Cpu: 20.0,
			// No CpuCoresUsage, CpuBreakdown, or GPUData
		},
	}

	result := records.AverageSystemStatsSlice(input)

	assert.Equal(t, 15.0, result.Cpu)
	// CpuCoresUsage: only 1 record had it, so sum/2
	require.NotNil(t, result.CpuCoresUsage)
	assert.Equal(t, uint8(25), result.CpuCoresUsage[0])
	assert.Equal(t, uint8(30), result.CpuCoresUsage[1])
	// CpuBreakdown: only 1 record had it, so sum/2
	require.NotNil(t, result.CpuBreakdown)
	assert.Equal(t, 2.5, result.CpuBreakdown[0])
	// GPUData: only 1 record had it, so sum/2
	require.NotNil(t, result.GPUData)
	assert.Equal(t, 20.0, result.GPUData["gpu0"].Usage)
}
|
||||
|
||||
// Tests with 10 records matching the common real-world case (10 x 1m -> 1 x 10m).
func TestAverageSystemStatsSlice_TenRecords(t *testing.T) {
	input := make([]system.Stats, 10)
	for i := range input {
		input[i] = system.Stats{
			Cpu:         float64(i * 10),     // 0, 10, 20, ..., 90
			Mem:         16.0,
			MemUsed:     float64(4 + i),      // 4, 5, 6, ..., 13
			MemPct:      float64(25 + i),     // 25, 26, ..., 34
			DiskTotal:   500.0,
			DiskUsed:    250.0,
			DiskPct:     50.0,
			NetworkSent: float64(i),
			NetworkRecv: float64(i * 2),
			Bandwidth:   [2]uint64{uint64(i * 1000), uint64(i * 2000)},
			LoadAvg:     [3]float64{float64(i), float64(i) * 0.5, float64(i) * 0.25},
		}
	}

	result := records.AverageSystemStatsSlice(input)

	assert.Equal(t, 45.0, result.Cpu)     // avg of 0..90
	assert.Equal(t, 16.0, result.Mem)     // constant
	assert.Equal(t, 8.5, result.MemUsed)  // avg of 4..13
	assert.Equal(t, 29.5, result.MemPct)  // avg of 25..34
	assert.Equal(t, 500.0, result.DiskTotal)
	assert.Equal(t, 250.0, result.DiskUsed)
	assert.Equal(t, 50.0, result.DiskPct)
	assert.Equal(t, 4.5, result.NetworkSent)
	assert.Equal(t, 9.0, result.NetworkRecv)
	// avg of 0..9000 / 0..18000 stepped by 1000/2000.
	assert.Equal(t, [2]uint64{4500, 9000}, result.Bandwidth)
}
|
||||
|
||||
// --- Container Stats Tests ---
|
||||
|
||||
func TestAverageContainerStatsSlice_Empty(t *testing.T) {
|
||||
result := records.AverageContainerStatsSlice(nil)
|
||||
assert.Equal(t, []container.Stats{}, result)
|
||||
|
||||
result = records.AverageContainerStatsSlice([][]container.Stats{})
|
||||
assert.Equal(t, []container.Stats{}, result)
|
||||
}
|
||||
|
||||
func TestAverageContainerStatsSlice_SingleRecord(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 5.0, Mem: 128.0, Bandwidth: [2]uint64{1000, 2000}},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
|
||||
require.Len(t, result, 1)
|
||||
assert.Equal(t, "nginx", result[0].Name)
|
||||
assert.Equal(t, 5.0, result[0].Cpu)
|
||||
assert.Equal(t, 128.0, result[0].Mem)
|
||||
assert.Equal(t, [2]uint64{1000, 2000}, result[0].Bandwidth)
|
||||
}
|
||||
|
||||
// Tests averaging of Cpu, Mem, and Bandwidth per container name across records.
func TestAverageContainerStatsSlice_BasicAveraging(t *testing.T) {
	input := [][]container.Stats{
		{
			{Name: "nginx", Cpu: 10.0, Mem: 100.0, Bandwidth: [2]uint64{1000, 2000}},
			{Name: "redis", Cpu: 5.0, Mem: 64.0, Bandwidth: [2]uint64{500, 1000}},
		},
		{
			{Name: "nginx", Cpu: 20.0, Mem: 200.0, Bandwidth: [2]uint64{3000, 4000}},
			{Name: "redis", Cpu: 15.0, Mem: 128.0, Bandwidth: [2]uint64{1500, 2000}},
		},
	}

	result := records.AverageContainerStatsSlice(input)
	// Result order is not guaranteed; sort by name for stable assertions.
	sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })

	require.Len(t, result, 2)

	assert.Equal(t, "nginx", result[0].Name)
	assert.Equal(t, 15.0, result[0].Cpu)
	assert.Equal(t, 150.0, result[0].Mem)
	assert.Equal(t, [2]uint64{2000, 3000}, result[0].Bandwidth)

	assert.Equal(t, "redis", result[1].Name)
	assert.Equal(t, 10.0, result[1].Cpu)
	assert.Equal(t, 96.0, result[1].Mem)
	assert.Equal(t, [2]uint64{1000, 1500}, result[1].Bandwidth)
}
|
||||
|
||||
// Tests containers that appear in some records but not all.
func TestAverageContainerStatsSlice_ContainerAppearsInSomeRecords(t *testing.T) {
	input := [][]container.Stats{
		{
			{Name: "nginx", Cpu: 10.0, Mem: 100.0},
			{Name: "redis", Cpu: 5.0, Mem: 64.0},
		},
		{
			{Name: "nginx", Cpu: 20.0, Mem: 200.0},
			// redis not present
		},
	}

	result := records.AverageContainerStatsSlice(input)
	// Sort by name for deterministic index-based assertions.
	sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })

	require.Len(t, result, 2)

	assert.Equal(t, "nginx", result[0].Name)
	assert.Equal(t, 15.0, result[0].Cpu)
	assert.Equal(t, 150.0, result[0].Mem)

	// redis: sum / count where count = total records (2), not records containing redis
	assert.Equal(t, "redis", result[1].Name)
	assert.Equal(t, 2.5, result[1].Cpu)
	assert.Equal(t, 32.0, result[1].Mem)
}
|
||||
|
||||
// Tests backward compatibility with deprecated NetworkSent/NetworkRecv (MB) when Bandwidth is zero.
// The MB values are converted to bytes (x 1048576) before averaging into Bandwidth.
func TestAverageContainerStatsSlice_DeprecatedNetworkFields(t *testing.T) {
	input := [][]container.Stats{
		{
			{Name: "nginx", Cpu: 10.0, Mem: 100.0, NetworkSent: 1.0, NetworkRecv: 2.0}, // 1 MB, 2 MB
		},
		{
			{Name: "nginx", Cpu: 20.0, Mem: 200.0, NetworkSent: 3.0, NetworkRecv: 4.0}, // 3 MB, 4 MB
		},
	}

	result := records.AverageContainerStatsSlice(input)

	require.Len(t, result, 1)
	assert.Equal(t, "nginx", result[0].Name)
	// avg sent = (1*1048576 + 3*1048576) / 2 = 2*1048576
	assert.Equal(t, uint64(2*1048576), result[0].Bandwidth[0])
	// avg recv = (2*1048576 + 4*1048576) / 2 = 3*1048576
	assert.Equal(t, uint64(3*1048576), result[0].Bandwidth[1])
}
|
||||
|
||||
// Tests that when Bandwidth is set, deprecated NetworkSent/NetworkRecv are ignored.
|
||||
func TestAverageContainerStatsSlice_MixedBandwidthAndDeprecated(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "nginx", Cpu: 10.0, Mem: 100.0, Bandwidth: [2]uint64{5000, 6000}, NetworkSent: 99.0, NetworkRecv: 99.0},
|
||||
},
|
||||
{
|
||||
{Name: "nginx", Cpu: 20.0, Mem: 200.0, Bandwidth: [2]uint64{7000, 8000}},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
|
||||
require.Len(t, result, 1)
|
||||
assert.Equal(t, uint64(6000), result[0].Bandwidth[0])
|
||||
assert.Equal(t, uint64(7000), result[0].Bandwidth[1])
|
||||
}
|
||||
|
||||
func TestAverageContainerStatsSlice_ThreeRecords(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{{Name: "app", Cpu: 1.0, Mem: 100.0}},
|
||||
{{Name: "app", Cpu: 2.0, Mem: 200.0}},
|
||||
{{Name: "app", Cpu: 3.0, Mem: 300.0}},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
|
||||
require.Len(t, result, 1)
|
||||
assert.Equal(t, 2.0, result[0].Cpu)
|
||||
assert.Equal(t, 200.0, result[0].Mem)
|
||||
}
|
||||
|
||||
func TestAverageContainerStatsSlice_ManyContainers(t *testing.T) {
|
||||
input := [][]container.Stats{
|
||||
{
|
||||
{Name: "a", Cpu: 10.0, Mem: 100.0},
|
||||
{Name: "b", Cpu: 20.0, Mem: 200.0},
|
||||
{Name: "c", Cpu: 30.0, Mem: 300.0},
|
||||
{Name: "d", Cpu: 40.0, Mem: 400.0},
|
||||
},
|
||||
{
|
||||
{Name: "a", Cpu: 20.0, Mem: 200.0},
|
||||
{Name: "b", Cpu: 30.0, Mem: 300.0},
|
||||
{Name: "c", Cpu: 40.0, Mem: 400.0},
|
||||
{Name: "d", Cpu: 50.0, Mem: 500.0},
|
||||
},
|
||||
}
|
||||
|
||||
result := records.AverageContainerStatsSlice(input)
|
||||
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||
|
||||
require.Len(t, result, 4)
|
||||
assert.Equal(t, 15.0, result[0].Cpu)
|
||||
assert.Equal(t, 25.0, result[1].Cpu)
|
||||
assert.Equal(t, 35.0, result[2].Cpu)
|
||||
assert.Equal(t, 45.0, result[3].Cpu)
|
||||
}
|
||||
130
internal/records/records_deletion.go
Normal file
130
internal/records/records_deletion.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package records
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
)
|
||||
|
||||
// Delete old records
|
||||
func (rm *RecordManager) DeleteOldRecords() {
|
||||
rm.app.RunInTransaction(func(txApp core.App) error {
|
||||
err := deleteOldSystemStats(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old system stats", "err", err)
|
||||
}
|
||||
err = deleteOldContainerRecords(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old container records", "err", err)
|
||||
}
|
||||
err = deleteOldSystemdServiceRecords(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old systemd service records", "err", err)
|
||||
}
|
||||
err = deleteOldAlertsHistory(txApp, 200, 250)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old alerts history", "err", err)
|
||||
}
|
||||
err = deleteOldQuietHours(txApp)
|
||||
if err != nil {
|
||||
slog.Error("Error deleting old quiet hours", "err", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// Delete old alerts history records.
//
// Uses hysteresis to avoid trimming on every run: a user's history is only
// touched once it exceeds countBeforeDeletion rows, and is then cut back to
// the newest countToKeep rows (ordered by created descending).
func deleteOldAlertsHistory(app core.App, countToKeep, countBeforeDeletion int) error {
	db := app.DB()
	var users []struct {
		Id string `db:"user"`
	}
	// Find users whose alert history size exceeds the deletion threshold.
	err := db.NewQuery("SELECT user, COUNT(*) as count FROM alerts_history GROUP BY user HAVING count > {:countBeforeDeletion}").Bind(dbx.Params{"countBeforeDeletion": countBeforeDeletion}).All(&users)
	if err != nil {
		return err
	}
	for _, user := range users {
		// Delete everything outside the user's countToKeep most recent rows.
		_, err = db.NewQuery("DELETE FROM alerts_history WHERE user = {:user} AND id NOT IN (SELECT id FROM alerts_history WHERE user = {:user} ORDER BY created DESC LIMIT {:countToKeep})").Bind(dbx.Params{"user": user.Id, "countToKeep": countToKeep}).Execute()
		if err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// Deletes stats records (system_stats, container_stats, network_probe_stats)
// older than what is displayed in the UI, per record-type retention period.
func deleteOldSystemStats(app core.App) error {
	// Collections to process
	collections := [3]string{"system_stats", "container_stats", "network_probe_stats"}

	// Record types and their retention periods
	type RecordDeletionData struct {
		recordType string
		retention  time.Duration
	}
	recordData := []RecordDeletionData{
		{recordType: "1m", retention: time.Hour},                // 1 hour
		{recordType: "10m", retention: 12 * time.Hour},          // 12 hours
		{recordType: "20m", retention: 24 * time.Hour},          // 1 day
		{recordType: "120m", retention: 7 * 24 * time.Hour},     // 7 days
		{recordType: "480m", retention: 30 * 24 * time.Hour},    // 30 days
	}

	now := time.Now().UTC()
	db := app.DB()

	for _, collection := range collections {
		// One parameterized DELETE per collection, re-bound for each record type.
		query := db.Delete(collection, dbx.NewExp("type={:type} AND created<{:created}"))
		for _, rd := range recordData {
			if _, err := query.Bind(dbx.Params{
				"type": rd.recordType,
				// getCreatedTimeField adapts the cutoff to the collection's
				// created-column representation.
				"created": getCreatedTimeField(collection, now.Add(-rd.retention)),
			}).Execute(); err != nil {
				return fmt.Errorf("failed to delete from %s: %v", collection, err)
			}
		}
	}
	return nil
}
|
||||
|
||||
// Deletes systemd service records that haven't been updated in the last 20 minutes
|
||||
func deleteOldSystemdServiceRecords(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
twentyMinutesAgo := now.Add(-20 * time.Minute)
|
||||
|
||||
// Delete systemd service records where updated < twentyMinutesAgo
|
||||
_, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete old systemd service records: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes container records that haven't been updated in the last 10 minutes
|
||||
func deleteOldContainerRecords(app core.App) error {
|
||||
now := time.Now().UTC()
|
||||
tenMinutesAgo := now.Add(-10 * time.Minute)
|
||||
|
||||
// Delete container records where updated < tenMinutesAgo
|
||||
_, err := app.DB().NewQuery("DELETE FROM containers WHERE updated < {:updated}").Bind(dbx.Params{"updated": tenMinutesAgo.UnixMilli()}).Execute()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete old container records: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deletes old quiet hours records where end date has passed.
// Only one-time entries are purged; recurring quiet hours are kept.
func deleteOldQuietHours(app core.App) error {
	now := time.Now().UTC()
	// NOTE(review): this binds a raw time.Time, while the sibling deletion
	// helpers bind UnixMilli — confirm the quiet_hours `end` column stores a
	// comparable representation.
	_, err := app.DB().NewQuery("DELETE FROM quiet_hours WHERE type = 'one-time' AND end < {:now}").Bind(dbx.Params{"now": now}).Execute()
	if err != nil {
		return err
	}

	return nil
}
|
||||
428
internal/records/records_deletion_test.go
Normal file
428
internal/records/records_deletion_test.go
Normal file
@@ -0,0 +1,428 @@
|
||||
//go:build testing
|
||||
|
||||
package records_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/records"
|
||||
"github.com/henrygd/beszel/internal/tests"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestDeleteOldRecords tests the main DeleteOldRecords function.
// It seeds an old 1m stats row (past the 1-hour retention), a recent one,
// and 260 alerts_history rows (past the 250-row threshold), then verifies
// the old stats row is purged and alerts history is trimmed to 200.
func TestDeleteOldRecords(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	rm := records.NewRecordManager(hub)

	// Create test user for alerts history
	user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
	require.NoError(t, err)

	// Create test system
	system, err := tests.CreateRecord(hub, "systems", map[string]any{
		"name":   "test-system",
		"host":   "localhost",
		"port":   "45876",
		"status": "up",
		"users":  []string{user.Id},
	})
	require.NoError(t, err)

	now := time.Now()

	// Create old system_stats records that should be deleted
	var record *core.Record
	record, err = tests.CreateRecord(hub, "system_stats", map[string]any{
		"system": system.Id,
		"type":   "1m",
		"stats":  `{"cpu": 50.0, "mem": 1024}`,
	})
	require.NoError(t, err)
	// created is autodate field, so we need to set it manually
	record.SetRaw("created", now.UTC().Add(-2*time.Hour).Format(types.DefaultDateLayout))
	err = hub.SaveNoValidate(record)
	require.NoError(t, err)
	require.NotNil(t, record)
	// Sanity-check the backdated timestamp actually persisted.
	require.InDelta(t, record.GetDateTime("created").Time().UTC().Unix(), now.UTC().Add(-2*time.Hour).Unix(), 1)
	require.Equal(t, record.Get("system"), system.Id)
	require.Equal(t, record.Get("type"), "1m")

	// Create recent system_stats record that should be kept
	_, err = tests.CreateRecord(hub, "system_stats", map[string]any{
		"system":  system.Id,
		"type":    "1m",
		"stats":   `{"cpu": 30.0, "mem": 512}`,
		"created": now.Add(-30 * time.Minute), // 30 minutes old, should be kept
	})
	require.NoError(t, err)

	// Create many alerts history records to trigger deletion
	for i := range 260 { // More than countBeforeDeletion (250)
		_, err = tests.CreateRecord(hub, "alerts_history", map[string]any{
			"user":    user.Id,
			"name":    "CPU",
			"value":   i + 1,
			"system":  system.Id,
			"created": now.Add(-time.Duration(i) * time.Minute),
		})
		require.NoError(t, err)
	}

	// Count records before deletion
	systemStatsCountBefore, err := hub.CountRecords("system_stats")
	require.NoError(t, err)
	alertsCountBefore, err := hub.CountRecords("alerts_history")
	require.NoError(t, err)

	// Run deletion
	rm.DeleteOldRecords()

	// Count records after deletion
	systemStatsCountAfter, err := hub.CountRecords("system_stats")
	require.NoError(t, err)
	alertsCountAfter, err := hub.CountRecords("alerts_history")
	require.NoError(t, err)

	// Verify old system stats were deleted
	assert.Less(t, systemStatsCountAfter, systemStatsCountBefore, "Old system stats should be deleted")

	// Verify alerts history was trimmed
	assert.Less(t, alertsCountAfter, alertsCountBefore, "Excessive alerts history should be deleted")
	assert.Equal(t, alertsCountAfter, int64(200), "Alerts count should be equal to countToKeep (200)")
}
|
||||
|
||||
// TestDeleteOldSystemStats tests the deleteOldSystemStats function
|
||||
func TestDeleteOldSystemStats(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create test system
|
||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
// Test data for different record types and their retention periods
|
||||
testCases := []struct {
|
||||
recordType string
|
||||
retention time.Duration
|
||||
shouldBeKept bool
|
||||
ageFromNow time.Duration
|
||||
description string
|
||||
}{
|
||||
{"1m", time.Hour, true, 30 * time.Minute, "1m record within 1 hour should be kept"},
|
||||
{"1m", time.Hour, false, 2 * time.Hour, "1m record older than 1 hour should be deleted"},
|
||||
{"10m", 12 * time.Hour, true, 6 * time.Hour, "10m record within 12 hours should be kept"},
|
||||
{"10m", 12 * time.Hour, false, 24 * time.Hour, "10m record older than 12 hours should be deleted"},
|
||||
{"20m", 24 * time.Hour, true, 12 * time.Hour, "20m record within 24 hours should be kept"},
|
||||
{"20m", 24 * time.Hour, false, 48 * time.Hour, "20m record older than 24 hours should be deleted"},
|
||||
{"120m", 7 * 24 * time.Hour, true, 3 * 24 * time.Hour, "120m record within 7 days should be kept"},
|
||||
{"120m", 7 * 24 * time.Hour, false, 10 * 24 * time.Hour, "120m record older than 7 days should be deleted"},
|
||||
{"480m", 30 * 24 * time.Hour, true, 15 * 24 * time.Hour, "480m record within 30 days should be kept"},
|
||||
{"480m", 30 * 24 * time.Hour, false, 45 * 24 * time.Hour, "480m record older than 30 days should be deleted"},
|
||||
}
|
||||
|
||||
// Create test records for both system_stats and container_stats
|
||||
collections := []string{"system_stats", "container_stats"}
|
||||
recordIds := make(map[string][]string)
|
||||
|
||||
for _, collection := range collections {
|
||||
recordIds[collection] = make([]string, 0)
|
||||
|
||||
for i, tc := range testCases {
|
||||
recordTime := now.Add(-tc.ageFromNow)
|
||||
|
||||
var stats string
|
||||
if collection == "system_stats" {
|
||||
stats = fmt.Sprintf(`{"cpu": %d.0, "mem": %d}`, i*10, i*100)
|
||||
} else {
|
||||
stats = fmt.Sprintf(`[{"name": "container%d", "cpu": %d.0, "mem": %d}]`, i, i*5, i*50)
|
||||
}
|
||||
|
||||
record, err := tests.CreateRecord(hub, collection, map[string]any{
|
||||
"system": system.Id,
|
||||
"type": tc.recordType,
|
||||
"stats": stats,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||
err = hub.SaveNoValidate(record)
|
||||
require.NoError(t, err)
|
||||
recordIds[collection] = append(recordIds[collection], record.Id)
|
||||
}
|
||||
}
|
||||
|
||||
// Run deletion
|
||||
err = records.DeleteOldSystemStats(hub)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify results
|
||||
for _, collection := range collections {
|
||||
for i, tc := range testCases {
|
||||
recordId := recordIds[collection][i]
|
||||
|
||||
// Try to find the record
|
||||
_, err := hub.FindRecordById(collection, recordId)
|
||||
|
||||
if tc.shouldBeKept {
|
||||
assert.NoError(t, err, "Record should exist: %s", tc.description)
|
||||
} else {
|
||||
assert.Error(t, err, "Record should be deleted: %s", tc.description)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteOldAlertsHistory tests the deleteOldAlertsHistory function
|
||||
func TestDeleteOldAlertsHistory(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create test users
|
||||
user1, err := tests.CreateUser(hub, "user1@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
user2, err := tests.CreateUser(hub, "user2@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user1.Id, user2.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
now := time.Now().UTC()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
user *core.Record
|
||||
alertCount int
|
||||
countToKeep int
|
||||
countBeforeDeletion int
|
||||
expectedAfterDeletion int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "User with few alerts (below threshold)",
|
||||
user: user1,
|
||||
alertCount: 100,
|
||||
countToKeep: 50,
|
||||
countBeforeDeletion: 150,
|
||||
expectedAfterDeletion: 100, // No deletion because below threshold
|
||||
description: "User with alerts below countBeforeDeletion should not have any deleted",
|
||||
},
|
||||
{
|
||||
name: "User with many alerts (above threshold)",
|
||||
user: user2,
|
||||
alertCount: 300,
|
||||
countToKeep: 100,
|
||||
countBeforeDeletion: 200,
|
||||
expectedAfterDeletion: 100, // Should be trimmed to countToKeep
|
||||
description: "User with alerts above countBeforeDeletion should be trimmed to countToKeep",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create alerts for this user
|
||||
for i := 0; i < tc.alertCount; i++ {
|
||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||
"user": tc.user.Id,
|
||||
"name": "CPU",
|
||||
"value": i + 1,
|
||||
"system": system.Id,
|
||||
"created": now.Add(-time.Duration(i) * time.Minute),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Count before deletion
|
||||
countBefore, err := hub.CountRecords("alerts_history",
|
||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(tc.alertCount), countBefore, "Initial count should match")
|
||||
|
||||
// Run deletion
|
||||
err = records.DeleteOldAlertsHistory(hub, tc.countToKeep, tc.countBeforeDeletion)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Count after deletion
|
||||
countAfter, err := hub.CountRecords("alerts_history",
|
||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, int64(tc.expectedAfterDeletion), countAfter, tc.description)
|
||||
|
||||
// If deletion occurred, verify the most recent records were kept
|
||||
if tc.expectedAfterDeletion < tc.alertCount {
|
||||
records, err := hub.FindRecordsByFilter("alerts_history",
|
||||
"user = {:user}",
|
||||
"-created", // Order by created DESC
|
||||
tc.countToKeep,
|
||||
0,
|
||||
map[string]any{"user": tc.user.Id})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, records, tc.expectedAfterDeletion, "Should have exactly countToKeep records")
|
||||
|
||||
// Verify records are in descending order by created time
|
||||
for i := 1; i < len(records); i++ {
|
||||
prev := records[i-1].GetDateTime("created").Time()
|
||||
curr := records[i].GetDateTime("created").Time()
|
||||
assert.True(t, prev.After(curr) || prev.Equal(curr),
|
||||
"Records should be ordered by created time (newest first)")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteOldAlertsHistoryEdgeCases tests edge cases for alerts history deletion
|
||||
func TestDeleteOldAlertsHistoryEdgeCases(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
t.Run("No users with excessive alerts", func(t *testing.T) {
|
||||
// Create user with few alerts
|
||||
user, err := tests.CreateUser(hub, "few@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
|
||||
// Create only 5 alerts (well below threshold)
|
||||
for i := range 5 {
|
||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||
"user": user.Id,
|
||||
"name": "CPU",
|
||||
"value": i + 1,
|
||||
"system": system.Id,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Should not error and should not delete anything
|
||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||
require.NoError(t, err)
|
||||
|
||||
count, err := hub.CountRecords("alerts_history")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(5), count, "All alerts should remain")
|
||||
})
|
||||
|
||||
t.Run("Empty alerts_history table", func(t *testing.T) {
|
||||
// Clear any existing alerts
|
||||
_, err := hub.DB().NewQuery("DELETE FROM alerts_history").Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should not error with empty table
|
||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
// TestDeleteOldSystemdServiceRecords tests systemd service cleanup via DeleteOldRecords
|
||||
func TestDeleteOldSystemdServiceRecords(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
rm := records.NewRecordManager(hub)
|
||||
|
||||
// Create test user and system
|
||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
// Create old systemd service records that should be deleted (older than 20 minutes)
|
||||
oldRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "nginx.service",
|
||||
"state": 0, // Active
|
||||
"sub": 1, // Running
|
||||
"cpu": 5.0,
|
||||
"cpuPeak": 10.0,
|
||||
"memory": 1024000,
|
||||
"memPeak": 2048000,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Set updated time to 25 minutes ago (should be deleted)
|
||||
oldRecord.SetRaw("updated", now.Add(-25*time.Minute).UnixMilli())
|
||||
err = hub.SaveNoValidate(oldRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create recent systemd service record that should be kept (within 20 minutes)
|
||||
recentRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "apache.service",
|
||||
"state": 1, // Inactive
|
||||
"sub": 0, // Dead
|
||||
"cpu": 2.0,
|
||||
"cpuPeak": 3.0,
|
||||
"memory": 512000,
|
||||
"memPeak": 1024000,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Set updated time to 10 minutes ago (should be kept)
|
||||
recentRecord.SetRaw("updated", now.Add(-10*time.Minute).UnixMilli())
|
||||
err = hub.SaveNoValidate(recentRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Count records before deletion
|
||||
countBefore, err := hub.CountRecords("systemd_services")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(2), countBefore, "Should have 2 systemd service records initially")
|
||||
|
||||
// Run deletion via RecordManager
|
||||
rm.DeleteOldRecords()
|
||||
|
||||
// Count records after deletion
|
||||
countAfter, err := hub.CountRecords("systemd_services")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(1), countAfter, "Should have 1 systemd service record after deletion")
|
||||
|
||||
// Verify the correct record was kept
|
||||
remainingRecords, err := hub.FindRecordsByFilter("systemd_services", "", "", 10, 0, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, remainingRecords, 1, "Should have exactly 1 record remaining")
|
||||
assert.Equal(t, "apache.service", remainingRecords[0].Get("name"), "The recent record should be kept")
|
||||
}
|
||||
@@ -3,430 +3,15 @@
|
||||
package records_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/henrygd/beszel/internal/records"
|
||||
"github.com/henrygd/beszel/internal/tests"
|
||||
|
||||
"github.com/pocketbase/dbx"
|
||||
"github.com/pocketbase/pocketbase/core"
|
||||
"github.com/pocketbase/pocketbase/tools/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestDeleteOldRecords tests the main DeleteOldRecords function
|
||||
func TestDeleteOldRecords(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
rm := records.NewRecordManager(hub)
|
||||
|
||||
// Create test user for alerts history
|
||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create test system
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
now := time.Now()
|
||||
|
||||
// Create old system_stats records that should be deleted
|
||||
var record *core.Record
|
||||
record, err = tests.CreateRecord(hub, "system_stats", map[string]any{
|
||||
"system": system.Id,
|
||||
"type": "1m",
|
||||
"stats": `{"cpu": 50.0, "mem": 1024}`,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// created is autodate field, so we need to set it manually
|
||||
record.SetRaw("created", now.UTC().Add(-2*time.Hour).Format(types.DefaultDateLayout))
|
||||
err = hub.SaveNoValidate(record)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, record)
|
||||
require.InDelta(t, record.GetDateTime("created").Time().UTC().Unix(), now.UTC().Add(-2*time.Hour).Unix(), 1)
|
||||
require.Equal(t, record.Get("system"), system.Id)
|
||||
require.Equal(t, record.Get("type"), "1m")
|
||||
|
||||
// Create recent system_stats record that should be kept
|
||||
_, err = tests.CreateRecord(hub, "system_stats", map[string]any{
|
||||
"system": system.Id,
|
||||
"type": "1m",
|
||||
"stats": `{"cpu": 30.0, "mem": 512}`,
|
||||
"created": now.Add(-30 * time.Minute), // 30 minutes old, should be kept
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create many alerts history records to trigger deletion
|
||||
for i := range 260 { // More than countBeforeDeletion (250)
|
||||
_, err = tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||
"user": user.Id,
|
||||
"name": "CPU",
|
||||
"value": i + 1,
|
||||
"system": system.Id,
|
||||
"created": now.Add(-time.Duration(i) * time.Minute),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Count records before deletion
|
||||
systemStatsCountBefore, err := hub.CountRecords("system_stats")
|
||||
require.NoError(t, err)
|
||||
alertsCountBefore, err := hub.CountRecords("alerts_history")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Run deletion
|
||||
rm.DeleteOldRecords()
|
||||
|
||||
// Count records after deletion
|
||||
systemStatsCountAfter, err := hub.CountRecords("system_stats")
|
||||
require.NoError(t, err)
|
||||
alertsCountAfter, err := hub.CountRecords("alerts_history")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify old system stats were deleted
|
||||
assert.Less(t, systemStatsCountAfter, systemStatsCountBefore, "Old system stats should be deleted")
|
||||
|
||||
// Verify alerts history was trimmed
|
||||
assert.Less(t, alertsCountAfter, alertsCountBefore, "Excessive alerts history should be deleted")
|
||||
assert.Equal(t, alertsCountAfter, int64(200), "Alerts count should be equal to countToKeep (200)")
|
||||
}
|
||||
|
||||
// TestDeleteOldSystemStats tests the deleteOldSystemStats function
|
||||
func TestDeleteOldSystemStats(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create test system
|
||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
// Test data for different record types and their retention periods
|
||||
testCases := []struct {
|
||||
recordType string
|
||||
retention time.Duration
|
||||
shouldBeKept bool
|
||||
ageFromNow time.Duration
|
||||
description string
|
||||
}{
|
||||
{"1m", time.Hour, true, 30 * time.Minute, "1m record within 1 hour should be kept"},
|
||||
{"1m", time.Hour, false, 2 * time.Hour, "1m record older than 1 hour should be deleted"},
|
||||
{"10m", 12 * time.Hour, true, 6 * time.Hour, "10m record within 12 hours should be kept"},
|
||||
{"10m", 12 * time.Hour, false, 24 * time.Hour, "10m record older than 12 hours should be deleted"},
|
||||
{"20m", 24 * time.Hour, true, 12 * time.Hour, "20m record within 24 hours should be kept"},
|
||||
{"20m", 24 * time.Hour, false, 48 * time.Hour, "20m record older than 24 hours should be deleted"},
|
||||
{"120m", 7 * 24 * time.Hour, true, 3 * 24 * time.Hour, "120m record within 7 days should be kept"},
|
||||
{"120m", 7 * 24 * time.Hour, false, 10 * 24 * time.Hour, "120m record older than 7 days should be deleted"},
|
||||
{"480m", 30 * 24 * time.Hour, true, 15 * 24 * time.Hour, "480m record within 30 days should be kept"},
|
||||
{"480m", 30 * 24 * time.Hour, false, 45 * 24 * time.Hour, "480m record older than 30 days should be deleted"},
|
||||
}
|
||||
|
||||
// Create test records for both system_stats and container_stats
|
||||
collections := []string{"system_stats", "container_stats"}
|
||||
recordIds := make(map[string][]string)
|
||||
|
||||
for _, collection := range collections {
|
||||
recordIds[collection] = make([]string, 0)
|
||||
|
||||
for i, tc := range testCases {
|
||||
recordTime := now.Add(-tc.ageFromNow)
|
||||
|
||||
var stats string
|
||||
if collection == "system_stats" {
|
||||
stats = fmt.Sprintf(`{"cpu": %d.0, "mem": %d}`, i*10, i*100)
|
||||
} else {
|
||||
stats = fmt.Sprintf(`[{"name": "container%d", "cpu": %d.0, "mem": %d}]`, i, i*5, i*50)
|
||||
}
|
||||
|
||||
record, err := tests.CreateRecord(hub, collection, map[string]any{
|
||||
"system": system.Id,
|
||||
"type": tc.recordType,
|
||||
"stats": stats,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||
err = hub.SaveNoValidate(record)
|
||||
require.NoError(t, err)
|
||||
recordIds[collection] = append(recordIds[collection], record.Id)
|
||||
}
|
||||
}
|
||||
|
||||
// Run deletion
|
||||
err = records.DeleteOldSystemStats(hub)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify results
|
||||
for _, collection := range collections {
|
||||
for i, tc := range testCases {
|
||||
recordId := recordIds[collection][i]
|
||||
|
||||
// Try to find the record
|
||||
_, err := hub.FindRecordById(collection, recordId)
|
||||
|
||||
if tc.shouldBeKept {
|
||||
assert.NoError(t, err, "Record should exist: %s", tc.description)
|
||||
} else {
|
||||
assert.Error(t, err, "Record should be deleted: %s", tc.description)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteOldAlertsHistory tests the deleteOldAlertsHistory function
|
||||
func TestDeleteOldAlertsHistory(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
// Create test users
|
||||
user1, err := tests.CreateUser(hub, "user1@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
user2, err := tests.CreateUser(hub, "user2@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user1.Id, user2.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
now := time.Now().UTC()
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
user *core.Record
|
||||
alertCount int
|
||||
countToKeep int
|
||||
countBeforeDeletion int
|
||||
expectedAfterDeletion int
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "User with few alerts (below threshold)",
|
||||
user: user1,
|
||||
alertCount: 100,
|
||||
countToKeep: 50,
|
||||
countBeforeDeletion: 150,
|
||||
expectedAfterDeletion: 100, // No deletion because below threshold
|
||||
description: "User with alerts below countBeforeDeletion should not have any deleted",
|
||||
},
|
||||
{
|
||||
name: "User with many alerts (above threshold)",
|
||||
user: user2,
|
||||
alertCount: 300,
|
||||
countToKeep: 100,
|
||||
countBeforeDeletion: 200,
|
||||
expectedAfterDeletion: 100, // Should be trimmed to countToKeep
|
||||
description: "User with alerts above countBeforeDeletion should be trimmed to countToKeep",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create alerts for this user
|
||||
for i := 0; i < tc.alertCount; i++ {
|
||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||
"user": tc.user.Id,
|
||||
"name": "CPU",
|
||||
"value": i + 1,
|
||||
"system": system.Id,
|
||||
"created": now.Add(-time.Duration(i) * time.Minute),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Count before deletion
|
||||
countBefore, err := hub.CountRecords("alerts_history",
|
||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(tc.alertCount), countBefore, "Initial count should match")
|
||||
|
||||
// Run deletion
|
||||
err = records.DeleteOldAlertsHistory(hub, tc.countToKeep, tc.countBeforeDeletion)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Count after deletion
|
||||
countAfter, err := hub.CountRecords("alerts_history",
|
||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, int64(tc.expectedAfterDeletion), countAfter, tc.description)
|
||||
|
||||
// If deletion occurred, verify the most recent records were kept
|
||||
if tc.expectedAfterDeletion < tc.alertCount {
|
||||
records, err := hub.FindRecordsByFilter("alerts_history",
|
||||
"user = {:user}",
|
||||
"-created", // Order by created DESC
|
||||
tc.countToKeep,
|
||||
0,
|
||||
map[string]any{"user": tc.user.Id})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, records, tc.expectedAfterDeletion, "Should have exactly countToKeep records")
|
||||
|
||||
// Verify records are in descending order by created time
|
||||
for i := 1; i < len(records); i++ {
|
||||
prev := records[i-1].GetDateTime("created").Time()
|
||||
curr := records[i].GetDateTime("created").Time()
|
||||
assert.True(t, prev.After(curr) || prev.Equal(curr),
|
||||
"Records should be ordered by created time (newest first)")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestDeleteOldAlertsHistoryEdgeCases tests edge cases for alerts history deletion
|
||||
func TestDeleteOldAlertsHistoryEdgeCases(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
t.Run("No users with excessive alerts", func(t *testing.T) {
|
||||
// Create user with few alerts
|
||||
user, err := tests.CreateUser(hub, "few@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
|
||||
// Create only 5 alerts (well below threshold)
|
||||
for i := range 5 {
|
||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||
"user": user.Id,
|
||||
"name": "CPU",
|
||||
"value": i + 1,
|
||||
"system": system.Id,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// Should not error and should not delete anything
|
||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||
require.NoError(t, err)
|
||||
|
||||
count, err := hub.CountRecords("alerts_history")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(5), count, "All alerts should remain")
|
||||
})
|
||||
|
||||
t.Run("Empty alerts_history table", func(t *testing.T) {
|
||||
// Clear any existing alerts
|
||||
_, err := hub.DB().NewQuery("DELETE FROM alerts_history").Execute()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should not error with empty table
|
||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||
require.NoError(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
// TestDeleteOldSystemdServiceRecords tests systemd service cleanup via DeleteOldRecords
|
||||
func TestDeleteOldSystemdServiceRecords(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
defer hub.Cleanup()
|
||||
|
||||
rm := records.NewRecordManager(hub)
|
||||
|
||||
// Create test user and system
|
||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||
require.NoError(t, err)
|
||||
|
||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||
"name": "test-system",
|
||||
"host": "localhost",
|
||||
"port": "45876",
|
||||
"status": "up",
|
||||
"users": []string{user.Id},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
// Create old systemd service records that should be deleted (older than 20 minutes)
|
||||
oldRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "nginx.service",
|
||||
"state": 0, // Active
|
||||
"sub": 1, // Running
|
||||
"cpu": 5.0,
|
||||
"cpuPeak": 10.0,
|
||||
"memory": 1024000,
|
||||
"memPeak": 2048000,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Set updated time to 25 minutes ago (should be deleted)
|
||||
oldRecord.SetRaw("updated", now.Add(-25*time.Minute).UnixMilli())
|
||||
err = hub.SaveNoValidate(oldRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create recent systemd service record that should be kept (within 20 minutes)
|
||||
recentRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||
"system": system.Id,
|
||||
"name": "apache.service",
|
||||
"state": 1, // Inactive
|
||||
"sub": 0, // Dead
|
||||
"cpu": 2.0,
|
||||
"cpuPeak": 3.0,
|
||||
"memory": 512000,
|
||||
"memPeak": 1024000,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
// Set updated time to 10 minutes ago (should be kept)
|
||||
recentRecord.SetRaw("updated", now.Add(-10*time.Minute).UnixMilli())
|
||||
err = hub.SaveNoValidate(recentRecord)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Count records before deletion
|
||||
countBefore, err := hub.CountRecords("systemd_services")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(2), countBefore, "Should have 2 systemd service records initially")
|
||||
|
||||
// Run deletion via RecordManager
|
||||
rm.DeleteOldRecords()
|
||||
|
||||
// Count records after deletion
|
||||
countAfter, err := hub.CountRecords("systemd_services")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, int64(1), countAfter, "Should have 1 systemd service record after deletion")
|
||||
|
||||
// Verify the correct record was kept
|
||||
remainingRecords, err := hub.FindRecordsByFilter("systemd_services", "", "", 10, 0, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, remainingRecords, 1, "Should have exactly 1 record remaining")
|
||||
assert.Equal(t, "apache.service", remainingRecords[0].Get("name"), "The recent record should be kept")
|
||||
}
|
||||
|
||||
// TestRecordManagerCreation tests RecordManager creation
|
||||
func TestRecordManagerCreation(t *testing.T) {
|
||||
hub, err := tests.NewTestHub(t.TempDir())
|
||||
|
||||
@@ -41,7 +41,7 @@
|
||||
"recharts": "^2.15.4",
|
||||
"shiki": "^3.13.0",
|
||||
"tailwind-merge": "^3.3.1",
|
||||
"valibot": "^0.42.1",
|
||||
"valibot": "^1.3.1",
|
||||
},
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "2.2.4",
|
||||
@@ -927,7 +927,7 @@
|
||||
|
||||
"util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="],
|
||||
|
||||
"valibot": ["valibot@0.42.1", "", { "peerDependencies": { "typescript": ">=5" } }, "sha512-3keXV29Ar5b//Hqi4MbSdV7lfVp6zuYLZuA9V1PvQUsXqogr+u5lvLPLk3A4f74VUXDnf/JfWMN6sB+koJ/FFw=="],
|
||||
"valibot": ["valibot@1.3.1", "", { "peerDependencies": { "typescript": ">=5" }, "optionalPeers": ["typescript"] }, "sha512-sfdRir/QFM0JaF22hqTroPc5xy4DimuGQVKFrzF1YfGwaS1nJot3Y8VqMdLO2Lg27fMzat2yD3pY5PbAYO39Gg=="],
|
||||
|
||||
"vfile": ["vfile@6.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile-message": "^4.0.0" } }, "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q=="],
|
||||
|
||||
|
||||
@@ -22,11 +22,7 @@
|
||||
})();
|
||||
</script>
|
||||
<script>
|
||||
globalThis.BESZEL = {
|
||||
BASE_PATH: "%BASE_URL%",
|
||||
HUB_VERSION: "{{V}}",
|
||||
HUB_URL: "{{HUB_URL}}"
|
||||
}
|
||||
globalThis.BESZEL = "{info}"
|
||||
</script>
|
||||
</head>
|
||||
<body>
|
||||
|
||||
407
internal/site/package-lock.json
generated
407
internal/site/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "beszel",
|
||||
"version": "0.18.3",
|
||||
"version": "0.18.7",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "beszel",
|
||||
"version": "0.18.3",
|
||||
"version": "0.18.7",
|
||||
"dependencies": {
|
||||
"@henrygd/queue": "^1.0.7",
|
||||
"@henrygd/semaphore": "^0.0.2",
|
||||
@@ -44,7 +44,7 @@
|
||||
"recharts": "^2.15.4",
|
||||
"shiki": "^3.13.0",
|
||||
"tailwind-merge": "^3.3.1",
|
||||
"valibot": "^0.42.1"
|
||||
"valibot": "^1.3.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "2.2.4",
|
||||
@@ -986,29 +986,6 @@
|
||||
"integrity": "sha512-N3W7MKwTRmAxOjeG0NAT18oe2Xn3KdjkpMR6crbkF1UDamMGPjyigqEsefiv+qTaxibtc1a+zXCVzb9YXANVqw==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@isaacs/balanced-match": {
|
||||
"version": "4.0.1",
|
||||
"resolved": "https://registry.npmjs.org/@isaacs/balanced-match/-/balanced-match-4.0.1.tgz",
|
||||
"integrity": "sha512-yzMTt9lEb8Gv7zRioUilSglI0c0smZ9k5D65677DLWLtWJaXIS3CqcGyUFByYKlnUj6TkjLVs54fBl6+TiGQDQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": "20 || >=22"
|
||||
}
|
||||
},
|
||||
"node_modules/@isaacs/brace-expansion": {
|
||||
"version": "5.0.0",
|
||||
"resolved": "https://registry.npmjs.org/@isaacs/brace-expansion/-/brace-expansion-5.0.0.tgz",
|
||||
"integrity": "sha512-ZT55BDLV0yv0RBm2czMiZ+SqCGO7AvmOM3G/w2xhVPH+te0aKgFjmBvGlL1dH+ql2tgGO3MVrbb3jCKyvpgnxA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@isaacs/balanced-match": "^4.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": "20 || >=22"
|
||||
}
|
||||
},
|
||||
"node_modules/@isaacs/cliui": {
|
||||
"version": "8.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
|
||||
@@ -1243,9 +1220,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@lingui/cli/node_modules/picomatch": {
|
||||
"version": "2.3.1",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
|
||||
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
|
||||
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
@@ -2408,9 +2385,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@rollup/rollup-android-arm-eabi": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.48.1.tgz",
|
||||
"integrity": "sha512-rGmb8qoG/zdmKoYELCBwu7vt+9HxZ7Koos3pD0+sH5fR3u3Wb/jGcpnqxcnWsPEKDUyzeLSqksN8LJtgXjqBYw==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.1.tgz",
|
||||
"integrity": "sha512-d6FinEBLdIiK+1uACUttJKfgZREXrF0Qc2SmLII7W2AD8FfiZ9Wjd+rD/iRuf5s5dWrr1GgwXCvPqOuDquOowA==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
@@ -2422,9 +2399,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-android-arm64": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.48.1.tgz",
|
||||
"integrity": "sha512-4e9WtTxrk3gu1DFE+imNJr4WsL13nWbD/Y6wQcyku5qadlKHY3OQ3LJ/INrrjngv2BJIHnIzbqMk1GTAC2P8yQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.1.tgz",
|
||||
"integrity": "sha512-YjG/EwIDvvYI1YvYbHvDz/BYHtkY4ygUIXHnTdLhG+hKIQFBiosfWiACWortsKPKU/+dUwQQCKQM3qrDe8c9BA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2436,9 +2413,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-darwin-arm64": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.48.1.tgz",
|
||||
"integrity": "sha512-+XjmyChHfc4TSs6WUQGmVf7Hkg8ferMAE2aNYYWjiLzAS/T62uOsdfnqv+GHRjq7rKRnYh4mwWb4Hz7h/alp8A==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.1.tgz",
|
||||
"integrity": "sha512-mjCpF7GmkRtSJwon+Rq1N8+pI+8l7w5g9Z3vWj4T7abguC4Czwi3Yu/pFaLvA3TTeMVjnu3ctigusqWUfjZzvw==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2450,9 +2427,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-darwin-x64": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.48.1.tgz",
|
||||
"integrity": "sha512-upGEY7Ftw8M6BAJyGwnwMw91rSqXTcOKZnnveKrVWsMTF8/k5mleKSuh7D4v4IV1pLxKAk3Tbs0Lo9qYmii5mQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.1.tgz",
|
||||
"integrity": "sha512-haZ7hJ1JT4e9hqkoT9R/19XW2QKqjfJVv+i5AGg57S+nLk9lQnJ1F/eZloRO3o9Scy9CM3wQ9l+dkXtcBgN5Ew==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -2464,9 +2441,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-freebsd-arm64": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.48.1.tgz",
|
||||
"integrity": "sha512-P9ViWakdoynYFUOZhqq97vBrhuvRLAbN/p2tAVJvhLb8SvN7rbBnJQcBu8e/rQts42pXGLVhfsAP0k9KXWa3nQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.1.tgz",
|
||||
"integrity": "sha512-czw90wpQq3ZsAVBlinZjAYTKduOjTywlG7fEeWKUA7oCmpA8xdTkxZZlwNJKWqILlq0wehoZcJYfBvOyhPTQ6w==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2478,9 +2455,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-freebsd-x64": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.48.1.tgz",
|
||||
"integrity": "sha512-VLKIwIpnBya5/saccM8JshpbxfyJt0Dsli0PjXozHwbSVaHTvWXJH1bbCwPXxnMzU4zVEfgD1HpW3VQHomi2AQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.1.tgz",
|
||||
"integrity": "sha512-KVB2rqsxTHuBtfOeySEyzEOB7ltlB/ux38iu2rBQzkjbwRVlkhAGIEDiiYnO2kFOkJp+Z7pUXKyrRRFuFUKt+g==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -2492,9 +2469,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.48.1.tgz",
|
||||
"integrity": "sha512-3zEuZsXfKaw8n/yF7t8N6NNdhyFw3s8xJTqjbTDXlipwrEHo4GtIKcMJr5Ed29leLpB9AugtAQpAHW0jvtKKaQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.1.tgz",
|
||||
"integrity": "sha512-L+34Qqil+v5uC0zEubW7uByo78WOCIrBvci69E7sFASRl0X7b/MB6Cqd1lky/CtcSVTydWa2WZwFuWexjS5o6g==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
@@ -2506,9 +2483,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.48.1.tgz",
|
||||
"integrity": "sha512-leo9tOIlKrcBmmEypzunV/2w946JeLbTdDlwEZ7OnnsUyelZ72NMnT4B2vsikSgwQifjnJUbdXzuW4ToN1wV+Q==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.1.tgz",
|
||||
"integrity": "sha512-n83O8rt4v34hgFzlkb1ycniJh7IR5RCIqt6mz1VRJD6pmhRi0CXdmfnLu9dIUS6buzh60IvACM842Ffb3xd6Gg==",
|
||||
"cpu": [
|
||||
"arm"
|
||||
],
|
||||
@@ -2520,9 +2497,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-arm64-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-Vy/WS4z4jEyvnJm+CnPfExIv5sSKqZrUr98h03hpAMbE2aI0aD2wvK6GiSe8Gx2wGp3eD81cYDpLLBqNb2ydwQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-Nql7sTeAzhTAja3QXeAI48+/+GjBJ+QmAH13snn0AJSNL50JsDqotyudHyMbO2RbJkskbMbFJfIJKWA6R1LCJQ==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2534,9 +2511,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-arm64-musl": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.48.1.tgz",
|
||||
"integrity": "sha512-x5Kzn7XTwIssU9UYqWDB9VpLpfHYuXw5c6bJr4Mzv9kIv242vmJHbI5PJJEnmBYitUIfoMCODDhR7KoZLot2VQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.1.tgz",
|
||||
"integrity": "sha512-+pUymDhd0ys9GcKZPPWlFiZ67sTWV5UU6zOJat02M1+PiuSGDziyRuI/pPue3hoUwm2uGfxdL+trT6Z9rxnlMA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2547,10 +2524,24 @@
|
||||
"linux"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-loongarch64-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-yzCaBbwkkWt/EcgJOKDUdUpMHjhiZT/eDktOPWvSRpqrVE04p0Nd6EGV4/g7MARXXeOqstflqsKuXVM3H9wOIQ==",
|
||||
"node_modules/@rollup/rollup-linux-loong64-gnu": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-VSvgvQeIcsEvY4bKDHEDWcpW4Yw7BtlKG1GUT4FzBUlEKQK0rWHYBqQt6Fm2taXS+1bXvJT6kICu5ZwqKCnvlQ==",
|
||||
"cpu": [
|
||||
"loong64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-loong64-musl": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.1.tgz",
|
||||
"integrity": "sha512-4LqhUomJqwe641gsPp6xLfhqWMbQV04KtPp7/dIp0nzPxAkNY1AbwL5W0MQpcalLYk07vaW9Kp1PBhdpZYYcEw==",
|
||||
"cpu": [
|
||||
"loong64"
|
||||
],
|
||||
@@ -2562,9 +2553,23 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-ppc64-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-UK0WzWUjMAJccHIeOpPhPcKBqax7QFg47hwZTp6kiMhQHeOYJeaMwzeRZe1q5IiTKsaLnHu9s6toSYVUlZ2QtQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-tLQQ9aPvkBxOc/EUT6j3pyeMD6Hb8QF2BTBnCQWP/uu1lhc9AIrIjKnLYMEroIz/JvtGYgI9dF3AxHZNaEH0rw==",
|
||||
"cpu": [
|
||||
"ppc64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"linux"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-ppc64-musl": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.1.tgz",
|
||||
"integrity": "sha512-RMxFhJwc9fSXP6PqmAz4cbv3kAyvD1etJFjTx4ONqFP9DkTkXsAMU4v3Vyc5BgzC+anz7nS/9tp4obsKfqkDHg==",
|
||||
"cpu": [
|
||||
"ppc64"
|
||||
],
|
||||
@@ -2576,9 +2581,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-3NADEIlt+aCdCbWVZ7D3tBjBX1lHpXxcvrLt/kdXTiBrOds8APTdtk2yRL2GgmnSVeX4YS1JIf0imFujg78vpw==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-QKgFl+Yc1eEk6MmOBfRHYF6lTxiiiV3/z/BRrbSiW2I7AFTXoBFvdMEyglohPj//2mZS4hDOqeB0H1ACh3sBbg==",
|
||||
"cpu": [
|
||||
"riscv64"
|
||||
],
|
||||
@@ -2590,9 +2595,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-riscv64-musl": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.48.1.tgz",
|
||||
"integrity": "sha512-euuwm/QTXAMOcyiFCcrx0/S2jGvFlKJ2Iro8rsmYL53dlblp3LkUQVFzEidHhvIPPvcIsxDhl2wkBE+I6YVGzA==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.1.tgz",
|
||||
"integrity": "sha512-RAjXjP/8c6ZtzatZcA1RaQr6O1TRhzC+adn8YZDnChliZHviqIjmvFwHcxi4JKPSDAt6Uhf/7vqcBzQJy0PDJg==",
|
||||
"cpu": [
|
||||
"riscv64"
|
||||
],
|
||||
@@ -2604,9 +2609,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-s390x-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-w8mULUjmPdWLJgmTYJx/W6Qhln1a+yqvgwmGXcQl2vFBkWsKGUBRbtLRuKJUln8Uaimf07zgJNxOhHOvjSQmBQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-wcuocpaOlaL1COBYiA89O6yfjlp3RwKDeTIA0hM7OpmhR1Bjo9j31G1uQVpDlTvwxGn2nQs65fBFL5UFd76FcQ==",
|
||||
"cpu": [
|
||||
"s390x"
|
||||
],
|
||||
@@ -2618,9 +2623,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-x64-gnu": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.48.1.tgz",
|
||||
"integrity": "sha512-90taWXCWxTbClWuMZD0DKYohY1EovA+W5iytpE89oUPmT5O1HFdf8cuuVIylE6vCbrGdIGv85lVRzTcpTRZ+kA==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-77PpsFQUCOiZR9+LQEFg9GClyfkNXj1MP6wRnzYs0EeWbPcHs02AXu4xuUbM1zhwn3wqaizle3AEYg5aeoohhg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -2632,9 +2637,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-linux-x64-musl": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.48.1.tgz",
|
||||
"integrity": "sha512-2Gu29SkFh1FfTRuN1GR1afMuND2GKzlORQUP3mNMJbqdndOg7gNsa81JnORctazHRokiDzQ5+MLE5XYmZW5VWg==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.1.tgz",
|
||||
"integrity": "sha512-5cIATbk5vynAjqqmyBjlciMJl1+R/CwX9oLk/EyiFXDWd95KpHdrOJT//rnUl4cUcskrd0jCCw3wpZnhIHdD9w==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -2645,10 +2650,38 @@
|
||||
"linux"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-openbsd-x64": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.1.tgz",
|
||||
"integrity": "sha512-cl0w09WsCi17mcmWqqglez9Gk8isgeWvoUZ3WiJFYSR3zjBQc2J5/ihSjpl+VLjPqjQ/1hJRcqBfLjssREQILw==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"openbsd"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-openharmony-arm64": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.1.tgz",
|
||||
"integrity": "sha512-4Cv23ZrONRbNtbZa37mLSueXUCtN7MXccChtKpUnQNgF010rjrjfHx3QxkS2PI7LqGT5xXyYs1a7LbzAwT0iCA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"openharmony"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-win32-arm64-msvc": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.48.1.tgz",
|
||||
"integrity": "sha512-6kQFR1WuAO50bxkIlAVeIYsz3RUx+xymwhTo9j94dJ+kmHe9ly7muH23sdfWduD0BA8pD9/yhonUvAjxGh34jQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.1.tgz",
|
||||
"integrity": "sha512-i1okWYkA4FJICtr7KpYzFpRTHgy5jdDbZiWfvny21iIKky5YExiDXP+zbXzm3dUcFpkEeYNHgQ5fuG236JPq0g==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
@@ -2660,9 +2693,9 @@
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-win32-ia32-msvc": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.48.1.tgz",
|
||||
"integrity": "sha512-RUyZZ/mga88lMI3RlXFs4WQ7n3VyU07sPXmMG7/C1NOi8qisUg57Y7LRarqoGoAiopmGmChUhSwfpvQ3H5iGSQ==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.1.tgz",
|
||||
"integrity": "sha512-u09m3CuwLzShA0EYKMNiFgcjjzwqtUMLmuCJLeZWjjOYA3IT2Di09KaxGBTP9xVztWyIWjVdsB2E9goMjZvTQg==",
|
||||
"cpu": [
|
||||
"ia32"
|
||||
],
|
||||
@@ -2673,10 +2706,24 @@
|
||||
"win32"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-win32-x64-gnu": {
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.1.tgz",
|
||||
"integrity": "sha512-k+600V9Zl1CM7eZxJgMyTUzmrmhB/0XZnF4pRypKAlAgxmedUA+1v9R+XOFv56W4SlHEzfeMtzujLJD22Uz5zg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"os": [
|
||||
"win32"
|
||||
]
|
||||
},
|
||||
"node_modules/@rollup/rollup-win32-x64-msvc": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.48.1.tgz",
|
||||
"integrity": "sha512-8a/caCUN4vkTChxkaIJcMtwIVcBhi4X2PQRoT+yCK3qRYaZ7cURrmJFL5Ux9H9RaMIXj9RuihckdmkBX3zZsgg==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.1.tgz",
|
||||
"integrity": "sha512-lWMnixq/QzxyhTV6NjQJ4SFo1J6PvOX8vUx5Wb4bBPsEb+8xZ89Bz6kOXpfXj9ak9AHTQVQzlgzBEc1SyM27xQ==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
@@ -3235,6 +3282,66 @@
|
||||
"node": ">=14.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": {
|
||||
"version": "1.4.5",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"@emnapi/wasi-threads": "1.0.4",
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": {
|
||||
"version": "1.4.5",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": {
|
||||
"version": "1.0.4",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": {
|
||||
"version": "0.2.12",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"@emnapi/core": "^1.4.3",
|
||||
"@emnapi/runtime": "^1.4.3",
|
||||
"@tybys/wasm-util": "^0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": {
|
||||
"version": "0.10.0",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "MIT",
|
||||
"optional": true,
|
||||
"dependencies": {
|
||||
"tslib": "^2.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": {
|
||||
"version": "2.8.0",
|
||||
"dev": true,
|
||||
"inBundle": true,
|
||||
"license": "0BSD",
|
||||
"optional": true
|
||||
},
|
||||
"node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
|
||||
"version": "4.1.12",
|
||||
"resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.12.tgz",
|
||||
@@ -3589,9 +3696,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/anymatch/node_modules/picomatch": {
|
||||
"version": "2.3.1",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
|
||||
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
|
||||
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
@@ -3620,6 +3727,16 @@
|
||||
"node": ">=10"
|
||||
}
|
||||
},
|
||||
"node_modules/balanced-match": {
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz",
|
||||
"integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": "18 || 20 || >=22"
|
||||
}
|
||||
},
|
||||
"node_modules/base64-js": {
|
||||
"version": "1.5.1",
|
||||
"resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz",
|
||||
@@ -3666,6 +3783,19 @@
|
||||
"readable-stream": "^3.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/brace-expansion": {
|
||||
"version": "5.0.5",
|
||||
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz",
|
||||
"integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"balanced-match": "^4.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": "18 || 20 || >=22"
|
||||
}
|
||||
},
|
||||
"node_modules/braces": {
|
||||
"version": "3.0.3",
|
||||
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
|
||||
@@ -5072,9 +5202,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/lodash": {
|
||||
"version": "4.17.23",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.23.tgz",
|
||||
"integrity": "sha512-LgVTMpQtIopCi79SJeDiP0TfWi5CNEc/L/aRdTh3yIvmZXTnheWpKjSZhnvMl8iXbC1tFg9gdHHDMLoV7CnG+w==",
|
||||
"version": "4.18.1",
|
||||
"resolved": "https://registry.npmjs.org/lodash/-/lodash-4.18.1.tgz",
|
||||
"integrity": "sha512-dMInicTPVE8d1e5otfwmmjlxkZoUpiVLwyeTdUsi/Caj/gfzzblBcCE5sRHV/AsjuCmxWrte2TNGSYuCeCq+0Q==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/lodash.sortby": {
|
||||
@@ -5267,9 +5397,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/micromatch/node_modules/picomatch": {
|
||||
"version": "2.3.1",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
|
||||
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz",
|
||||
"integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
@@ -5290,16 +5420,16 @@
|
||||
}
|
||||
},
|
||||
"node_modules/minimatch": {
|
||||
"version": "10.1.1",
|
||||
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.1.1.tgz",
|
||||
"integrity": "sha512-enIvLvRAFZYXJzkCYG5RKmPfrFArdLv+R+lbQ53BmIMLIry74bjKzX6iHAm8WYamJkhSSEabrWN5D97XnKObjQ==",
|
||||
"version": "10.2.5",
|
||||
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.5.tgz",
|
||||
"integrity": "sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg==",
|
||||
"dev": true,
|
||||
"license": "BlueOak-1.0.0",
|
||||
"dependencies": {
|
||||
"@isaacs/brace-expansion": "^5.0.0"
|
||||
"brace-expansion": "^5.0.5"
|
||||
},
|
||||
"engines": {
|
||||
"node": "20 || >=22"
|
||||
"node": "18 || 20 || >=22"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/isaacs"
|
||||
@@ -5575,9 +5705,9 @@
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/picomatch": {
|
||||
"version": "4.0.3",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
|
||||
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
|
||||
"version": "4.0.4",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz",
|
||||
"integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
@@ -5956,9 +6086,9 @@
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/rollup": {
|
||||
"version": "4.48.1",
|
||||
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.48.1.tgz",
|
||||
"integrity": "sha512-jVG20NvbhTYDkGAty2/Yh7HK6/q3DGSRH4o8ALKGArmMuaauM9kLfoMZ+WliPwA5+JHr2lTn3g557FxBV87ifg==",
|
||||
"version": "4.60.1",
|
||||
"resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.1.tgz",
|
||||
"integrity": "sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
@@ -5972,26 +6102,31 @@
|
||||
"npm": ">=8.0.0"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@rollup/rollup-android-arm-eabi": "4.48.1",
|
||||
"@rollup/rollup-android-arm64": "4.48.1",
|
||||
"@rollup/rollup-darwin-arm64": "4.48.1",
|
||||
"@rollup/rollup-darwin-x64": "4.48.1",
|
||||
"@rollup/rollup-freebsd-arm64": "4.48.1",
|
||||
"@rollup/rollup-freebsd-x64": "4.48.1",
|
||||
"@rollup/rollup-linux-arm-gnueabihf": "4.48.1",
|
||||
"@rollup/rollup-linux-arm-musleabihf": "4.48.1",
|
||||
"@rollup/rollup-linux-arm64-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-arm64-musl": "4.48.1",
|
||||
"@rollup/rollup-linux-loongarch64-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-ppc64-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-riscv64-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-riscv64-musl": "4.48.1",
|
||||
"@rollup/rollup-linux-s390x-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-x64-gnu": "4.48.1",
|
||||
"@rollup/rollup-linux-x64-musl": "4.48.1",
|
||||
"@rollup/rollup-win32-arm64-msvc": "4.48.1",
|
||||
"@rollup/rollup-win32-ia32-msvc": "4.48.1",
|
||||
"@rollup/rollup-win32-x64-msvc": "4.48.1",
|
||||
"@rollup/rollup-android-arm-eabi": "4.60.1",
|
||||
"@rollup/rollup-android-arm64": "4.60.1",
|
||||
"@rollup/rollup-darwin-arm64": "4.60.1",
|
||||
"@rollup/rollup-darwin-x64": "4.60.1",
|
||||
"@rollup/rollup-freebsd-arm64": "4.60.1",
|
||||
"@rollup/rollup-freebsd-x64": "4.60.1",
|
||||
"@rollup/rollup-linux-arm-gnueabihf": "4.60.1",
|
||||
"@rollup/rollup-linux-arm-musleabihf": "4.60.1",
|
||||
"@rollup/rollup-linux-arm64-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-arm64-musl": "4.60.1",
|
||||
"@rollup/rollup-linux-loong64-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-loong64-musl": "4.60.1",
|
||||
"@rollup/rollup-linux-ppc64-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-ppc64-musl": "4.60.1",
|
||||
"@rollup/rollup-linux-riscv64-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-riscv64-musl": "4.60.1",
|
||||
"@rollup/rollup-linux-s390x-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-x64-gnu": "4.60.1",
|
||||
"@rollup/rollup-linux-x64-musl": "4.60.1",
|
||||
"@rollup/rollup-openbsd-x64": "4.60.1",
|
||||
"@rollup/rollup-openharmony-arm64": "4.60.1",
|
||||
"@rollup/rollup-win32-arm64-msvc": "4.60.1",
|
||||
"@rollup/rollup-win32-ia32-msvc": "4.60.1",
|
||||
"@rollup/rollup-win32-x64-gnu": "4.60.1",
|
||||
"@rollup/rollup-win32-x64-msvc": "4.60.1",
|
||||
"fsevents": "~2.3.2"
|
||||
}
|
||||
},
|
||||
@@ -6290,9 +6425,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/tar": {
|
||||
"version": "7.5.7",
|
||||
"resolved": "https://registry.npmjs.org/tar/-/tar-7.5.7.tgz",
|
||||
"integrity": "sha512-fov56fJiRuThVFXD6o6/Q354S7pnWMJIVlDBYijsTNx6jKSE4pvrDTs6lUnmGvNyfJwFQQwWy3owKz1ucIhveQ==",
|
||||
"version": "7.5.13",
|
||||
"resolved": "https://registry.npmjs.org/tar/-/tar-7.5.13.tgz",
|
||||
"integrity": "sha512-tOG/7GyXpFevhXVh8jOPJrmtRpOTsYqUIkVdVooZYJS/z8WhfQUX8RJILmeuJNinGAMSu1veBr4asSHFt5/hng==",
|
||||
"dev": true,
|
||||
"license": "BlueOak-1.0.0",
|
||||
"dependencies": {
|
||||
@@ -6559,9 +6694,9 @@
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/valibot": {
|
||||
"version": "0.42.1",
|
||||
"resolved": "https://registry.npmjs.org/valibot/-/valibot-0.42.1.tgz",
|
||||
"integrity": "sha512-3keXV29Ar5b//Hqi4MbSdV7lfVp6zuYLZuA9V1PvQUsXqogr+u5lvLPLk3A4f74VUXDnf/JfWMN6sB+koJ/FFw==",
|
||||
"version": "1.3.1",
|
||||
"resolved": "https://registry.npmjs.org/valibot/-/valibot-1.3.1.tgz",
|
||||
"integrity": "sha512-sfdRir/QFM0JaF22hqTroPc5xy4DimuGQVKFrzF1YfGwaS1nJot3Y8VqMdLO2Lg27fMzat2yD3pY5PbAYO39Gg==",
|
||||
"license": "MIT",
|
||||
"peerDependencies": {
|
||||
"typescript": ">=5"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "beszel",
|
||||
"private": true,
|
||||
"version": "0.18.6",
|
||||
"version": "0.18.7",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "vite --host",
|
||||
@@ -52,7 +52,7 @@
|
||||
"recharts": "^2.15.4",
|
||||
"shiki": "^3.13.0",
|
||||
"tailwind-merge": "^3.3.1",
|
||||
"valibot": "^0.42.1"
|
||||
"valibot": "^1.3.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "2.2.4",
|
||||
|
||||
@@ -2,11 +2,13 @@ import { t } from "@lingui/core/macro"
|
||||
import { Plural, Trans } from "@lingui/react/macro"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { getPagePath } from "@nanostores/router"
|
||||
import { GlobeIcon, ServerIcon } from "lucide-react"
|
||||
import { ChevronDownIcon, GlobeIcon, ServerIcon } from "lucide-react"
|
||||
import { lazy, memo, Suspense, useMemo, useState } from "react"
|
||||
import { $router, Link } from "@/components/router"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Checkbox } from "@/components/ui/checkbox"
|
||||
import { DialogDescription, DialogHeader, DialogTitle } from "@/components/ui/dialog"
|
||||
import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { Switch } from "@/components/ui/switch"
|
||||
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"
|
||||
@@ -64,11 +66,57 @@ const deleteAlerts = debounce(async ({ name, systems }: { name: string; systems:
|
||||
|
||||
export const AlertDialogContent = memo(function AlertDialogContent({ system }: { system: SystemRecord }) {
|
||||
const alerts = useStore($alerts)
|
||||
const systems = useStore($systems)
|
||||
const [overwriteExisting, setOverwriteExisting] = useState<boolean | "indeterminate">(false)
|
||||
const [currentTab, setCurrentTab] = useState("system")
|
||||
// copyKey is used to force remount AlertContent components with
|
||||
// new alert data after copying alerts from another system
|
||||
const [copyKey, setCopyKey] = useState(0)
|
||||
|
||||
const systemAlerts = alerts[system.id] ?? new Map()
|
||||
|
||||
// Systems that have at least one alert configured (excluding the current system)
|
||||
const systemsWithAlerts = useMemo(
|
||||
() => systems.filter((s) => s.id !== system.id && alerts[s.id]?.size),
|
||||
[systems, alerts, system.id]
|
||||
)
|
||||
|
||||
async function copyAlertsFromSystem(sourceSystemId: string) {
|
||||
const sourceAlerts = $alerts.get()[sourceSystemId]
|
||||
if (!sourceAlerts?.size) return
|
||||
try {
|
||||
const currentTargetAlerts = $alerts.get()[system.id] ?? new Map()
|
||||
// Alert names present on target but absent from source should be deleted
|
||||
const namesToDelete = Array.from(currentTargetAlerts.keys()).filter((name) => !sourceAlerts.has(name))
|
||||
await Promise.all([
|
||||
...Array.from(sourceAlerts.values()).map(({ name, value, min }) =>
|
||||
pb.send<{ success: boolean }>(endpoint, {
|
||||
method: "POST",
|
||||
body: { name, value, min, systems: [system.id], overwrite: true },
|
||||
requestKey: name,
|
||||
})
|
||||
),
|
||||
...namesToDelete.map((name) =>
|
||||
pb.send<{ success: boolean }>(endpoint, {
|
||||
method: "DELETE",
|
||||
body: { name, systems: [system.id] },
|
||||
requestKey: name,
|
||||
})
|
||||
),
|
||||
])
|
||||
// Optimistically update the store so components re-mount with correct data
|
||||
// before the realtime subscription event arrives.
|
||||
const newSystemAlerts = new Map<string, AlertRecord>()
|
||||
for (const alert of sourceAlerts.values()) {
|
||||
newSystemAlerts.set(alert.name, { ...alert, system: system.id, triggered: false })
|
||||
}
|
||||
$alerts.setKey(system.id, newSystemAlerts)
|
||||
setCopyKey((k) => k + 1)
|
||||
} catch (error) {
|
||||
failedUpdateToast(error)
|
||||
}
|
||||
}
|
||||
|
||||
// We need to keep a copy of alerts when we switch to global tab. If we always compare to
|
||||
// current alerts, it will only be updated when first checked, then won't be updated because
|
||||
// after that it exists.
|
||||
@@ -93,18 +141,37 @@ export const AlertDialogContent = memo(function AlertDialogContent({ system }: {
|
||||
</DialogDescription>
|
||||
</DialogHeader>
|
||||
<Tabs defaultValue="system" onValueChange={setCurrentTab}>
|
||||
<TabsList className="mb-1 -mt-0.5">
|
||||
<TabsTrigger value="system">
|
||||
<ServerIcon className="me-2 h-3.5 w-3.5" />
|
||||
<span className="truncate max-w-60">{system.name}</span>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="global">
|
||||
<GlobeIcon className="me-1.5 h-3.5 w-3.5" />
|
||||
<Trans>All Systems</Trans>
|
||||
</TabsTrigger>
|
||||
</TabsList>
|
||||
<div className="flex items-center justify-between mb-1 -mt-0.5">
|
||||
<TabsList>
|
||||
<TabsTrigger value="system">
|
||||
<ServerIcon className="me-2 h-3.5 w-3.5" />
|
||||
<span className="truncate max-w-60">{system.name}</span>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="global">
|
||||
<GlobeIcon className="me-1.5 h-3.5 w-3.5" />
|
||||
<Trans>All Systems</Trans>
|
||||
</TabsTrigger>
|
||||
</TabsList>
|
||||
{systemsWithAlerts.length > 0 && currentTab === "system" && (
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button variant="ghost" size="sm" className="text-muted-foreground text-xs gap-1.5">
|
||||
<Trans context="Copy alerts from another system">Copy from</Trans>
|
||||
<ChevronDownIcon className="h-3.5 w-3.5" />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align="end" className="max-h-100 overflow-auto">
|
||||
{systemsWithAlerts.map((s) => (
|
||||
<DropdownMenuItem key={s.id} className="min-w-44" onSelect={() => copyAlertsFromSystem(s.id)}>
|
||||
{s.name}
|
||||
</DropdownMenuItem>
|
||||
))}
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
)}
|
||||
</div>
|
||||
<TabsContent value="system">
|
||||
<div className="grid gap-3">
|
||||
<div key={copyKey} className="grid gap-3">
|
||||
{alertKeys.map((name) => (
|
||||
<AlertContent
|
||||
key={name}
|
||||
|
||||
@@ -41,8 +41,8 @@ export default function AreaChartDefault({
|
||||
hideYAxis = false,
|
||||
filter,
|
||||
truncate = false,
|
||||
}: // logRender = false,
|
||||
{
|
||||
chartProps,
|
||||
}: {
|
||||
chartData: ChartData
|
||||
// biome-ignore lint/suspicious/noExplicitAny: accepts different data source types (systemStats or containerData)
|
||||
customData?: any[]
|
||||
@@ -62,11 +62,11 @@ export default function AreaChartDefault({
|
||||
hideYAxis?: boolean
|
||||
filter?: string
|
||||
truncate?: boolean
|
||||
// logRender?: boolean
|
||||
chartProps?: Omit<React.ComponentProps<typeof AreaChart>, "data" | "margin">
|
||||
}) {
|
||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||
const { isIntersecting, ref } = useIntersectionObserver({ freeze: false })
|
||||
const sourceData = customData ?? chartData.systemStats
|
||||
const sourceData = customData ?? chartData.systemStats ?? []
|
||||
const [displayData, setDisplayData] = useState(sourceData)
|
||||
const [displayMaxToggled, setDisplayMaxToggled] = useState(maxToggled)
|
||||
|
||||
@@ -111,6 +111,8 @@ export default function AreaChartDefault({
|
||||
})
|
||||
}, [areasKey, displayMaxToggled])
|
||||
|
||||
const XAxis = xAxis(chartData.chartTime, displayData.at(-1)?.created)
|
||||
|
||||
return useMemo(() => {
|
||||
if (displayData.length === 0) {
|
||||
return null
|
||||
@@ -131,6 +133,7 @@ export default function AreaChartDefault({
|
||||
accessibilityLayer
|
||||
data={displayData}
|
||||
margin={hideYAxis ? { ...chartMargin, left: 5 } : chartMargin}
|
||||
{...chartProps}
|
||||
>
|
||||
<CartesianGrid vertical={false} />
|
||||
{!hideYAxis && (
|
||||
@@ -145,7 +148,7 @@ export default function AreaChartDefault({
|
||||
axisLine={false}
|
||||
/>
|
||||
)}
|
||||
{xAxis(chartData)}
|
||||
{XAxis}
|
||||
<ChartTooltip
|
||||
animationEasing="ease-out"
|
||||
animationDuration={150}
|
||||
@@ -166,5 +169,5 @@ export default function AreaChartDefault({
|
||||
</AreaChart>
|
||||
</ChartContainer>
|
||||
)
|
||||
}, [displayData, yAxisWidth, filter, Areas])
|
||||
}, [displayData, yAxisWidth, filter, Areas, XAxis])
|
||||
}
|
||||
|
||||
@@ -22,6 +22,7 @@ export type DataPoint<T = SystemStatsRecord> = {
|
||||
order?: number
|
||||
strokeOpacity?: number
|
||||
activeDot?: boolean
|
||||
dot?: boolean
|
||||
}
|
||||
|
||||
export default function LineChartDefault({
|
||||
@@ -40,8 +41,9 @@ export default function LineChartDefault({
|
||||
hideYAxis = false,
|
||||
filter,
|
||||
truncate = false,
|
||||
}: // logRender = false,
|
||||
{
|
||||
chartProps,
|
||||
connectNulls,
|
||||
}: {
|
||||
chartData: ChartData
|
||||
// biome-ignore lint/suspicious/noExplicitAny: accepts different data source types (systemStats or containerData)
|
||||
customData?: any[]
|
||||
@@ -61,11 +63,12 @@ export default function LineChartDefault({
|
||||
hideYAxis?: boolean
|
||||
filter?: string
|
||||
truncate?: boolean
|
||||
// logRender?: boolean
|
||||
chartProps?: Omit<React.ComponentProps<typeof LineChart>, "data" | "margin">
|
||||
connectNulls?: boolean
|
||||
}) {
|
||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||
const { isIntersecting, ref } = useIntersectionObserver({ freeze: false })
|
||||
const sourceData = customData ?? chartData.systemStats
|
||||
const sourceData = customData ?? chartData.systemStats ?? []
|
||||
const [displayData, setDisplayData] = useState(sourceData)
|
||||
const [displayMaxToggled, setDisplayMaxToggled] = useState(maxToggled)
|
||||
|
||||
@@ -83,7 +86,9 @@ export default function LineChartDefault({
|
||||
}, [displayData, displayMaxToggled, isIntersecting, maxToggled, sourceData])
|
||||
|
||||
// Use a stable key derived from data point identities and visual properties
|
||||
const linesKey = dataPoints?.map((d) => `${d.label}:${d.strokeOpacity ?? ""}`).join("\0")
|
||||
const linesKey = dataPoints?.map((d) => `${d.label}:${d.strokeOpacity}${d.dot}`).join("\0")
|
||||
|
||||
const XAxis = xAxis(chartData.chartTime, displayData.at(-1)?.created)
|
||||
|
||||
const Lines = useMemo(() => {
|
||||
return dataPoints?.map((dataPoint, i) => {
|
||||
@@ -97,14 +102,15 @@ export default function LineChartDefault({
|
||||
dataKey={dataPoint.dataKey}
|
||||
name={dataPoint.label}
|
||||
type="monotoneX"
|
||||
dot={false}
|
||||
dot={dataPoint.dot || false}
|
||||
strokeWidth={1.5}
|
||||
stroke={color}
|
||||
strokeOpacity={dataPoint.strokeOpacity}
|
||||
isAnimationActive={false}
|
||||
// stackId={dataPoint.stackId}
|
||||
order={dataPoint.order || i}
|
||||
// activeDot={dataPoint.activeDot ?? true}
|
||||
activeDot={dataPoint.activeDot ?? true}
|
||||
connectNulls={connectNulls}
|
||||
/>
|
||||
)
|
||||
})
|
||||
@@ -130,6 +136,7 @@ export default function LineChartDefault({
|
||||
accessibilityLayer
|
||||
data={displayData}
|
||||
margin={hideYAxis ? { ...chartMargin, left: 5 } : chartMargin}
|
||||
{...chartProps}
|
||||
>
|
||||
<CartesianGrid vertical={false} />
|
||||
{!hideYAxis && (
|
||||
@@ -144,7 +151,7 @@ export default function LineChartDefault({
|
||||
axisLine={false}
|
||||
/>
|
||||
)}
|
||||
{xAxis(chartData)}
|
||||
{XAxis}
|
||||
<ChartTooltip
|
||||
animationEasing="ease-out"
|
||||
animationDuration={150}
|
||||
@@ -165,5 +172,5 @@ export default function LineChartDefault({
|
||||
</LineChart>
|
||||
</ChartContainer>
|
||||
)
|
||||
}, [displayData, yAxisWidth, filter, Lines])
|
||||
}, [displayData, yAxisWidth, filter, Lines, XAxis])
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ import {
|
||||
import { EthernetIcon, HourglassIcon, SquareArrowRightEnterIcon } from "../ui/icons"
|
||||
import { Badge } from "../ui/badge"
|
||||
import { t } from "@lingui/core/macro"
|
||||
import { $allSystemsById, $longestSystemNameLen } from "@/lib/stores"
|
||||
import { $allSystemsById, $longestSystemName } from "@/lib/stores"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip"
|
||||
|
||||
@@ -63,10 +63,13 @@ export const containerChartCols: ColumnDef<ContainerRecord>[] = [
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`System`} Icon={ServerIcon} />,
|
||||
cell: ({ getValue }) => {
|
||||
const allSystems = useStore($allSystemsById)
|
||||
const longestName = useStore($longestSystemNameLen)
|
||||
const longestName = useStore($longestSystemName)
|
||||
return (
|
||||
<div className="ms-1 max-w-40 truncate" style={{ width: `${longestName / 1.05}ch` }}>
|
||||
{allSystems[getValue() as string]?.name ?? ""}
|
||||
<div className="ms-1 relative w-fit max-w-40">
|
||||
<span className="invisible block whitespace-nowrap" aria-hidden="true">
|
||||
{longestName}
|
||||
</span>
|
||||
<span className="absolute inset-0 truncate">{allSystems[getValue() as string]?.name ?? ""}</span>
|
||||
</div>
|
||||
)
|
||||
},
|
||||
|
||||
@@ -12,7 +12,7 @@ import { Label } from "@/components/ui/label"
|
||||
import { pb } from "@/lib/api"
|
||||
import { $authenticated } from "@/lib/stores"
|
||||
import { cn } from "@/lib/utils"
|
||||
import { $router, Link, prependBasePath } from "../router"
|
||||
import { $router, Link, basePath, prependBasePath } from "../router"
|
||||
import { toast } from "../ui/use-toast"
|
||||
import { OtpInputForm } from "./otp-forms"
|
||||
|
||||
@@ -37,8 +37,7 @@ const RegisterSchema = v.looseObject({
|
||||
passwordConfirm: passwordSchema,
|
||||
})
|
||||
|
||||
export const showLoginFaliedToast = (description?: string) => {
|
||||
description ||= t`Please check your credentials and try again`
|
||||
export const showLoginFaliedToast = (description = t`Please check your credentials and try again`) => {
|
||||
toast({
|
||||
title: t`Login attempt failed`,
|
||||
description,
|
||||
@@ -130,10 +129,6 @@ export function UserAuthForm({
|
||||
[isFirstRun]
|
||||
)
|
||||
|
||||
if (!authMethods) {
|
||||
return null
|
||||
}
|
||||
|
||||
const authProviders = authMethods.oauth2.providers ?? []
|
||||
const oauthEnabled = authMethods.oauth2.enabled && authProviders.length > 0
|
||||
const passwordEnabled = authMethods.password.enabled
|
||||
@@ -142,6 +137,12 @@ export function UserAuthForm({
|
||||
|
||||
function loginWithOauth(provider: AuthProviderInfo, forcePopup = false) {
|
||||
setIsOauthLoading(true)
|
||||
|
||||
if (globalThis.BESZEL.OAUTH_DISABLE_POPUP) {
|
||||
redirectToOauthProvider(provider)
|
||||
return
|
||||
}
|
||||
|
||||
const oAuthOpts: OAuth2AuthConfig = {
|
||||
provider: provider.name,
|
||||
}
|
||||
@@ -150,10 +151,7 @@ export function UserAuthForm({
|
||||
const authWindow = window.open()
|
||||
if (!authWindow) {
|
||||
setIsOauthLoading(false)
|
||||
toast({
|
||||
title: t`Error`,
|
||||
description: t`Please enable pop-ups for this site`,
|
||||
})
|
||||
showLoginFaliedToast(t`Please enable pop-ups for this site`)
|
||||
return
|
||||
}
|
||||
oAuthOpts.urlCallback = (url) => {
|
||||
@@ -171,16 +169,57 @@ export function UserAuthForm({
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Redirects the user to the OAuth provider's authentication page in the same window.
|
||||
* Requires the app's base URL to be registered as a redirect URI with the OAuth provider.
|
||||
*/
|
||||
function redirectToOauthProvider(provider: AuthProviderInfo) {
|
||||
const url = new URL(provider.authURL)
|
||||
// url.searchParams.set("redirect_uri", `${window.location.origin}${basePath}`)
|
||||
sessionStorage.setItem("provider", JSON.stringify(provider))
|
||||
window.location.href = url.toString()
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
// auto login if password disabled and only one auth provider
|
||||
if (!passwordEnabled && authProviders.length === 1 && !sessionStorage.getItem("lo")) {
|
||||
// Add a small timeout to ensure browser is ready to handle popups
|
||||
setTimeout(() => {
|
||||
loginWithOauth(authProviders[0], true)
|
||||
}, 300)
|
||||
// handle redirect-based OAuth callback if we have a code
|
||||
const params = new URLSearchParams(window.location.search)
|
||||
const code = params.get("code")
|
||||
if (code) {
|
||||
const state = params.get("state")
|
||||
const provider: AuthProviderInfo = JSON.parse(sessionStorage.getItem("provider") ?? "{}")
|
||||
if (!state || provider.state !== state) {
|
||||
showLoginFaliedToast()
|
||||
} else {
|
||||
setIsOauthLoading(true)
|
||||
window.history.replaceState({}, "", window.location.pathname)
|
||||
pb.collection("users")
|
||||
.authWithOAuth2Code(provider.name, code, provider.codeVerifier, `${window.location.origin}${basePath}`)
|
||||
.then(() => $authenticated.set(pb.authStore.isValid))
|
||||
.catch((e: unknown) => showLoginFaliedToast((e as Error).message))
|
||||
.finally(() => setIsOauthLoading(false))
|
||||
}
|
||||
}
|
||||
|
||||
// auto login if password disabled and only one auth provider
|
||||
if (!code && !passwordEnabled && authProviders.length === 1 && !sessionStorage.getItem("lo")) {
|
||||
// Add a small timeout to ensure browser is ready to handle popups
|
||||
setTimeout(() => loginWithOauth(authProviders[0], false), 300)
|
||||
return
|
||||
}
|
||||
|
||||
// refresh auth if not in above states (required for trusted auth header)
|
||||
pb.collection("users")
|
||||
.authRefresh()
|
||||
.then((res) => {
|
||||
pb.authStore.save(res.token, res.record)
|
||||
$authenticated.set(!!pb.authStore.isValid)
|
||||
})
|
||||
}, [])
|
||||
|
||||
if (!authMethods) {
|
||||
return null
|
||||
}
|
||||
|
||||
if (otpId && mfaId) {
|
||||
return <OtpInputForm otpId={otpId} mfaId={mfaId} />
|
||||
}
|
||||
@@ -248,7 +287,7 @@ export function UserAuthForm({
|
||||
)}
|
||||
<div className="sr-only">
|
||||
{/* honeypot */}
|
||||
<label htmlFor="website"></label>
|
||||
<label htmlFor="website">Website</label>
|
||||
<input
|
||||
id="website"
|
||||
type="text"
|
||||
|
||||
@@ -1,28 +1,39 @@
|
||||
import { t } from "@lingui/core/macro"
|
||||
import { MoonStarIcon, SunIcon } from "lucide-react"
|
||||
import { MoonStarIcon, SunIcon, SunMoonIcon } from "lucide-react"
|
||||
import { useTheme } from "@/components/theme-provider"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Tooltip, TooltipContent, TooltipTrigger } from "./ui/tooltip"
|
||||
import { Trans } from "@lingui/react/macro"
|
||||
import { cn } from "@/lib/utils"
|
||||
|
||||
const themes = ["light", "dark", "system"] as const
|
||||
const icons = [SunIcon, MoonStarIcon, SunMoonIcon] as const
|
||||
|
||||
export function ModeToggle() {
|
||||
const { theme, setTheme } = useTheme()
|
||||
|
||||
const currentIndex = themes.indexOf(theme)
|
||||
const Icon = icons[currentIndex]
|
||||
|
||||
return (
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<Button
|
||||
variant={"ghost"}
|
||||
size="icon"
|
||||
aria-label={t`Toggle theme`}
|
||||
onClick={() => setTheme(theme === "dark" ? "light" : "dark")}
|
||||
aria-label={t`Switch theme`}
|
||||
onClick={() => setTheme(themes[(currentIndex + 1) % themes.length])}
|
||||
>
|
||||
<SunIcon className="h-[1.2rem] w-[1.2rem] transition-all -rotate-90 dark:opacity-0 dark:rotate-0" />
|
||||
<MoonStarIcon className="absolute h-[1.2rem] w-[1.2rem] transition-all opacity-0 -rotate-90 dark:opacity-100 dark:rotate-0" />
|
||||
<Icon
|
||||
className={cn(
|
||||
"animate-in fade-in spin-in-[-30deg] duration-200",
|
||||
currentIndex === 2 ? "size-[1.35rem]" : "size-[1.2rem]"
|
||||
)}
|
||||
/>
|
||||
</Button>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
<Trans>Toggle theme</Trans>
|
||||
<Trans>Switch theme</Trans>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
)
|
||||
|
||||
@@ -8,6 +8,7 @@ import {
|
||||
LogOutIcon,
|
||||
LogsIcon,
|
||||
MenuIcon,
|
||||
NetworkIcon,
|
||||
PlusIcon,
|
||||
SearchIcon,
|
||||
ServerIcon,
|
||||
@@ -109,6 +110,10 @@ export default function Navbar() {
|
||||
<HardDriveIcon className="h-4 w-4 me-2.5" strokeWidth={1.5} />
|
||||
<span>S.M.A.R.T.</span>
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem onClick={() => navigate(getPagePath($router, "probes"))} className="flex items-center">
|
||||
<NetworkIcon className="h-4 w-4 me-2.5" strokeWidth={1.5} />
|
||||
<Trans>Network Probes</Trans>
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
onClick={() => navigate(getPagePath($router, "settings", { name: "general" }))}
|
||||
className="flex items-center"
|
||||
@@ -180,6 +185,21 @@ export default function Navbar() {
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>S.M.A.R.T.</TooltipContent>
|
||||
</Tooltip>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
<Link
|
||||
href={getPagePath($router, "probes")}
|
||||
className={cn("hidden md:grid", buttonVariants({ variant: "ghost", size: "icon" }))}
|
||||
aria-label="Network Probes"
|
||||
onMouseEnter={() => import("@/components/routes/probes")}
|
||||
>
|
||||
<NetworkIcon className="h-[1.2rem] w-[1.2rem]" strokeWidth={1.5} />
|
||||
</Link>
|
||||
</TooltipTrigger>
|
||||
<TooltipContent>
|
||||
<Trans>Network Probes</Trans>
|
||||
</TooltipContent>
|
||||
</Tooltip>
|
||||
<LangToggle />
|
||||
<ModeToggle />
|
||||
<Tooltip>
|
||||
|
||||
@@ -0,0 +1,391 @@
|
||||
import type { CellContext, Column, ColumnDef } from "@tanstack/react-table"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { cn, copyToClipboard, decimalString, formatMicroseconds, hourWithSeconds } from "@/lib/utils"
|
||||
import {
|
||||
GlobeIcon,
|
||||
TimerIcon,
|
||||
WifiOffIcon,
|
||||
Trash2Icon,
|
||||
ArrowLeftRightIcon,
|
||||
MoreHorizontalIcon,
|
||||
ServerIcon,
|
||||
ClockIcon,
|
||||
NetworkIcon,
|
||||
RefreshCwIcon,
|
||||
PenBoxIcon,
|
||||
PauseCircleIcon,
|
||||
PlayCircleIcon,
|
||||
CopyIcon,
|
||||
} from "lucide-react"
|
||||
import { t } from "@lingui/core/macro"
|
||||
import type { NetworkProbeRecord, SystemRecord } from "@/types"
|
||||
import {
|
||||
DropdownMenu,
|
||||
DropdownMenuContent,
|
||||
DropdownMenuItem,
|
||||
DropdownMenuSeparator,
|
||||
DropdownMenuTrigger,
|
||||
} from "@/components/ui/dropdown-menu"
|
||||
import { Trans } from "@lingui/react/macro"
|
||||
import { $allSystemsById, $longestSystemName } from "@/lib/stores"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { SystemStatus } from "@/lib/enums"
|
||||
import { Checkbox } from "@/components/ui/checkbox"
|
||||
import { useMemo } from "react"
|
||||
import { formatBulkProbeLine } from "@/components/network-probes-table/probe-dialog"
|
||||
import { Badge } from "../ui/badge"
|
||||
|
||||
const protocolColors: Record<string, string> = {
|
||||
icmp: "bg-blue-500/15! text-blue-600 dark:text-blue-400",
|
||||
tcp: "bg-purple-500/15! text-purple-600 dark:text-purple-400",
|
||||
http: "bg-green-500/15! text-green-700 dark:text-green-400",
|
||||
}
|
||||
|
||||
const SYSTEM_STATUS_COLORS = {
|
||||
[SystemStatus.Up]: "bg-green-500",
|
||||
[SystemStatus.Down]: "bg-red-500",
|
||||
[SystemStatus.Paused]: "bg-primary/40",
|
||||
[SystemStatus.Pending]: "bg-yellow-500",
|
||||
} as const
|
||||
|
||||
/**
|
||||
* A probe is considered muted if it's disabled or if its associated system is not up.
|
||||
*/
|
||||
const isMuted = (record: NetworkProbeRecord, systemRecord: SystemRecord | undefined) =>
|
||||
!record.enabled || systemRecord?.status !== SystemStatus.Up
|
||||
|
||||
export function getProbeColumns(
|
||||
longestName = "",
|
||||
longestTarget = "",
|
||||
{
|
||||
onEdit,
|
||||
onDelete,
|
||||
onSetEnabled,
|
||||
}: {
|
||||
onEdit?: (probe: NetworkProbeRecord) => void
|
||||
onDelete?: (probes: NetworkProbeRecord[]) => void | Promise<void>
|
||||
onSetEnabled?: (probes: NetworkProbeRecord[], enabled: boolean) => void | Promise<void>
|
||||
} = {}
|
||||
): ColumnDef<NetworkProbeRecord>[] {
|
||||
return [
|
||||
{
|
||||
id: "select",
|
||||
header: ({ table }) => (
|
||||
<Checkbox
|
||||
className="ms-2"
|
||||
checked={table.getIsAllRowsSelected() || (table.getIsSomeRowsSelected() && "indeterminate")}
|
||||
onClick={(event) => event.stopPropagation()}
|
||||
onCheckedChange={(value) => table.toggleAllRowsSelected(!!value)}
|
||||
aria-label={t`Select all`}
|
||||
/>
|
||||
),
|
||||
cell: ({ row }) => (
|
||||
<Checkbox
|
||||
checked={row.getIsSelected()}
|
||||
onClick={(event) => event.stopPropagation()}
|
||||
onCheckedChange={(value) => row.toggleSelected(!!value)}
|
||||
aria-label={t`Select row`}
|
||||
/>
|
||||
),
|
||||
enableSorting: false,
|
||||
enableHiding: false,
|
||||
size: 44,
|
||||
},
|
||||
{
|
||||
id: "name",
|
||||
sortingFn: (a, b) => (a.original.name || a.original.target).localeCompare(b.original.name || b.original.target),
|
||||
accessorFn: (record) => record.name || record.target,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Name`} Icon={NetworkIcon} />,
|
||||
cell: ({ row, getValue }) => {
|
||||
const probe = row.original
|
||||
const { status } = useStore($allSystemsById)[probe.system] || {}
|
||||
|
||||
let color = "bg-green-500"
|
||||
if (!probe.enabled || status === SystemStatus.Paused) {
|
||||
color = "bg-primary/40"
|
||||
} else if (status === SystemStatus.Down || status === SystemStatus.Pending) {
|
||||
color = "bg-yellow-500"
|
||||
}
|
||||
return (
|
||||
<div className="ms-1.5 max-w-40 flex gap-2 items-center tabular-nums">
|
||||
<span className={cn("shrink-0 size-2 rounded-full", color)} />
|
||||
<div className="relative w-fit min-w-0 max-w-full">
|
||||
<span className="invisible block overflow-hidden whitespace-nowrap" aria-hidden="true">
|
||||
{longestName}
|
||||
</span>
|
||||
<span className="absolute inset-0 truncate">{getValue() as string}</span>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "system",
|
||||
accessorFn: (record) => record.system,
|
||||
sortingFn: (a, b) => {
|
||||
const allSystems = $allSystemsById.get()
|
||||
const systemNameA = allSystems[a.original.system]?.name ?? ""
|
||||
const systemNameB = allSystems[b.original.system]?.name ?? ""
|
||||
const primary = systemNameA.localeCompare(systemNameB)
|
||||
if (primary !== 0) {
|
||||
return primary
|
||||
}
|
||||
return (a.original.name || a.original.target).localeCompare(b.original.name || b.original.target)
|
||||
},
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`System`} Icon={ServerIcon} />,
|
||||
cell: ({ getValue }) => {
|
||||
const system = useStore($allSystemsById)[getValue() as string] as SystemRecord | undefined
|
||||
const longestSystemName = useStore($longestSystemName)
|
||||
const name = system?.name
|
||||
const status = system?.status as SystemStatus // undefined val is fine but makes lsp mad
|
||||
|
||||
return useMemo(
|
||||
() => (
|
||||
<div className="ms-1.5 max-w-44 flex gap-2 items-center tabular-nums">
|
||||
<span className={cn("shrink-0 size-2 rounded-full", SYSTEM_STATUS_COLORS[status])} />
|
||||
<div className="relative w-fit min-w-0 max-w-full">
|
||||
<span className="invisible block whitespace-nowrap" aria-hidden="true">
|
||||
{longestSystemName}
|
||||
</span>
|
||||
<span className="absolute inset-0 truncate">{name}</span>
|
||||
</div>
|
||||
</div>
|
||||
),
|
||||
[status, name]
|
||||
)
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "target",
|
||||
sortingFn: (a, b) => a.original.target.localeCompare(b.original.target),
|
||||
accessorFn: (record) => record.target,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Target`} Icon={GlobeIcon} />,
|
||||
cell: ({ getValue }) => (
|
||||
<div className="ms-1.5 relative w-fit max-w-44 tabular-nums">
|
||||
<span className="invisible block whitespace-nowrap" aria-hidden="true">
|
||||
{longestTarget}
|
||||
</span>
|
||||
<span className="absolute inset-0 truncate">{getValue() as string}</span>
|
||||
</div>
|
||||
),
|
||||
},
|
||||
{
|
||||
id: "protocol",
|
||||
accessorFn: (record) => record.protocol,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Protocol`} Icon={ArrowLeftRightIcon} />,
|
||||
cell: ({ getValue }) => {
|
||||
const protocol = getValue() as string
|
||||
return <Badge className={cn("uppercase", protocolColors[protocol])}>{protocol}</Badge>
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "interval",
|
||||
accessorFn: (record) => record.interval,
|
||||
invertSorting: true,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Interval`} Icon={RefreshCwIcon} />,
|
||||
cell: ({ getValue }) => <span className="ms-1.5 tabular-nums">{getValue() as number}s</span>,
|
||||
},
|
||||
{
|
||||
id: "res",
|
||||
accessorFn: (record) => record.res,
|
||||
invertSorting: true,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Response`} Icon={TimerIcon} />,
|
||||
cell: responseTimeCell,
|
||||
},
|
||||
{
|
||||
id: "res1h",
|
||||
accessorFn: (record) => record.resAvg1h,
|
||||
invertSorting: true,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Avg 1h`} Icon={TimerIcon} />,
|
||||
cell: responseTimeCell,
|
||||
},
|
||||
{
|
||||
id: "max1h",
|
||||
accessorFn: (record) => record.resMax1h,
|
||||
invertSorting: true,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Max 1h`} Icon={TimerIcon} />,
|
||||
cell: responseTimeCell,
|
||||
},
|
||||
{
|
||||
id: "min1h",
|
||||
accessorFn: (record) => record.resMin1h,
|
||||
invertSorting: true,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Min 1h`} Icon={TimerIcon} />,
|
||||
cell: responseTimeCell,
|
||||
},
|
||||
{
|
||||
id: "loss",
|
||||
accessorFn: (record) => record.loss1h,
|
||||
invertSorting: true,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Loss 1h`} Icon={WifiOffIcon} />,
|
||||
cell: ({ row }) => {
|
||||
const { loss1h, res, system } = row.original
|
||||
const systemRecord = useStore($allSystemsById)[system]
|
||||
|
||||
if (loss1h === undefined || (!res && !loss1h)) {
|
||||
return <span className="ms-1.5 text-muted-foreground">-</span>
|
||||
}
|
||||
|
||||
const muted = isMuted(row.original, systemRecord)
|
||||
let color = "bg-green-500"
|
||||
if (muted) {
|
||||
color = "bg-muted-foreground/50"
|
||||
} else if (loss1h) {
|
||||
color = loss1h > 20 ? "bg-red-500" : "bg-yellow-500"
|
||||
}
|
||||
return (
|
||||
<span className="ms-1.5 tabular-nums flex gap-2 items-center">
|
||||
<span className={cn("shrink-0 size-2 rounded-full", color)} />
|
||||
{loss1h === 100 ? loss1h : decimalString(loss1h, loss1h >= 10 ? 1 : 2)}%
|
||||
</span>
|
||||
)
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "updated",
|
||||
invertSorting: true,
|
||||
accessorFn: (record) => record.updated,
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Updated`} Icon={ClockIcon} />,
|
||||
cell: ({ getValue }) => {
|
||||
const timestamp = getValue() as number
|
||||
if (!timestamp) {
|
||||
return <span className="ms-1.5 text-muted-foreground">-</span>
|
||||
}
|
||||
return <span className="ms-1.5 tabular-nums">{hourWithSeconds(timestamp)}</span>
|
||||
},
|
||||
},
|
||||
{
|
||||
id: "actions",
|
||||
enableSorting: false,
|
||||
enableHiding: false,
|
||||
header: () => null,
|
||||
size: 40,
|
||||
cell: ({ row, table }) => {
|
||||
const selectedRows = table.getSelectedRowModel().rows
|
||||
const actionRows =
|
||||
row.getIsSelected() && selectedRows.length > 1
|
||||
? selectedRows.map((selectedRow) => selectedRow.original)
|
||||
: [row.original]
|
||||
const isBulkAction = actionRows.length > 1
|
||||
const shouldPause = actionRows.some((probe) => probe.enabled)
|
||||
const bulkCopyContent = actionRows.map((probe) => formatBulkProbeLine(probe)).join("\n")
|
||||
return (
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button variant="ghost" size="icon" className="size-10">
|
||||
<span className="sr-only">
|
||||
<Trans>Open menu</Trans>
|
||||
</span>
|
||||
<MoreHorizontalIcon className="w-5" />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align="end" onClick={(event) => event.stopPropagation()}>
|
||||
{!isBulkAction && (
|
||||
<DropdownMenuItem
|
||||
onClick={() => {
|
||||
onEdit?.(row.original)
|
||||
}}
|
||||
>
|
||||
<PenBoxIcon className="me-2.5 size-4" />
|
||||
<Trans>Edit</Trans>
|
||||
</DropdownMenuItem>
|
||||
)}
|
||||
<DropdownMenuItem
|
||||
onClick={() => {
|
||||
onSetEnabled?.(actionRows, !shouldPause)
|
||||
}}
|
||||
>
|
||||
{shouldPause ? (
|
||||
<>
|
||||
<PauseCircleIcon className="me-2.5 size-4" />
|
||||
<Trans>Pause</Trans>
|
||||
</>
|
||||
) : (
|
||||
<>
|
||||
<PlayCircleIcon className="me-2.5 size-4" />
|
||||
<Trans>Resume</Trans>
|
||||
</>
|
||||
)}
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem
|
||||
onClick={() => {
|
||||
copyToClipboard(bulkCopyContent)
|
||||
}}
|
||||
>
|
||||
<CopyIcon className="me-2.5 size-4" />
|
||||
<Trans>Bulk copy</Trans>
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuSeparator />
|
||||
<DropdownMenuItem
|
||||
onClick={() => {
|
||||
onDelete?.(actionRows)
|
||||
}}
|
||||
>
|
||||
<Trash2Icon className="me-2.5 size-4" />
|
||||
<Trans>Delete</Trans>
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
)
|
||||
},
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
const responseTimeThresholds = {
|
||||
http: { warning: 800_000, critical: 3_000_000 },
|
||||
tcp: { warning: 500_000, critical: 2_000_000 },
|
||||
icmp: { warning: 100_000, critical: 500_000 },
|
||||
}
|
||||
|
||||
function responseTimeCell(cell: CellContext<NetworkProbeRecord, unknown>) {
|
||||
const probe = cell.row.original
|
||||
const systemRecord = useStore($allSystemsById)[probe.system]
|
||||
const responseTime = cell.getValue() as number | undefined
|
||||
|
||||
if (!responseTime) {
|
||||
return <span className="ms-1.5 text-muted-foreground">-</span>
|
||||
}
|
||||
|
||||
const muted = isMuted(probe, systemRecord)
|
||||
let color = "bg-green-500"
|
||||
if (muted) {
|
||||
color = "bg-muted-foreground/50"
|
||||
} else if (responseTime > responseTimeThresholds[probe.protocol].warning) {
|
||||
color = "bg-yellow-500"
|
||||
}
|
||||
if (!muted && responseTime > responseTimeThresholds[probe.protocol].critical) {
|
||||
color = "bg-red-500"
|
||||
}
|
||||
return (
|
||||
<span className="ms-1.5 tabular-nums flex gap-2 items-center">
|
||||
<span className={cn("shrink-0 size-2 rounded-full", color)} />
|
||||
{formatMicroseconds(responseTime)}
|
||||
</span>
|
||||
)
|
||||
}
|
||||
|
||||
function HeaderButton({
|
||||
column,
|
||||
name,
|
||||
Icon,
|
||||
}: {
|
||||
column: Column<NetworkProbeRecord>
|
||||
name: string
|
||||
Icon: React.ElementType
|
||||
}) {
|
||||
const isSorted = column.getIsSorted()
|
||||
return (
|
||||
<Button
|
||||
className={cn(
|
||||
"h-9 px-3 flex items-center gap-2 duration-50",
|
||||
isSorted && "bg-accent/70 light:bg-accent text-accent-foreground/90"
|
||||
)}
|
||||
variant="ghost"
|
||||
onClick={() => column.toggleSorting(column.getIsSorted() === "asc")}
|
||||
>
|
||||
{Icon && <Icon className="size-4" />}
|
||||
{name}
|
||||
</Button>
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,538 @@
|
||||
import { t } from "@lingui/core/macro"
|
||||
import { Trans } from "@lingui/react/macro"
|
||||
import {
|
||||
type ColumnFiltersState,
|
||||
flexRender,
|
||||
getCoreRowModel,
|
||||
getFilteredRowModel,
|
||||
getSortedRowModel,
|
||||
type Row,
|
||||
type RowSelectionState,
|
||||
type SortingState,
|
||||
type Table as TableType,
|
||||
useReactTable,
|
||||
type VisibilityState,
|
||||
} from "@tanstack/react-table"
|
||||
import { useVirtualizer, type VirtualItem } from "@tanstack/react-virtual"
|
||||
import {
|
||||
AlertDialog,
|
||||
AlertDialogAction,
|
||||
AlertDialogCancel,
|
||||
AlertDialogContent,
|
||||
AlertDialogDescription,
|
||||
AlertDialogFooter,
|
||||
AlertDialogHeader,
|
||||
AlertDialogTitle,
|
||||
} from "@/components/ui/alert-dialog"
|
||||
import { Button, buttonVariants } from "@/components/ui/button"
|
||||
import { memo, useCallback, useMemo, useRef, useState } from "react"
|
||||
import { getProbeColumns } from "@/components/network-probes-table/network-probes-columns"
|
||||
import { Card, CardHeader, CardTitle } from "@/components/ui/card"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table"
|
||||
import { useToast } from "@/components/ui/use-toast"
|
||||
import { isReadOnlyUser } from "@/lib/api"
|
||||
import { pb } from "@/lib/api"
|
||||
import { $allSystemsById, $chartTime, $direction } from "@/lib/stores"
|
||||
import { cn, isVisuallyLonger, useBrowserStorage } from "@/lib/utils"
|
||||
import type { NetworkProbeRecord } from "@/types"
|
||||
import { AddProbeDialog, EditProbeDialog } from "./probe-dialog"
|
||||
import { ArrowLeftRightIcon, EthernetPortIcon, GlobeIcon, ServerIcon, XIcon } from "lucide-react"
|
||||
import { Sheet, SheetContent, SheetDescription, SheetHeader, SheetTitle } from "@/components/ui/sheet"
|
||||
import ChartTimeSelect from "@/components/charts/chart-time-select"
|
||||
import { LossChart, AvgMinMaxResponseChart } from "@/components/routes/system/charts/probes-charts"
|
||||
import { useNetworkProbeStats } from "@/lib/use-network-probes"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import type { ChartData } from "@/types"
|
||||
import { parseSemVer } from "@/lib/utils"
|
||||
import { Separator } from "../ui/separator"
|
||||
import { $router, Link } from "../router"
|
||||
import { getPagePath } from "@nanostores/router"
|
||||
|
||||
// NetworkProbesTableNew renders the network probes card: title, text filter,
// add/edit dialogs, a bulk-delete confirmation dialog, and the virtualized
// probes table itself.
//
// When `systemId` is set the component is embedded in a single system's page
// (the "system" column is hidden and sorting defaults to "name"); otherwise it
// shows probes across all systems and sorts by "system" by default.
export default function NetworkProbesTableNew({
	systemId,
	probes,
}: {
	systemId?: string
	probes: NetworkProbeRecord[]
}) {
	// Sorting survives the session; the storage key differs for the
	// per-system view (1) vs the global view (0).
	const [sorting, setSorting] = useBrowserStorage<SortingState>(
		`sort-np-${systemId ? 1 : 0}`,
		[{ id: systemId ? "name" : "system", desc: false }],
		sessionStorage
	)
	const [columnFilters, setColumnFilters] = useState<ColumnFiltersState>([])
	const [columnVisibility, setColumnVisibility] = useState<VisibilityState>({})
	const [rowSelection, setRowSelection] = useState<RowSelectionState>({})
	const [globalFilter, setGlobalFilter] = useState("")
	// Bulk-delete confirmation state: ids are staged here until the user confirms.
	const [deleteOpen, setDeleteOpen] = useState(false)
	const [pendingDeleteIds, setPendingDeleteIds] = useState<string[]>([])
	const [editingProbe, setEditingProbe] = useState<NetworkProbeRecord>()
	const { toast } = useToast()
	const canManageProbes = !isReadOnlyUser()

	// Longest visible name/target across all probes; passed to the column
	// definitions (presumably used for column sizing — confirm in getProbeColumns).
	const [longestName, longestTarget] = useMemo(() => {
		let longestName = ""
		let longestTarget = ""
		for (const p of probes) {
			const name = p.name || p.target
			if (isVisuallyLonger(name, longestName)) {
				longestName = name
			}
			if (isVisuallyLonger(p.target, longestTarget)) {
				longestTarget = p.target
			}
		}
		return [longestName, longestTarget]
	}, [probes])

	// Runs `enqueue` for every id through PocketBase batch requests,
	// flushing every 20 operations.
	const runProbeBatch = useCallback(
		async (ids: string[], enqueue: (batch: ReturnType<typeof pb.createBatch>, id: string) => void) => {
			let batch = pb.createBatch()
			let inBatch = 0
			for (const id of ids) {
				enqueue(batch, id)
				if (++inBatch >= 20) {
					await batch.send()
					batch = pb.createBatch()
					inBatch = 0
				}
			}
			// Flush the final partial batch.
			if (inBatch) {
				await batch.send()
			}
		},
		[]
	)

	// Deletes a single probe immediately; for multiple probes, stages the ids
	// and opens the confirmation dialog instead.
	const handleDeleteRequest = useCallback(
		async (probesToDelete: NetworkProbeRecord[]) => {
			if (!probesToDelete.length) {
				return
			}

			const ids = probesToDelete.map((probe) => probe.id)
			if (ids.length === 1) {
				try {
					await pb.collection("network_probes").delete(ids[0])
				} catch (err: unknown) {
					toast({
						variant: "destructive",
						title: t`Error`,
						description: (err as Error)?.message || t`Failed to delete probes.`,
					})
				}
				return
			}

			setPendingDeleteIds(ids)
			setDeleteOpen(true)
		},
		[toast]
	)

	// Confirmed bulk delete: removes all staged ids in batches, then clears
	// the staged ids and the table's row selection.
	const handleBulkDelete = async () => {
		setDeleteOpen(false)
		if (!pendingDeleteIds.length) {
			return
		}

		try {
			await runProbeBatch(pendingDeleteIds, (batch, id) => batch.collection("network_probes").delete(id))
			setPendingDeleteIds([])
			setRowSelection({})
		} catch (err: unknown) {
			toast({
				variant: "destructive",
				title: t`Error`,
				description: (err as Error)?.message || t`Failed to delete probes.`,
			})
		}
	}

	// Enables/disables probes. Probes already in the requested state are
	// skipped; a single change avoids the batch API entirely.
	const handleSetEnabled = useCallback(
		async (probesToUpdate: NetworkProbeRecord[], enabled: boolean) => {
			if (!probesToUpdate.length) {
				return
			}

			const pendingUpdates = probesToUpdate.filter((probe) => probe.enabled !== enabled)
			if (!pendingUpdates.length) {
				return
			}

			try {
				if (pendingUpdates.length === 1) {
					await pb.collection("network_probes").update(pendingUpdates[0].id, { enabled })
					return
				}
				await runProbeBatch(
					pendingUpdates.map((probe) => probe.id),
					(batch, id) => batch.collection("network_probes").update(id, { enabled })
				)
				// Only clear selection when this was invoked on a multi-selection.
				if (probesToUpdate.length > 1) {
					setRowSelection({})
				}
			} catch (err: unknown) {
				toast({
					variant: "destructive",
					title: t`Error`,
					description: (err as Error)?.message || t`Failed to update probes.`,
				})
			}
		},
		[runProbeBatch, toast]
	)

	// Column definitions, minus the "system" column on a single-system page and
	// minus the "actions" column for read-only users.
	const columns = useMemo(() => {
		let columns = getProbeColumns(longestName, longestTarget, {
			onEdit: setEditingProbe,
			onDelete: handleDeleteRequest,
			onSetEnabled: handleSetEnabled,
		})
		columns = systemId ? columns.filter((col) => col.id !== "system") : columns
		columns = canManageProbes ? columns : columns.filter((col) => col.id !== "actions")
		return columns
	}, [canManageProbes, handleDeleteRequest, handleSetEnabled, longestName, systemId, longestTarget])

	const table = useReactTable({
		data: probes,
		columns,
		getRowId: (row) => row.id,
		getCoreRowModel: getCoreRowModel(),
		getSortedRowModel: getSortedRowModel(),
		getFilteredRowModel: getFilteredRowModel(),
		onSortingChange: setSorting,
		onColumnFiltersChange: setColumnFilters,
		onColumnVisibilityChange: setColumnVisibility,
		onRowSelectionChange: setRowSelection,
		defaultColumn: {
			sortUndefined: "last",
			size: 900,
			minSize: 0,
		},
		state: {
			sorting,
			columnFilters,
			columnVisibility,
			rowSelection,
			globalFilter,
		},
		onGlobalFilterChange: setGlobalFilter,
		// Space-separated search terms must all match the concatenation of
		// name, target, protocol, and the owning system's name.
		globalFilterFn: (row, _columnId, filterValue) => {
			const probe = row.original
			const systemName = $allSystemsById.get()[probe.system]?.name ?? ""
			const searchString = `${probe.name}${probe.target}${probe.protocol}${systemName}`.toLocaleLowerCase()
			return (filterValue as string)
				.toLowerCase()
				.split(" ")
				.every((term) => searchString.includes(term))
		},
	})

	const rows = table.getRowModel().rows
	const visibleColumns = table.getVisibleLeafColumns()

	return (
		<Card className="@container w-full px-3 py-5 sm:py-6 sm:px-6">
			<CardHeader className="p-0 mb-3 sm:mb-4">
				<div className="grid md:flex gap-x-5 gap-y-3 w-full items-end">
					<div className="px-2 sm:px-1">
						<CardTitle className="mb-2">
							<Trans>Network Probes</Trans>
						</CardTitle>
						<div className="text-sm text-muted-foreground flex items-center flex-wrap">
							<Trans>Response time monitoring from agents.</Trans>
						</div>
					</div>
					<div className="md:ms-auto flex items-center gap-2">
						{/* Text filter with an inline clear button; hidden when there are no probes. */}
						{probes.length > 0 && (
							<div className="relative">
								<Input
									placeholder={t`Filter...`}
									value={globalFilter}
									onChange={(e) => setGlobalFilter(e.target.value)}
									className="ms-auto px-4 w-full max-w-full md:w-50"
								/>
								{globalFilter && (
									<Button
										type="button"
										variant="ghost"
										size="icon"
										aria-label={t`Clear`}
										className="absolute right-1 top-1/2 -translate-y-1/2 h-7 w-7 text-muted-foreground"
										onClick={() => setGlobalFilter("")}
									>
										<XIcon className="h-4 w-4" />
									</Button>
								)}
							</div>
						)}
						{canManageProbes ? <AddProbeDialog systemId={systemId} probes={probes} /> : null}
						{canManageProbes ? (
							<EditProbeDialog
								systemId={systemId}
								probe={editingProbe}
								open={!!editingProbe}
								setOpen={(open) => {
									if (!open) {
										setEditingProbe(undefined)
									}
								}}
							/>
						) : null}
						{/* Bulk-delete confirmation; closing it without confirming clears the staged ids. */}
						<AlertDialog
							open={deleteOpen}
							onOpenChange={(open) => {
								setDeleteOpen(open)
								if (!open) {
									setPendingDeleteIds([])
								}
							}}
						>
							<AlertDialogContent>
								<AlertDialogHeader>
									<AlertDialogTitle>
										<Trans>Are you sure?</Trans>
									</AlertDialogTitle>
									<AlertDialogDescription>
										<Trans>This will permanently delete all selected records from the database.</Trans>
									</AlertDialogDescription>
								</AlertDialogHeader>
								<AlertDialogFooter>
									<AlertDialogCancel>
										<Trans>Cancel</Trans>
									</AlertDialogCancel>
									<AlertDialogAction
										className={cn(buttonVariants({ variant: "destructive" }))}
										onClick={handleBulkDelete}
									>
										<Trans>Continue</Trans>
									</AlertDialogAction>
								</AlertDialogFooter>
							</AlertDialogContent>
						</AlertDialog>
					</div>
				</div>
			</CardHeader>
			<div className="rounded-md">
				<NetworkProbesTable table={table} rows={rows} colLength={visibleColumns.length} rowSelection={rowSelection} />
			</div>
		</Card>
	)
}
|
||||
|
||||
// NetworkProbesTable renders the virtualized table body inside a scrollable
// container and opens the detail sheet for a clicked row.
//
// `rowSelection` is unused directly but kept in the props so that selection
// changes invalidate the memo and re-render the rows.
const NetworkProbesTable = memo(function NetworkProbeTable({
	table,
	rows,
	colLength,
	rowSelection: _rowSelection,
}: {
	table: TableType<NetworkProbeRecord>
	rows: Row<NetworkProbeRecord>[]
	colLength: number
	rowSelection: RowSelectionState
}) {
	// The virtualizer will need a reference to the scrollable container element
	const scrollRef = useRef<HTMLDivElement>(null)
	const [sheetOpen, setSheetOpen] = useState(false)
	// Track the active probe by id (not by object) so the sheet always shows
	// the latest record from the table's data.
	const [activeProbeId, setActiveProbeId] = useState<string | null>(null)
	const activeProbe = activeProbeId ? table.options.data.find((probe) => probe.id === activeProbeId) : undefined
	const openSheet = useCallback((probe: NetworkProbeRecord) => {
		setActiveProbeId(probe.id)
		setSheetOpen(true)
	}, [])

	const virtualizer = useVirtualizer<HTMLDivElement, HTMLTableRowElement>({
		count: rows.length,
		estimateSize: () => 54,
		getScrollElement: () => scrollRef.current,
		overscan: 5,
	})
	const virtualRows = virtualizer.getVirtualItems()

	// BUG FIX: `??` binds looser than `-`, so the original
	// `virtualRows[0]?.start ?? 0 - scrollMargin` evaluated as
	// `start ?? (0 - scrollMargin)` and never subtracted the margin from a
	// real start offset. Parenthesize so the fallback applies to `start` only.
	const paddingTop = Math.max(0, (virtualRows[0]?.start ?? 0) - virtualizer.options.scrollMargin)
	const paddingBottom = Math.max(0, virtualizer.getTotalSize() - (virtualRows[virtualRows.length - 1]?.end ?? 0))

	return (
		<div
			className={cn(
				"h-min max-h-[calc(100dvh-17rem)] max-w-full relative overflow-auto border rounded-md",
				// don't set min height if there are less than 2 rows, do set if we need to display the empty state
				(!rows.length || rows.length > 2) && "min-h-50"
			)}
			ref={scrollRef}
		>
			{/* add header height to table size */}
			<div style={{ height: `${virtualizer.getTotalSize() + 48}px`, paddingTop, paddingBottom }}>
				<table className="text-sm w-full h-full text-nowrap">
					<NetworkProbeTableHead table={table} />
					<TableBody>
						{rows.length ? (
							virtualRows.map((virtualRow) => {
								const row = rows[virtualRow.index]
								return (
									<NetworkProbeTableRow
										key={row.id}
										row={row}
										virtualRow={virtualRow}
										isSelected={row.getIsSelected()}
										openSheet={openSheet}
									/>
								)
							})
						) : (
							<TableRow>
								<TableCell colSpan={colLength} className="h-37 text-center pointer-events-none">
									<Trans>No results.</Trans>
								</TableCell>
							</TableRow>
						)}
					</TableBody>
				</table>
			</div>
			<NetworkProbeSheet
				open={sheetOpen}
				onOpenChange={(nextOpen) => {
					setSheetOpen(nextOpen)
				}}
				probe={activeProbe}
			/>
		</div>
	)
})
|
||||
|
||||
function NetworkProbeTableHead({ table }: { table: TableType<NetworkProbeRecord> }) {
|
||||
return (
|
||||
<TableHeader className="sticky top-0 z-50 w-full border-b-2">
|
||||
{table.getHeaderGroups().map((headerGroup) => (
|
||||
<tr key={headerGroup.id}>
|
||||
{headerGroup.headers.map((header) => {
|
||||
return (
|
||||
<TableHead className="px-2" key={header.id}>
|
||||
{header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())}
|
||||
</TableHead>
|
||||
)
|
||||
})}
|
||||
</tr>
|
||||
))}
|
||||
</TableHeader>
|
||||
)
|
||||
}
|
||||
|
||||
const NetworkProbeTableRow = memo(function NetworkProbeTableRow({
|
||||
row,
|
||||
virtualRow,
|
||||
isSelected,
|
||||
openSheet,
|
||||
}: {
|
||||
row: Row<NetworkProbeRecord>
|
||||
virtualRow: VirtualItem
|
||||
isSelected: boolean
|
||||
openSheet: (probe: NetworkProbeRecord) => void
|
||||
}) {
|
||||
return (
|
||||
<TableRow
|
||||
data-state={isSelected && "selected"}
|
||||
className="cursor-pointer transition-opacity"
|
||||
onClick={() => openSheet(row.original)}
|
||||
>
|
||||
{row.getVisibleCells().map((cell) => (
|
||||
<TableCell
|
||||
key={cell.id}
|
||||
className="py-0"
|
||||
style={{
|
||||
width: `${cell.column.getSize()}px`,
|
||||
height: virtualRow.size,
|
||||
}}
|
||||
>
|
||||
{flexRender(cell.column.columnDef.cell, cell.getContext())}
|
||||
</TableCell>
|
||||
))}
|
||||
</TableRow>
|
||||
)
|
||||
})
|
||||
|
||||
// Thin wrapper that renders the probe detail sheet only when a probe is set,
// so the hooks inside NetworkProbeSheetContent never run without a probe.
function NetworkProbeSheet({
	open,
	onOpenChange,
	probe,
}: {
	open: boolean
	onOpenChange: (open: boolean) => void
	probe?: NetworkProbeRecord
}) {
	if (!probe) {
		return null
	}

	// NOTE(review): the key is probe.system, so switching between two probes on
	// the *same* system reuses the mounted content instead of remounting it —
	// confirm probe.id was not intended here.
	return <NetworkProbeSheetContent key={probe.system} open={open} onOpenChange={onOpenChange} probe={probe} />
}
|
||||
|
||||
function NetworkProbeSheetContent({
|
||||
open,
|
||||
onOpenChange,
|
||||
probe,
|
||||
}: {
|
||||
open: boolean
|
||||
onOpenChange: (open: boolean) => void
|
||||
probe: NetworkProbeRecord
|
||||
}) {
|
||||
const chartTime = useStore($chartTime)
|
||||
const direction = useStore($direction)
|
||||
const system = useStore($allSystemsById)[probe.system]
|
||||
|
||||
const probeStats = useNetworkProbeStats({ systemId: probe.system, chartTime })
|
||||
|
||||
const chartData = useMemo<ChartData>(
|
||||
() => ({
|
||||
agentVersion: parseSemVer(system?.info?.v),
|
||||
orientation: direction === "rtl" ? "right" : "left",
|
||||
chartTime,
|
||||
}),
|
||||
[probeStats]
|
||||
)
|
||||
const hasProbeStats = probeStats.some((record) => record.stats?.[probe.id] != null)
|
||||
const probeLabel = probe.name || probe.target
|
||||
|
||||
return (
|
||||
<Sheet open={open} onOpenChange={onOpenChange}>
|
||||
<SheetContent className="w-full sm:max-w-220 overflow-auto p-4 sm:p-6">
|
||||
<SheetHeader className="mb-0 border-b p-0 pb-4">
|
||||
<SheetTitle>{probeLabel}</SheetTitle>
|
||||
<SheetDescription className="flex flex-wrap items-center gap-x-2 gap-y-1">
|
||||
<ServerIcon className="size-3.5 text-muted-foreground" />
|
||||
<Link className="hover:underline" href={getPagePath($router, "system", { id: system?.id ?? "" })}>
|
||||
{system?.name ?? ""}
|
||||
</Link>
|
||||
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
|
||||
<ArrowLeftRightIcon className="size-3.5 text-muted-foreground" />
|
||||
{probe.protocol.toUpperCase()}
|
||||
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
|
||||
<GlobeIcon className="size-3.5 text-muted-foreground" />
|
||||
{probe.target}
|
||||
{probe.protocol === "tcp" && probe.port > 0 && (
|
||||
<>
|
||||
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
|
||||
<EthernetPortIcon className="size-3.5 text-muted-foreground" />
|
||||
<span>{probe.port}</span>
|
||||
</>
|
||||
)}
|
||||
</SheetDescription>
|
||||
</SheetHeader>
|
||||
<div className="grid gap-4">
|
||||
<ChartTimeSelect className="bg-card" agentVersion={chartData.agentVersion} />
|
||||
<AvgMinMaxResponseChart probeStats={probeStats} probe={probe} chartData={chartData} empty={!hasProbeStats} />
|
||||
<LossChart
|
||||
probeStats={probeStats}
|
||||
grid={false}
|
||||
probes={[probe]}
|
||||
chartData={chartData}
|
||||
empty={!hasProbeStats}
|
||||
showFilter={false}
|
||||
/>
|
||||
</div>
|
||||
</SheetContent>
|
||||
</Sheet>
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,613 @@
|
||||
import { useEffect, useRef, useState } from "react"
|
||||
import { Trans, useLingui } from "@lingui/react/macro"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { pb } from "@/lib/api"
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogDescription,
|
||||
DialogFooter,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
} from "@/components/ui/dialog"
|
||||
import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu"
|
||||
import { Sheet, SheetContent, SheetDescription, SheetFooter, SheetHeader, SheetTitle } from "@/components/ui/sheet"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Input } from "@/components/ui/input"
|
||||
import { Label } from "@/components/ui/label"
|
||||
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
|
||||
import { Textarea } from "@/components/ui/textarea"
|
||||
import { ChevronDownIcon, ListIcon, ServerIcon } from "lucide-react"
|
||||
import { useToast } from "@/components/ui/use-toast"
|
||||
import { $systems } from "@/lib/stores"
|
||||
import type { NetworkProbeRecord } from "@/types"
|
||||
import * as v from "valibot"
|
||||
|
||||
// Supported probe protocols.
type ProbeProtocol = "icmp" | "tcp" | "http"

// Raw form values for a single probe. `interval` is a string here because it
// comes straight from form/CSV input; it is converted to a number during
// normalization.
type ProbeValues = {
	system: string
	target: string
	protocol: ProbeProtocol
	port: number
	interval: string
	name?: string
}

// Probe values after schema normalization: the system id is handled
// separately and the interval has been parsed to a number.
type NormalizedProbeValues = Omit<ProbeValues, "system" | "interval"> & {
	interval: number
}

// The subset of a probe record needed to render one bulk-import CSV line.
type BulkProbeLineSource = Pick<NetworkProbeRecord, "target" | "protocol" | "port" | "interval" | "name">

// Default probe interval in seconds; omitted from bulk lines when unchanged.
const defaultInterval = 30
|
||||
|
||||
// Valibot schema for the protocol field.
const ProbeProtocolSchema = v.picklist(["icmp", "tcp", "http"])

// Interval arrives as a string and must parse to a number in [1, 3600] seconds.
const ProbeIntervalSchema = v.pipe(v.string(), v.toNumber(), v.minValue(1), v.maxValue(3600))

// Both the single-probe form and the bulk importer flow through this schema so
// defaults and HTTP target normalization stay in one place.
const NormalizedProbeValuesSchema = v.pipe(
	v.object({
		target: v.pipe(v.string(), v.trim(), v.nonEmpty("target is required")),
		protocol: ProbeProtocolSchema,
		port: v.number(),
		interval: ProbeIntervalSchema,
		name: v.optional(v.pipe(v.string(), v.trim())),
	}),
	v.transform((input): NormalizedProbeValues => {
		let { protocol, port } = input
		let httpTarget = input.target
		// ICMP and HTTP probes carry no separate port field (HTTP ports end up
		// inside the normalized URL instead); TCP defaults to 443.
		if (protocol === "icmp" || protocol === "http") {
			if (protocol === "http") {
				httpTarget = normalizeHttpTarget(input.target, port)
			}
			port = 0
		} else if (protocol === "tcp" && !port) {
			port = 443
		}
		return {
			// HTTP probes may be entered as bare hostnames, so normalize them to a
			// scheme-bearing URL before the payload is sent to PocketBase.
			target: protocol === "http" ? httpTarget : input.target,
			protocol,
			port,
			interval: input.interval,
			name: input.name || undefined,
		}
	}),
	// Post-transform invariant: ICMP/HTTP must have port 0; TCP needs a valid
	// TCP port. The issue is attached to the "port" field via forward().
	v.forward(
		v.check((input) => {
			if (input.protocol === "icmp" || input.protocol === "http") {
				return input.port === 0
			}

			return Number.isInteger(input.port) && input.port >= 1 && input.port <= 65535
		}, "Port must be between 1 and 65535"),
		["port"]
	)
)

// Bulk parsing only trims raw CSV fields. Inference, defaults, and protocol-
// specific validation still go through the shared normalization schema above.
const BulkProbeSchema = v.object({
	target: v.pipe(v.string(), v.trim(), v.nonEmpty("target is required")),
	protocol: v.optional(v.pipe(v.string(), v.trim())),
	port: v.optional(v.pipe(v.string(), v.trim())),
	interval: v.optional(v.pipe(v.string(), v.trim())),
	name: v.optional(v.pipe(v.string(), v.trim())),
})
|
||||
|
||||
function normalizeHttpTarget(target: string, port = 0) {
|
||||
const useExplicitPort = port > 0 && port !== 80 && port !== 443
|
||||
const hasOriginOnlyTarget = /^https?:\/\/[^/?#]+$/i.test(target)
|
||||
if (!/^https?:\/\//i.test(target)) {
|
||||
const scheme = port === 80 ? "http" : "https"
|
||||
return `${scheme}://${target}${useExplicitPort ? `:${port}` : ""}`
|
||||
}
|
||||
|
||||
let parsedUrl: URL
|
||||
try {
|
||||
parsedUrl = new URL(target)
|
||||
} catch {
|
||||
return target
|
||||
}
|
||||
|
||||
if (!parsedUrl.port && useExplicitPort) {
|
||||
parsedUrl.port = `${port}`
|
||||
}
|
||||
|
||||
// avoid converting "http://localhost:8090" to "http://localhost:8090/" - keep the original formatting if the URL is just an origin
|
||||
if (hasOriginOnlyTarget && parsedUrl.pathname === "/" && !parsedUrl.search && !parsedUrl.hash) {
|
||||
return parsedUrl.origin
|
||||
}
|
||||
|
||||
return parsedUrl.toString()
|
||||
}
|
||||
|
||||
function trimTrailingEmptyFields(fields: string[]) {
|
||||
let lastValueIndex = fields.length - 1
|
||||
while (lastValueIndex > 0 && !fields[lastValueIndex]) {
|
||||
lastValueIndex--
|
||||
}
|
||||
return fields.slice(0, lastValueIndex + 1)
|
||||
}
|
||||
|
||||
function buildProbePayload(values: ProbeValues, enabled = true) {
|
||||
const normalizedValues = v.safeParse(NormalizedProbeValuesSchema, values)
|
||||
if (!normalizedValues.success) {
|
||||
throw new Error(normalizedValues.issues[0]?.message || "Invalid probe")
|
||||
}
|
||||
|
||||
const payload = {
|
||||
system: values.system,
|
||||
enabled,
|
||||
...normalizedValues.output,
|
||||
}
|
||||
|
||||
const trimmedName = normalizedValues.output.name?.trim()
|
||||
const targetName = normalizedValues.output.target.replace(/^https?:\/\//i, "")
|
||||
if (trimmedName) {
|
||||
payload.name = trimmedName
|
||||
} else if (targetName !== normalizedValues.output.target) {
|
||||
payload.name = targetName
|
||||
} else {
|
||||
payload.name = ""
|
||||
}
|
||||
return payload
|
||||
}
|
||||
|
||||
type ProbeIdentity = Pick<ProbeValues, "system" | "target" | "protocol" | "port">
|
||||
function getProbeIdentityKey({ system, target, protocol, port }: ProbeIdentity) {
|
||||
return `${system}${target}${protocol}${port}`
|
||||
}
|
||||
|
||||
// Parses one bulk-import line of the form
// target[,protocol[,port[,interval[,name]]]] into a create payload.
// Throws with a "Line N: ..." message on validation failure.
function parseBulkProbeLine(line: string, lineNumber: number, system: string) {
	// Everything after the fourth comma belongs to the name, so it is
	// re-joined — names may themselves contain commas.
	const [rawTarget = "", rawProtocol = "", rawPort = "", rawInterval = "", ...rawName] = line.split(",")
	const parsed = v.safeParse(BulkProbeSchema, {
		target: rawTarget,
		protocol: rawProtocol,
		port: rawPort,
		interval: rawInterval,
		name: rawName.join(","),
	})
	if (!parsed.success) {
		throw new Error(`Line ${lineNumber}: ${parsed.issues[0]?.message || "invalid probe entry"}`)
	}
	// Protocol inference when omitted: a scheme-bearing target means http,
	// anything else defaults to icmp.
	const protocol = (parsed.output.protocol?.toLowerCase() ||
		(/^https?:\/\//i.test(parsed.output.target) ? "http" : "icmp")) as ProbeProtocol

	// Defaults and protocol-specific validation happen inside buildProbePayload.
	return buildProbePayload({
		system,
		target: parsed.output.target,
		protocol,
		port: parsed.output.port ? Number(parsed.output.port) : 0,
		interval: parsed.output.interval || `${defaultInterval}`,
		name: parsed.output.name || undefined,
	})
}
|
||||
|
||||
export function formatBulkProbeLine(probe: BulkProbeLineSource) {
|
||||
const port = probe.protocol !== "tcp" || probe.port === 443 ? "" : `${probe.port}`
|
||||
const interval = probe.interval === defaultInterval ? "" : `${probe.interval}`
|
||||
return trimTrailingEmptyFields([probe.target, probe.protocol, port, interval, probe.name?.trim() || ""]).join(",")
|
||||
}
|
||||
|
||||
export function AddProbeDialog({ systemId, probes }: { systemId?: string; probes: NetworkProbeRecord[] }) {
|
||||
const [open, setOpen] = useState(false)
|
||||
const [bulkOpen, setBulkOpen] = useState(false)
|
||||
const [bulkInput, setBulkInput] = useState("")
|
||||
const [bulkLoading, setBulkLoading] = useState(false)
|
||||
const [bulkSelectedSystemId, setBulkSelectedSystemId] = useState("")
|
||||
const bulkFormRef = useRef<HTMLFormElement>(null)
|
||||
const { toast } = useToast()
|
||||
const { t } = useLingui()
|
||||
const systems = useStore($systems)
|
||||
|
||||
const resetBulkForm = () => {
|
||||
setBulkInput("")
|
||||
// setBulkSelectedSystemId("")
|
||||
}
|
||||
|
||||
const openBulkAdd = (selectedSystemId?: string) => {
|
||||
if (!systemId && selectedSystemId) {
|
||||
setBulkSelectedSystemId(selectedSystemId)
|
||||
}
|
||||
setOpen(false)
|
||||
setBulkOpen(true)
|
||||
}
|
||||
|
||||
const openAdd = () => {
|
||||
setBulkOpen(false)
|
||||
setOpen(true)
|
||||
}
|
||||
|
||||
async function handleBulkSubmit(e: React.FormEvent) {
|
||||
e.preventDefault()
|
||||
setBulkLoading(true)
|
||||
let closedForSubmit = false
|
||||
|
||||
try {
|
||||
const system = systemId ?? bulkSelectedSystemId
|
||||
if (!system) {
|
||||
throw new Error("Select a system.")
|
||||
}
|
||||
const rawLines = bulkInput.split(/\r?\n/).filter((line) => line.trim())
|
||||
if (!rawLines.length) {
|
||||
throw new Error("Enter at least one probe.")
|
||||
}
|
||||
|
||||
const payloads = rawLines.map((line, index) => parseBulkProbeLine(line, index + 1, system))
|
||||
const existingProbeKeys = new Set(
|
||||
probes.filter((probe) => probe.system === system).map((probe) => getProbeIdentityKey(probe))
|
||||
)
|
||||
const newPayloads = [] as typeof payloads
|
||||
|
||||
for (const payload of payloads) {
|
||||
const probeKey = getProbeIdentityKey(payload)
|
||||
if (existingProbeKeys.has(probeKey)) {
|
||||
continue
|
||||
}
|
||||
|
||||
existingProbeKeys.add(probeKey)
|
||||
newPayloads.push(payload)
|
||||
}
|
||||
|
||||
if (!newPayloads.length) {
|
||||
throw new Error("No new probes. All entries exist.")
|
||||
}
|
||||
|
||||
closedForSubmit = true
|
||||
let batch = pb.createBatch()
|
||||
let inBatch = 0
|
||||
for (const payload of newPayloads) {
|
||||
batch.collection("network_probes").create(payload)
|
||||
inBatch++
|
||||
if (inBatch > 20) {
|
||||
await batch.send()
|
||||
batch = pb.createBatch()
|
||||
inBatch = 0
|
||||
}
|
||||
}
|
||||
if (inBatch) {
|
||||
await batch.send()
|
||||
}
|
||||
|
||||
resetBulkForm()
|
||||
toast({ title: t`Probes created`, description: `${newPayloads.length} probe(s) added.` })
|
||||
} catch (err: unknown) {
|
||||
if (closedForSubmit) {
|
||||
setBulkOpen(true)
|
||||
}
|
||||
toast({ variant: "destructive", title: t`Error`, description: (err as Error)?.message })
|
||||
} finally {
|
||||
setBulkLoading(false)
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
<div className="flex gap-0 rounded-lg">
|
||||
<Button variant="outline" onClick={openAdd} className="rounded-e-none grow">
|
||||
{/* <PlusIcon className="size-4 me-1" /> */}
|
||||
<Trans>Add {{ foo: t`Probe` }}</Trans>
|
||||
</Button>
|
||||
<div className="w-px h-full bg-muted"></div>
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button variant="outline" className="px-2 rounded-s-none border-s-0" aria-label={t`More probe actions`}>
|
||||
<ChevronDownIcon className="size-4" />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align="end">
|
||||
<DropdownMenuItem onClick={() => openBulkAdd(systemId)}>
|
||||
<ListIcon className="size-4 me-2" />
|
||||
<Trans>Bulk Add</Trans>
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
</div>
|
||||
<Dialog
|
||||
open={open}
|
||||
onOpenChange={(nextOpen) => {
|
||||
setOpen(nextOpen)
|
||||
}}
|
||||
>
|
||||
<ProbeDialogContent open={open} setOpen={setOpen} systemId={systemId} onOpenBulkAdd={openBulkAdd} />
|
||||
</Dialog>
|
||||
|
||||
<Sheet
|
||||
open={bulkOpen}
|
||||
onOpenChange={(nextOpen) => {
|
||||
setBulkOpen(nextOpen)
|
||||
if (!nextOpen) {
|
||||
resetBulkForm()
|
||||
}
|
||||
}}
|
||||
>
|
||||
<SheetContent className="w-full sm:max-w-xl gap-0">
|
||||
<SheetHeader className="border-b">
|
||||
<SheetTitle>
|
||||
<Trans>Bulk Add {{ foo: t`Network Probes` }}</Trans>
|
||||
</SheetTitle>
|
||||
<SheetDescription>target[,protocol[,port[,interval[,name]]]]</SheetDescription>
|
||||
</SheetHeader>
|
||||
<form ref={bulkFormRef} onSubmit={handleBulkSubmit} className="flex h-full flex-col overflow-hidden">
|
||||
<div className="flex-1 flex flex-col space-y-4 overflow-auto p-4">
|
||||
{!systemId && (
|
||||
<div className="grid gap-2">
|
||||
<Label className="sr-only">
|
||||
<Trans>System</Trans>
|
||||
</Label>
|
||||
<Select value={bulkSelectedSystemId} onValueChange={setBulkSelectedSystemId} required>
|
||||
<SelectTrigger className="relative ps-10 pe-5 bg-card">
|
||||
<ServerIcon className="size-3.5 absolute start-4 top-1/2 -translate-y-1/2 opacity-85" />
|
||||
<SelectValue placeholder={t`Select a system`} />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{systems.map((sys) => (
|
||||
<SelectItem key={sys.id} value={sys.id}>
|
||||
{sys.name}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
)}
|
||||
<div className="grow flex flex-col gap-2">
|
||||
<Label htmlFor="bulk-probes" className="sr-only">
|
||||
Entries
|
||||
</Label>
|
||||
<Textarea
|
||||
id="bulk-probes"
|
||||
value={bulkInput}
|
||||
onChange={(e) => setBulkInput(e.target.value)}
|
||||
onKeyDown={(e) => {
|
||||
if (e.key === "Enter" && (e.ctrlKey || e.metaKey)) {
|
||||
e.preventDefault()
|
||||
bulkFormRef.current?.requestSubmit()
|
||||
}
|
||||
}}
|
||||
className="font-mono grow text-sm bg-card"
|
||||
placeholder={["1.1.1.1", "example.com,tcp", "https://example.com,http,,60,Example"].join("\n")}
|
||||
required
|
||||
/>
|
||||
<p className="text-xs text-muted-foreground">target[,protocol[,port[,interval[,name]]]]</p>
|
||||
</div>
|
||||
</div>
|
||||
<SheetFooter className="border-t">
|
||||
<Button type="submit" disabled={bulkLoading || (!systemId && !bulkSelectedSystemId)}>
|
||||
<Trans>Add {{ foo: t`Network Probes` }}</Trans>
|
||||
</Button>
|
||||
</SheetFooter>
|
||||
</form>
|
||||
</SheetContent>
|
||||
</Sheet>
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
export function EditProbeDialog({
|
||||
open,
|
||||
setOpen,
|
||||
systemId,
|
||||
probe,
|
||||
}: {
|
||||
open: boolean
|
||||
setOpen: (open: boolean) => void
|
||||
systemId?: string
|
||||
probe?: NetworkProbeRecord
|
||||
}) {
|
||||
const hasOpened = useRef(false)
|
||||
if (!probe && !hasOpened.current) {
|
||||
return null
|
||||
}
|
||||
hasOpened.current = true
|
||||
return (
|
||||
<Dialog open={open} onOpenChange={setOpen}>
|
||||
<ProbeDialogContent open={open} setOpen={setOpen} systemId={systemId} probe={probe} />
|
||||
</Dialog>
|
||||
)
|
||||
}
|
||||
|
||||
function ProbeDialogContent({
|
||||
open,
|
||||
setOpen,
|
||||
systemId,
|
||||
probe,
|
||||
onOpenBulkAdd,
|
||||
}: {
|
||||
open: boolean
|
||||
setOpen: (open: boolean) => void
|
||||
systemId?: string
|
||||
probe?: NetworkProbeRecord
|
||||
onOpenBulkAdd?: (selectedSystemId?: string) => void
|
||||
}) {
|
||||
const [protocol, setProtocol] = useState<ProbeProtocol>(probe?.protocol ?? "icmp")
|
||||
const [target, setTarget] = useState(probe?.target ?? "")
|
||||
const [port, setPort] = useState(probe?.protocol === "tcp" && probe.port ? String(probe.port) : "")
|
||||
const [probeInterval, setProbeInterval] = useState(String(probe?.interval ?? defaultInterval))
|
||||
const [name, setName] = useState(probe?.name ?? "")
|
||||
const [loading, setLoading] = useState(false)
|
||||
const [selectedSystemId, setSelectedSystemId] = useState(probe?.system ?? "")
|
||||
const systems = useStore($systems)
|
||||
const { toast } = useToast()
|
||||
const { t } = useLingui()
|
||||
const isEditing = !!probe
|
||||
const targetName = target.replace(/^https?:\/\//, "")
|
||||
|
||||
// When the dialog is opened, initialize form fields with probe values (if editing) or defaults (if adding).
|
||||
useEffect(() => {
|
||||
if (!open) {
|
||||
return
|
||||
}
|
||||
|
||||
setProtocol(probe?.protocol ?? "icmp")
|
||||
setTarget(probe?.target ?? "")
|
||||
setPort(probe?.protocol === "tcp" && probe.port ? String(probe.port) : "")
|
||||
setProbeInterval(String(probe?.interval ?? defaultInterval))
|
||||
setName(probe?.name ?? "")
|
||||
setSelectedSystemId(probe?.system ?? "")
|
||||
setLoading(false)
|
||||
}, [open, probe])
|
||||
|
||||
async function handleSubmit(e: React.FormEvent) {
|
||||
e.preventDefault()
|
||||
setLoading(true)
|
||||
|
||||
try {
|
||||
const selectedSystem = systemId ?? selectedSystemId
|
||||
if (!selectedSystem) {
|
||||
throw new Error("Select a system.")
|
||||
}
|
||||
const payload = buildProbePayload(
|
||||
{
|
||||
system: selectedSystem,
|
||||
target,
|
||||
protocol,
|
||||
port: protocol === "tcp" ? Number(port) : 0,
|
||||
interval: probeInterval,
|
||||
name,
|
||||
},
|
||||
probe ? probe.enabled : true
|
||||
)
|
||||
if (probe) {
|
||||
await pb.collection("network_probes").update(probe.id, payload)
|
||||
} else {
|
||||
await pb.collection("network_probes").create(payload)
|
||||
}
|
||||
setOpen(false)
|
||||
} catch (err: unknown) {
|
||||
toast({ variant: "destructive", title: t`Error`, description: (err as Error)?.message })
|
||||
} finally {
|
||||
setLoading(false)
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<DialogContent className="max-w-md">
|
||||
<DialogHeader>
|
||||
<DialogTitle>
|
||||
{isEditing ? <Trans>Edit {{ foo: t`Network Probe` }}</Trans> : <Trans>Add {{ foo: t`Network Probe` }}</Trans>}
|
||||
</DialogTitle>
|
||||
<DialogDescription>
|
||||
<Trans>Configure response monitoring from this agent.</Trans>
|
||||
</DialogDescription>
|
||||
</DialogHeader>
|
||||
<form onSubmit={handleSubmit} className="grid gap-4 tabular-nums">
|
||||
{!systemId && (
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>System</Trans>
|
||||
</Label>
|
||||
<Select value={selectedSystemId} onValueChange={setSelectedSystemId} required>
|
||||
<SelectTrigger>
|
||||
<SelectValue placeholder={t`Select a system`} />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
{systems.map((sys) => (
|
||||
<SelectItem key={sys.id} value={sys.id}>
|
||||
{sys.name}
|
||||
</SelectItem>
|
||||
))}
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
)}
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>Target</Trans>
|
||||
</Label>
|
||||
<Input
|
||||
value={target}
|
||||
onChange={(e) => setTarget(e.target.value)}
|
||||
placeholder={protocol === "http" ? "http://localhost:8090" : "1.1.1.1"}
|
||||
required
|
||||
/>
|
||||
</div>
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>Protocol</Trans>
|
||||
</Label>
|
||||
|
||||
<Select value={protocol} onValueChange={(value) => setProtocol(value as ProbeProtocol)}>
|
||||
<SelectTrigger>
|
||||
<SelectValue />
|
||||
</SelectTrigger>
|
||||
<SelectContent>
|
||||
<SelectItem value="icmp">ICMP</SelectItem>
|
||||
<SelectItem value="tcp">TCP</SelectItem>
|
||||
<SelectItem value="http">HTTP</SelectItem>
|
||||
</SelectContent>
|
||||
</Select>
|
||||
</div>
|
||||
{protocol === "tcp" && (
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>Port</Trans>
|
||||
</Label>
|
||||
<Input
|
||||
type="number"
|
||||
value={port}
|
||||
onChange={(e) => setPort(e.target.value)}
|
||||
placeholder="443"
|
||||
min={1}
|
||||
max={65535}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>Interval (seconds)</Trans>
|
||||
</Label>
|
||||
<Input
|
||||
type="number"
|
||||
value={probeInterval}
|
||||
onChange={(e) => setProbeInterval(e.target.value)}
|
||||
min={1}
|
||||
max={3600}
|
||||
required
|
||||
/>
|
||||
</div>
|
||||
<div className="grid gap-2">
|
||||
<Label>
|
||||
<Trans>Name (optional)</Trans>
|
||||
</Label>
|
||||
<Input
|
||||
value={name}
|
||||
onChange={(e) => setName(e.target.value)}
|
||||
placeholder={targetName || t`e.g. Cloudflare DNS`}
|
||||
/>
|
||||
</div>
|
||||
<DialogFooter>
|
||||
{!isEditing && onOpenBulkAdd && (
|
||||
<Button
|
||||
type="button"
|
||||
variant="outline"
|
||||
onClick={() => onOpenBulkAdd(selectedSystemId)}
|
||||
disabled={loading}
|
||||
className="me-auto"
|
||||
>
|
||||
<ListIcon className="size-4 me-2" />
|
||||
<Trans>Bulk Add</Trans>
|
||||
</Button>
|
||||
)}
|
||||
<Button type="submit" disabled={loading || (!systemId && !selectedSystemId)}>
|
||||
{loading ? (
|
||||
isEditing ? (
|
||||
<Trans>Saving...</Trans>
|
||||
) : (
|
||||
<Trans>Creating...</Trans>
|
||||
)
|
||||
) : isEditing ? (
|
||||
<Trans>Save {{ foo: t`Probe` }}</Trans>
|
||||
) : (
|
||||
<Trans>Add {{ foo: t`Probe` }}</Trans>
|
||||
)}
|
||||
</Button>
|
||||
</DialogFooter>
|
||||
</form>
|
||||
</DialogContent>
|
||||
)
|
||||
}
|
||||
@@ -4,6 +4,7 @@ const routes = {
|
||||
home: "/",
|
||||
containers: "/containers",
|
||||
smart: "/smart",
|
||||
probes: "/probes",
|
||||
system: `/system/:id`,
|
||||
settings: `/settings/:name?`,
|
||||
forgot_password: `/forgot-password`,
|
||||
|
||||
25
internal/site/src/components/routes/probes.tsx
Normal file
25
internal/site/src/components/routes/probes.tsx
Normal file
@@ -0,0 +1,25 @@
|
||||
import { useLingui } from "@lingui/react/macro"
|
||||
import { memo, useEffect } from "react"
|
||||
import NetworkProbesTableNew from "@/components/network-probes-table/network-probes-table"
|
||||
import { ActiveAlerts } from "@/components/active-alerts"
|
||||
import { FooterRepoLink } from "@/components/footer-repo-link"
|
||||
import { useNetworkProbes } from "@/lib/use-network-probes"
|
||||
|
||||
export default memo(() => {
|
||||
const { t } = useLingui()
|
||||
const probes = useNetworkProbes({})
|
||||
|
||||
useEffect(() => {
|
||||
document.title = `${t`Network Probes`} / Beszel`
|
||||
}, [t])
|
||||
|
||||
return (
|
||||
<>
|
||||
<div className="grid gap-4">
|
||||
<ActiveAlerts />
|
||||
<NetworkProbesTableNew probes={probes} />
|
||||
</div>
|
||||
<FooterRepoLink />
|
||||
</>
|
||||
)
|
||||
})
|
||||
@@ -15,6 +15,7 @@ import { isAdmin, pb } from "@/lib/api"
|
||||
import type { UserSettings } from "@/types"
|
||||
import { saveSettings } from "./layout"
|
||||
import { QuietHours } from "./quiet-hours"
|
||||
import type { ClientResponseError } from "pocketbase"
|
||||
|
||||
interface ShoutrrrUrlCardProps {
|
||||
url: string
|
||||
@@ -59,10 +60,10 @@ const SettingsNotificationsPage = ({ userSettings }: { userSettings: UserSetting
|
||||
try {
|
||||
const parsedData = v.parse(NotificationSchema, { emails, webhooks })
|
||||
await saveSettings(parsedData)
|
||||
} catch (e: any) {
|
||||
} catch (e: unknown) {
|
||||
toast({
|
||||
title: t`Failed to save settings`,
|
||||
description: e.message,
|
||||
description: (e as Error).message,
|
||||
variant: "destructive",
|
||||
})
|
||||
}
|
||||
@@ -136,12 +137,7 @@ const SettingsNotificationsPage = ({ userSettings }: { userSettings: UserSetting
|
||||
</Trans>
|
||||
</p>
|
||||
</div>
|
||||
<Button
|
||||
type="button"
|
||||
variant="outline"
|
||||
className="h-10 shrink-0"
|
||||
onClick={addWebhook}
|
||||
>
|
||||
<Button type="button" variant="outline" className="h-10 shrink-0" onClick={addWebhook}>
|
||||
<PlusIcon className="size-4" />
|
||||
<span className="ms-1">
|
||||
<Trans>Add URL</Trans>
|
||||
@@ -180,25 +176,34 @@ const SettingsNotificationsPage = ({ userSettings }: { userSettings: UserSetting
|
||||
)
|
||||
}
|
||||
|
||||
function showTestNotificationError(msg: string) {
|
||||
toast({
|
||||
title: t`Error`,
|
||||
description: msg ?? t`Failed to send test notification`,
|
||||
variant: "destructive",
|
||||
})
|
||||
}
|
||||
|
||||
const ShoutrrrUrlCard = ({ url, onUrlChange, onRemove }: ShoutrrrUrlCardProps) => {
|
||||
const [isLoading, setIsLoading] = useState(false)
|
||||
|
||||
const sendTestNotification = async () => {
|
||||
setIsLoading(true)
|
||||
const res = await pb.send("/api/beszel/test-notification", { method: "POST", body: { url } })
|
||||
if ("err" in res && !res.err) {
|
||||
toast({
|
||||
title: t`Test notification sent`,
|
||||
description: t`Check your notification service`,
|
||||
})
|
||||
} else {
|
||||
toast({
|
||||
title: t`Error`,
|
||||
description: res.err ?? t`Failed to send test notification`,
|
||||
variant: "destructive",
|
||||
})
|
||||
try {
|
||||
const res = await pb.send("/api/beszel/test-notification", { method: "POST", body: { url } })
|
||||
if ("err" in res && !res.err) {
|
||||
toast({
|
||||
title: t`Test notification sent`,
|
||||
description: t`Check your notification service`,
|
||||
})
|
||||
} else {
|
||||
showTestNotificationError(res.err)
|
||||
}
|
||||
} catch (e: unknown) {
|
||||
showTestNotificationError((e as ClientResponseError).data?.message)
|
||||
} finally {
|
||||
setIsLoading(false)
|
||||
}
|
||||
setIsLoading(false)
|
||||
}
|
||||
|
||||
return (
|
||||
|
||||
@@ -1,21 +1,19 @@
|
||||
import { memo, useState } from "react"
|
||||
import { Trans } from "@lingui/react/macro"
|
||||
import { compareSemVer, parseSemVer } from "@/lib/utils"
|
||||
|
||||
import type { GPUData } from "@/types"
|
||||
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"
|
||||
import InfoBar from "./system/info-bar"
|
||||
import { useSystemData } from "./system/use-system-data"
|
||||
import { CpuChart, ContainerCpuChart } from "./system/charts/cpu-charts"
|
||||
import { MemoryChart, ContainerMemoryChart, SwapChart } from "./system/charts/memory-charts"
|
||||
import { DiskCharts } from "./system/charts/disk-charts"
|
||||
import { RootDiskCharts, ExtraFsCharts } from "./system/charts/disk-charts"
|
||||
import { BandwidthChart, ContainerNetworkChart } from "./system/charts/network-charts"
|
||||
import { TemperatureChart, BatteryChart } from "./system/charts/sensor-charts"
|
||||
import { GpuPowerChart, GpuDetailCharts } from "./system/charts/gpu-charts"
|
||||
import { ExtraFsCharts } from "./system/charts/extra-fs-charts"
|
||||
import { LazyContainersTable, LazySmartTable, LazySystemdTable } from "./system/lazy-tables"
|
||||
import { LazyContainersTable, LazySmartTable, LazySystemdTable, LazyNetworkProbesTable } from "./system/lazy-tables"
|
||||
import { LoadAverageChart } from "./system/charts/load-average-chart"
|
||||
import { ContainerIcon, CpuIcon, HardDriveIcon, TerminalSquareIcon } from "lucide-react"
|
||||
import { ContainerIcon, CpuIcon, HardDriveIcon, NetworkIcon, TerminalSquareIcon } from "lucide-react"
|
||||
import { GpuIcon } from "../ui/icons"
|
||||
import SystemdTable from "../systemd-table/systemd-table"
|
||||
import ContainersTable from "../containers-table/containers-table"
|
||||
@@ -24,6 +22,8 @@ const SEMVER_0_14_0 = parseSemVer("0.14.0")
|
||||
const SEMVER_0_15_0 = parseSemVer("0.15.0")
|
||||
|
||||
export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
const systemData = useSystemData(id)
|
||||
|
||||
const {
|
||||
system,
|
||||
systemStats,
|
||||
@@ -48,7 +48,7 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
hasGpuData,
|
||||
hasGpuEnginesData,
|
||||
hasGpuPowerData,
|
||||
} = useSystemData(id)
|
||||
} = systemData
|
||||
|
||||
// extra margin to add to bottom of page, specifically for temperature chart,
|
||||
// where the tooltip can go past the bottom of the page if lots of sensors
|
||||
@@ -65,7 +65,7 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
const hasGpu = hasGpuData || hasGpuPowerData
|
||||
|
||||
// keep tabsRef in sync for keyboard navigation
|
||||
const tabs = ["core", "disk"]
|
||||
const tabs = ["core", "network", "disk"]
|
||||
if (hasGpu) tabs.push("gpu")
|
||||
if (hasContainers) tabs.push("containers")
|
||||
if (hasSystemd) tabs.push("services")
|
||||
@@ -103,7 +103,7 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
/>
|
||||
)}
|
||||
|
||||
<DiskCharts {...coreProps} systemStats={systemStats} />
|
||||
<RootDiskCharts systemData={systemData} />
|
||||
|
||||
<BandwidthChart {...coreProps} systemStats={systemStats} />
|
||||
|
||||
@@ -138,13 +138,15 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
/>
|
||||
)}
|
||||
|
||||
<ExtraFsCharts {...coreProps} systemStats={systemStats} />
|
||||
<ExtraFsCharts systemData={systemData} />
|
||||
|
||||
{maybeHasSmartData && <LazySmartTable systemId={system.id} />}
|
||||
|
||||
{hasContainersTable && <LazyContainersTable systemId={system.id} />}
|
||||
|
||||
{hasSystemd && <LazySystemdTable systemId={system.id} />}
|
||||
|
||||
<LazyNetworkProbesTable systemId={system.id} systemData={systemData} />
|
||||
</>
|
||||
)
|
||||
}
|
||||
@@ -157,6 +159,10 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
<CpuIcon className="size-3.5" />
|
||||
<Trans context="Core system metrics">Core</Trans>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="network" className="w-full flex items-center gap-1.5">
|
||||
<NetworkIcon className="size-3.5" />
|
||||
<Trans>Network</Trans>
|
||||
</TabsTrigger>
|
||||
<TabsTrigger value="disk" className="w-full flex items-center gap-1.5">
|
||||
<HardDriveIcon className="size-3.5" />
|
||||
<Trans>Disk</Trans>
|
||||
@@ -184,22 +190,33 @@ export default memo(function SystemDetail({ id }: { id: string }) {
|
||||
<TabsContent value="core" forceMount className={activeTab === "core" ? "contents" : "hidden"}>
|
||||
<div className="grid xl:grid-cols-2 gap-4">
|
||||
<CpuChart {...coreProps} />
|
||||
<MemoryChart {...coreProps} />
|
||||
<LoadAverageChart chartData={chartData} grid={grid} dataEmpty={dataEmpty} />
|
||||
<BandwidthChart {...coreProps} systemStats={systemStats} />
|
||||
<TemperatureChart {...coreProps} setPageBottomExtraMargin={setPageBottomExtraMargin} />
|
||||
<MemoryChart {...coreProps} />
|
||||
<SwapChart chartData={chartData} grid={grid} dataEmpty={dataEmpty} systemStats={systemStats} />
|
||||
<TemperatureChart {...coreProps} setPageBottomExtraMargin={setPageBottomExtraMargin} />
|
||||
<BatteryChart {...coreProps} />
|
||||
{pageBottomExtraMargin > 0 && <div style={{ marginBottom: pageBottomExtraMargin }}></div>}
|
||||
</div>
|
||||
</TabsContent>
|
||||
|
||||
<TabsContent value="network" forceMount className={activeTab === "network" ? "contents" : "hidden"}>
|
||||
{mountedTabs.has("network") && (
|
||||
<>
|
||||
<div className="grid xl:grid-cols-2 gap-4">
|
||||
<BandwidthChart {...coreProps} systemStats={systemStats} />
|
||||
</div>
|
||||
<LazyNetworkProbesTable systemId={system.id} systemData={systemData} />
|
||||
</>
|
||||
)}
|
||||
</TabsContent>
|
||||
|
||||
<TabsContent value="disk" forceMount className={activeTab === "disk" ? "contents" : "hidden"}>
|
||||
{mountedTabs.has("disk") && (
|
||||
<>
|
||||
<div className="grid xl:grid-cols-2 gap-4">
|
||||
<DiskCharts {...coreProps} systemStats={systemStats} />
|
||||
<RootDiskCharts systemData={systemData} />
|
||||
</div>
|
||||
<ExtraFsCharts {...coreProps} systemStats={systemStats} />
|
||||
<ExtraFsCharts systemData={systemData} />
|
||||
{maybeHasSmartData && <LazySmartTable systemId={system.id} />}
|
||||
</>
|
||||
)}
|
||||
|
||||
@@ -1,7 +1,13 @@
|
||||
import { timeTicks } from "d3-time"
|
||||
import { getPbTimestamp, pb } from "@/lib/api"
|
||||
import { chartTimeData } from "@/lib/utils"
|
||||
import type { ChartData, ChartTimes, ContainerStatsRecord, SystemStatsRecord } from "@/types"
|
||||
import type {
|
||||
ChartData,
|
||||
ChartDataContainer,
|
||||
ChartTimes,
|
||||
ContainerStatsRecord,
|
||||
NetworkProbeStatsRecord,
|
||||
SystemStatsRecord,
|
||||
} from "@/types"
|
||||
|
||||
type ChartTimeData = {
|
||||
time: number
|
||||
@@ -17,31 +23,10 @@ export const cache = new Map<
|
||||
ChartTimeData | SystemStatsRecord[] | ContainerStatsRecord[] | ChartData["containerData"]
|
||||
>()
|
||||
|
||||
// create ticks and domain for charts
|
||||
export function getTimeData(chartTime: ChartTimes, lastCreated: number) {
|
||||
const cached = cache.get("td") as ChartTimeData | undefined
|
||||
if (cached && cached.chartTime === chartTime) {
|
||||
if (!lastCreated || cached.time >= lastCreated) {
|
||||
return cached.data
|
||||
}
|
||||
}
|
||||
|
||||
// const buffer = chartTime === "1m" ? 400 : 20_000
|
||||
const now = new Date(Date.now())
|
||||
const startTime = chartTimeData[chartTime].getOffset(now)
|
||||
const ticks = timeTicks(startTime, now, chartTimeData[chartTime].ticks ?? 12).map((date) => date.getTime())
|
||||
const data = {
|
||||
ticks,
|
||||
domain: [chartTimeData[chartTime].getOffset(now).getTime(), now.getTime()],
|
||||
}
|
||||
cache.set("td", { time: now.getTime(), data, chartTime })
|
||||
return data
|
||||
}
|
||||
|
||||
/** Append new records onto prev with gap detection. Converts string `created` values to ms timestamps in place.
|
||||
* Pass `maxLen` to cap the result length in one copy instead of slicing again after the call. */
|
||||
export function appendData<T extends { created: string | number | null }>(
|
||||
prev: T[],
|
||||
prev: T[] = [],
|
||||
newRecords: T[],
|
||||
expectedInterval: number,
|
||||
maxLen?: number
|
||||
@@ -66,17 +51,18 @@ export function appendData<T extends { created: string | number | null }>(
|
||||
return result
|
||||
}
|
||||
|
||||
export async function getStats<T extends SystemStatsRecord | ContainerStatsRecord>(
|
||||
export async function getStats<T extends SystemStatsRecord | ContainerStatsRecord | NetworkProbeStatsRecord>(
|
||||
collection: string,
|
||||
systemId: string,
|
||||
chartTime: ChartTimes
|
||||
chartTime: ChartTimes,
|
||||
cachedStats?: { created: string | number | null }[],
|
||||
createdIsNumber?: boolean
|
||||
): Promise<T[]> {
|
||||
const cachedStats = cache.get(`${systemId}_${chartTime}_${collection}`) as T[] | undefined
|
||||
const lastCached = cachedStats?.at(-1)?.created as number
|
||||
return await pb.collection<T>(collection).getFullList({
|
||||
filter: pb.filter("system={:id} && created > {:created} && type={:type}", {
|
||||
id: systemId,
|
||||
created: getPbTimestamp(chartTime, lastCached ? new Date(lastCached + 1000) : undefined),
|
||||
created: getPbTimestamp(chartTime, lastCached ? new Date(lastCached + 1000) : undefined, createdIsNumber),
|
||||
type: chartTimeData[chartTime].type,
|
||||
}),
|
||||
fields: "created,stats",
|
||||
@@ -84,11 +70,11 @@ export async function getStats<T extends SystemStatsRecord | ContainerStatsRecor
|
||||
})
|
||||
}
|
||||
|
||||
export function makeContainerData(containers: ContainerStatsRecord[]): ChartData["containerData"] {
|
||||
const result = [] as ChartData["containerData"]
|
||||
export function makeContainerData(containers: ContainerStatsRecord[]): ChartDataContainer[] {
|
||||
const result = [] as ChartDataContainer[]
|
||||
for (const { created, stats } of containers) {
|
||||
if (!created) {
|
||||
result.push({ created: null } as ChartData["containerData"][0])
|
||||
result.push({ created: null } as ChartDataContainer)
|
||||
continue
|
||||
}
|
||||
result.push(makeContainerPoint(new Date(created).getTime(), stats))
|
||||
@@ -97,11 +83,8 @@ export function makeContainerData(containers: ContainerStatsRecord[]): ChartData
|
||||
}
|
||||
|
||||
/** Transform a single realtime container stats message into a ChartDataContainer point. */
|
||||
export function makeContainerPoint(
|
||||
created: number,
|
||||
stats: ContainerStatsRecord["stats"]
|
||||
): ChartData["containerData"][0] {
|
||||
const point: ChartData["containerData"][0] = { created } as ChartData["containerData"][0]
|
||||
export function makeContainerPoint(created: number, stats: ContainerStatsRecord["stats"]): ChartDataContainer {
|
||||
const point: ChartDataContainer = { created } as ChartDataContainer
|
||||
for (const container of stats) {
|
||||
;(point as Record<string, unknown>)[container.n] = container
|
||||
}
|
||||
|
||||
@@ -1,106 +1,283 @@
|
||||
import { t } from "@lingui/core/macro"
|
||||
import AreaChartDefault from "@/components/charts/area-chart"
|
||||
import { $userSettings } from "@/lib/stores"
|
||||
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||
import type { SystemStatsRecord } from "@/types"
|
||||
import { ChartCard, SelectAvgMax } from "../chart-card"
|
||||
import { Unit } from "@/lib/enums"
|
||||
import { pinnedAxisDomain } from "@/components/ui/chart"
|
||||
import DiskIoSheet from "../disk-io-sheet"
|
||||
import type { SystemData } from "../use-system-data"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { $userSettings } from "@/lib/stores"
|
||||
|
||||
export function DiskCharts({
|
||||
chartData,
|
||||
grid,
|
||||
dataEmpty,
|
||||
showMax,
|
||||
isLongerChart,
|
||||
maxValues,
|
||||
}: {
|
||||
chartData: ChartData
|
||||
grid: boolean
|
||||
dataEmpty: boolean
|
||||
showMax: boolean
|
||||
isLongerChart: boolean
|
||||
maxValues: boolean
|
||||
systemStats: SystemStatsRecord[]
|
||||
}) {
|
||||
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||
const userSettings = $userSettings.get()
|
||||
// Helpers for indexed dios/diosm access
|
||||
const dios =
|
||||
(i: number) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.dios?.[i] ?? 0
|
||||
const diosMax =
|
||||
(i: number) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.diosm?.[i] ?? 0
|
||||
const extraDios =
|
||||
(name: string, i: number) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.dios?.[i] ?? 0
|
||||
const extraDiosMax =
|
||||
(name: string, i: number) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.diosm?.[i] ?? 0
|
||||
|
||||
export const diskDataFns = {
|
||||
// usage
|
||||
usage: ({ stats }: SystemStatsRecord) => stats?.du ?? 0,
|
||||
extraUsage:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.du ?? 0,
|
||||
// throughput
|
||||
read: ({ stats }: SystemStatsRecord) => stats?.dio?.[0] ?? (stats?.dr ?? 0) * 1024 * 1024,
|
||||
readMax: ({ stats }: SystemStatsRecord) => stats?.diom?.[0] ?? (stats?.drm ?? 0) * 1024 * 1024,
|
||||
write: ({ stats }: SystemStatsRecord) => stats?.dio?.[1] ?? (stats?.dw ?? 0) * 1024 * 1024,
|
||||
writeMax: ({ stats }: SystemStatsRecord) => stats?.diom?.[1] ?? (stats?.dwm ?? 0) * 1024 * 1024,
|
||||
// extra fs throughput
|
||||
extraRead:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.rb ?? (stats?.efs?.[name]?.r ?? 0) * 1024 * 1024,
|
||||
extraReadMax:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.rbm ?? (stats?.efs?.[name]?.rm ?? 0) * 1024 * 1024,
|
||||
extraWrite:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.wb ?? (stats?.efs?.[name]?.w ?? 0) * 1024 * 1024,
|
||||
extraWriteMax:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
stats?.efs?.[name]?.wbm ?? (stats?.efs?.[name]?.wm ?? 0) * 1024 * 1024,
|
||||
// read/write time
|
||||
readTime: dios(0),
|
||||
readTimeMax: diosMax(0),
|
||||
extraReadTime: (name: string) => extraDios(name, 0),
|
||||
extraReadTimeMax: (name: string) => extraDiosMax(name, 0),
|
||||
writeTime: dios(1),
|
||||
writeTimeMax: diosMax(1),
|
||||
extraWriteTime: (name: string) => extraDios(name, 1),
|
||||
extraWriteTimeMax: (name: string) => extraDiosMax(name, 1),
|
||||
// utilization (IoTime-based, 0-100%)
|
||||
util: dios(2),
|
||||
utilMax: diosMax(2),
|
||||
extraUtil: (name: string) => extraDios(name, 2),
|
||||
extraUtilMax: (name: string) => extraDiosMax(name, 2),
|
||||
// r_await / w_await: average service time per read/write operation (ms)
|
||||
rAwait: dios(3),
|
||||
rAwaitMax: diosMax(3),
|
||||
extraRAwait: (name: string) => extraDios(name, 3),
|
||||
extraRAwaitMax: (name: string) => extraDiosMax(name, 3),
|
||||
wAwait: dios(4),
|
||||
wAwaitMax: diosMax(4),
|
||||
extraWAwait: (name: string) => extraDios(name, 4),
|
||||
extraWAwaitMax: (name: string) => extraDiosMax(name, 4),
|
||||
// average queue depth: stored as queue_depth * 100 in Go, divided here
|
||||
weightedIO: ({ stats }: SystemStatsRecord) => (stats?.dios?.[5] ?? 0) / 100,
|
||||
weightedIOMax: ({ stats }: SystemStatsRecord) => (stats?.diosm?.[5] ?? 0) / 100,
|
||||
extraWeightedIO:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
(stats?.efs?.[name]?.dios?.[5] ?? 0) / 100,
|
||||
extraWeightedIOMax:
|
||||
(name: string) =>
|
||||
({ stats }: SystemStatsRecord) =>
|
||||
(stats?.efs?.[name]?.diosm?.[5] ?? 0) / 100,
|
||||
}
|
||||
|
||||
export function RootDiskCharts({ systemData }: { systemData: SystemData }) {
|
||||
return (
|
||||
<>
|
||||
<DiskUsageChart systemData={systemData} />
|
||||
<DiskIOChart systemData={systemData} />
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
export function DiskUsageChart({ systemData, extraFsName }: { systemData: SystemData; extraFsName?: string }) {
|
||||
const { chartData, grid, dataEmpty } = systemData
|
||||
|
||||
let diskSize = chartData.systemStats?.at(-1)?.stats.d ?? NaN
|
||||
if (extraFsName) {
|
||||
diskSize = chartData.systemStats?.at(-1)?.stats.efs?.[extraFsName]?.d ?? NaN
|
||||
}
|
||||
// round to nearest GB
|
||||
if (diskSize >= 100) {
|
||||
diskSize = Math.round(diskSize)
|
||||
}
|
||||
|
||||
return (
|
||||
<>
|
||||
<ChartCard empty={dataEmpty} grid={grid} title={t`Disk Usage`} description={t`Usage of root partition`}>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
domain={[0, diskSize]}
|
||||
tickFormatter={(val) => {
|
||||
const { value, unit } = formatBytes(val * 1024, false, Unit.Bytes, true)
|
||||
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||
}}
|
||||
contentFormatter={({ value }) => {
|
||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
||||
return `${decimalString(convertedValue)} ${unit}`
|
||||
}}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t`Disk Usage`,
|
||||
color: 4,
|
||||
opacity: 0.4,
|
||||
dataKey: ({ stats }) => stats?.du,
|
||||
},
|
||||
]}
|
||||
></AreaChartDefault>
|
||||
</ChartCard>
|
||||
const title = extraFsName ? `${extraFsName} ${t`Usage`}` : t`Disk Usage`
|
||||
const description = extraFsName ? t`Disk usage of ${extraFsName}` : t`Usage of root partition`
|
||||
|
||||
<ChartCard
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={t`Disk I/O`}
|
||||
description={t`Throughput of root filesystem`}
|
||||
cornerEl={maxValSelect}
|
||||
>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
maxToggled={showMax}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t({ message: "Write", comment: "Disk write" }),
|
||||
dataKey: ({ stats }: SystemStatsRecord) => {
|
||||
if (showMax) {
|
||||
return stats?.dio?.[1] ?? (stats?.dwm ?? 0) * 1024 * 1024
|
||||
}
|
||||
return stats?.dio?.[1] ?? (stats?.dw ?? 0) * 1024 * 1024
|
||||
},
|
||||
color: 3,
|
||||
opacity: 0.3,
|
||||
},
|
||||
{
|
||||
label: t({ message: "Read", comment: "Disk read" }),
|
||||
dataKey: ({ stats }: SystemStatsRecord) => {
|
||||
if (showMax) {
|
||||
return stats?.diom?.[0] ?? (stats?.drm ?? 0) * 1024 * 1024
|
||||
}
|
||||
return stats?.dio?.[0] ?? (stats?.dr ?? 0) * 1024 * 1024
|
||||
},
|
||||
color: 1,
|
||||
opacity: 0.3,
|
||||
},
|
||||
]}
|
||||
tickFormatter={(val) => {
|
||||
const { value, unit } = formatBytes(val, true, userSettings.unitDisk, false)
|
||||
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||
}}
|
||||
contentFormatter={({ value }) => {
|
||||
const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
|
||||
return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
|
||||
}}
|
||||
showTotal={true}
|
||||
/>
|
||||
</ChartCard>
|
||||
</>
|
||||
return (
|
||||
<ChartCard empty={dataEmpty} grid={grid} title={title} description={description}>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
domain={[0, diskSize]}
|
||||
tickFormatter={(val) => {
|
||||
const { value, unit } = formatBytes(val * 1024, false, Unit.Bytes, true)
|
||||
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||
}}
|
||||
contentFormatter={({ value }) => {
|
||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
||||
return `${decimalString(convertedValue)} ${unit}`
|
||||
}}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t`Disk Usage`,
|
||||
color: 4,
|
||||
opacity: 0.4,
|
||||
dataKey: extraFsName ? diskDataFns.extraUsage(extraFsName) : diskDataFns.usage,
|
||||
},
|
||||
]}
|
||||
></AreaChartDefault>
|
||||
</ChartCard>
|
||||
)
|
||||
}
|
||||
|
||||
/**
 * Disk throughput (read/write) chart for the root filesystem or, when
 * `extraFsName` is given, for that extra filesystem.
 * Renders nothing until system stats are available.
 */
export function DiskIOChart({ systemData, extraFsName }: { systemData: SystemData; extraFsName?: string }) {
	const { chartData, grid, dataEmpty, showMax, isLongerChart, maxValues } = systemData
	// Hook must run unconditionally, before the early return below.
	const userSettings = useStore($userSettings)
	const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null

	if (!chartData.systemStats?.length) {
		return null
	}

	const title = extraFsName ? `${extraFsName} I/O` : t`Disk I/O`
	const description = extraFsName ? t`Throughput of ${extraFsName}` : t`Throughput of root filesystem`

	// Newer agents report extended I/O metrics (dios); when present, add the
	// detail-sheet trigger next to the avg/max selector.
	const hasMoreIOMetrics = chartData.systemStats?.some((record) => record.stats?.dios?.at(0))
	const cornerEl = hasMoreIOMetrics ? (
		<div className="flex gap-2">
			{maxValSelect}
			<DiskIoSheet systemData={systemData} extraFsName={extraFsName} title={title} description={description} />
		</div>
	) : (
		maxValSelect
	)

	// Select the avg/max and root/extra-fs variants of the throughput accessors.
	const readFn = extraFsName
		? showMax
			? diskDataFns.extraReadMax(extraFsName)
			: diskDataFns.extraRead(extraFsName)
		: showMax
			? diskDataFns.readMax
			: diskDataFns.read
	const writeFn = extraFsName
		? showMax
			? diskDataFns.extraWriteMax(extraFsName)
			: diskDataFns.extraWrite(extraFsName)
		: showMax
			? diskDataFns.writeMax
			: diskDataFns.write

	return (
		<ChartCard empty={dataEmpty} grid={grid} title={title} description={description} cornerEl={cornerEl}>
			<AreaChartDefault
				chartData={chartData}
				maxToggled={showMax}
				showTotal={true}
				dataPoints={[
					{
						label: t({ message: "Write", comment: "Disk write" }),
						dataKey: writeFn,
						color: 3,
						opacity: 0.3,
					},
					{
						label: t({ message: "Read", comment: "Disk read" }),
						dataKey: readFn,
						color: 1,
						opacity: 0.3,
					},
				]}
				tickFormatter={(val) => {
					const { value, unit } = formatBytes(val, true, userSettings.unitDisk, false)
					return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
				}}
				contentFormatter={({ value }) => {
					const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
					return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
				}}
			/>
		</ChartCard>
	)
}
|
||||
|
||||
/**
 * I/O utilization chart: percent of wall-clock time the disk was busy.
 * Synced (via `syncId: "io"`) with the other charts in the disk I/O sheet.
 */
export function DiskUtilizationChart({ systemData, extraFsName }: { systemData: SystemData; extraFsName?: string }) {
	const { chartData, grid, dataEmpty, showMax, isLongerChart, maxValues } = systemData
	const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null

	if (!chartData.systemStats?.length) {
		return null
	}

	// Select the avg/max and root/extra-fs variants of the utilization accessor.
	const utilFn = extraFsName
		? showMax
			? diskDataFns.extraUtilMax(extraFsName)
			: diskDataFns.extraUtil(extraFsName)
		: showMax
			? diskDataFns.utilMax
			: diskDataFns.util

	return (
		<ChartCard
			cornerEl={maxValSelect}
			empty={dataEmpty}
			grid={grid}
			title={t({
				message: `I/O Utilization`,
				context: "Percent of time the disk is busy with I/O",
			})}
			description={t`Percent of time the disk is busy with I/O`}
			className="min-h-auto"
		>
			<AreaChartDefault
				chartData={chartData}
				domain={pinnedAxisDomain()}
				tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
				contentFormatter={({ value }) => `${decimalString(value)}%`}
				maxToggled={showMax}
				chartProps={{ syncId: "io" }}
				dataPoints={[
					{
						label: t({ message: "Utilization", context: "Disk I/O utilization" }),
						dataKey: utilFn,
						color: 1,
						opacity: 0.4,
					},
				]}
			/>
		</ChartCard>
	)
}
|
||||
|
||||
/**
 * Renders a usage + I/O chart pair for every extra filesystem reported by the
 * agent. Returns null when the latest stats record lists no extra filesystems.
 *
 * Fix: the previous version computed and rounded a `diskSize` value inside the
 * map callback but never used it (leftover from an older inline-chart version
 * that passed it as a domain) — the dead computation is removed.
 */
export function ExtraFsCharts({ systemData }: { systemData: SystemData }) {
	const { systemStats } = systemData.chartData

	// Extra filesystems are keyed by name in the latest record's `efs` map.
	const extraFs = systemStats?.at(-1)?.stats.efs

	if (!extraFs || Object.keys(extraFs).length === 0) {
		return null
	}

	return (
		<div className="grid xl:grid-cols-2 gap-4">
			{Object.keys(extraFs).map((extraFsName) => (
				// "contents" keeps the pair flowing in the parent grid.
				<div key={extraFsName} className="contents">
					<DiskUsageChart systemData={systemData} extraFsName={extraFsName} />
					<DiskIOChart systemData={systemData} extraFsName={extraFsName} />
				</div>
			))}
		</div>
	)
}
|
||||
|
||||
@@ -1,120 +0,0 @@
|
||||
import { t } from "@lingui/core/macro"
|
||||
import AreaChartDefault from "@/components/charts/area-chart"
|
||||
import { $userSettings } from "@/lib/stores"
|
||||
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||
import { ChartCard, SelectAvgMax } from "../chart-card"
|
||||
import { Unit } from "@/lib/enums"
|
||||
|
||||
export function ExtraFsCharts({
|
||||
chartData,
|
||||
grid,
|
||||
dataEmpty,
|
||||
showMax,
|
||||
isLongerChart,
|
||||
maxValues,
|
||||
systemStats,
|
||||
}: {
|
||||
chartData: ChartData
|
||||
grid: boolean
|
||||
dataEmpty: boolean
|
||||
showMax: boolean
|
||||
isLongerChart: boolean
|
||||
maxValues: boolean
|
||||
systemStats: SystemStatsRecord[]
|
||||
}) {
|
||||
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||
const userSettings = $userSettings.get()
|
||||
const extraFs = systemStats.at(-1)?.stats.efs
|
||||
if (!extraFs || Object.keys(extraFs).length === 0) {
|
||||
return null
|
||||
}
|
||||
|
||||
return (
|
||||
<div className="grid xl:grid-cols-2 gap-4">
|
||||
{Object.keys(extraFs).map((extraFsName) => {
|
||||
let diskSize = systemStats.at(-1)?.stats.efs?.[extraFsName].d ?? NaN
|
||||
// round to nearest GB
|
||||
if (diskSize >= 100) {
|
||||
diskSize = Math.round(diskSize)
|
||||
}
|
||||
return (
|
||||
<div key={extraFsName} className="contents">
|
||||
<ChartCard
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={`${extraFsName} ${t`Usage`}`}
|
||||
description={t`Disk usage of ${extraFsName}`}
|
||||
>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
domain={[0, diskSize]}
|
||||
tickFormatter={(val) => {
|
||||
const { value, unit } = formatBytes(val * 1024, false, Unit.Bytes, true)
|
||||
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||
}}
|
||||
contentFormatter={({ value }) => {
|
||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
||||
return `${decimalString(convertedValue)} ${unit}`
|
||||
}}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t`Disk Usage`,
|
||||
color: 4,
|
||||
opacity: 0.4,
|
||||
dataKey: ({ stats }) => stats?.efs?.[extraFsName]?.du,
|
||||
},
|
||||
]}
|
||||
></AreaChartDefault>
|
||||
</ChartCard>
|
||||
<ChartCard
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={`${extraFsName} I/O`}
|
||||
description={t`Throughput of ${extraFsName}`}
|
||||
cornerEl={maxValSelect}
|
||||
>
|
||||
<AreaChartDefault
|
||||
chartData={chartData}
|
||||
showTotal={true}
|
||||
dataPoints={[
|
||||
{
|
||||
label: t`Write`,
|
||||
dataKey: ({ stats }) => {
|
||||
if (showMax) {
|
||||
return stats?.efs?.[extraFsName]?.wbm || (stats?.efs?.[extraFsName]?.wm ?? 0) * 1024 * 1024
|
||||
}
|
||||
return stats?.efs?.[extraFsName]?.wb || (stats?.efs?.[extraFsName]?.w ?? 0) * 1024 * 1024
|
||||
},
|
||||
color: 3,
|
||||
opacity: 0.3,
|
||||
},
|
||||
{
|
||||
label: t`Read`,
|
||||
dataKey: ({ stats }) => {
|
||||
if (showMax) {
|
||||
return stats?.efs?.[extraFsName]?.rbm ?? (stats?.efs?.[extraFsName]?.rm ?? 0) * 1024 * 1024
|
||||
}
|
||||
return stats?.efs?.[extraFsName]?.rb ?? (stats?.efs?.[extraFsName]?.r ?? 0) * 1024 * 1024
|
||||
},
|
||||
color: 1,
|
||||
opacity: 0.3,
|
||||
},
|
||||
]}
|
||||
maxToggled={showMax}
|
||||
tickFormatter={(val) => {
|
||||
const { value, unit } = formatBytes(val, true, userSettings.unitDisk, false)
|
||||
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||
}}
|
||||
contentFormatter={({ value }) => {
|
||||
const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
|
||||
return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
|
||||
}}
|
||||
/>
|
||||
</ChartCard>
|
||||
</div>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
@@ -0,0 +1,237 @@
|
||||
import LineChartDefault from "@/components/charts/line-chart"
|
||||
import type { DataPoint } from "@/components/charts/line-chart"
|
||||
import { decimalString, formatMicroseconds, toFixedFloat } from "@/lib/utils"
|
||||
import { useLingui } from "@lingui/react/macro"
|
||||
import { ChartCard, FilterBar } from "../chart-card"
|
||||
import type { ChartData, NetworkProbeRecord, NetworkProbeStatsRecord } from "@/types"
|
||||
import { useMemo } from "react"
|
||||
import { atom } from "nanostores"
|
||||
import { useStore } from "@nanostores/react"
|
||||
|
||||
const $filter = atom("")
|
||||
|
||||
type ProbeChartProps = {
|
||||
probeStats: NetworkProbeStatsRecord[]
|
||||
grid?: boolean
|
||||
probes: NetworkProbeRecord[]
|
||||
chartData: ChartData
|
||||
empty: boolean
|
||||
showFilter?: boolean
|
||||
}
|
||||
|
||||
type ProbeChartBaseProps = ProbeChartProps & {
|
||||
valueIndex: number
|
||||
title: string
|
||||
description: string
|
||||
tickFormatter: (value: number) => string
|
||||
contentFormatter: ({ value }: { value: number | string }) => string | number
|
||||
domain?: [number | "auto", number | "auto"]
|
||||
}
|
||||
|
||||
/**
 * Shared line chart for per-probe series (response time, packet loss, ...).
 * Sorts probes by their 1h average response so order and colors stay stable,
 * applies the filter-bar terms to probe labels, and hides stats records that
 * contain none of the visible probes.
 */
function ProbeChart({
	probeStats,
	grid,
	probes,
	chartData,
	empty,
	valueIndex,
	title,
	description,
	tickFormatter,
	contentFormatter,
	domain,
	showFilter = probes.length > 1,
}: ProbeChartBaseProps) {
	const storedFilter = useStore($filter)
	// Only honor the shared filter store when the filter bar is shown here.
	const filter = showFilter ? storedFilter : ""

	const { dataPoints, visibleKeys } = useMemo(() => {
		// Highest 1h average response first.
		const sortedProbes = [...probes].sort((a, b) => b.resAvg1h - a.resAvg1h)
		const total = sortedProbes.length
		// Space-separated, case-insensitive filter terms; a label matching ANY
		// term is kept.
		const terms = filter
			? filter
					.toLowerCase()
					.split(" ")
					.filter((term) => term.length > 0)
			: []
		const showDots = chartData.chartTime === "1m"
		const points: DataPoint<NetworkProbeStatsRecord>[] = []
		const ids: string[] = []
		sortedProbes.forEach((probe, i) => {
			const label = probe.name || probe.target
			if (terms.length > 0 && !terms.some((term) => label.toLowerCase().includes(term))) {
				return
			}
			ids.push(probe.id)
			points.push({
				// Order/color use the pre-filter sorted index so they stay
				// stable while filtering.
				order: i,
				label,
				dataKey: (record: NetworkProbeStatsRecord) => record.stats?.[probe.id]?.[valueIndex] ?? "-",
				dot: showDots,
				// Few probes: use the palette directly; many: spread hues around the wheel.
				color: total <= 5 ? i + 1 : `hsl(${(i * 360) / total}, var(--chart-saturation), var(--chart-lightness))`,
			})
		})
		return { dataPoints: points, visibleKeys: ids }
	}, [probes, filter, valueIndex, chartData.chartTime])

	const filteredProbeStats = useMemo(() => {
		// No visible probes (filter matched nothing): fall back to all stats.
		if (!visibleKeys.length) return probeStats
		return probeStats.filter((record) => visibleKeys.some((id) => record.stats?.[id] != null))
	}, [probeStats, visibleKeys])

	const legend = dataPoints.length < 10 && showFilter

	return (
		<ChartCard
			legend={legend || !showFilter}
			cornerEl={showFilter ? <FilterBar store={$filter} /> : undefined}
			empty={empty}
			title={title}
			description={description}
			grid={grid}
		>
			<LineChartDefault
				truncate
				chartData={chartData}
				customData={filteredProbeStats}
				dataPoints={dataPoints}
				domain={domain ?? ["auto", "auto"]}
				connectNulls
				tickFormatter={tickFormatter}
				contentFormatter={contentFormatter}
				legend={legend}
				filter={filter}
			/>
		</ChartCard>
	)
}
|
||||
|
||||
/** Multi-probe average response time chart (value index 0 of each probe's stats tuple). */
export function ResponseChart({ probeStats, grid, probes, chartData, empty }: ProbeChartProps) {
	const { t } = useLingui()

	// Non-numeric values (the "-" placeholder) are passed through untouched.
	const formatValue = ({ value }: { value: number | string }) =>
		typeof value === "number" ? formatMicroseconds(value) : value

	return (
		<ProbeChart
			probeStats={probeStats}
			grid={grid}
			probes={probes}
			chartData={chartData}
			empty={empty}
			valueIndex={0}
			title={t`Response`}
			description={t`Average response time`}
			tickFormatter={(value) => formatMicroseconds(value, false)}
			contentFormatter={formatValue}
		/>
	)
}
|
||||
|
||||
interface AvgMinMaxResponseChartProps {
|
||||
probeStats: NetworkProbeStatsRecord[]
|
||||
probe: NetworkProbeRecord | null
|
||||
chartData: ChartData
|
||||
empty: boolean
|
||||
}
|
||||
|
||||
/**
 * Average / minimum / maximum response time chart for a single probe.
 * Collapses to the average-only series when min/max can't differ from avg
 * (1m resolution, or 1h resolution with a probe interval over a minute).
 *
 * Fix: `probe?.id` is now listed in the `dataPoints` useMemo dependencies —
 * `dataFn` closes over it, and omitting it left stale accessors (reading the
 * previous probe's stats) when the selected probe changed.
 */
export function AvgMinMaxResponseChart({ probeStats, probe, chartData, empty }: AvgMinMaxResponseChartProps) {
	const { t } = useLingui()

	const { chartTime } = chartData
	// Probes slower than one minute can't produce distinct min/max at 1h resolution.
	const hasLongInterval = (probe?.interval ?? 61) > 60

	// only one probe is relevant for this chart
	const dataPoints: DataPoint<NetworkProbeStatsRecord>[] = useMemo(() => {
		// stats tuple layout: [0] avg, [1] min, [2] max
		const dataFn = (index: number) => (record: NetworkProbeStatsRecord) =>
			record.stats?.[probe?.id ?? ""]?.[index] ?? "-"
		const avgPoint = {
			label: "Avg",
			dataKey: dataFn(0),
			color: 1,
			order: 0,
		}
		if (chartTime === "1m" || (hasLongInterval && chartTime === "1h")) {
			// avg, min, max are all the same for 1m interval, so just show avg
			return [avgPoint]
		}
		return [
			{
				label: "Max",
				dataKey: dataFn(2),
				color: 3,
				order: 0,
			},
			avgPoint,
			{
				label: "Min",
				dataKey: dataFn(1),
				color: 2,
				order: 2,
			},
		]
	}, [chartTime, hasLongInterval, probe?.id])

	// Only records that actually contain this probe's stats.
	const data = useMemo(() => {
		if (!probe) return []
		return probeStats.filter((record) => record.stats && probe.id in record.stats)
	}, [probe, probeStats])

	const legend = dataPoints.length > 1

	return (
		<ChartCard
			legend={true}
			empty={empty}
			title={t`Response`}
			description={t`Average, minimum, and maximum response time`}
			grid={false}
		>
			<LineChartDefault
				truncate
				chartData={chartData}
				customData={data}
				dataPoints={dataPoints}
				domain={["auto", "auto"]}
				connectNulls
				legend={legend}
				tickFormatter={(value) => formatMicroseconds(value, false)}
				contentFormatter={({ value }) => {
					// Non-numeric values (the "-" placeholder) pass through untouched.
					if (typeof value !== "number") {
						return value
					}
					return formatMicroseconds(value)
				}}
			/>
		</ChartCard>
	)
}
|
||||
|
||||
/** Multi-probe packet loss chart (value index 3), pinned to a 0-100% axis. */
export function LossChart({ probeStats, grid, probes, chartData, empty }: ProbeChartProps) {
	const { t } = useLingui()

	// Non-numeric values (the "-" placeholder) are passed through untouched.
	const formatValue = ({ value }: { value: number | string }) =>
		typeof value === "number" ? `${decimalString(value, 2)}%` : value

	return (
		<ProbeChart
			probeStats={probeStats}
			grid={grid}
			probes={probes}
			chartData={chartData}
			empty={empty}
			valueIndex={3}
			title={t`Loss`}
			description={t`Packet loss (%)`}
			domain={[0, 100]}
			tickFormatter={(value) => `${toFixedFloat(value, value >= 10 ? 0 : 1)}%`}
			contentFormatter={formatValue}
		/>
	)
}
|
||||
265
internal/site/src/components/routes/system/disk-io-sheet.tsx
Normal file
265
internal/site/src/components/routes/system/disk-io-sheet.tsx
Normal file
@@ -0,0 +1,265 @@
|
||||
import { t } from "@lingui/core/macro"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { MoreHorizontalIcon } from "lucide-react"
|
||||
import { memo, useRef, useState } from "react"
|
||||
import AreaChartDefault from "@/components/charts/area-chart"
|
||||
import ChartTimeSelect from "@/components/charts/chart-time-select"
|
||||
import { Button } from "@/components/ui/button"
|
||||
import { Sheet, SheetContent, SheetTrigger } from "@/components/ui/sheet"
|
||||
import { DialogTitle } from "@/components/ui/dialog"
|
||||
import { $userSettings } from "@/lib/stores"
|
||||
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||
import { ChartCard, SelectAvgMax } from "@/components/routes/system/chart-card"
|
||||
import type { SystemData } from "@/components/routes/system/use-system-data"
|
||||
import { diskDataFns, DiskUtilizationChart } from "./charts/disk-charts"
|
||||
import { pinnedAxisDomain } from "@/components/ui/chart"
|
||||
|
||||
/**
 * Slide-out sheet with detailed disk I/O charts: throughput, utilization,
 * I/O time, queue depth, and await. Content is lazily mounted on first open
 * and kept mounted afterwards; the extended charts render only when the agent
 * actually reported their metric in the `dios` tuple.
 *
 * Fix: the four copies of near-identical avg/max x root/extra-fs accessor
 * selection are collapsed into one generic `pick` helper.
 */
export default memo(function DiskIOSheet({
	systemData,
	extraFsName,
	title,
	description,
}: {
	systemData: SystemData
	extraFsName?: string
	title: string
	description: string
}) {
	const { chartData, grid, dataEmpty, showMax, maxValues, isLongerChart } = systemData
	const userSettings = useStore($userSettings)

	const [sheetOpen, setSheetOpen] = useState(false)

	// Once opened, keep the content mounted so charts don't re-fetch/re-render
	// from scratch on every open.
	const hasOpened = useRef(false)
	if (sheetOpen && !hasOpened.current) {
		hasOpened.current = true
	}

	// Resolve a stat accessor: max variant when toggled, extra-fs variant when
	// this sheet targets an extra filesystem.
	function pick<T>(base: T, baseMax: T, extra: (fs: string) => T, extraMax: (fs: string) => T): T {
		if (extraFsName) {
			return showMax ? extraMax(extraFsName) : extra(extraFsName)
		}
		return showMax ? baseMax : base
	}

	// throughput
	const readFn = pick(diskDataFns.read, diskDataFns.readMax, diskDataFns.extraRead, diskDataFns.extraReadMax)
	const writeFn = pick(diskDataFns.write, diskDataFns.writeMax, diskDataFns.extraWrite, diskDataFns.extraWriteMax)

	// read and write time
	const readTimeFn = pick(
		diskDataFns.readTime,
		diskDataFns.readTimeMax,
		diskDataFns.extraReadTime,
		diskDataFns.extraReadTimeMax
	)
	const writeTimeFn = pick(
		diskDataFns.writeTime,
		diskDataFns.writeTimeMax,
		diskDataFns.extraWriteTime,
		diskDataFns.extraWriteTimeMax
	)

	// I/O await
	const rAwaitFn = pick(diskDataFns.rAwait, diskDataFns.rAwaitMax, diskDataFns.extraRAwait, diskDataFns.extraRAwaitMax)
	const wAwaitFn = pick(diskDataFns.wAwait, diskDataFns.wAwaitMax, diskDataFns.extraWAwait, diskDataFns.extraWAwaitMax)

	// weighted I/O (average queue depth)
	const weightedIOFn = pick(
		diskDataFns.weightedIO,
		diskDataFns.weightedIOMax,
		diskDataFns.extraWeightedIO,
		diskDataFns.extraWeightedIOMax
	)

	// Check availability of the extended I/O metrics; a chart is shown only if
	// at least one record reported a non-zero value at its dios index.
	let hasUtilization = false
	let hasAwait = false
	let hasWeightedIO = false
	for (const record of chartData.systemStats ?? []) {
		const dios = record.stats?.dios
		if ((dios?.at(2) ?? 0) > 0) hasUtilization = true
		if ((dios?.at(3) ?? 0) > 0) hasAwait = true
		if ((dios?.at(5) ?? 0) > 0) hasWeightedIO = true
		if (hasUtilization && hasAwait && hasWeightedIO) {
			break
		}
	}

	const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null

	// Shared syncId keeps tooltips/cursors in sync across all charts in the sheet.
	const chartProps = { syncId: "io" }

	const queueDepthTranslation = t({ message: "Queue Depth", context: "Disk I/O average queue depth" })

	return (
		<Sheet open={sheetOpen} onOpenChange={setSheetOpen}>
			<DialogTitle className="sr-only">{title}</DialogTitle>
			<SheetTrigger asChild>
				<Button
					title={t`View more`}
					variant="outline"
					size="icon"
					className="shrink-0 max-sm:absolute max-sm:top-0 max-sm:end-0"
				>
					<MoreHorizontalIcon />
				</Button>
			</SheetTrigger>
			{hasOpened.current && (
				<SheetContent aria-describedby={undefined} className="overflow-auto w-200 !max-w-full p-4 sm:p-6">
					<ChartTimeSelect className="w-[calc(100%-2em)] bg-card" agentVersion={chartData.agentVersion} />

					<ChartCard
						className="min-h-auto"
						empty={dataEmpty}
						grid={grid}
						title={title}
						description={description}
						cornerEl={maxValSelect}
					>
						<AreaChartDefault
							chartData={chartData}
							maxToggled={showMax}
							chartProps={chartProps}
							showTotal={true}
							domain={pinnedAxisDomain()}
							itemSorter={(a, b) => a.order - b.order}
							reverseStackOrder={true}
							dataPoints={[
								{
									label: t`Write`,
									dataKey: writeFn,
									color: 3,
									opacity: 0.4,
									stackId: 0,
									order: 0,
								},
								{
									label: t`Read`,
									dataKey: readFn,
									color: 1,
									opacity: 0.4,
									stackId: 0,
									order: 1,
								},
							]}
							tickFormatter={(val) => {
								const { value, unit } = formatBytes(val, true, userSettings.unitDisk, false)
								return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
							}}
							contentFormatter={({ value }) => {
								const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
								return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
							}}
						/>
					</ChartCard>

					{hasUtilization && <DiskUtilizationChart systemData={systemData} extraFsName={extraFsName} />}

					<ChartCard
						empty={dataEmpty}
						grid={grid}
						title={t({ message: "I/O Time", context: "Disk I/O total time spent on read/write" })}
						description={t({
							message: "Total time spent on read/write (can exceed 100%)",
							context: "Disk I/O",
						})}
						className="min-h-auto"
						cornerEl={maxValSelect}
					>
						<AreaChartDefault
							chartData={chartData}
							domain={pinnedAxisDomain()}
							tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
							contentFormatter={({ value }) => `${decimalString(value)}%`}
							maxToggled={showMax}
							chartProps={chartProps}
							showTotal={true}
							itemSorter={(a, b) => a.order - b.order}
							reverseStackOrder={true}
							dataPoints={[
								{
									label: t`Write`,
									dataKey: writeTimeFn,
									color: 3,
									opacity: 0.4,
									stackId: 0,
									order: 0,
								},
								{
									label: t`Read`,
									dataKey: readTimeFn,
									color: 1,
									opacity: 0.4,
									stackId: 0,
									order: 1,
								},
							]}
						/>
					</ChartCard>

					{hasWeightedIO && (
						<ChartCard
							empty={dataEmpty}
							grid={grid}
							title={queueDepthTranslation}
							description={t`Average number of I/O operations waiting to be serviced`}
							className="min-h-auto"
							cornerEl={maxValSelect}
						>
							<AreaChartDefault
								chartData={chartData}
								domain={pinnedAxisDomain()}
								tickFormatter={(val) => `${toFixedFloat(val, 2)}`}
								contentFormatter={({ value }) => decimalString(value, value < 10 ? 3 : 2)}
								maxToggled={showMax}
								chartProps={chartProps}
								dataPoints={[
									{
										label: queueDepthTranslation,
										dataKey: weightedIOFn,
										color: 1,
										opacity: 0.4,
									},
								]}
							/>
						</ChartCard>
					)}

					{hasAwait && (
						<ChartCard
							empty={dataEmpty}
							grid={grid}
							title={t({ message: "I/O Await", context: "Disk I/O average operation time (iostat await)" })}
							description={t({
								message: "Average queue to completion time per operation",
								context: "Disk I/O average operation time (iostat await)",
							})}
							className="min-h-auto"
							cornerEl={maxValSelect}
						>
							<AreaChartDefault
								chartData={chartData}
								domain={pinnedAxisDomain()}
								tickFormatter={(val) => `${toFixedFloat(val, 2)} ms`}
								contentFormatter={({ value }) => `${decimalString(value)} ms`}
								maxToggled={showMax}
								chartProps={chartProps}
								dataPoints={[
									{
										label: t`Write`,
										dataKey: wAwaitFn,
										color: 3,
										opacity: 0.3,
									},
									{
										label: t`Read`,
										dataKey: rAwaitFn,
										color: 1,
										opacity: 0.3,
									},
								]}
							/>
						</ChartCard>
					)}
				</SheetContent>
			)}
		</Sheet>
	)
})
|
||||
@@ -1,6 +1,11 @@
|
||||
import { lazy } from "react"
|
||||
import { useIntersectionObserver } from "@/lib/use-intersection-observer"
|
||||
import { cn } from "@/lib/utils"
|
||||
import { ResponseChart, LossChart } from "./charts/probes-charts"
|
||||
import type { SystemData } from "./use-system-data"
|
||||
import { $chartTime } from "@/lib/stores"
|
||||
import { useStore } from "@nanostores/react"
|
||||
import { useNetworkProbes, useNetworkProbeStats } from "@/lib/use-network-probes"
|
||||
|
||||
const ContainersTable = lazy(() => import("../../containers-table/containers-table"))
|
||||
|
||||
@@ -34,3 +39,47 @@ export function LazySystemdTable({ systemId }: { systemId: string }) {
|
||||
</div>
|
||||
)
|
||||
}
|
||||
|
||||
const NetworkProbesTable = lazy(() => import("@/components/network-probes-table/network-probes-table"))
|
||||
|
||||
/**
 * Defers mounting the network probes table (and its charts) until the
 * placeholder scrolls into view, then lets the content participate in the
 * parent grid via the "contents" display class.
 */
export function LazyNetworkProbesTable({ systemId, systemData }: { systemId: string; systemData: SystemData }) {
	const { isIntersecting, ref } = useIntersectionObserver()
	const content = isIntersecting ? <ProbesTable systemId={systemId} systemData={systemData} /> : null

	return (
		<div ref={ref} className={cn(isIntersecting && "contents")}>
			{content}
		</div>
	)
}
|
||||
|
||||
/**
 * Network probes table plus the response/loss chart pair. The charts render
 * only when chart data exists and at least one probe is configured; they show
 * their empty state until probe stats arrive.
 */
function ProbesTable({ systemId, systemData }: { systemId: string; systemData: SystemData }) {
	const { grid, chartData } = systemData ?? {}
	const chartTime = useStore($chartTime)

	const probes = useNetworkProbes({ systemId })
	const probeStats = useNetworkProbeStats({ systemId, chartTime })

	const showCharts = !!chartData && !!probes.length
	const chartsEmpty = !probeStats.length

	return (
		<>
			<NetworkProbesTable systemId={systemId} probes={probes} />
			{showCharts && (
				<div className="grid xl:grid-cols-2 gap-4">
					<ResponseChart
						probeStats={probeStats}
						grid={grid}
						probes={probes}
						chartData={chartData}
						empty={chartsEmpty}
					/>
					<LossChart
						probeStats={probeStats}
						grid={grid}
						probes={probes}
						chartData={chartData}
						empty={chartsEmpty}
					/>
				</div>
			)}
		</>
	)
}
|
||||
|
||||
@@ -43,7 +43,6 @@ import {
|
||||
toFixedFloat,
|
||||
formatTemperature,
|
||||
cn,
|
||||
getVisualStringWidth,
|
||||
secondsToString,
|
||||
hourWithSeconds,
|
||||
formatShortDate,
|
||||
@@ -106,9 +105,9 @@ function formatCapacity(bytes: number): string {
|
||||
const SMART_DEVICE_FIELDS = "id,system,name,model,state,capacity,temp,type,hours,cycles,updated"
|
||||
|
||||
export const createColumns = (
|
||||
longestName: number,
|
||||
longestModel: number,
|
||||
longestDevice: number
|
||||
longestName: string,
|
||||
longestModel: string,
|
||||
longestDevice: string
|
||||
): ColumnDef<SmartDeviceRecord>[] => [
|
||||
{
|
||||
id: "system",
|
||||
@@ -123,8 +122,11 @@ export const createColumns = (
|
||||
cell: ({ getValue }) => {
|
||||
const allSystems = useStore($allSystemsById)
|
||||
return (
|
||||
<div className="ms-1.5 max-w-40 block truncate" style={{ width: `${longestName / 1.05}ch` }}>
|
||||
{allSystems[getValue() as string]?.name ?? ""}
|
||||
<div className="ms-1.5 relative w-fit max-w-44">
|
||||
<span className="invisible block whitespace-nowrap" aria-hidden="true">
|
||||
{longestName}
|
||||
</span>
|
||||
<span className="absolute inset-0 truncate">{allSystems[getValue() as string]?.name ?? ""}</span>
|
||||
</div>
|
||||
)
|
||||
},
|
||||
@@ -134,12 +136,11 @@ export const createColumns = (
|
||||
sortingFn: (a, b) => a.original.name.localeCompare(b.original.name),
|
||||
header: ({ column }) => <HeaderButton column={column} name={t`Device`} Icon={HardDrive} />,
|
||||
cell: ({ getValue }) => (
|
||||
<div
|
||||
className="font-medium max-w-40 truncate ms-1"
|
||||
title={getValue() as string}
|
||||
style={{ width: `${longestDevice / 1.05}ch` }}
|
||||
>
|
||||
{getValue() as string}
|
||||
<div className="font-medium ms-1 relative w-fit max-w-44" title={getValue() as string}>
|
||||
<span className="invisible block whitespace-nowrap" aria-hidden="true">
|
||||
{longestDevice}
|
||||
</span>
|
||||
<span className="absolute inset-0 truncate">{getValue() as string}</span>
|
||||
</div>
|
||||
),
|
||||
},
|
||||
@@ -150,12 +151,11 @@ export const createColumns = (
|
||||
<HeaderButton column={column} name={t({ message: "Model", comment: "Device model" })} Icon={Box} />
|
||||
),
|
||||
cell: ({ getValue }) => (
|
||||
<div
|
||||
className="max-w-48 truncate ms-1"
|
||||
title={getValue() as string}
|
||||
style={{ width: `${longestModel / 1.05}ch` }}
|
||||
>
|
||||
{getValue() as string}
|
||||
<div className="ms-1 relative w-fit max-w-44" title={getValue() as string}>
|
||||
<span className="invisible block whitespace-nowrap" aria-hidden="true">
|
||||
{longestModel}
|
||||
</span>
|
||||
<span className="absolute inset-0 truncate">{getValue() as string}</span>
|
||||
</div>
|
||||
),
|
||||
},
|
||||
@@ -309,7 +309,7 @@ export default function DisksTable({ systemId }: { systemId?: string }) {
|
||||
|
||||
// Calculate the right width for the columns based on the longest strings among the displayed devices
|
||||
const { longestName, longestModel, longestDevice } = useMemo(() => {
|
||||
const result = { longestName: 0, longestModel: 0, longestDevice: 0 }
|
||||
const result = { longestName: "", longestModel: "", longestDevice: "" }
|
||||
if (!smartDevices || Object.keys(allSystems).length === 0) {
|
||||
return result
|
||||
}
|
||||
@@ -318,10 +318,16 @@ export default function DisksTable({ systemId }: { systemId?: string }) {
|
||||
if (!systemId && !seenSystems.has(device.system)) {
|
||||
seenSystems.add(device.system)
|
||||
const name = allSystems[device.system]?.name ?? ""
|
||||
result.longestName = Math.max(result.longestName, getVisualStringWidth(name))
|
||||
if (name.length > result.longestName.length) {
|
||||
result.longestName = name
|
||||
}
|
||||
}
|
||||
if ((device.model ?? "").length > result.longestModel.length) {
|
||||
result.longestModel = device.model ?? ""
|
||||
}
|
||||
if ((device.name ?? "").length > result.longestDevice.length) {
|
||||
result.longestDevice = device.name ?? ""
|
||||
}
|
||||
result.longestModel = Math.max(result.longestModel, getVisualStringWidth(device.model ?? ""))
|
||||
result.longestDevice = Math.max(result.longestDevice, getVisualStringWidth(device.name ?? ""))
|
||||
}
|
||||
return result
|
||||
}, [smartDevices, systemId, allSystems])
|
||||
|
||||
@@ -26,7 +26,9 @@ import type {
|
||||
SystemStatsRecord,
|
||||
} from "@/types"
|
||||
import { $router, navigate } from "../../router"
|
||||
import { appendData, cache, getStats, getTimeData, makeContainerData, makeContainerPoint } from "./chart-data"
|
||||
import { appendData, cache, getStats, makeContainerData, makeContainerPoint } from "./chart-data"
|
||||
|
||||
export type SystemData = ReturnType<typeof useSystemData>
|
||||
|
||||
export function useSystemData(id: string) {
|
||||
const direction = useStore($direction)
|
||||
@@ -149,16 +151,11 @@ export function useSystemData(id: string) {
|
||||
const agentVersion = useMemo(() => parseSemVer(system?.info?.v), [system?.info?.v])
|
||||
|
||||
const chartData: ChartData = useMemo(() => {
|
||||
const lastCreated = Math.max(
|
||||
(systemStats.at(-1)?.created as number) ?? 0,
|
||||
(containerData.at(-1)?.created as number) ?? 0
|
||||
)
|
||||
return {
|
||||
systemStats,
|
||||
containerData,
|
||||
chartTime,
|
||||
orientation: direction === "rtl" ? "right" : "left",
|
||||
...getTimeData(chartTime, lastCreated),
|
||||
agentVersion,
|
||||
}
|
||||
}, [systemStats, containerData, direction])
|
||||
@@ -190,7 +187,7 @@ export function useSystemData(id: string) {
|
||||
|
||||
// Skip the fetch if the latest cached point is recent enough that no new point is expected yet
|
||||
const lastCreated = cachedSystemStats.at(-1)?.created as number | undefined
|
||||
if (lastCreated && Date.now() - lastCreated < expectedInterval) {
|
||||
if (lastCreated && Date.now() - lastCreated < expectedInterval * 0.9) {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
@@ -198,8 +195,8 @@ export function useSystemData(id: string) {
|
||||
}
|
||||
|
||||
Promise.allSettled([
|
||||
getStats<SystemStatsRecord>("system_stats", systemId, chartTime),
|
||||
getStats<ContainerStatsRecord>("container_stats", systemId, chartTime),
|
||||
getStats<SystemStatsRecord>("system_stats", systemId, chartTime, cachedSystemStats),
|
||||
getStats<ContainerStatsRecord>("container_stats", systemId, chartTime, cachedContainerData),
|
||||
]).then(([systemStats, containerStats]) => {
|
||||
// If another request has been made since this one, ignore the results
|
||||
if (requestId !== statsRequestId.current) {
|
||||
@@ -291,7 +288,7 @@ export function useSystemData(id: string) {
|
||||
// derived values
|
||||
const isLongerChart = !["1m", "1h"].includes(chartTime)
|
||||
const showMax = maxValues && isLongerChart
|
||||
const dataEmpty = !chartLoading && chartData.systemStats.length === 0
|
||||
const dataEmpty = !chartLoading && chartData.systemStats?.length === 0
|
||||
const lastGpus = systemStats.at(-1)?.stats?.g
|
||||
const isPodman = details?.podman ?? system.info?.p ?? false
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ import { memo, useMemo, useRef, useState } from "react"
|
||||
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip"
|
||||
import { isReadOnlyUser, pb } from "@/lib/api"
|
||||
import { BatteryState, ConnectionType, connectionTypeLabels, MeterState, SystemStatus } from "@/lib/enums"
|
||||
import { $longestSystemNameLen, $userSettings } from "@/lib/stores"
|
||||
import { $longestSystemName, $userSettings } from "@/lib/stores"
|
||||
import {
|
||||
cn,
|
||||
copyToClipboard,
|
||||
@@ -110,20 +110,23 @@ export function SystemsTableColumns(viewMode: "table" | "grid"): ColumnDef<Syste
|
||||
|
||||
// match filter value against name or translated status
|
||||
return (row, _, newFilterInput) => {
|
||||
const { name, status } = row.original
|
||||
const sys = row.original
|
||||
if (sys.host.includes(newFilterInput) || sys.info.v?.includes(newFilterInput)) {
|
||||
return true
|
||||
}
|
||||
if (newFilterInput !== filterInput) {
|
||||
filterInput = newFilterInput
|
||||
filterInputLower = newFilterInput.toLowerCase()
|
||||
}
|
||||
let nameLower = nameCache.get(name)
|
||||
let nameLower = nameCache.get(sys.name)
|
||||
if (nameLower === undefined) {
|
||||
nameLower = name.toLowerCase()
|
||||
nameCache.set(name, nameLower)
|
||||
nameLower = sys.name.toLowerCase()
|
||||
nameCache.set(sys.name, nameLower)
|
||||
}
|
||||
if (nameLower.includes(filterInputLower)) {
|
||||
return true
|
||||
}
|
||||
const statusLower = statusTranslations[status as keyof typeof statusTranslations]
|
||||
const statusLower = statusTranslations[sys.status as keyof typeof statusTranslations]
|
||||
return statusLower?.includes(filterInputLower) || false
|
||||
}
|
||||
})(),
|
||||
@@ -132,7 +135,7 @@ export function SystemsTableColumns(viewMode: "table" | "grid"): ColumnDef<Syste
|
||||
Icon: ServerIcon,
|
||||
cell: (info) => {
|
||||
const { name, id } = info.row.original
|
||||
const longestName = useStore($longestSystemNameLen)
|
||||
const longestName = useStore($longestSystemName)
|
||||
const linkUrl = getPagePath($router, "system", { id })
|
||||
|
||||
return (
|
||||
@@ -142,8 +145,7 @@ export function SystemsTableColumns(viewMode: "table" | "grid"): ColumnDef<Syste
|
||||
<Link
|
||||
href={linkUrl}
|
||||
tabIndex={-1}
|
||||
className="truncate z-10 relative"
|
||||
style={{ width: `${longestName / 1.05}ch` }}
|
||||
className="relative w-fit max-w-48 z-10"
|
||||
onMouseEnter={(e) => {
|
||||
// set title on hover if text is truncated to show full name
|
||||
const a = e.currentTarget
|
||||
@@ -154,7 +156,10 @@ export function SystemsTableColumns(viewMode: "table" | "grid"): ColumnDef<Syste
|
||||
}
|
||||
}}
|
||||
>
|
||||
{name}
|
||||
<span className="invisible block" aria-hidden="true">
|
||||
{longestName}
|
||||
</span>
|
||||
<span className="absolute inset-0 truncate">{name}</span>
|
||||
</Link>
|
||||
</span>
|
||||
<Link href={linkUrl} className="inset-0 absolute size-full" aria-label={name}></Link>
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import type { JSX } from "react"
|
||||
import { useLingui } from "@lingui/react/macro"
|
||||
import * as React from "react"
|
||||
import * as RechartsPrimitive from "recharts"
|
||||
import { chartTimeData, cn } from "@/lib/utils"
|
||||
import type { ChartData } from "@/types"
|
||||
import type { ChartTimes } from "@/types"
|
||||
import { Separator } from "./separator"
|
||||
import { AxisDomain } from "recharts/types/util/types"
|
||||
import type { AxisDomain } from "recharts/types/util/types"
|
||||
import { timeTicks } from "d3-time"
|
||||
|
||||
// Format: { THEME_NAME: CSS_SELECTOR }
|
||||
const THEMES = { light: "", dark: ".dark" } as const
|
||||
@@ -101,7 +101,7 @@ const ChartTooltipContent = React.forwardRef<
|
||||
labelKey?: string
|
||||
unit?: string
|
||||
filter?: string
|
||||
contentFormatter?: (item: any, key: string) => React.ReactNode | string
|
||||
contentFormatter?: (item: unknown, key: string) => React.ReactNode | string
|
||||
truncate?: boolean
|
||||
showTotal?: boolean
|
||||
totalLabel?: React.ReactNode
|
||||
@@ -175,7 +175,13 @@ const ChartTooltipContent = React.forwardRef<
|
||||
}
|
||||
|
||||
const totalKey = "__total__"
|
||||
const totalItem: any = {
|
||||
const totalItem: {
|
||||
value: number
|
||||
name: string
|
||||
dataKey: string
|
||||
color: string | undefined
|
||||
payload?: unknown
|
||||
} = {
|
||||
value: totalValue,
|
||||
name: totalName,
|
||||
dataKey: totalKey,
|
||||
@@ -400,26 +406,57 @@ function getPayloadConfigFromPayload(config: ChartConfig, payload: unknown, key:
|
||||
return configLabelKey in config ? config[configLabelKey] : config[key as keyof typeof config]
|
||||
}
|
||||
|
||||
let cachedAxis: JSX.Element
|
||||
const xAxis = ({ domain, ticks, chartTime }: ChartData) => {
|
||||
if (cachedAxis && domain[0] === cachedAxis.props.domain[0]) {
|
||||
return cachedAxis
|
||||
interface XAxisData {
|
||||
el: React.ReactElement
|
||||
domain: [number, number]
|
||||
}
|
||||
|
||||
const xAxisCache = new Map<ChartTimes, XAxisData>()
|
||||
|
||||
function createXAxisData(chartTime: ChartTimes): XAxisData {
|
||||
// console.log("Creating XAxis for", chartTime, new Date())
|
||||
const axisEndTime = Date.now() + 500
|
||||
const axisEndDate = new Date(axisEndTime)
|
||||
const startTime = chartTimeData[chartTime].getOffset(axisEndDate)
|
||||
const ticks = timeTicks(startTime, axisEndDate, chartTimeData[chartTime].ticks ?? 12).map((date) => date.getTime())
|
||||
const domain: [number, number] = [startTime.getTime(), axisEndTime]
|
||||
|
||||
return {
|
||||
domain,
|
||||
el: (
|
||||
<RechartsPrimitive.XAxis
|
||||
dataKey="created"
|
||||
domain={domain}
|
||||
ticks={ticks}
|
||||
allowDataOverflow
|
||||
type="number"
|
||||
scale="time"
|
||||
minTickGap={12}
|
||||
tickMargin={8}
|
||||
axisLine={false}
|
||||
tickFormatter={chartTimeData[chartTime].format}
|
||||
/>
|
||||
),
|
||||
}
|
||||
cachedAxis = (
|
||||
<RechartsPrimitive.XAxis
|
||||
dataKey="created"
|
||||
domain={domain}
|
||||
ticks={ticks}
|
||||
allowDataOverflow
|
||||
type="number"
|
||||
scale="time"
|
||||
minTickGap={12}
|
||||
tickMargin={8}
|
||||
axisLine={false}
|
||||
tickFormatter={chartTimeData[chartTime].format}
|
||||
/>
|
||||
)
|
||||
return cachedAxis
|
||||
}
|
||||
|
||||
function xAxis(chartTime: ChartTimes, lastCreated: number) {
|
||||
if (!lastCreated) {
|
||||
return null
|
||||
}
|
||||
const cachedAxis = xAxisCache.get(chartTime)
|
||||
|
||||
const expectedInterval = chartTimeData[chartTime].expectedInterval
|
||||
const conservativeEndTime = Date.now() - expectedInterval / 2
|
||||
const axisEndTime = Math.max(lastCreated, conservativeEndTime)
|
||||
|
||||
if (cachedAxis && axisEndTime < cachedAxis.domain[1]) {
|
||||
return cachedAxis.el
|
||||
}
|
||||
|
||||
const axisData = createXAxisData(chartTime)
|
||||
xAxisCache.set(chartTime, axisData)
|
||||
return axisData.el
|
||||
}
|
||||
|
||||
export {
|
||||
|
||||
@@ -41,7 +41,7 @@ const TableRow = React.forwardRef<HTMLTableRowElement, React.HTMLAttributes<HTML
|
||||
<tr
|
||||
ref={ref}
|
||||
className={cn(
|
||||
"border-b border-border/60 hover:bg-muted/40 dark:hover:bg-muted/20 data-[state=selected]:bg-muted!",
|
||||
"border-b border-border/60 hover:bg-muted/40 dark:hover:bg-muted/20 data-[state=selected]:bg-muted/40!",
|
||||
className
|
||||
)}
|
||||
{...props}
|
||||
|
||||
@@ -54,8 +54,11 @@ export async function updateUserSettings() {
|
||||
}
|
||||
}
|
||||
|
||||
export function getPbTimestamp(timeString: ChartTimes, d?: Date) {
|
||||
export function getPbTimestamp(timeString: ChartTimes, d?: Date, createdIsNumber?: boolean) {
|
||||
d ||= chartTimeData[timeString].getOffset(new Date())
|
||||
if (createdIsNumber) {
|
||||
return d.getTime()
|
||||
}
|
||||
const year = d.getUTCFullYear()
|
||||
const month = String(d.getUTCMonth() + 1).padStart(2, "0")
|
||||
const day = String(d.getUTCDate()).padStart(2, "0")
|
||||
|
||||
@@ -70,7 +70,5 @@ export const $copyContent = atom("")
|
||||
/** Direction for localization */
|
||||
export const $direction = atom<"ltr" | "rtl">("ltr")
|
||||
|
||||
/** Longest system name length. Used to set table column width. I know this
|
||||
* is stupid but the table is virtualized and I know this will work.
|
||||
*/
|
||||
export const $longestSystemNameLen = atom(8)
|
||||
/** Longest system name string. Used to reserve width in virtualized tables. */
|
||||
export const $longestSystemName = atom("")
|
||||
|
||||
@@ -5,20 +5,17 @@ import {
|
||||
$allSystemsById,
|
||||
$allSystemsByName,
|
||||
$downSystems,
|
||||
$longestSystemNameLen,
|
||||
$longestSystemName,
|
||||
$pausedSystems,
|
||||
$upSystems,
|
||||
} from "@/lib/stores"
|
||||
import { getVisualStringWidth, updateFavicon } from "@/lib/utils"
|
||||
import { isVisuallyLonger, updateFavicon } from "@/lib/utils"
|
||||
import type { SystemRecord } from "@/types"
|
||||
import { SystemStatus } from "./enums"
|
||||
|
||||
const COLLECTION = pb.collection<SystemRecord>("systems")
|
||||
const FIELDS_DEFAULT = "id,name,host,port,info,status"
|
||||
|
||||
/** Maximum system name length for display purposes */
|
||||
const MAX_SYSTEM_NAME_LENGTH = 22
|
||||
|
||||
let initialized = false
|
||||
// biome-ignore lint/suspicious/noConfusingVoidType: typescript rocks
|
||||
let unsub: (() => void) | undefined | void
|
||||
@@ -44,7 +41,7 @@ export function init() {
|
||||
}
|
||||
|
||||
if (!newSystem) {
|
||||
onSystemsChanged(newSystems, undefined)
|
||||
onSystemsChanged(newSystems, newSystem, oldSystem)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -68,20 +65,28 @@ export function init() {
|
||||
}
|
||||
|
||||
// run things that need to be done when systems change
|
||||
onSystemsChanged(newSystems, newSystem)
|
||||
onSystemsChanged(newSystems, newSystem, oldSystem)
|
||||
})
|
||||
}
|
||||
|
||||
/** Update the longest system name length and favicon based on system status */
|
||||
function onSystemsChanged(_: Record<string, SystemRecord>, changedSystem: SystemRecord | undefined) {
|
||||
/** Update the longest system name string and favicon based on system status */
|
||||
function onSystemsChanged(systems: Record<string, SystemRecord>, newSystem?: SystemRecord, oldSystem?: SystemRecord) {
|
||||
const downSystemsStore = $downSystems.get()
|
||||
const downSystems = Object.values(downSystemsStore)
|
||||
|
||||
// Update longest system name length
|
||||
const longestName = $longestSystemNameLen.get()
|
||||
const nameLen = Math.min(MAX_SYSTEM_NAME_LENGTH, getVisualStringWidth(changedSystem?.name || ""))
|
||||
if (nameLen > longestName) {
|
||||
$longestSystemNameLen.set(nameLen)
|
||||
// if the old system's old name was the longest, we need to find the new longest name
|
||||
// otherwise, if the changed system's new name is longer than the current longest, update it
|
||||
const longestName = $longestSystemName.get()
|
||||
if (oldSystem?.name === longestName && oldSystem.name !== newSystem?.name) {
|
||||
let newLongest = ""
|
||||
for (const id in systems) {
|
||||
if (isVisuallyLonger(systems[id].name, newLongest)) {
|
||||
newLongest = systems[id].name
|
||||
}
|
||||
}
|
||||
$longestSystemName.set(newLongest)
|
||||
} else if (newSystem && newSystem.name !== longestName && isVisuallyLonger(newSystem.name, longestName)) {
|
||||
$longestSystemName.set(newSystem.name)
|
||||
}
|
||||
|
||||
updateFavicon(downSystems.length)
|
||||
|
||||
289
internal/site/src/lib/use-network-probes.ts
Normal file
289
internal/site/src/lib/use-network-probes.ts
Normal file
@@ -0,0 +1,289 @@
|
||||
import { chartTimeData } from "@/lib/utils"
|
||||
import type { ChartTimes, NetworkProbeRecord, NetworkProbeStatsRecord } from "@/types"
|
||||
import { useEffect, useRef, useState } from "react"
|
||||
import { getStats, appendData } from "@/components/routes/system/chart-data"
|
||||
import { pb } from "@/lib/api"
|
||||
import { toast } from "@/components/ui/use-toast"
|
||||
import type { RecordListOptions, RecordSubscription } from "pocketbase"
|
||||
|
||||
const cache = new Map<string, NetworkProbeStatsRecord[]>()
|
||||
|
||||
function getCacheValue(systemId: string, chartTime: ChartTimes | "rt") {
|
||||
return cache.get(`${systemId}${chartTime}`) || []
|
||||
}
|
||||
|
||||
function appendCacheValue(
|
||||
systemId: string,
|
||||
chartTime: ChartTimes | "rt",
|
||||
newStats: NetworkProbeStatsRecord[],
|
||||
maxPoints = 100
|
||||
) {
|
||||
const cache_key = `${systemId}${chartTime}`
|
||||
const existingStats = getCacheValue(systemId, chartTime)
|
||||
if (existingStats) {
|
||||
const { expectedInterval } = chartTimeData[chartTime]
|
||||
const updatedStats = appendData(existingStats, newStats, expectedInterval, maxPoints)
|
||||
cache.set(cache_key, updatedStats)
|
||||
return updatedStats
|
||||
} else {
|
||||
cache.set(cache_key, newStats)
|
||||
return newStats
|
||||
}
|
||||
}
|
||||
|
||||
const NETWORK_PROBE_FIELDS =
|
||||
"id,name,system,target,protocol,port,interval,res,resMin1h,resMax1h,resAvg1h,loss1h,enabled,updated"
|
||||
|
||||
interface UseNetworkProbesProps {
|
||||
systemId?: string
|
||||
}
|
||||
|
||||
export function useNetworkProbes(props: UseNetworkProbesProps) {
|
||||
const { systemId } = props
|
||||
|
||||
const [probes, setProbes] = useState<NetworkProbeRecord[]>([])
|
||||
const pendingProbeEvents = useRef(new Map<string, RecordSubscription<NetworkProbeRecord>>())
|
||||
const probeBatchTimeout = useRef<ReturnType<typeof setTimeout> | null>(null)
|
||||
|
||||
// clear old data when systemId changes
|
||||
// useEffect(() => {
|
||||
// return setProbes([])
|
||||
// }, [systemId])
|
||||
|
||||
// initial load - fetch probes if not provided by caller
|
||||
useEffect(() => {
|
||||
fetchProbes(systemId).then((probes) => setProbes(probes))
|
||||
}, [systemId])
|
||||
|
||||
// Subscribe to updates if probes not provided by caller
|
||||
useEffect(() => {
|
||||
let unsubscribe: (() => void) | undefined
|
||||
|
||||
function flushPendingProbeEvents() {
|
||||
probeBatchTimeout.current = null
|
||||
if (!pendingProbeEvents.current.size) {
|
||||
return
|
||||
}
|
||||
const events = pendingProbeEvents.current
|
||||
pendingProbeEvents.current = new Map()
|
||||
setProbes((currentProbes) => {
|
||||
return applyProbeEvents(currentProbes ?? [], events.values(), systemId)
|
||||
})
|
||||
}
|
||||
|
||||
const pbOptions: RecordListOptions = { fields: NETWORK_PROBE_FIELDS }
|
||||
if (systemId) {
|
||||
pbOptions.filter = pb.filter("system = {:system}", { system: systemId })
|
||||
}
|
||||
|
||||
;(async () => {
|
||||
try {
|
||||
unsubscribe = await pb.collection<NetworkProbeRecord>("network_probes").subscribe(
|
||||
"*",
|
||||
(event) => {
|
||||
pendingProbeEvents.current.set(event.record.id, event)
|
||||
if (!probeBatchTimeout.current) {
|
||||
probeBatchTimeout.current = setTimeout(flushPendingProbeEvents, 50)
|
||||
}
|
||||
},
|
||||
pbOptions
|
||||
)
|
||||
} catch (error) {
|
||||
console.error("Failed to subscribe to probes", error)
|
||||
}
|
||||
})()
|
||||
|
||||
return () => {
|
||||
if (probeBatchTimeout.current !== null) {
|
||||
clearTimeout(probeBatchTimeout.current)
|
||||
probeBatchTimeout.current = null
|
||||
}
|
||||
pendingProbeEvents.current.clear()
|
||||
unsubscribe?.()
|
||||
}
|
||||
}, [systemId])
|
||||
|
||||
return probes
|
||||
}
|
||||
|
||||
interface UseNetworkProbeStatsProps {
|
||||
systemId?: string
|
||||
chartTime: ChartTimes
|
||||
}
|
||||
|
||||
export function useNetworkProbeStats(props: UseNetworkProbeStatsProps) {
|
||||
const { systemId, chartTime } = props
|
||||
const [probeStats, setProbeStats] = useState<NetworkProbeStatsRecord[]>([])
|
||||
const requestID = useRef(0)
|
||||
|
||||
useEffect(() => {
|
||||
if (!systemId) {
|
||||
setProbeStats([])
|
||||
return
|
||||
}
|
||||
if (chartTime === "1m") {
|
||||
setProbeStats(getCacheValue(systemId, "rt"))
|
||||
return
|
||||
}
|
||||
setProbeStats(getCacheValue(systemId, chartTime))
|
||||
}, [systemId, chartTime])
|
||||
|
||||
// fetch missing probe stats on load and when chart time changes
|
||||
useEffect(() => {
|
||||
if (!systemId || !chartTime || chartTime === "1m") {
|
||||
return
|
||||
}
|
||||
|
||||
const { expectedInterval } = chartTimeData[chartTime]
|
||||
const requestId = ++requestID.current
|
||||
|
||||
const cachedProbeStats = getCacheValue(systemId, chartTime)
|
||||
|
||||
// Render from cache immediately if available
|
||||
if (cachedProbeStats.length) {
|
||||
setProbeStats(cachedProbeStats)
|
||||
|
||||
// Skip the fetch if the latest cached point is recent enough that no new point is expected yet
|
||||
const lastCreated = cachedProbeStats.at(-1)?.created
|
||||
if (lastCreated && Date.now() - lastCreated < expectedInterval * 0.9) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
getStats<NetworkProbeStatsRecord>("network_probe_stats", systemId, chartTime, cachedProbeStats, true).then(
|
||||
(probeStats) => {
|
||||
// If another request has been made since this one, ignore the results
|
||||
if (requestId !== requestID.current) {
|
||||
return
|
||||
}
|
||||
const newStats = appendCacheValue(systemId, chartTime, probeStats)
|
||||
setProbeStats(newStats)
|
||||
}
|
||||
)
|
||||
}, [systemId, chartTime])
|
||||
|
||||
// Subscribe to new probe stats on non-1m chart times (1h, 12h, etc)
|
||||
useEffect(() => {
|
||||
if (!systemId || !chartTime || chartTime === "1m") {
|
||||
return
|
||||
}
|
||||
let unsubscribe: (() => void) | undefined
|
||||
const pbOptions = {
|
||||
fields: "stats,created,type",
|
||||
filter: pb.filter("system={:system} && type={:type}", { system: systemId, type: chartTimeData[chartTime].type }),
|
||||
}
|
||||
|
||||
;(async () => {
|
||||
try {
|
||||
unsubscribe = await pb.collection<NetworkProbeStatsRecord>("network_probe_stats").subscribe(
|
||||
"*",
|
||||
(event) => {
|
||||
if (event.action !== "create") {
|
||||
return
|
||||
}
|
||||
// console.log("Appending new probe stats to chart:", event.record)
|
||||
const newStats = appendCacheValue(systemId, chartTime, [event.record])
|
||||
setProbeStats(newStats)
|
||||
},
|
||||
pbOptions
|
||||
)
|
||||
} catch (error) {
|
||||
console.error("Failed to subscribe to probe stats:", error)
|
||||
}
|
||||
})()
|
||||
|
||||
return () => unsubscribe?.()
|
||||
}, [systemId, chartTime])
|
||||
|
||||
// subscribe to realtime metrics if chart time is 1m
|
||||
useEffect(() => {
|
||||
if (!systemId || chartTime !== "1m") {
|
||||
return
|
||||
}
|
||||
let unsubscribe: (() => void) | undefined
|
||||
const cache_key = `${systemId}rt`
|
||||
pb.realtime
|
||||
.subscribe(
|
||||
`rt_metrics`,
|
||||
(data: { Probes: NetworkProbeStatsRecord["stats"] }) => {
|
||||
const prev = getCacheValue(systemId, "rt")
|
||||
const now = Date.now()
|
||||
// if no previous data or the last data point is older than 1min,
|
||||
// create a new data set starting with a point 1 second ago to seed the chart data
|
||||
// if (!prev || (prev.at(-1)?.created ?? 0) < now - 60_000) {
|
||||
// prev = [{ created: now - 30_000, stats: probesToStats(probes) }]
|
||||
// }
|
||||
const stats = { created: now, stats: data.Probes } as NetworkProbeStatsRecord
|
||||
const newStats = appendData(prev, [stats], 1000, 120)
|
||||
setProbeStats(() => newStats)
|
||||
cache.set(cache_key, newStats)
|
||||
},
|
||||
{ query: { system: systemId } }
|
||||
)
|
||||
.then((us) => {
|
||||
unsubscribe = us
|
||||
})
|
||||
return () => unsubscribe?.()
|
||||
}, [chartTime, systemId])
|
||||
|
||||
return probeStats
|
||||
}
|
||||
async function fetchProbes(system?: string) {
|
||||
try {
|
||||
const res = await pb.collection<NetworkProbeRecord>("network_probes").getList(0, 2000, {
|
||||
fields: NETWORK_PROBE_FIELDS,
|
||||
filter: system ? pb.filter("system={:system}", { system }) : undefined,
|
||||
})
|
||||
return res.items
|
||||
} catch (error) {
|
||||
toast({
|
||||
title: "Error",
|
||||
description: (error as Error)?.message,
|
||||
variant: "destructive",
|
||||
})
|
||||
return []
|
||||
}
|
||||
}
|
||||
|
||||
function applyProbeEvents(
|
||||
probes: NetworkProbeRecord[],
|
||||
events: Iterable<RecordSubscription<NetworkProbeRecord>>,
|
||||
systemId?: string
|
||||
) {
|
||||
// Use a map to handle updates/deletes in constant time
|
||||
const probeById = new Map(probes.map((probe) => [probe.id, probe]))
|
||||
const createdProbes: NetworkProbeRecord[] = []
|
||||
|
||||
for (const { action, record } of events) {
|
||||
const matchesSystemScope = !systemId || record.system === systemId
|
||||
|
||||
if (action === "delete" || !matchesSystemScope) {
|
||||
probeById.delete(record.id)
|
||||
continue
|
||||
}
|
||||
|
||||
if (!probeById.has(record.id)) {
|
||||
createdProbes.push(record)
|
||||
}
|
||||
|
||||
probeById.set(record.id, record)
|
||||
}
|
||||
|
||||
const nextProbes: NetworkProbeRecord[] = []
|
||||
// Prepend brand new probes (matching previous behavior)
|
||||
for (let index = createdProbes.length - 1; index >= 0; index -= 1) {
|
||||
nextProbes.push(createdProbes[index])
|
||||
}
|
||||
|
||||
// Rebuild the final list while preserving original order for existing probes
|
||||
for (const probe of probes) {
|
||||
const nextProbe = probeById.get(probe.id)
|
||||
if (!nextProbe) {
|
||||
continue
|
||||
}
|
||||
nextProbes.push(nextProbe)
|
||||
probeById.delete(probe.id)
|
||||
}
|
||||
|
||||
return nextProbes
|
||||
}
|
||||
@@ -72,7 +72,7 @@ export const formatShortDate = (timestamp: string) => {
|
||||
return shortDateFormatter.format(new Date(timestamp))
|
||||
}
|
||||
|
||||
export const hourWithSeconds = (timestamp: string) => {
|
||||
export const hourWithSeconds = (timestamp: string | number) => {
|
||||
return hourWithSecondsFormatter.format(new Date(timestamp))
|
||||
}
|
||||
|
||||
@@ -111,17 +111,18 @@ export const updateFavicon = (() => {
|
||||
</linearGradient>
|
||||
</defs>
|
||||
<path fill="url(#gradient)" d="M35 70H0V0h35q4.4 0 8.2 1.7a21.4 21.4 0 0 1 6.6 4.5q2.9 2.8 4.5 6.6Q56 16.7 56 21a15.4 15.4 0 0 1-.3 3.2 17.6 17.6 0 0 1-.2.8 19.4 19.4 0 0 1-1.5 4 17 17 0 0 1-2.4 3.4 13.5 13.5 0 0 1-2.6 2.3 12.5 12.5 0 0 1-.4.3q1.7 1 3 2.5Q53 39.1 54 41a18.3 18.3 0 0 1 1.5 4 17.4 17.4 0 0 1 .5 3 15.3 15.3 0 0 1 0 1q0 4.4-1.7 8.2a21.4 21.4 0 0 1-4.5 6.6q-2.8 2.9-6.6 4.6Q39.4 70 35 70ZM14 14v14h21a7 7 0 0 0 2.3-.3 6.6 6.6 0 0 0 .4-.2Q39 27 40 26a6.9 6.9 0 0 0 1.5-2.2q.5-1.3.5-2.8a7 7 0 0 0-.4-2.3 6.6 6.6 0 0 0-.1-.4Q40.9 17 40 16a7 7 0 0 0-2.3-1.4 6.9 6.9 0 0 0-2.5-.6 7.9 7.9 0 0 0-.2 0H14Zm0 28v14h21a7 7 0 0 0 2.3-.4 6.6 6.6 0 0 0 .4-.1Q39 54.9 40 54a7 7 0 0 0 1.5-2.2 6.9 6.9 0 0 0 .5-2.6 7.9 7.9 0 0 0 0-.2 7 7 0 0 0-.4-2.3 6.6 6.6 0 0 0-.1-.4Q40.9 45 40 44a7 7 0 0 0-2.3-1.5 6.9 6.9 0 0 0-2.5-.6 7.9 7.9 0 0 0-.2 0H14Z"/>
|
||||
${downCount > 0 &&
|
||||
`
|
||||
${
|
||||
downCount > 0 &&
|
||||
`
|
||||
<circle cx="40" cy="50" r="22" fill="#f00"/>
|
||||
<text x="40" y="60" font-size="34" text-anchor="middle" fill="#fff" font-family="Arial" font-weight="bold">${downCount}</text>
|
||||
`
|
||||
}
|
||||
}
|
||||
</svg>
|
||||
`
|
||||
const blob = new Blob([svg], { type: "image/svg+xml" })
|
||||
const url = URL.createObjectURL(blob)
|
||||
; (document.querySelector("link[rel='icon']") as HTMLLinkElement).href = url
|
||||
;(document.querySelector("link[rel='icon']") as HTMLLinkElement).href = url
|
||||
}
|
||||
})()
|
||||
|
||||
@@ -198,6 +199,26 @@ export function decimalString(num: number, digits = 2) {
|
||||
return formatter.format(num)
|
||||
}
|
||||
|
||||
export function formatMicroseconds(microseconds: number, showDigits = true): string {
|
||||
if (!Number.isFinite(microseconds)) {
|
||||
return "-"
|
||||
}
|
||||
|
||||
if (microseconds < 1000) {
|
||||
return `${microseconds}μs`
|
||||
}
|
||||
|
||||
if (microseconds < 1_000_000) {
|
||||
const milliseconds = microseconds / 1000
|
||||
const digits = milliseconds >= 10 ? 1 : 2
|
||||
return `${decimalString(milliseconds, showDigits ? digits : 0)}ms`
|
||||
}
|
||||
|
||||
const seconds = microseconds / 1_000_000
|
||||
const digits = seconds >= 10 ? 1 : 2
|
||||
return `${decimalString(seconds, showDigits ? digits : 0)}s`
|
||||
}
|
||||
|
||||
/** Get value from local or session storage */
|
||||
function getStorageValue(key: string, defaultValue: unknown, storageInterface: Storage = localStorage) {
|
||||
const saved = storageInterface?.getItem(key)
|
||||
@@ -365,12 +386,12 @@ export function formatDuration(
|
||||
.join(" ")
|
||||
}
|
||||
|
||||
/** Parse semver string into major, minor, and patch numbers
|
||||
/** Parse semver string into major, minor, and patch numbers
|
||||
* @example
|
||||
* const semVer = "1.2.3"
|
||||
* const { major, minor, patch } = parseSemVer(semVer)
|
||||
* console.log(major, minor, patch) // 1, 2, 3
|
||||
*/
|
||||
*/
|
||||
export const parseSemVer = (semVer = ""): SemVer => {
|
||||
// if (semVer.startsWith("v")) {
|
||||
// semVer = semVer.slice(1)
|
||||
@@ -422,10 +443,22 @@ export function runOnce<T extends (...args: any[]) => any>(fn: T): T {
|
||||
}) as T
|
||||
}
|
||||
|
||||
/** Get the visual width of a string, accounting for full-width characters */
|
||||
export function getVisualStringWidth(str: string): number {
|
||||
const visualWidthCache = new Map<string, number>()
|
||||
|
||||
/** Get the visual width of a string, accounting for full-width and narrow punctuation characters.
|
||||
* Don't use for monospaced fonts, use .length instead
|
||||
*/
|
||||
function getVisualStringWidth(str: string): number {
|
||||
const cached = visualWidthCache.get(str)
|
||||
if (cached !== undefined) {
|
||||
return cached
|
||||
}
|
||||
let width = 0
|
||||
for (const char of str) {
|
||||
if (char === ".") {
|
||||
width += 0.7
|
||||
continue
|
||||
}
|
||||
const code = char.codePointAt(0) || 0
|
||||
// Hangul Jamo and Syllables are often slightly thinner than Hanzi/Kanji
|
||||
if ((code >= 0x1100 && code <= 0x115f) || (code >= 0xac00 && code <= 0xd7af)) {
|
||||
@@ -443,16 +476,27 @@ export function getVisualStringWidth(str: string): number {
|
||||
code > 0xffff // Emojis and other supplementary plane characters
|
||||
width += isFullWidth ? 2 : 1
|
||||
}
|
||||
visualWidthCache.set(str, width)
|
||||
return width
|
||||
}
|
||||
|
||||
/** Compare the visual width of two strings imprecisely */
|
||||
export function isVisuallyLonger(str1: string, str2: string): boolean {
|
||||
return getVisualStringWidth(str1) > getVisualStringWidth(str2)
|
||||
}
|
||||
|
||||
/** Format seconds to hours, minutes, or seconds */
|
||||
export function secondsToString(seconds: number, unit: "hour" | "minute" | "day"): string {
|
||||
const count = Math.floor(seconds / (unit === "hour" ? 3600 : unit === "minute" ? 60 : 86400))
|
||||
const countString = count.toLocaleString()
|
||||
switch (unit) {
|
||||
case "minute":
|
||||
return plural(count, { one: `${countString} minute`, few: `${countString} minutes`, many: `${countString} minutes`, other: `${countString} minutes` })
|
||||
return plural(count, {
|
||||
one: `${countString} minute`,
|
||||
few: `${countString} minutes`,
|
||||
many: `${countString} minutes`,
|
||||
other: `${countString} minutes`,
|
||||
})
|
||||
case "hour":
|
||||
return plural(count, { one: `${countString} hour`, other: `${countString} hours` })
|
||||
case "day":
|
||||
@@ -469,4 +513,4 @@ export function secondsToUptimeString(seconds: number): string {
|
||||
} else {
|
||||
return secondsToString(seconds, "day")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ msgstr ""
|
||||
"Language: ar\n"
|
||||
"Project-Id-Version: beszel\n"
|
||||
"Report-Msgid-Bugs-To: \n"
|
||||
"PO-Revision-Date: 2026-03-27 19:17\n"
|
||||
"PO-Revision-Date: 2026-04-05 18:27\n"
|
||||
"Last-Translator: \n"
|
||||
"Language-Team: Arabic\n"
|
||||
"Plural-Forms: nplurals=6; plural=(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n%100>=3 && n%100<=10 ? 3 : n%100>=11 && n%100<=99 ? 4 : 5);\n"
|
||||
@@ -22,7 +22,7 @@ msgstr ""
|
||||
#: src/components/footer-repo-link.tsx
|
||||
msgctxt "New version available"
|
||||
msgid "{0} available"
|
||||
msgstr ""
|
||||
msgstr "{0} متاح"
|
||||
|
||||
#. placeholder {0}: table.getFilteredSelectedRowModel().rows.length
|
||||
#. placeholder {1}: table.getFilteredRowModel().rows.length
|
||||
@@ -209,10 +209,19 @@ msgstr "المتوسط ينخفض أقل من <0>{value}{0}</0>"
|
||||
msgid "Average exceeds <0>{value}{0}</0>"
|
||||
msgstr "المتوسط يتجاوز <0>{value}{0}</0>"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Average number of I/O operations waiting to be serviced"
|
||||
msgstr "متوسط عدد عمليات الإدخال والإخراج التي تنتظر خدمتها"
|
||||
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
msgid "Average power consumption of GPUs"
|
||||
msgstr "متوسط استهلاك طاقة وحدة معالجة الرسوميات"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "Average queue to completion time per operation"
|
||||
msgstr "متوسط الوقت من الانتظار في الدور حتى الإتمام لكل عملية"
|
||||
|
||||
#: src/components/routes/system/charts/cpu-charts.tsx
|
||||
msgid "Average system-wide CPU utilization"
|
||||
msgstr "متوسط استخدام وحدة المعالجة المركزية على مستوى النظام"
|
||||
@@ -444,6 +453,11 @@ msgctxt "Environment variables"
|
||||
msgid "Copy env"
|
||||
msgstr "نسخ متغيرات البيئة"
|
||||
|
||||
#: src/components/alerts/alerts-sheet.tsx
|
||||
msgctxt "Copy alerts from another system"
|
||||
msgid "Copy from"
|
||||
msgstr "نسخ من"
|
||||
|
||||
#: src/components/systems-table/systems-table-columns.tsx
|
||||
msgid "Copy host"
|
||||
msgstr "نسخ المضيف"
|
||||
@@ -476,7 +490,7 @@ msgstr "نسخ YAML"
|
||||
#: src/components/routes/system.tsx
|
||||
msgctxt "Core system metrics"
|
||||
msgid "Core"
|
||||
msgstr ""
|
||||
msgstr "النواة"
|
||||
|
||||
#: src/components/containers-table/containers-table-columns.tsx
|
||||
#: src/components/systemd-table/systemd-table-columns.tsx
|
||||
@@ -550,7 +564,7 @@ msgstr "يوميًا"
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Default system layout option"
|
||||
msgid "Default"
|
||||
msgstr ""
|
||||
msgstr "افتراضي"
|
||||
|
||||
#: src/components/routes/settings/general.tsx
|
||||
msgid "Default time period"
|
||||
@@ -599,19 +613,18 @@ msgstr "وحدة القرص"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/lib/alerts.ts
|
||||
msgid "Disk Usage"
|
||||
msgstr "استخدام القرص"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Disk usage of {extraFsName}"
|
||||
msgstr "استخدام القرص لـ {extraFsName}"
|
||||
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Layout display options"
|
||||
msgid "Display"
|
||||
msgstr ""
|
||||
msgstr "عرض"
|
||||
|
||||
#: src/components/routes/system/charts/cpu-charts.tsx
|
||||
msgid "Docker CPU Usage"
|
||||
@@ -898,6 +911,21 @@ msgstr "طريقة HTTP"
|
||||
msgid "HTTP method: POST, GET, or HEAD (default: POST)"
|
||||
msgstr "طريقة HTTP: POST، GET، أو HEAD (الافتراضي: POST)"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "I/O Await"
|
||||
msgstr "انتظار الإدخال والإخراج"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O total time spent on read/write"
|
||||
msgid "I/O Time"
|
||||
msgstr "وقت الإدخال والإخراج"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Percent of time the disk is busy with I/O"
|
||||
msgid "I/O Utilization"
|
||||
msgstr "استخدام الإدخال والإخراج"
|
||||
|
||||
#. Context: Battery state
|
||||
#: src/lib/i18n.ts
|
||||
msgid "Idle"
|
||||
@@ -1207,6 +1235,10 @@ msgstr "تنسيق الحمولة"
|
||||
msgid "Per-core average utilization"
|
||||
msgstr "متوسط الاستخدام لكل نواة"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Percent of time the disk is busy with I/O"
|
||||
msgstr "النسبة المئوية للوقت الذي يكون فيه القرص مشغولاً بالإدخال والإخراج"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
msgid "Percentage of time spent in each state"
|
||||
msgstr "النسبة المئوية للوقت المقضي في كل حالة"
|
||||
@@ -1259,7 +1291,7 @@ msgstr "المنفذ"
|
||||
#: src/components/containers-table/containers-table-columns.tsx
|
||||
msgctxt "Container ports"
|
||||
msgid "Ports"
|
||||
msgstr ""
|
||||
msgstr "المنافذ"
|
||||
|
||||
#. Power On Time
|
||||
#: src/components/routes/system/smart-table.tsx
|
||||
@@ -1284,13 +1316,20 @@ msgstr "تم بدء العملية"
|
||||
msgid "Public Key"
|
||||
msgstr "المفتاح العام"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average queue depth"
|
||||
msgid "Queue Depth"
|
||||
msgstr "عمق الدور"
|
||||
|
||||
#: src/components/routes/settings/quiet-hours.tsx
|
||||
msgid "Quiet Hours"
|
||||
msgstr "ساعات الهدوء"
|
||||
|
||||
#. Disk read
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Read"
|
||||
msgstr "قراءة"
|
||||
|
||||
@@ -1549,7 +1588,7 @@ msgstr "جدول"
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Tabs system layout option"
|
||||
msgid "Tabs"
|
||||
msgstr ""
|
||||
msgstr "تبويبات"
|
||||
|
||||
#: src/components/systemd-table/systemd-table.tsx
|
||||
msgid "Tasks"
|
||||
@@ -1602,7 +1641,7 @@ msgstr "لا يمكن التراجع عن هذا الإجراء. سيؤدي ذل
|
||||
msgid "This will permanently delete all selected records from the database."
|
||||
msgstr "سيؤدي هذا إلى حذف جميع السجلات المحددة من قاعدة البيانات بشكل دائم."
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Throughput of {extraFsName}"
|
||||
msgstr "معدل نقل {extraFsName}"
|
||||
|
||||
@@ -1655,6 +1694,11 @@ msgstr "إجمالي البيانات المستلمة لكل واجهة"
|
||||
msgid "Total data sent for each interface"
|
||||
msgstr "إجمالي البيانات المرسلة لكل واجهة"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O"
|
||||
msgid "Total time spent on read/write (can exceed 100%)"
|
||||
msgstr ""
|
||||
|
||||
#. placeholder {0}: data.length
|
||||
#: src/components/systemd-table/systemd-table.tsx
|
||||
msgid "Total: {0}"
|
||||
@@ -1775,7 +1819,7 @@ msgstr "رفع"
|
||||
msgid "Uptime"
|
||||
msgstr "مدة التشغيل"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
@@ -1797,6 +1841,11 @@ msgstr "مستخدم"
|
||||
msgid "Users"
|
||||
msgstr "المستخدمون"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Disk I/O utilization"
|
||||
msgid "Utilization"
|
||||
msgstr "الاستخدام"
|
||||
|
||||
#: src/components/alerts-history-columns.tsx
|
||||
msgid "Value"
|
||||
msgstr "القيمة"
|
||||
@@ -1806,6 +1855,7 @@ msgid "View"
|
||||
msgstr "عرض"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/network-sheet.tsx
|
||||
msgid "View more"
|
||||
msgstr "عرض المزيد"
|
||||
@@ -1858,7 +1908,9 @@ msgstr "أمر ويندوز"
|
||||
|
||||
#. Disk write
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Write"
|
||||
msgstr "كتابة"
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ msgstr ""
|
||||
"Language: bg\n"
|
||||
"Project-Id-Version: beszel\n"
|
||||
"Report-Msgid-Bugs-To: \n"
|
||||
"PO-Revision-Date: 2026-03-27 19:17\n"
|
||||
"PO-Revision-Date: 2026-03-28 09:32\n"
|
||||
"Last-Translator: \n"
|
||||
"Language-Team: Bulgarian\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
|
||||
@@ -22,7 +22,7 @@ msgstr ""
|
||||
#: src/components/footer-repo-link.tsx
|
||||
msgctxt "New version available"
|
||||
msgid "{0} available"
|
||||
msgstr ""
|
||||
msgstr "Версия {0} е налична"
|
||||
|
||||
#. placeholder {0}: table.getFilteredSelectedRowModel().rows.length
|
||||
#. placeholder {1}: table.getFilteredRowModel().rows.length
|
||||
@@ -209,10 +209,19 @@ msgstr "Средната стойност пада под <0>{value}{0}</0>"
|
||||
msgid "Average exceeds <0>{value}{0}</0>"
|
||||
msgstr "Средната стойност надхвърля <0>{value}{0}</0>"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Average number of I/O operations waiting to be serviced"
|
||||
msgstr "Среден брой I/O операции, чакащи обслужване"
|
||||
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
msgid "Average power consumption of GPUs"
|
||||
msgstr "Средна консумация на ток от графични карти"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "Average queue to completion time per operation"
|
||||
msgstr "Средно време в опашката до приключване на операция"
|
||||
|
||||
#: src/components/routes/system/charts/cpu-charts.tsx
|
||||
msgid "Average system-wide CPU utilization"
|
||||
msgstr "Средно използване на процесора на цялата система"
|
||||
@@ -444,6 +453,11 @@ msgctxt "Environment variables"
|
||||
msgid "Copy env"
|
||||
msgstr "Копирай еnv"
|
||||
|
||||
#: src/components/alerts/alerts-sheet.tsx
|
||||
msgctxt "Copy alerts from another system"
|
||||
msgid "Copy from"
|
||||
msgstr "Копиране от"
|
||||
|
||||
#: src/components/systems-table/systems-table-columns.tsx
|
||||
msgid "Copy host"
|
||||
msgstr "Копирай хоста"
|
||||
@@ -476,7 +490,7 @@ msgstr "Копирай YAML"
|
||||
#: src/components/routes/system.tsx
|
||||
msgctxt "Core system metrics"
|
||||
msgid "Core"
|
||||
msgstr ""
|
||||
msgstr "Основни"
|
||||
|
||||
#: src/components/containers-table/containers-table-columns.tsx
|
||||
#: src/components/systemd-table/systemd-table-columns.tsx
|
||||
@@ -550,7 +564,7 @@ msgstr "Дневно"
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Default system layout option"
|
||||
msgid "Default"
|
||||
msgstr ""
|
||||
msgstr "Подредба"
|
||||
|
||||
#: src/components/routes/settings/general.tsx
|
||||
msgid "Default time period"
|
||||
@@ -599,19 +613,18 @@ msgstr "Единица за диск"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/lib/alerts.ts
|
||||
msgid "Disk Usage"
|
||||
msgstr "Използване на диск"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Disk usage of {extraFsName}"
|
||||
msgstr "Изполване на диск от {extraFsName}"
|
||||
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Layout display options"
|
||||
msgid "Display"
|
||||
msgstr ""
|
||||
msgstr "Показване"
|
||||
|
||||
#: src/components/routes/system/charts/cpu-charts.tsx
|
||||
msgid "Docker CPU Usage"
|
||||
@@ -898,6 +911,21 @@ msgstr "HTTP метод"
|
||||
msgid "HTTP method: POST, GET, or HEAD (default: POST)"
|
||||
msgstr "HTTP метод: POST, GET или HEAD (по подразбиране: POST)"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "I/O Await"
|
||||
msgstr "I/O изчакване"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O total time spent on read/write"
|
||||
msgid "I/O Time"
|
||||
msgstr "I/O време"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Percent of time the disk is busy with I/O"
|
||||
msgid "I/O Utilization"
|
||||
msgstr "I/O натоварване"
|
||||
|
||||
#. Context: Battery state
|
||||
#: src/lib/i18n.ts
|
||||
msgid "Idle"
|
||||
@@ -1207,6 +1235,10 @@ msgstr "Формат на полезния товар"
|
||||
msgid "Per-core average utilization"
|
||||
msgstr "Средно използване на ядро"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Percent of time the disk is busy with I/O"
|
||||
msgstr "Процент от времето, в което дискът е зает с I/O операции"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
msgid "Percentage of time spent in each state"
|
||||
msgstr "Процент време, прекарано във всяко състояние"
|
||||
@@ -1259,7 +1291,7 @@ msgstr "Порт"
|
||||
#: src/components/containers-table/containers-table-columns.tsx
|
||||
msgctxt "Container ports"
|
||||
msgid "Ports"
|
||||
msgstr ""
|
||||
msgstr "Портове"
|
||||
|
||||
#. Power On Time
|
||||
#: src/components/routes/system/smart-table.tsx
|
||||
@@ -1284,13 +1316,20 @@ msgstr "Процесът стартира"
|
||||
msgid "Public Key"
|
||||
msgstr "Публичен ключ"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average queue depth"
|
||||
msgid "Queue Depth"
|
||||
msgstr "Дълбочина на опашката"
|
||||
|
||||
#: src/components/routes/settings/quiet-hours.tsx
|
||||
msgid "Quiet Hours"
|
||||
msgstr "Тихи часове"
|
||||
|
||||
#. Disk read
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Read"
|
||||
msgstr "Прочети"
|
||||
|
||||
@@ -1549,7 +1588,7 @@ msgstr "Таблица"
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Tabs system layout option"
|
||||
msgid "Tabs"
|
||||
msgstr ""
|
||||
msgstr "Табове"
|
||||
|
||||
#: src/components/systemd-table/systemd-table.tsx
|
||||
msgid "Tasks"
|
||||
@@ -1602,7 +1641,7 @@ msgstr "Това действие не може да бъде отменено.
|
||||
msgid "This will permanently delete all selected records from the database."
|
||||
msgstr "Това ще доведе до трайно изтриване на всички избрани записи от базата данни."
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Throughput of {extraFsName}"
|
||||
msgstr "Пропускателна способност на {extraFsName}"
|
||||
|
||||
@@ -1655,6 +1694,11 @@ msgstr "Общо получени данни за всеки интерфейс"
|
||||
msgid "Total data sent for each interface"
|
||||
msgstr "Общо изпратени данни за всеки интерфейс"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O"
|
||||
msgid "Total time spent on read/write (can exceed 100%)"
|
||||
msgstr ""
|
||||
|
||||
#. placeholder {0}: data.length
|
||||
#: src/components/systemd-table/systemd-table.tsx
|
||||
msgid "Total: {0}"
|
||||
@@ -1775,7 +1819,7 @@ msgstr "Качване"
|
||||
msgid "Uptime"
|
||||
msgstr "Време на работа"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
@@ -1797,6 +1841,11 @@ msgstr "Използвани"
|
||||
msgid "Users"
|
||||
msgstr "Потребители"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Disk I/O utilization"
|
||||
msgid "Utilization"
|
||||
msgstr "Натоварване"
|
||||
|
||||
#: src/components/alerts-history-columns.tsx
|
||||
msgid "Value"
|
||||
msgstr "Стойност"
|
||||
@@ -1806,6 +1855,7 @@ msgid "View"
|
||||
msgstr "Изглед"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/network-sheet.tsx
|
||||
msgid "View more"
|
||||
msgstr "Виж повече"
|
||||
@@ -1858,7 +1908,9 @@ msgstr "Команда Windows"
|
||||
|
||||
#. Disk write
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Write"
|
||||
msgstr "Запиши"
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ msgstr ""
|
||||
"Language: cs\n"
|
||||
"Project-Id-Version: beszel\n"
|
||||
"Report-Msgid-Bugs-To: \n"
|
||||
"PO-Revision-Date: 2026-03-27 19:17\n"
|
||||
"PO-Revision-Date: 2026-04-05 18:27\n"
|
||||
"Last-Translator: \n"
|
||||
"Language-Team: Czech\n"
|
||||
"Plural-Forms: nplurals=4; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 3;\n"
|
||||
@@ -22,7 +22,7 @@ msgstr ""
|
||||
#: src/components/footer-repo-link.tsx
|
||||
msgctxt "New version available"
|
||||
msgid "{0} available"
|
||||
msgstr ""
|
||||
msgstr "{0} k dispozici"
|
||||
|
||||
#. placeholder {0}: table.getFilteredSelectedRowModel().rows.length
|
||||
#. placeholder {1}: table.getFilteredRowModel().rows.length
|
||||
@@ -209,10 +209,19 @@ msgstr "Průměr klesne pod <0>{value}{0}</0>"
|
||||
msgid "Average exceeds <0>{value}{0}</0>"
|
||||
msgstr "Průměr je vyšší než <0>{value}{0}</0>"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Average number of I/O operations waiting to be serviced"
|
||||
msgstr "Průměrný počet I/O operací čekajících na vyřízení"
|
||||
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
msgid "Average power consumption of GPUs"
|
||||
msgstr "Průměrná spotřeba energie GPU"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "Average queue to completion time per operation"
|
||||
msgstr "Průměrná doba od zařazení do fronty po dokončení operace"
|
||||
|
||||
#: src/components/routes/system/charts/cpu-charts.tsx
|
||||
msgid "Average system-wide CPU utilization"
|
||||
msgstr "Průměrné využití CPU v celém systému"
|
||||
@@ -444,6 +453,11 @@ msgctxt "Environment variables"
|
||||
msgid "Copy env"
|
||||
msgstr "Kopírovat env"
|
||||
|
||||
#: src/components/alerts/alerts-sheet.tsx
|
||||
msgctxt "Copy alerts from another system"
|
||||
msgid "Copy from"
|
||||
msgstr "Kopírovat z"
|
||||
|
||||
#: src/components/systems-table/systems-table-columns.tsx
|
||||
msgid "Copy host"
|
||||
msgstr "Kopírovat hostitele"
|
||||
@@ -476,7 +490,7 @@ msgstr "Kopírovat YAML"
|
||||
#: src/components/routes/system.tsx
|
||||
msgctxt "Core system metrics"
|
||||
msgid "Core"
|
||||
msgstr ""
|
||||
msgstr "Jádro"
|
||||
|
||||
#: src/components/containers-table/containers-table-columns.tsx
|
||||
#: src/components/systemd-table/systemd-table-columns.tsx
|
||||
@@ -550,7 +564,7 @@ msgstr "Denně"
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Default system layout option"
|
||||
msgid "Default"
|
||||
msgstr ""
|
||||
msgstr "Výchozí"
|
||||
|
||||
#: src/components/routes/settings/general.tsx
|
||||
msgid "Default time period"
|
||||
@@ -599,19 +613,18 @@ msgstr "Disková jednotka"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/lib/alerts.ts
|
||||
msgid "Disk Usage"
|
||||
msgstr "Využití disku"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Disk usage of {extraFsName}"
|
||||
msgstr "Využití disku {extraFsName}"
|
||||
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Layout display options"
|
||||
msgid "Display"
|
||||
msgstr ""
|
||||
msgstr "Zobrazení"
|
||||
|
||||
#: src/components/routes/system/charts/cpu-charts.tsx
|
||||
msgid "Docker CPU Usage"
|
||||
@@ -898,6 +911,21 @@ msgstr "HTTP metoda"
|
||||
msgid "HTTP method: POST, GET, or HEAD (default: POST)"
|
||||
msgstr "HTTP metoda: POST, GET nebo HEAD (výchozí: POST)"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "I/O Await"
|
||||
msgstr "I/O Čekání"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O total time spent on read/write"
|
||||
msgid "I/O Time"
|
||||
msgstr "I/O Čas"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Percent of time the disk is busy with I/O"
|
||||
msgid "I/O Utilization"
|
||||
msgstr "I/O Využití"
|
||||
|
||||
#. Context: Battery state
|
||||
#: src/lib/i18n.ts
|
||||
msgid "Idle"
|
||||
@@ -1207,6 +1235,10 @@ msgstr "Formát payloadu"
|
||||
msgid "Per-core average utilization"
|
||||
msgstr "Průměrné využití na jádro"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Percent of time the disk is busy with I/O"
|
||||
msgstr "Procento času, po který je disk zaneprázdněn I/O operacemi"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
msgid "Percentage of time spent in each state"
|
||||
msgstr "Procento času strávěného v každém stavu"
|
||||
@@ -1259,7 +1291,7 @@ msgstr ""
|
||||
#: src/components/containers-table/containers-table-columns.tsx
|
||||
msgctxt "Container ports"
|
||||
msgid "Ports"
|
||||
msgstr ""
|
||||
msgstr "Porty"
|
||||
|
||||
#. Power On Time
|
||||
#: src/components/routes/system/smart-table.tsx
|
||||
@@ -1284,13 +1316,20 @@ msgstr "Proces spuštěn"
|
||||
msgid "Public Key"
|
||||
msgstr "Veřejný klíč"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average queue depth"
|
||||
msgid "Queue Depth"
|
||||
msgstr "Hloubka fronty"
|
||||
|
||||
#: src/components/routes/settings/quiet-hours.tsx
|
||||
msgid "Quiet Hours"
|
||||
msgstr "Tiché hodiny"
|
||||
|
||||
#. Disk read
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Read"
|
||||
msgstr "Číst"
|
||||
|
||||
@@ -1549,7 +1588,7 @@ msgstr "Tabulka"
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Tabs system layout option"
|
||||
msgid "Tabs"
|
||||
msgstr ""
|
||||
msgstr "Karty"
|
||||
|
||||
#: src/components/systemd-table/systemd-table.tsx
|
||||
msgid "Tasks"
|
||||
@@ -1602,7 +1641,7 @@ msgstr "Tuto akci nelze vzít zpět. Tím se z databáze trvale odstraní všech
|
||||
msgid "This will permanently delete all selected records from the database."
|
||||
msgstr "Tímto trvale odstraníte všechny vybrané záznamy z databáze."
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Throughput of {extraFsName}"
|
||||
msgstr "Propustnost {extraFsName}"
|
||||
|
||||
@@ -1655,6 +1694,11 @@ msgstr "Celkový přijatý objem dat pro každé rozhraní"
|
||||
msgid "Total data sent for each interface"
|
||||
msgstr "Celkový odeslaný objem dat pro každé rozhraní"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O"
|
||||
msgid "Total time spent on read/write (can exceed 100%)"
|
||||
msgstr ""
|
||||
|
||||
#. placeholder {0}: data.length
|
||||
#: src/components/systemd-table/systemd-table.tsx
|
||||
msgid "Total: {0}"
|
||||
@@ -1775,7 +1819,7 @@ msgstr "Odeslání"
|
||||
msgid "Uptime"
|
||||
msgstr "Doba provozu"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
@@ -1797,6 +1841,11 @@ msgstr "Využito"
|
||||
msgid "Users"
|
||||
msgstr "Uživatelé"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Disk I/O utilization"
|
||||
msgid "Utilization"
|
||||
msgstr "Využití"
|
||||
|
||||
#: src/components/alerts-history-columns.tsx
|
||||
msgid "Value"
|
||||
msgstr "Hodnota"
|
||||
@@ -1806,6 +1855,7 @@ msgid "View"
|
||||
msgstr "Zobrazení"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/network-sheet.tsx
|
||||
msgid "View more"
|
||||
msgstr "Zobrazit více"
|
||||
@@ -1858,7 +1908,9 @@ msgstr "Windows příkaz"
|
||||
|
||||
#. Disk write
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Write"
|
||||
msgstr "Psát"
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ msgstr ""
|
||||
"Language: da\n"
|
||||
"Project-Id-Version: beszel\n"
|
||||
"Report-Msgid-Bugs-To: \n"
|
||||
"PO-Revision-Date: 2026-03-27 19:17\n"
|
||||
"PO-Revision-Date: 2026-04-05 18:27\n"
|
||||
"Last-Translator: \n"
|
||||
"Language-Team: Danish\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
|
||||
@@ -22,7 +22,7 @@ msgstr ""
|
||||
#: src/components/footer-repo-link.tsx
|
||||
msgctxt "New version available"
|
||||
msgid "{0} available"
|
||||
msgstr ""
|
||||
msgstr "{0} tilgængelig"
|
||||
|
||||
#. placeholder {0}: table.getFilteredSelectedRowModel().rows.length
|
||||
#. placeholder {1}: table.getFilteredRowModel().rows.length
|
||||
@@ -209,10 +209,19 @@ msgstr "Gennemsnit falder under <0>{value}{0}</0>"
|
||||
msgid "Average exceeds <0>{value}{0}</0>"
|
||||
msgstr "Gennemsnittet overstiger <0>{value}{0}</0>"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Average number of I/O operations waiting to be serviced"
|
||||
msgstr "Gennemsnitligt antal I/O-operationer, der venter på at blive betjent"
|
||||
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
msgid "Average power consumption of GPUs"
|
||||
msgstr "Gennemsnitligt strømforbrug for GPU'er"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "Average queue to completion time per operation"
|
||||
msgstr "Gennemsnitlig tid fra kø til færdiggørelse pr. operation"
|
||||
|
||||
#: src/components/routes/system/charts/cpu-charts.tsx
|
||||
msgid "Average system-wide CPU utilization"
|
||||
msgstr "Gennemsnitlig systembaseret CPU-udnyttelse"
|
||||
@@ -444,6 +453,11 @@ msgctxt "Environment variables"
|
||||
msgid "Copy env"
|
||||
msgstr "Kopier miljø"
|
||||
|
||||
#: src/components/alerts/alerts-sheet.tsx
|
||||
msgctxt "Copy alerts from another system"
|
||||
msgid "Copy from"
|
||||
msgstr "Kopier fra"
|
||||
|
||||
#: src/components/systems-table/systems-table-columns.tsx
|
||||
msgid "Copy host"
|
||||
msgstr "Kopier vært"
|
||||
@@ -476,7 +490,7 @@ msgstr "Kopier YAML"
|
||||
#: src/components/routes/system.tsx
|
||||
msgctxt "Core system metrics"
|
||||
msgid "Core"
|
||||
msgstr ""
|
||||
msgstr "Kerne"
|
||||
|
||||
#: src/components/containers-table/containers-table-columns.tsx
|
||||
#: src/components/systemd-table/systemd-table-columns.tsx
|
||||
@@ -550,7 +564,7 @@ msgstr "Dagligt"
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Default system layout option"
|
||||
msgid "Default"
|
||||
msgstr ""
|
||||
msgstr "Standard"
|
||||
|
||||
#: src/components/routes/settings/general.tsx
|
||||
msgid "Default time period"
|
||||
@@ -599,19 +613,18 @@ msgstr "Diskenhed"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/lib/alerts.ts
|
||||
msgid "Disk Usage"
|
||||
msgstr "Diskforbrug"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Disk usage of {extraFsName}"
|
||||
msgstr "Diskforbrug af {extraFsName}"
|
||||
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Layout display options"
|
||||
msgid "Display"
|
||||
msgstr ""
|
||||
msgstr "Visning"
|
||||
|
||||
#: src/components/routes/system/charts/cpu-charts.tsx
|
||||
msgid "Docker CPU Usage"
|
||||
@@ -870,7 +883,7 @@ msgstr "Sundhed"
|
||||
|
||||
#: src/components/routes/settings/layout.tsx
|
||||
msgid "Heartbeat"
|
||||
msgstr ""
|
||||
msgstr "Hjerteslag"
|
||||
|
||||
#: src/components/routes/settings/heartbeat.tsx
|
||||
msgid "Heartbeat Monitoring"
|
||||
@@ -898,6 +911,21 @@ msgstr "HTTP-metode"
|
||||
msgid "HTTP method: POST, GET, or HEAD (default: POST)"
|
||||
msgstr "HTTP-metode: POST, GET eller HEAD (standard: POST)"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "I/O Await"
|
||||
msgstr "I/O Vent"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O total time spent on read/write"
|
||||
msgid "I/O Time"
|
||||
msgstr "I/O Tid"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Percent of time the disk is busy with I/O"
|
||||
msgid "I/O Utilization"
|
||||
msgstr "I/O Udnyttelse"
|
||||
|
||||
#. Context: Battery state
|
||||
#: src/lib/i18n.ts
|
||||
msgid "Idle"
|
||||
@@ -1207,6 +1235,10 @@ msgstr "Payload-format"
|
||||
msgid "Per-core average utilization"
|
||||
msgstr "Gennemsnitlig udnyttelse pr. kerne"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Percent of time the disk is busy with I/O"
|
||||
msgstr "Procentdel af tiden disk-en er optaget af I/O"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
msgid "Percentage of time spent in each state"
|
||||
msgstr "Procentdel af tid brugt i hver tilstand"
|
||||
@@ -1259,7 +1291,7 @@ msgstr ""
|
||||
#: src/components/containers-table/containers-table-columns.tsx
|
||||
msgctxt "Container ports"
|
||||
msgid "Ports"
|
||||
msgstr ""
|
||||
msgstr "Porte"
|
||||
|
||||
#. Power On Time
|
||||
#: src/components/routes/system/smart-table.tsx
|
||||
@@ -1284,13 +1316,20 @@ msgstr "Proces startet"
|
||||
msgid "Public Key"
|
||||
msgstr "Offentlig nøgle"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average queue depth"
|
||||
msgid "Queue Depth"
|
||||
msgstr "Kødybde"
|
||||
|
||||
#: src/components/routes/settings/quiet-hours.tsx
|
||||
msgid "Quiet Hours"
|
||||
msgstr "Stille timer"
|
||||
|
||||
#. Disk read
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Read"
|
||||
msgstr "Læs"
|
||||
|
||||
@@ -1549,7 +1588,7 @@ msgstr "Tabel"
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Tabs system layout option"
|
||||
msgid "Tabs"
|
||||
msgstr ""
|
||||
msgstr "Faner"
|
||||
|
||||
#: src/components/systemd-table/systemd-table.tsx
|
||||
msgid "Tasks"
|
||||
@@ -1602,7 +1641,7 @@ msgstr "Denne handling kan ikke fortrydes. Dette vil permanent slette alle aktue
|
||||
msgid "This will permanently delete all selected records from the database."
|
||||
msgstr "Dette vil permanent slette alle poster fra databasen."
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Throughput of {extraFsName}"
|
||||
msgstr "Gennemløb af {extraFsName}"
|
||||
|
||||
@@ -1655,6 +1694,11 @@ msgstr "Samlet modtaget data for hver interface"
|
||||
msgid "Total data sent for each interface"
|
||||
msgstr "Samlet sendt data for hver interface"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O"
|
||||
msgid "Total time spent on read/write (can exceed 100%)"
|
||||
msgstr ""
|
||||
|
||||
#. placeholder {0}: data.length
|
||||
#: src/components/systemd-table/systemd-table.tsx
|
||||
msgid "Total: {0}"
|
||||
@@ -1775,7 +1819,7 @@ msgstr "Overfør"
|
||||
msgid "Uptime"
|
||||
msgstr "Oppetid"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
@@ -1797,6 +1841,11 @@ msgstr "Brugt"
|
||||
msgid "Users"
|
||||
msgstr "Brugere"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Disk I/O utilization"
|
||||
msgid "Utilization"
|
||||
msgstr "Udnyttelse"
|
||||
|
||||
#: src/components/alerts-history-columns.tsx
|
||||
msgid "Value"
|
||||
msgstr "Værdi"
|
||||
@@ -1806,6 +1855,7 @@ msgid "View"
|
||||
msgstr "Vis"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/network-sheet.tsx
|
||||
msgid "View more"
|
||||
msgstr "Se mere"
|
||||
@@ -1858,7 +1908,9 @@ msgstr "Windows-kommando"
|
||||
|
||||
#. Disk write
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Write"
|
||||
msgstr "Skriv"
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ msgstr ""
|
||||
"Language: de\n"
|
||||
"Project-Id-Version: beszel\n"
|
||||
"Report-Msgid-Bugs-To: \n"
|
||||
"PO-Revision-Date: 2026-03-14 20:37\n"
|
||||
"PO-Revision-Date: 2026-04-05 18:27\n"
|
||||
"Last-Translator: \n"
|
||||
"Language-Team: German\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
|
||||
@@ -22,7 +22,7 @@ msgstr ""
|
||||
#: src/components/footer-repo-link.tsx
|
||||
msgctxt "New version available"
|
||||
msgid "{0} available"
|
||||
msgstr ""
|
||||
msgstr "{0} verfügbar"
|
||||
|
||||
#. placeholder {0}: table.getFilteredSelectedRowModel().rows.length
|
||||
#. placeholder {1}: table.getFilteredRowModel().rows.length
|
||||
@@ -209,10 +209,19 @@ msgstr "Durchschnitt unterschreitet <0>{value}{0}</0>"
|
||||
msgid "Average exceeds <0>{value}{0}</0>"
|
||||
msgstr "Durchschnitt überschreitet <0>{value}{0}</0>"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Average number of I/O operations waiting to be serviced"
|
||||
msgstr "Durchschnittliche Anzahl der auf Bearbeitung wartenden I/O-Operationen"
|
||||
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
msgid "Average power consumption of GPUs"
|
||||
msgstr "Durchschnittlicher Stromverbrauch der GPUs"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "Average queue to completion time per operation"
|
||||
msgstr "Durchschnittliche Warteschlangen- bis Abschlusszeit pro Operation"
|
||||
|
||||
#: src/components/routes/system/charts/cpu-charts.tsx
|
||||
msgid "Average system-wide CPU utilization"
|
||||
msgstr "Durchschnittliche systemweite CPU-Auslastung"
|
||||
@@ -444,6 +453,11 @@ msgctxt "Environment variables"
|
||||
msgid "Copy env"
|
||||
msgstr "Umgebungsvariablen kopieren"
|
||||
|
||||
#: src/components/alerts/alerts-sheet.tsx
|
||||
msgctxt "Copy alerts from another system"
|
||||
msgid "Copy from"
|
||||
msgstr "Kopieren von"
|
||||
|
||||
#: src/components/systems-table/systems-table-columns.tsx
|
||||
msgid "Copy host"
|
||||
msgstr "Host kopieren"
|
||||
@@ -476,7 +490,7 @@ msgstr "YAML kopieren"
|
||||
#: src/components/routes/system.tsx
|
||||
msgctxt "Core system metrics"
|
||||
msgid "Core"
|
||||
msgstr ""
|
||||
msgstr "Kern"
|
||||
|
||||
#: src/components/containers-table/containers-table-columns.tsx
|
||||
#: src/components/systemd-table/systemd-table-columns.tsx
|
||||
@@ -550,7 +564,7 @@ msgstr "Täglich"
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Default system layout option"
|
||||
msgid "Default"
|
||||
msgstr ""
|
||||
msgstr "Standard"
|
||||
|
||||
#: src/components/routes/settings/general.tsx
|
||||
msgid "Default time period"
|
||||
@@ -599,19 +613,18 @@ msgstr "Festplatteneinheit"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/lib/alerts.ts
|
||||
msgid "Disk Usage"
|
||||
msgstr "Festplattennutzung"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Disk usage of {extraFsName}"
|
||||
msgstr "Festplattennutzung von {extraFsName}"
|
||||
|
||||
#: src/components/routes/system/info-bar.tsx
|
||||
msgctxt "Layout display options"
|
||||
msgid "Display"
|
||||
msgstr ""
|
||||
msgstr "Anzeige"
|
||||
|
||||
#: src/components/routes/system/charts/cpu-charts.tsx
|
||||
msgid "Docker CPU Usage"
|
||||
@@ -898,6 +911,21 @@ msgstr "HTTP-Methode"
|
||||
msgid "HTTP method: POST, GET, or HEAD (default: POST)"
|
||||
msgstr "HTTP-Methode: POST, GET oder HEAD (Standard: POST)"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "I/O Await"
|
||||
msgstr "I/O-Wartezeit"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O total time spent on read/write"
|
||||
msgid "I/O Time"
|
||||
msgstr "I/O-Zeit"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Percent of time the disk is busy with I/O"
|
||||
msgid "I/O Utilization"
|
||||
msgstr "I/O-Auslastung"
|
||||
|
||||
#. Context: Battery state
|
||||
#: src/lib/i18n.ts
|
||||
msgid "Idle"
|
||||
@@ -1207,6 +1235,10 @@ msgstr "Payload-Format"
|
||||
msgid "Per-core average utilization"
|
||||
msgstr "Durchschnittliche Auslastung pro Kern"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Percent of time the disk is busy with I/O"
|
||||
msgstr "Prozentsatz der Zeit, in der die Festplatte mit I/O beschäftigt ist"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
msgid "Percentage of time spent in each state"
|
||||
msgstr "Prozentsatz der Zeit in jedem Zustand"
|
||||
@@ -1284,13 +1316,20 @@ msgstr "Prozess gestartet"
|
||||
msgid "Public Key"
|
||||
msgstr "Öffentlicher Schlüssel"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average queue depth"
|
||||
msgid "Queue Depth"
|
||||
msgstr "Warteschlangentiefe"
|
||||
|
||||
#: src/components/routes/settings/quiet-hours.tsx
|
||||
msgid "Quiet Hours"
|
||||
msgstr "Ruhezeiten"
|
||||
|
||||
#. Disk read
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Read"
|
||||
msgstr "Lesen"
|
||||
|
||||
@@ -1602,7 +1641,7 @@ msgstr "Diese Aktion kann nicht rückgängig gemacht werden. Dadurch werden alle
|
||||
msgid "This will permanently delete all selected records from the database."
|
||||
msgstr "Dadurch werden alle ausgewählten Datensätze dauerhaft aus der Datenbank gelöscht."
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Throughput of {extraFsName}"
|
||||
msgstr "Durchsatz von {extraFsName}"
|
||||
|
||||
@@ -1655,6 +1694,11 @@ msgstr "Empfangene Gesamtdatenmenge je Schnittstelle "
|
||||
msgid "Total data sent for each interface"
|
||||
msgstr "Gesendete Gesamtdatenmenge je Schnittstelle"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O"
|
||||
msgid "Total time spent on read/write (can exceed 100%)"
|
||||
msgstr "Gesamtzeit für Lese-/Schreibvorgänge (kann 100% überschreiten)"
|
||||
|
||||
#. placeholder {0}: data.length
|
||||
#: src/components/systemd-table/systemd-table.tsx
|
||||
msgid "Total: {0}"
|
||||
@@ -1775,7 +1819,7 @@ msgstr "Hochladen"
|
||||
msgid "Uptime"
|
||||
msgstr "Betriebszeit"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
@@ -1797,6 +1841,11 @@ msgstr "Verwendet"
|
||||
msgid "Users"
|
||||
msgstr "Benutzer"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Disk I/O utilization"
|
||||
msgid "Utilization"
|
||||
msgstr "Auslastung"
|
||||
|
||||
#: src/components/alerts-history-columns.tsx
|
||||
msgid "Value"
|
||||
msgstr "Wert"
|
||||
@@ -1806,6 +1855,7 @@ msgid "View"
|
||||
msgstr "Ansicht"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/network-sheet.tsx
|
||||
msgid "View more"
|
||||
msgstr "Mehr anzeigen"
|
||||
@@ -1858,7 +1908,9 @@ msgstr "Windows-Befehl"
|
||||
|
||||
#. Disk write
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Write"
|
||||
msgstr "Schreiben"
|
||||
|
||||
|
||||
@@ -204,10 +204,19 @@ msgstr "Average drops below <0>{value}{0}</0>"
|
||||
msgid "Average exceeds <0>{value}{0}</0>"
|
||||
msgstr "Average exceeds <0>{value}{0}</0>"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Average number of I/O operations waiting to be serviced"
|
||||
msgstr "Average number of I/O operations waiting to be serviced"
|
||||
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
msgid "Average power consumption of GPUs"
|
||||
msgstr "Average power consumption of GPUs"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "Average queue to completion time per operation"
|
||||
msgstr "Average queue to completion time per operation"
|
||||
|
||||
#: src/components/routes/system/charts/cpu-charts.tsx
|
||||
msgid "Average system-wide CPU utilization"
|
||||
msgstr "Average system-wide CPU utilization"
|
||||
@@ -439,6 +448,11 @@ msgctxt "Environment variables"
|
||||
msgid "Copy env"
|
||||
msgstr "Copy env"
|
||||
|
||||
#: src/components/alerts/alerts-sheet.tsx
|
||||
msgctxt "Copy alerts from another system"
|
||||
msgid "Copy from"
|
||||
msgstr "Copy from"
|
||||
|
||||
#: src/components/systems-table/systems-table-columns.tsx
|
||||
msgid "Copy host"
|
||||
msgstr "Copy host"
|
||||
@@ -594,12 +608,11 @@ msgstr "Disk unit"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/lib/alerts.ts
|
||||
msgid "Disk Usage"
|
||||
msgstr "Disk Usage"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Disk usage of {extraFsName}"
|
||||
msgstr "Disk usage of {extraFsName}"
|
||||
|
||||
@@ -893,6 +906,21 @@ msgstr "HTTP Method"
|
||||
msgid "HTTP method: POST, GET, or HEAD (default: POST)"
|
||||
msgstr "HTTP method: POST, GET, or HEAD (default: POST)"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average operation time (iostat await)"
|
||||
msgid "I/O Await"
|
||||
msgstr "I/O Await"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O total time spent on read/write"
|
||||
msgid "I/O Time"
|
||||
msgstr "I/O Time"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Percent of time the disk is busy with I/O"
|
||||
msgid "I/O Utilization"
|
||||
msgstr "I/O Utilization"
|
||||
|
||||
#. Context: Battery state
|
||||
#: src/lib/i18n.ts
|
||||
msgid "Idle"
|
||||
@@ -1202,6 +1230,10 @@ msgstr "Payload format"
|
||||
msgid "Per-core average utilization"
|
||||
msgstr "Per-core average utilization"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Percent of time the disk is busy with I/O"
|
||||
msgstr "Percent of time the disk is busy with I/O"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
msgid "Percentage of time spent in each state"
|
||||
msgstr "Percentage of time spent in each state"
|
||||
@@ -1279,13 +1311,20 @@ msgstr "Process started"
|
||||
msgid "Public Key"
|
||||
msgstr "Public Key"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O average queue depth"
|
||||
msgid "Queue Depth"
|
||||
msgstr "Queue Depth"
|
||||
|
||||
#: src/components/routes/settings/quiet-hours.tsx
|
||||
msgid "Quiet Hours"
|
||||
msgstr "Quiet Hours"
|
||||
|
||||
#. Disk read
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Read"
|
||||
msgstr "Read"
|
||||
|
||||
@@ -1597,7 +1636,7 @@ msgstr "This action cannot be undone. This will permanently delete all current r
|
||||
msgid "This will permanently delete all selected records from the database."
|
||||
msgstr "This will permanently delete all selected records from the database."
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgid "Throughput of {extraFsName}"
|
||||
msgstr "Throughput of {extraFsName}"
|
||||
|
||||
@@ -1650,6 +1689,11 @@ msgstr "Total data received for each interface"
|
||||
msgid "Total data sent for each interface"
|
||||
msgstr "Total data sent for each interface"
|
||||
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgctxt "Disk I/O"
|
||||
msgid "Total time spent on read/write (can exceed 100%)"
|
||||
msgstr "Total time spent on read/write (can exceed 100%)"
|
||||
|
||||
#. placeholder {0}: data.length
|
||||
#: src/components/systemd-table/systemd-table.tsx
|
||||
msgid "Total: {0}"
|
||||
@@ -1770,7 +1814,7 @@ msgstr "Upload"
|
||||
msgid "Uptime"
|
||||
msgstr "Uptime"
|
||||
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
#: src/components/routes/system/charts/gpu-charts.tsx
|
||||
@@ -1792,6 +1836,11 @@ msgstr "Used"
|
||||
msgid "Users"
|
||||
msgstr "Users"
|
||||
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
msgctxt "Disk I/O utilization"
|
||||
msgid "Utilization"
|
||||
msgstr "Utilization"
|
||||
|
||||
#: src/components/alerts-history-columns.tsx
|
||||
msgid "Value"
|
||||
msgstr "Value"
|
||||
@@ -1801,6 +1850,7 @@ msgid "View"
|
||||
msgstr "View"
|
||||
|
||||
#: src/components/routes/system/cpu-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/network-sheet.tsx
|
||||
msgid "View more"
|
||||
msgstr "View more"
|
||||
@@ -1853,7 +1903,9 @@ msgstr "Windows command"
|
||||
|
||||
#. Disk write
|
||||
#: src/components/routes/system/charts/disk-charts.tsx
|
||||
#: src/components/routes/system/charts/extra-fs-charts.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
#: src/components/routes/system/disk-io-sheet.tsx
|
||||
msgid "Write"
|
||||
msgstr "Write"
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user