mirror of
https://github.com/henrygd/beszel.git
synced 2026-04-21 12:11:49 +02:00
Merge branch 'main' into feat/network-probes
Resolved conflict in internal/records/records.go: - Upstream refactor moved deletion code to records_deletion.go and switched averaging functions from package-level globals to local variables (var row StatsRecord / params := make(dbx.Params, 1)). - Kept AverageProbeStats and rewrote it to match the new local-variable pattern. - Dropped duplicated deletion helpers from records.go (they now live in records_deletion.go). - Added "network_probe_stats" to the collections list in records_deletion.go:deleteOldSystemStats so probe stats keep the same retention policy.
This commit is contained in:
@@ -82,6 +82,9 @@ func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
|||||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||||
}
|
}
|
||||||
paths, err := getBatteryPaths()
|
paths, err := getBatteryPaths()
|
||||||
|
if err != nil {
|
||||||
|
return batteryPercent, batteryState, err
|
||||||
|
}
|
||||||
if len(paths) == 0 {
|
if len(paths) == 0 {
|
||||||
return batteryPercent, batteryState, errors.New("no batteries")
|
return batteryPercent, batteryState, errors.New("no batteries")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,6 +20,7 @@ import (
|
|||||||
"github.com/fxamacker/cbor/v2"
|
"github.com/fxamacker/cbor/v2"
|
||||||
"github.com/lxzan/gws"
|
"github.com/lxzan/gws"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
|
"golang.org/x/net/proxy"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -104,6 +105,11 @@ func (client *WebSocketClient) getOptions() *gws.ClientOption {
|
|||||||
}
|
}
|
||||||
client.hubURL.Path = path.Join(client.hubURL.Path, "api/beszel/agent-connect")
|
client.hubURL.Path = path.Join(client.hubURL.Path, "api/beszel/agent-connect")
|
||||||
|
|
||||||
|
// make sure BESZEL_AGENT_ALL_PROXY works (GWS only checks ALL_PROXY)
|
||||||
|
if val := os.Getenv("BESZEL_AGENT_ALL_PROXY"); val != "" {
|
||||||
|
os.Setenv("ALL_PROXY", val)
|
||||||
|
}
|
||||||
|
|
||||||
client.options = &gws.ClientOption{
|
client.options = &gws.ClientOption{
|
||||||
Addr: client.hubURL.String(),
|
Addr: client.hubURL.String(),
|
||||||
TlsConfig: &tls.Config{InsecureSkipVerify: true},
|
TlsConfig: &tls.Config{InsecureSkipVerify: true},
|
||||||
@@ -112,6 +118,9 @@ func (client *WebSocketClient) getOptions() *gws.ClientOption {
|
|||||||
"X-Token": []string{client.token},
|
"X-Token": []string{client.token},
|
||||||
"X-Beszel": []string{beszel.Version},
|
"X-Beszel": []string{beszel.Version},
|
||||||
},
|
},
|
||||||
|
NewDialer: func() (gws.Dialer, error) {
|
||||||
|
return proxy.FromEnvironment(), nil
|
||||||
|
},
|
||||||
}
|
}
|
||||||
return client.options
|
return client.options
|
||||||
}
|
}
|
||||||
|
|||||||
2
go.mod
2
go.mod
@@ -10,7 +10,7 @@ require (
|
|||||||
github.com/gliderlabs/ssh v0.3.8
|
github.com/gliderlabs/ssh v0.3.8
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/lxzan/gws v1.9.1
|
github.com/lxzan/gws v1.9.1
|
||||||
github.com/nicholas-fedor/shoutrrr v0.14.1
|
github.com/nicholas-fedor/shoutrrr v0.14.3
|
||||||
github.com/pocketbase/dbx v1.12.0
|
github.com/pocketbase/dbx v1.12.0
|
||||||
github.com/pocketbase/pocketbase v0.36.8
|
github.com/pocketbase/pocketbase v0.36.8
|
||||||
github.com/shirou/gopsutil/v4 v4.26.3
|
github.com/shirou/gopsutil/v4 v4.26.3
|
||||||
|
|||||||
4
go.sum
4
go.sum
@@ -85,8 +85,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
|||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.14.1 h1:6sx4cJNfNuUtD6ygGlB0dqcCQ+abfsUh+b+6jgujf6A=
|
github.com/nicholas-fedor/shoutrrr v0.14.3 h1:aBX2iw9a7jl5wfHd3bi9LnS5ucoYIy6KcLH9XVF+gig=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.14.1/go.mod h1:U7IywBkLpBV7rgn8iLbQ9/LklJG1gm24bFv5cXXsDKs=
|
github.com/nicholas-fedor/shoutrrr v0.14.3/go.mod h1:U7IywBkLpBV7rgn8iLbQ9/LklJG1gm24bFv5cXXsDKs=
|
||||||
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
|
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
|
||||||
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
|
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
|
||||||
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
|
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
|
||||||
|
|||||||
42
internal/hub/server.go
Normal file
42
internal/hub/server.go
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
package hub
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PublicAppInfo defines the structure of the public app information that will be injected into the HTML
|
||||||
|
type PublicAppInfo struct {
|
||||||
|
BASE_PATH string
|
||||||
|
HUB_VERSION string
|
||||||
|
HUB_URL string
|
||||||
|
OAUTH_DISABLE_POPUP bool `json:"OAUTH_DISABLE_POPUP,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// modifyIndexHTML injects the public app information into the index.html content
|
||||||
|
func modifyIndexHTML(hub *Hub, html []byte) string {
|
||||||
|
info := getPublicAppInfo(hub)
|
||||||
|
content, err := json.Marshal(info)
|
||||||
|
if err != nil {
|
||||||
|
return string(html)
|
||||||
|
}
|
||||||
|
htmlContent := strings.ReplaceAll(string(html), "./", info.BASE_PATH)
|
||||||
|
return strings.Replace(htmlContent, "\"{info}\"", string(content), 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getPublicAppInfo(hub *Hub) PublicAppInfo {
|
||||||
|
parsedURL, _ := url.Parse(hub.appURL)
|
||||||
|
info := PublicAppInfo{
|
||||||
|
BASE_PATH: strings.TrimSuffix(parsedURL.Path, "/") + "/",
|
||||||
|
HUB_VERSION: beszel.Version,
|
||||||
|
HUB_URL: hub.appURL,
|
||||||
|
}
|
||||||
|
if val, _ := utils.GetEnv("OAUTH_DISABLE_POPUP"); val == "true" {
|
||||||
|
info.OAUTH_DISABLE_POPUP = true
|
||||||
|
}
|
||||||
|
return info
|
||||||
|
}
|
||||||
@@ -10,8 +10,6 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
|
||||||
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
"github.com/pocketbase/pocketbase/tools/osutils"
|
"github.com/pocketbase/pocketbase/tools/osutils"
|
||||||
)
|
)
|
||||||
@@ -38,7 +36,7 @@ func (rm *responseModifier) RoundTrip(req *http.Request) (*http.Response, error)
|
|||||||
}
|
}
|
||||||
resp.Body.Close()
|
resp.Body.Close()
|
||||||
// Create a new response with the modified body
|
// Create a new response with the modified body
|
||||||
modifiedBody := rm.modifyHTML(string(body))
|
modifiedBody := modifyIndexHTML(rm.hub, body)
|
||||||
resp.Body = io.NopCloser(strings.NewReader(modifiedBody))
|
resp.Body = io.NopCloser(strings.NewReader(modifiedBody))
|
||||||
resp.ContentLength = int64(len(modifiedBody))
|
resp.ContentLength = int64(len(modifiedBody))
|
||||||
resp.Header.Set("Content-Length", fmt.Sprintf("%d", len(modifiedBody)))
|
resp.Header.Set("Content-Length", fmt.Sprintf("%d", len(modifiedBody)))
|
||||||
@@ -46,19 +44,6 @@ func (rm *responseModifier) RoundTrip(req *http.Request) (*http.Response, error)
|
|||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rm *responseModifier) modifyHTML(html string) string {
|
|
||||||
parsedURL, err := url.Parse(rm.hub.appURL)
|
|
||||||
if err != nil {
|
|
||||||
return html
|
|
||||||
}
|
|
||||||
// fix base paths in html if using subpath
|
|
||||||
basePath := strings.TrimSuffix(parsedURL.Path, "/") + "/"
|
|
||||||
html = strings.ReplaceAll(html, "./", basePath)
|
|
||||||
html = strings.Replace(html, "{{V}}", beszel.Version, 1)
|
|
||||||
html = strings.Replace(html, "{{HUB_URL}}", rm.hub.appURL, 1)
|
|
||||||
return html
|
|
||||||
}
|
|
||||||
|
|
||||||
// startServer sets up the development server for Beszel
|
// startServer sets up the development server for Beszel
|
||||||
func (h *Hub) startServer(se *core.ServeEvent) error {
|
func (h *Hub) startServer(se *core.ServeEvent) error {
|
||||||
proxy := httputil.NewSingleHostReverseProxy(&url.URL{
|
proxy := httputil.NewSingleHostReverseProxy(&url.URL{
|
||||||
|
|||||||
@@ -5,10 +5,8 @@ package hub
|
|||||||
import (
|
import (
|
||||||
"io/fs"
|
"io/fs"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
|
||||||
"github.com/henrygd/beszel/internal/hub/utils"
|
"github.com/henrygd/beszel/internal/hub/utils"
|
||||||
"github.com/henrygd/beszel/internal/site"
|
"github.com/henrygd/beszel/internal/site"
|
||||||
|
|
||||||
@@ -18,17 +16,8 @@ import (
|
|||||||
|
|
||||||
// startServer sets up the production server for Beszel
|
// startServer sets up the production server for Beszel
|
||||||
func (h *Hub) startServer(se *core.ServeEvent) error {
|
func (h *Hub) startServer(se *core.ServeEvent) error {
|
||||||
// parse app url
|
|
||||||
parsedURL, err := url.Parse(h.appURL)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// fix base paths in html if using subpath
|
|
||||||
basePath := strings.TrimSuffix(parsedURL.Path, "/") + "/"
|
|
||||||
indexFile, _ := fs.ReadFile(site.DistDirFS, "index.html")
|
indexFile, _ := fs.ReadFile(site.DistDirFS, "index.html")
|
||||||
html := strings.ReplaceAll(string(indexFile), "./", basePath)
|
html := modifyIndexHTML(h, indexFile)
|
||||||
html = strings.Replace(html, "{{V}}", beszel.Version, 1)
|
|
||||||
html = strings.Replace(html, "{{HUB_URL}}", h.appURL, 1)
|
|
||||||
// set up static asset serving
|
// set up static asset serving
|
||||||
staticPaths := [2]string{"/static/", "/assets/"}
|
staticPaths := [2]string{"/static/", "/assets/"}
|
||||||
serveStatic := apis.Static(site.DistDirFS, false)
|
serveStatic := apis.Static(site.DistDirFS, false)
|
||||||
|
|||||||
@@ -8,7 +8,6 @@ import (
|
|||||||
"hash/fnv"
|
"hash/fnv"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net"
|
"net"
|
||||||
"slices"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
@@ -368,12 +367,16 @@ func (sys *System) HasUser(app core.App, user *core.Record) bool {
|
|||||||
if v, _ := utils.GetEnv("SHARE_ALL_SYSTEMS"); v == "true" {
|
if v, _ := utils.GetEnv("SHARE_ALL_SYSTEMS"); v == "true" {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
record, err := sys.getRecord(app)
|
var recordData = struct {
|
||||||
if err != nil {
|
Users string
|
||||||
|
}{}
|
||||||
|
err := app.DB().NewQuery("SELECT users FROM systems WHERE id={:id}").
|
||||||
|
Bind(dbx.Params{"id": sys.Id}).
|
||||||
|
One(&recordData)
|
||||||
|
if err != nil || recordData.Users == "" {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
users := record.GetStringSlice("users")
|
return strings.Contains(recordData.Users, user.Id)
|
||||||
return slices.Contains(users, user.Id)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// setDown marks a system as down in the database.
|
// setDown marks a system as down in the database.
|
||||||
|
|||||||
@@ -436,7 +436,7 @@ func TestHasUser(t *testing.T) {
|
|||||||
user2, err := tests.CreateUser(hub, "user2@test.com", "password123")
|
user2, err := tests.CreateUser(hub, "user2@test.com", "password123")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
record, err := tests.CreateRecord(hub, "systems", map[string]any{
|
systemRecord, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||||
"name": "has-user-test",
|
"name": "has-user-test",
|
||||||
"host": "127.0.0.1",
|
"host": "127.0.0.1",
|
||||||
"port": "33914",
|
"port": "33914",
|
||||||
@@ -444,7 +444,7 @@ func TestHasUser(t *testing.T) {
|
|||||||
})
|
})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
sys, err := sm.GetSystemFromStore(record.Id)
|
sys, err := sm.GetSystemFromStore(systemRecord.Id)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
t.Run("user in list returns true", func(t *testing.T) {
|
t.Run("user in list returns true", func(t *testing.T) {
|
||||||
@@ -468,4 +468,13 @@ func TestHasUser(t *testing.T) {
|
|||||||
t.Setenv("BESZEL_HUB_SHARE_ALL_SYSTEMS", "true")
|
t.Setenv("BESZEL_HUB_SHARE_ALL_SYSTEMS", "true")
|
||||||
assert.True(t, sys.HasUser(hub, user2))
|
assert.True(t, sys.HasUser(hub, user2))
|
||||||
})
|
})
|
||||||
|
|
||||||
|
t.Run("additional user works", func(t *testing.T) {
|
||||||
|
assert.False(t, sys.HasUser(hub, user2))
|
||||||
|
systemRecord.Set("users", []string{user1.Id, user2.Id})
|
||||||
|
err = hub.Save(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, sys.HasUser(hub, user1))
|
||||||
|
assert.True(t, sys.HasUser(hub, user2))
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,10 +3,8 @@ package records
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
|
||||||
"log"
|
"log"
|
||||||
"math"
|
"math"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/container"
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
@@ -39,16 +37,6 @@ type StatsRecord struct {
|
|||||||
Stats []byte `db:"stats"`
|
Stats []byte `db:"stats"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// global variables for reusing allocations
|
|
||||||
var (
|
|
||||||
statsRecord StatsRecord
|
|
||||||
containerStats []container.Stats
|
|
||||||
sumStats system.Stats
|
|
||||||
tempStats system.Stats
|
|
||||||
queryParams = make(dbx.Params, 1)
|
|
||||||
containerSums = make(map[string]*container.Stats)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Create longer records by averaging shorter records
|
// Create longer records by averaging shorter records
|
||||||
func (rm *RecordManager) CreateLongerRecords() {
|
func (rm *RecordManager) CreateLongerRecords() {
|
||||||
// start := time.Now()
|
// start := time.Now()
|
||||||
@@ -168,41 +156,47 @@ func (rm *RecordManager) CreateLongerRecords() {
|
|||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
|
||||||
statsRecord.Stats = statsRecord.Stats[:0]
|
|
||||||
|
|
||||||
// log.Println("finished creating longer records", "time (ms)", time.Since(start).Milliseconds())
|
// log.Println("finished creating longer records", "time (ms)", time.Since(start).Milliseconds())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Calculate the average stats of a list of system_stats records without reflect
|
// Calculate the average stats of a list of system_stats records without reflect
|
||||||
func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *system.Stats {
|
func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *system.Stats {
|
||||||
// Clear/reset global structs for reuse
|
stats := make([]system.Stats, 0, len(records))
|
||||||
sumStats = system.Stats{}
|
var row StatsRecord
|
||||||
tempStats = system.Stats{}
|
params := make(dbx.Params, 1)
|
||||||
sum := &sumStats
|
for _, rec := range records {
|
||||||
stats := &tempStats
|
row.Stats = row.Stats[:0]
|
||||||
|
params["id"] = rec.Id
|
||||||
|
db.NewQuery("SELECT stats FROM system_stats WHERE id = {:id}").Bind(params).One(&row)
|
||||||
|
var s system.Stats
|
||||||
|
if err := json.Unmarshal(row.Stats, &s); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
stats = append(stats, s)
|
||||||
|
}
|
||||||
|
result := AverageSystemStatsSlice(stats)
|
||||||
|
return &result
|
||||||
|
}
|
||||||
|
|
||||||
|
// AverageSystemStatsSlice computes the average of a slice of system stats.
|
||||||
|
func AverageSystemStatsSlice(records []system.Stats) system.Stats {
|
||||||
|
var sum system.Stats
|
||||||
|
count := float64(len(records))
|
||||||
|
if count == 0 {
|
||||||
|
return sum
|
||||||
|
}
|
||||||
|
|
||||||
// necessary because uint8 is not big enough for the sum
|
// necessary because uint8 is not big enough for the sum
|
||||||
batterySum := 0
|
batterySum := 0
|
||||||
// accumulate per-core usage across records
|
// accumulate per-core usage across records
|
||||||
var cpuCoresSums []uint64
|
var cpuCoresSums []uint64
|
||||||
// accumulate cpu breakdown [user, system, iowait, steal, idle]
|
// accumulate cpu breakdown [user, system, iowait, steal, idle]
|
||||||
var cpuBreakdownSums []float64
|
var cpuBreakdownSums []float64
|
||||||
|
|
||||||
count := float64(len(records))
|
|
||||||
tempCount := float64(0)
|
tempCount := float64(0)
|
||||||
|
|
||||||
// Accumulate totals
|
// Accumulate totals
|
||||||
for _, record := range records {
|
for i := range records {
|
||||||
id := record.Id
|
stats := &records[i]
|
||||||
// clear global statsRecord for reuse
|
|
||||||
statsRecord.Stats = statsRecord.Stats[:0]
|
|
||||||
// reset tempStats each iteration to avoid omitzero fields retaining stale values
|
|
||||||
*stats = system.Stats{}
|
|
||||||
|
|
||||||
queryParams["id"] = id
|
|
||||||
db.NewQuery("SELECT stats FROM system_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
|
|
||||||
if err := json.Unmarshal(statsRecord.Stats, stats); err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
sum.Cpu += stats.Cpu
|
sum.Cpu += stats.Cpu
|
||||||
// accumulate cpu time breakdowns if present
|
// accumulate cpu time breakdowns if present
|
||||||
@@ -210,8 +204,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
if len(cpuBreakdownSums) < len(stats.CpuBreakdown) {
|
if len(cpuBreakdownSums) < len(stats.CpuBreakdown) {
|
||||||
cpuBreakdownSums = append(cpuBreakdownSums, make([]float64, len(stats.CpuBreakdown)-len(cpuBreakdownSums))...)
|
cpuBreakdownSums = append(cpuBreakdownSums, make([]float64, len(stats.CpuBreakdown)-len(cpuBreakdownSums))...)
|
||||||
}
|
}
|
||||||
for i, v := range stats.CpuBreakdown {
|
for j, v := range stats.CpuBreakdown {
|
||||||
cpuBreakdownSums[i] += v
|
cpuBreakdownSums[j] += v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
sum.Mem += stats.Mem
|
sum.Mem += stats.Mem
|
||||||
@@ -247,8 +241,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
// extend slices to accommodate core count
|
// extend slices to accommodate core count
|
||||||
cpuCoresSums = append(cpuCoresSums, make([]uint64, len(stats.CpuCoresUsage)-len(cpuCoresSums))...)
|
cpuCoresSums = append(cpuCoresSums, make([]uint64, len(stats.CpuCoresUsage)-len(cpuCoresSums))...)
|
||||||
}
|
}
|
||||||
for i, v := range stats.CpuCoresUsage {
|
for j, v := range stats.CpuCoresUsage {
|
||||||
cpuCoresSums[i] += uint64(v)
|
cpuCoresSums[j] += uint64(v)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Set peak values
|
// Set peak values
|
||||||
@@ -348,109 +342,107 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Compute averages in place
|
// Compute averages
|
||||||
if count > 0 {
|
sum.Cpu = twoDecimals(sum.Cpu / count)
|
||||||
sum.Cpu = twoDecimals(sum.Cpu / count)
|
sum.Mem = twoDecimals(sum.Mem / count)
|
||||||
sum.Mem = twoDecimals(sum.Mem / count)
|
sum.MemUsed = twoDecimals(sum.MemUsed / count)
|
||||||
sum.MemUsed = twoDecimals(sum.MemUsed / count)
|
sum.MemPct = twoDecimals(sum.MemPct / count)
|
||||||
sum.MemPct = twoDecimals(sum.MemPct / count)
|
sum.MemBuffCache = twoDecimals(sum.MemBuffCache / count)
|
||||||
sum.MemBuffCache = twoDecimals(sum.MemBuffCache / count)
|
sum.MemZfsArc = twoDecimals(sum.MemZfsArc / count)
|
||||||
sum.MemZfsArc = twoDecimals(sum.MemZfsArc / count)
|
sum.Swap = twoDecimals(sum.Swap / count)
|
||||||
sum.Swap = twoDecimals(sum.Swap / count)
|
sum.SwapUsed = twoDecimals(sum.SwapUsed / count)
|
||||||
sum.SwapUsed = twoDecimals(sum.SwapUsed / count)
|
sum.DiskTotal = twoDecimals(sum.DiskTotal / count)
|
||||||
sum.DiskTotal = twoDecimals(sum.DiskTotal / count)
|
sum.DiskUsed = twoDecimals(sum.DiskUsed / count)
|
||||||
sum.DiskUsed = twoDecimals(sum.DiskUsed / count)
|
sum.DiskPct = twoDecimals(sum.DiskPct / count)
|
||||||
sum.DiskPct = twoDecimals(sum.DiskPct / count)
|
sum.DiskReadPs = twoDecimals(sum.DiskReadPs / count)
|
||||||
sum.DiskReadPs = twoDecimals(sum.DiskReadPs / count)
|
sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
|
||||||
sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
|
sum.DiskIO[0] = sum.DiskIO[0] / uint64(count)
|
||||||
sum.DiskIO[0] = sum.DiskIO[0] / uint64(count)
|
sum.DiskIO[1] = sum.DiskIO[1] / uint64(count)
|
||||||
sum.DiskIO[1] = sum.DiskIO[1] / uint64(count)
|
for i := range sum.DiskIoStats {
|
||||||
for i := range sum.DiskIoStats {
|
sum.DiskIoStats[i] = twoDecimals(sum.DiskIoStats[i] / count)
|
||||||
sum.DiskIoStats[i] = twoDecimals(sum.DiskIoStats[i] / count)
|
}
|
||||||
}
|
sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
|
||||||
sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
|
sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
|
||||||
sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
|
sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
|
||||||
sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
|
sum.LoadAvg[1] = twoDecimals(sum.LoadAvg[1] / count)
|
||||||
sum.LoadAvg[1] = twoDecimals(sum.LoadAvg[1] / count)
|
sum.LoadAvg[2] = twoDecimals(sum.LoadAvg[2] / count)
|
||||||
sum.LoadAvg[2] = twoDecimals(sum.LoadAvg[2] / count)
|
sum.Bandwidth[0] = sum.Bandwidth[0] / uint64(count)
|
||||||
sum.Bandwidth[0] = sum.Bandwidth[0] / uint64(count)
|
sum.Bandwidth[1] = sum.Bandwidth[1] / uint64(count)
|
||||||
sum.Bandwidth[1] = sum.Bandwidth[1] / uint64(count)
|
sum.Battery[0] = uint8(batterySum / int(count))
|
||||||
sum.Battery[0] = uint8(batterySum / int(count))
|
|
||||||
|
|
||||||
// Average network interfaces
|
// Average network interfaces
|
||||||
if sum.NetworkInterfaces != nil {
|
if sum.NetworkInterfaces != nil {
|
||||||
for key := range sum.NetworkInterfaces {
|
for key := range sum.NetworkInterfaces {
|
||||||
sum.NetworkInterfaces[key] = [4]uint64{
|
sum.NetworkInterfaces[key] = [4]uint64{
|
||||||
sum.NetworkInterfaces[key][0] / uint64(count),
|
sum.NetworkInterfaces[key][0] / uint64(count),
|
||||||
sum.NetworkInterfaces[key][1] / uint64(count),
|
sum.NetworkInterfaces[key][1] / uint64(count),
|
||||||
sum.NetworkInterfaces[key][2],
|
sum.NetworkInterfaces[key][2],
|
||||||
sum.NetworkInterfaces[key][3],
|
sum.NetworkInterfaces[key][3],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Average temperatures
|
||||||
|
if sum.Temperatures != nil && tempCount > 0 {
|
||||||
|
for key := range sum.Temperatures {
|
||||||
|
sum.Temperatures[key] = twoDecimals(sum.Temperatures[key] / tempCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Average extra filesystem stats
|
||||||
|
if sum.ExtraFs != nil {
|
||||||
|
for key := range sum.ExtraFs {
|
||||||
|
fs := sum.ExtraFs[key]
|
||||||
|
fs.DiskTotal = twoDecimals(fs.DiskTotal / count)
|
||||||
|
fs.DiskUsed = twoDecimals(fs.DiskUsed / count)
|
||||||
|
fs.DiskWritePs = twoDecimals(fs.DiskWritePs / count)
|
||||||
|
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
|
||||||
|
fs.DiskReadBytes = fs.DiskReadBytes / uint64(count)
|
||||||
|
fs.DiskWriteBytes = fs.DiskWriteBytes / uint64(count)
|
||||||
|
for i := range fs.DiskIoStats {
|
||||||
|
fs.DiskIoStats[i] = twoDecimals(fs.DiskIoStats[i] / count)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Average GPU data
|
||||||
|
if sum.GPUData != nil {
|
||||||
|
for id := range sum.GPUData {
|
||||||
|
gpu := sum.GPUData[id]
|
||||||
|
gpu.Temperature = twoDecimals(gpu.Temperature / count)
|
||||||
|
gpu.MemoryUsed = twoDecimals(gpu.MemoryUsed / count)
|
||||||
|
gpu.MemoryTotal = twoDecimals(gpu.MemoryTotal / count)
|
||||||
|
gpu.Usage = twoDecimals(gpu.Usage / count)
|
||||||
|
gpu.Power = twoDecimals(gpu.Power / count)
|
||||||
|
gpu.Count = twoDecimals(gpu.Count / count)
|
||||||
|
|
||||||
|
if gpu.Engines != nil {
|
||||||
|
for engineKey := range gpu.Engines {
|
||||||
|
gpu.Engines[engineKey] = twoDecimals(gpu.Engines[engineKey] / count)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
sum.GPUData[id] = gpu
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Average temperatures
|
// Average per-core usage
|
||||||
if sum.Temperatures != nil && tempCount > 0 {
|
if len(cpuCoresSums) > 0 {
|
||||||
for key := range sum.Temperatures {
|
avg := make(system.Uint8Slice, len(cpuCoresSums))
|
||||||
sum.Temperatures[key] = twoDecimals(sum.Temperatures[key] / tempCount)
|
for i := range cpuCoresSums {
|
||||||
}
|
v := math.Round(float64(cpuCoresSums[i]) / count)
|
||||||
|
avg[i] = uint8(v)
|
||||||
}
|
}
|
||||||
|
sum.CpuCoresUsage = avg
|
||||||
|
}
|
||||||
|
|
||||||
// Average extra filesystem stats
|
// Average CPU breakdown
|
||||||
if sum.ExtraFs != nil {
|
if len(cpuBreakdownSums) > 0 {
|
||||||
for key := range sum.ExtraFs {
|
avg := make([]float64, len(cpuBreakdownSums))
|
||||||
fs := sum.ExtraFs[key]
|
for i := range cpuBreakdownSums {
|
||||||
fs.DiskTotal = twoDecimals(fs.DiskTotal / count)
|
avg[i] = twoDecimals(cpuBreakdownSums[i] / count)
|
||||||
fs.DiskUsed = twoDecimals(fs.DiskUsed / count)
|
|
||||||
fs.DiskWritePs = twoDecimals(fs.DiskWritePs / count)
|
|
||||||
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
|
|
||||||
fs.DiskReadBytes = fs.DiskReadBytes / uint64(count)
|
|
||||||
fs.DiskWriteBytes = fs.DiskWriteBytes / uint64(count)
|
|
||||||
for i := range fs.DiskIoStats {
|
|
||||||
fs.DiskIoStats[i] = twoDecimals(fs.DiskIoStats[i] / count)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Average GPU data
|
|
||||||
if sum.GPUData != nil {
|
|
||||||
for id := range sum.GPUData {
|
|
||||||
gpu := sum.GPUData[id]
|
|
||||||
gpu.Temperature = twoDecimals(gpu.Temperature / count)
|
|
||||||
gpu.MemoryUsed = twoDecimals(gpu.MemoryUsed / count)
|
|
||||||
gpu.MemoryTotal = twoDecimals(gpu.MemoryTotal / count)
|
|
||||||
gpu.Usage = twoDecimals(gpu.Usage / count)
|
|
||||||
gpu.Power = twoDecimals(gpu.Power / count)
|
|
||||||
gpu.Count = twoDecimals(gpu.Count / count)
|
|
||||||
|
|
||||||
if gpu.Engines != nil {
|
|
||||||
for engineKey := range gpu.Engines {
|
|
||||||
gpu.Engines[engineKey] = twoDecimals(gpu.Engines[engineKey] / count)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sum.GPUData[id] = gpu
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Average per-core usage
|
|
||||||
if len(cpuCoresSums) > 0 {
|
|
||||||
avg := make(system.Uint8Slice, len(cpuCoresSums))
|
|
||||||
for i := range cpuCoresSums {
|
|
||||||
v := math.Round(float64(cpuCoresSums[i]) / count)
|
|
||||||
avg[i] = uint8(v)
|
|
||||||
}
|
|
||||||
sum.CpuCoresUsage = avg
|
|
||||||
}
|
|
||||||
|
|
||||||
// Average CPU breakdown
|
|
||||||
if len(cpuBreakdownSums) > 0 {
|
|
||||||
avg := make([]float64, len(cpuBreakdownSums))
|
|
||||||
for i := range cpuBreakdownSums {
|
|
||||||
avg[i] = twoDecimals(cpuBreakdownSums[i] / count)
|
|
||||||
}
|
|
||||||
sum.CpuBreakdown = avg
|
|
||||||
}
|
}
|
||||||
|
sum.CpuBreakdown = avg
|
||||||
}
|
}
|
||||||
|
|
||||||
return sum
|
return sum
|
||||||
@@ -458,29 +450,33 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
|
|
||||||
// Calculate the average stats of a list of container_stats records
|
// Calculate the average stats of a list of container_stats records
|
||||||
func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds) []container.Stats {
|
func (rm *RecordManager) AverageContainerStats(db dbx.Builder, records RecordIds) []container.Stats {
|
||||||
// Clear global map for reuse
|
allStats := make([][]container.Stats, 0, len(records))
|
||||||
for k := range containerSums {
|
var row StatsRecord
|
||||||
delete(containerSums, k)
|
params := make(dbx.Params, 1)
|
||||||
}
|
for _, rec := range records {
|
||||||
sums := containerSums
|
row.Stats = row.Stats[:0]
|
||||||
count := float64(len(records))
|
params["id"] = rec.Id
|
||||||
|
db.NewQuery("SELECT stats FROM container_stats WHERE id = {:id}").Bind(params).One(&row)
|
||||||
for i := range records {
|
var cs []container.Stats
|
||||||
id := records[i].Id
|
if err := json.Unmarshal(row.Stats, &cs); err != nil {
|
||||||
// clear global statsRecord for reuse
|
|
||||||
statsRecord.Stats = statsRecord.Stats[:0]
|
|
||||||
// must set to nil (not [:0]) to avoid json.Unmarshal reusing backing array
|
|
||||||
// which causes omitzero fields to inherit stale values from previous iterations
|
|
||||||
containerStats = nil
|
|
||||||
|
|
||||||
queryParams["id"] = id
|
|
||||||
db.NewQuery("SELECT stats FROM container_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
|
|
||||||
|
|
||||||
if err := json.Unmarshal(statsRecord.Stats, &containerStats); err != nil {
|
|
||||||
return []container.Stats{}
|
return []container.Stats{}
|
||||||
}
|
}
|
||||||
|
allStats = append(allStats, cs)
|
||||||
|
}
|
||||||
|
return AverageContainerStatsSlice(allStats)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AverageContainerStatsSlice computes the average of container stats across multiple time periods.
|
||||||
|
func AverageContainerStatsSlice(records [][]container.Stats) []container.Stats {
|
||||||
|
if len(records) == 0 {
|
||||||
|
return []container.Stats{}
|
||||||
|
}
|
||||||
|
sums := make(map[string]*container.Stats)
|
||||||
|
count := float64(len(records))
|
||||||
|
|
||||||
|
for _, containerStats := range records {
|
||||||
for i := range containerStats {
|
for i := range containerStats {
|
||||||
stat := containerStats[i]
|
stat := &containerStats[i]
|
||||||
if _, ok := sums[stat.Name]; !ok {
|
if _, ok := sums[stat.Name]; !ok {
|
||||||
sums[stat.Name] = &container.Stats{Name: stat.Name}
|
sums[stat.Name] = &container.Stats{Name: stat.Name}
|
||||||
}
|
}
|
||||||
@@ -521,15 +517,14 @@ func (rm *RecordManager) AverageProbeStats(db dbx.Builder, records RecordIds) ma
|
|||||||
}
|
}
|
||||||
|
|
||||||
sums := make(map[string]*probeValues)
|
sums := make(map[string]*probeValues)
|
||||||
var rawStats map[string]map[string]float64
|
var row StatsRecord
|
||||||
|
params := make(dbx.Params, 1)
|
||||||
for _, record := range records {
|
for _, rec := range records {
|
||||||
statsRecord.Stats = statsRecord.Stats[:0]
|
row.Stats = row.Stats[:0]
|
||||||
rawStats = nil
|
params["id"] = rec.Id
|
||||||
|
db.NewQuery("SELECT stats FROM network_probe_stats WHERE id = {:id}").Bind(params).One(&row)
|
||||||
queryParams["id"] = record.Id
|
var rawStats map[string]map[string]float64
|
||||||
db.NewQuery("SELECT stats FROM network_probe_stats WHERE id = {:id}").Bind(queryParams).One(&statsRecord)
|
if err := json.Unmarshal(row.Stats, &rawStats); err != nil {
|
||||||
if err := json.Unmarshal(statsRecord.Stats, &rawStats); err != nil {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -570,133 +565,6 @@ func (rm *RecordManager) AverageProbeStats(db dbx.Builder, records RecordIds) ma
|
|||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
// Delete old records
|
|
||||||
func (rm *RecordManager) DeleteOldRecords() {
|
|
||||||
rm.app.RunInTransaction(func(txApp core.App) error {
|
|
||||||
err := deleteOldSystemStats(txApp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = deleteOldContainerRecords(txApp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = deleteOldSystemdServiceRecords(txApp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = deleteOldAlertsHistory(txApp, 200, 250)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = deleteOldQuietHours(txApp)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete old alerts history records
|
|
||||||
func deleteOldAlertsHistory(app core.App, countToKeep, countBeforeDeletion int) error {
|
|
||||||
db := app.DB()
|
|
||||||
var users []struct {
|
|
||||||
Id string `db:"user"`
|
|
||||||
}
|
|
||||||
err := db.NewQuery("SELECT user, COUNT(*) as count FROM alerts_history GROUP BY user HAVING count > {:countBeforeDeletion}").Bind(dbx.Params{"countBeforeDeletion": countBeforeDeletion}).All(&users)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, user := range users {
|
|
||||||
_, err = db.NewQuery("DELETE FROM alerts_history WHERE user = {:user} AND id NOT IN (SELECT id FROM alerts_history WHERE user = {:user} ORDER BY created DESC LIMIT {:countToKeep})").Bind(dbx.Params{"user": user.Id, "countToKeep": countToKeep}).Execute()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deletes system_stats records older than what is displayed in the UI
|
|
||||||
func deleteOldSystemStats(app core.App) error {
|
|
||||||
// Collections to process
|
|
||||||
collections := [3]string{"system_stats", "container_stats", "network_probe_stats"}
|
|
||||||
|
|
||||||
// Record types and their retention periods
|
|
||||||
type RecordDeletionData struct {
|
|
||||||
recordType string
|
|
||||||
retention time.Duration
|
|
||||||
}
|
|
||||||
recordData := []RecordDeletionData{
|
|
||||||
{recordType: "1m", retention: time.Hour}, // 1 hour
|
|
||||||
{recordType: "10m", retention: 12 * time.Hour}, // 12 hours
|
|
||||||
{recordType: "20m", retention: 24 * time.Hour}, // 1 day
|
|
||||||
{recordType: "120m", retention: 7 * 24 * time.Hour}, // 7 days
|
|
||||||
{recordType: "480m", retention: 30 * 24 * time.Hour}, // 30 days
|
|
||||||
}
|
|
||||||
|
|
||||||
now := time.Now().UTC()
|
|
||||||
|
|
||||||
for _, collection := range collections {
|
|
||||||
// Build the WHERE clause
|
|
||||||
var conditionParts []string
|
|
||||||
var params dbx.Params = make(map[string]any)
|
|
||||||
for i := range recordData {
|
|
||||||
rd := recordData[i]
|
|
||||||
// Create parameterized condition for this record type
|
|
||||||
dateParam := fmt.Sprintf("date%d", i)
|
|
||||||
conditionParts = append(conditionParts, fmt.Sprintf("(type = '%s' AND created < {:%s})", rd.recordType, dateParam))
|
|
||||||
params[dateParam] = now.Add(-rd.retention)
|
|
||||||
}
|
|
||||||
// Combine conditions with OR
|
|
||||||
conditionStr := strings.Join(conditionParts, " OR ")
|
|
||||||
// Construct and execute the full raw query
|
|
||||||
rawQuery := fmt.Sprintf("DELETE FROM %s WHERE %s", collection, conditionStr)
|
|
||||||
if _, err := app.DB().NewQuery(rawQuery).Bind(params).Execute(); err != nil {
|
|
||||||
return fmt.Errorf("failed to delete from %s: %v", collection, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deletes systemd service records that haven't been updated in the last 20 minutes
|
|
||||||
func deleteOldSystemdServiceRecords(app core.App) error {
|
|
||||||
now := time.Now().UTC()
|
|
||||||
twentyMinutesAgo := now.Add(-20 * time.Minute)
|
|
||||||
|
|
||||||
// Delete systemd service records where updated < twentyMinutesAgo
|
|
||||||
_, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to delete old systemd service records: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deletes container records that haven't been updated in the last 10 minutes
|
|
||||||
func deleteOldContainerRecords(app core.App) error {
|
|
||||||
now := time.Now().UTC()
|
|
||||||
tenMinutesAgo := now.Add(-10 * time.Minute)
|
|
||||||
|
|
||||||
// Delete container records where updated < tenMinutesAgo
|
|
||||||
_, err := app.DB().NewQuery("DELETE FROM containers WHERE updated < {:updated}").Bind(dbx.Params{"updated": tenMinutesAgo.UnixMilli()}).Execute()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to delete old container records: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deletes old quiet hours records where end date has passed
|
|
||||||
func deleteOldQuietHours(app core.App) error {
|
|
||||||
now := time.Now().UTC()
|
|
||||||
_, err := app.DB().NewQuery("DELETE FROM quiet_hours WHERE type = 'one-time' AND end < {:now}").Bind(dbx.Params{"now": now}).Execute()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Round float to two decimals */
|
/* Round float to two decimals */
|
||||||
func twoDecimals(value float64) float64 {
|
func twoDecimals(value float64) float64 {
|
||||||
return math.Round(value*100) / 100
|
return math.Round(value*100) / 100
|
||||||
|
|||||||
820
internal/records/records_averaging_test.go
Normal file
820
internal/records/records_averaging_test.go
Normal file
@@ -0,0 +1,820 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package records_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/henrygd/beszel/internal/records"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_Empty(t *testing.T) {
|
||||||
|
result := records.AverageSystemStatsSlice(nil)
|
||||||
|
assert.Equal(t, system.Stats{}, result)
|
||||||
|
|
||||||
|
result = records.AverageSystemStatsSlice([]system.Stats{})
|
||||||
|
assert.Equal(t, system.Stats{}, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_SingleRecord(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 45.67,
|
||||||
|
Mem: 16.0,
|
||||||
|
MemUsed: 8.5,
|
||||||
|
MemPct: 53.12,
|
||||||
|
MemBuffCache: 2.0,
|
||||||
|
Swap: 4.0,
|
||||||
|
SwapUsed: 1.0,
|
||||||
|
DiskTotal: 500.0,
|
||||||
|
DiskUsed: 250.0,
|
||||||
|
DiskPct: 50.0,
|
||||||
|
DiskReadPs: 100.5,
|
||||||
|
DiskWritePs: 200.75,
|
||||||
|
NetworkSent: 10.5,
|
||||||
|
NetworkRecv: 20.25,
|
||||||
|
LoadAvg: [3]float64{1.5, 2.0, 3.5},
|
||||||
|
Bandwidth: [2]uint64{1000, 2000},
|
||||||
|
DiskIO: [2]uint64{500, 600},
|
||||||
|
Battery: [2]uint8{80, 1},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 45.67, result.Cpu)
|
||||||
|
assert.Equal(t, 16.0, result.Mem)
|
||||||
|
assert.Equal(t, 8.5, result.MemUsed)
|
||||||
|
assert.Equal(t, 53.12, result.MemPct)
|
||||||
|
assert.Equal(t, 2.0, result.MemBuffCache)
|
||||||
|
assert.Equal(t, 4.0, result.Swap)
|
||||||
|
assert.Equal(t, 1.0, result.SwapUsed)
|
||||||
|
assert.Equal(t, 500.0, result.DiskTotal)
|
||||||
|
assert.Equal(t, 250.0, result.DiskUsed)
|
||||||
|
assert.Equal(t, 50.0, result.DiskPct)
|
||||||
|
assert.Equal(t, 100.5, result.DiskReadPs)
|
||||||
|
assert.Equal(t, 200.75, result.DiskWritePs)
|
||||||
|
assert.Equal(t, 10.5, result.NetworkSent)
|
||||||
|
assert.Equal(t, 20.25, result.NetworkRecv)
|
||||||
|
assert.Equal(t, [3]float64{1.5, 2.0, 3.5}, result.LoadAvg)
|
||||||
|
assert.Equal(t, [2]uint64{1000, 2000}, result.Bandwidth)
|
||||||
|
assert.Equal(t, [2]uint64{500, 600}, result.DiskIO)
|
||||||
|
assert.Equal(t, uint8(80), result.Battery[0])
|
||||||
|
assert.Equal(t, uint8(1), result.Battery[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_BasicAveraging(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
Mem: 16.0,
|
||||||
|
MemUsed: 6.0,
|
||||||
|
MemPct: 37.5,
|
||||||
|
MemBuffCache: 1.0,
|
||||||
|
MemZfsArc: 0.5,
|
||||||
|
Swap: 4.0,
|
||||||
|
SwapUsed: 1.0,
|
||||||
|
DiskTotal: 500.0,
|
||||||
|
DiskUsed: 200.0,
|
||||||
|
DiskPct: 40.0,
|
||||||
|
DiskReadPs: 100.0,
|
||||||
|
DiskWritePs: 200.0,
|
||||||
|
NetworkSent: 10.0,
|
||||||
|
NetworkRecv: 20.0,
|
||||||
|
LoadAvg: [3]float64{1.0, 2.0, 3.0},
|
||||||
|
Bandwidth: [2]uint64{1000, 2000},
|
||||||
|
DiskIO: [2]uint64{400, 600},
|
||||||
|
Battery: [2]uint8{80, 1},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 40.0,
|
||||||
|
Mem: 16.0,
|
||||||
|
MemUsed: 10.0,
|
||||||
|
MemPct: 62.5,
|
||||||
|
MemBuffCache: 3.0,
|
||||||
|
MemZfsArc: 1.5,
|
||||||
|
Swap: 4.0,
|
||||||
|
SwapUsed: 3.0,
|
||||||
|
DiskTotal: 500.0,
|
||||||
|
DiskUsed: 300.0,
|
||||||
|
DiskPct: 60.0,
|
||||||
|
DiskReadPs: 200.0,
|
||||||
|
DiskWritePs: 400.0,
|
||||||
|
NetworkSent: 30.0,
|
||||||
|
NetworkRecv: 40.0,
|
||||||
|
LoadAvg: [3]float64{3.0, 4.0, 5.0},
|
||||||
|
Bandwidth: [2]uint64{3000, 4000},
|
||||||
|
DiskIO: [2]uint64{600, 800},
|
||||||
|
Battery: [2]uint8{60, 1},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 30.0, result.Cpu)
|
||||||
|
assert.Equal(t, 16.0, result.Mem)
|
||||||
|
assert.Equal(t, 8.0, result.MemUsed)
|
||||||
|
assert.Equal(t, 50.0, result.MemPct)
|
||||||
|
assert.Equal(t, 2.0, result.MemBuffCache)
|
||||||
|
assert.Equal(t, 1.0, result.MemZfsArc)
|
||||||
|
assert.Equal(t, 4.0, result.Swap)
|
||||||
|
assert.Equal(t, 2.0, result.SwapUsed)
|
||||||
|
assert.Equal(t, 500.0, result.DiskTotal)
|
||||||
|
assert.Equal(t, 250.0, result.DiskUsed)
|
||||||
|
assert.Equal(t, 50.0, result.DiskPct)
|
||||||
|
assert.Equal(t, 150.0, result.DiskReadPs)
|
||||||
|
assert.Equal(t, 300.0, result.DiskWritePs)
|
||||||
|
assert.Equal(t, 20.0, result.NetworkSent)
|
||||||
|
assert.Equal(t, 30.0, result.NetworkRecv)
|
||||||
|
assert.Equal(t, [3]float64{2.0, 3.0, 4.0}, result.LoadAvg)
|
||||||
|
assert.Equal(t, [2]uint64{2000, 3000}, result.Bandwidth)
|
||||||
|
assert.Equal(t, [2]uint64{500, 700}, result.DiskIO)
|
||||||
|
assert.Equal(t, uint8(70), result.Battery[0])
|
||||||
|
assert.Equal(t, uint8(1), result.Battery[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_PeakValues(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
MaxCpu: 25.0,
|
||||||
|
MemUsed: 6.0,
|
||||||
|
MaxMem: 7.0,
|
||||||
|
NetworkSent: 10.0,
|
||||||
|
MaxNetworkSent: 15.0,
|
||||||
|
NetworkRecv: 20.0,
|
||||||
|
MaxNetworkRecv: 25.0,
|
||||||
|
DiskReadPs: 100.0,
|
||||||
|
MaxDiskReadPs: 120.0,
|
||||||
|
DiskWritePs: 200.0,
|
||||||
|
MaxDiskWritePs: 220.0,
|
||||||
|
Bandwidth: [2]uint64{1000, 2000},
|
||||||
|
MaxBandwidth: [2]uint64{1500, 2500},
|
||||||
|
DiskIO: [2]uint64{400, 600},
|
||||||
|
MaxDiskIO: [2]uint64{500, 700},
|
||||||
|
DiskIoStats: [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
|
||||||
|
MaxDiskIoStats: [6]float64{15.0, 25.0, 35.0, 6.0, 9.0, 14.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 40.0,
|
||||||
|
MaxCpu: 50.0,
|
||||||
|
MemUsed: 10.0,
|
||||||
|
MaxMem: 12.0,
|
||||||
|
NetworkSent: 30.0,
|
||||||
|
MaxNetworkSent: 35.0,
|
||||||
|
NetworkRecv: 40.0,
|
||||||
|
MaxNetworkRecv: 45.0,
|
||||||
|
DiskReadPs: 200.0,
|
||||||
|
MaxDiskReadPs: 210.0,
|
||||||
|
DiskWritePs: 400.0,
|
||||||
|
MaxDiskWritePs: 410.0,
|
||||||
|
Bandwidth: [2]uint64{3000, 4000},
|
||||||
|
MaxBandwidth: [2]uint64{3500, 4500},
|
||||||
|
DiskIO: [2]uint64{600, 800},
|
||||||
|
MaxDiskIO: [2]uint64{650, 850},
|
||||||
|
DiskIoStats: [6]float64{50.0, 60.0, 70.0, 15.0, 18.0, 22.0},
|
||||||
|
MaxDiskIoStats: [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 50.0, result.MaxCpu)
|
||||||
|
assert.Equal(t, 12.0, result.MaxMem)
|
||||||
|
assert.Equal(t, 35.0, result.MaxNetworkSent)
|
||||||
|
assert.Equal(t, 45.0, result.MaxNetworkRecv)
|
||||||
|
assert.Equal(t, 210.0, result.MaxDiskReadPs)
|
||||||
|
assert.Equal(t, 410.0, result.MaxDiskWritePs)
|
||||||
|
assert.Equal(t, [2]uint64{3500, 4500}, result.MaxBandwidth)
|
||||||
|
assert.Equal(t, [2]uint64{650, 850}, result.MaxDiskIO)
|
||||||
|
assert.Equal(t, [6]float64{30.0, 40.0, 50.0, 10.0, 13.0, 17.0}, result.DiskIoStats)
|
||||||
|
assert.Equal(t, [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0}, result.MaxDiskIoStats)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_DiskIoStats(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
DiskIoStats: [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
|
||||||
|
MaxDiskIoStats: [6]float64{12.0, 22.0, 32.0, 6.0, 9.0, 13.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
DiskIoStats: [6]float64{30.0, 40.0, 50.0, 15.0, 18.0, 22.0},
|
||||||
|
MaxDiskIoStats: [6]float64{28.0, 38.0, 48.0, 14.0, 17.0, 21.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 30.0,
|
||||||
|
DiskIoStats: [6]float64{20.0, 30.0, 40.0, 10.0, 12.0, 16.0},
|
||||||
|
MaxDiskIoStats: [6]float64{25.0, 35.0, 45.0, 11.0, 13.0, 17.0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
// Average: (10+30+20)/3=20, (20+40+30)/3=30, (30+50+40)/3=40, (5+15+10)/3=10, (8+18+12)/3≈12.67, (12+22+16)/3≈16.67
|
||||||
|
assert.Equal(t, 20.0, result.DiskIoStats[0])
|
||||||
|
assert.Equal(t, 30.0, result.DiskIoStats[1])
|
||||||
|
assert.Equal(t, 40.0, result.DiskIoStats[2])
|
||||||
|
assert.Equal(t, 10.0, result.DiskIoStats[3])
|
||||||
|
assert.Equal(t, 12.67, result.DiskIoStats[4])
|
||||||
|
assert.Equal(t, 16.67, result.DiskIoStats[5])
|
||||||
|
// Max: current DiskIoStats[0] wins for record 2 (30 > MaxDiskIoStats 28)
|
||||||
|
assert.Equal(t, 30.0, result.MaxDiskIoStats[0])
|
||||||
|
assert.Equal(t, 40.0, result.MaxDiskIoStats[1])
|
||||||
|
assert.Equal(t, 50.0, result.MaxDiskIoStats[2])
|
||||||
|
assert.Equal(t, 15.0, result.MaxDiskIoStats[3])
|
||||||
|
assert.Equal(t, 18.0, result.MaxDiskIoStats[4])
|
||||||
|
assert.Equal(t, 22.0, result.MaxDiskIoStats[5])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that current DiskIoStats values are considered when computing MaxDiskIoStats.
|
||||||
|
func TestAverageSystemStatsSlice_DiskIoStatsPeakFromCurrentValues(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, DiskIoStats: [6]float64{95.0, 90.0, 85.0, 50.0, 60.0, 80.0}, MaxDiskIoStats: [6]float64{80.0, 80.0, 80.0, 40.0, 50.0, 70.0}},
|
||||||
|
{Cpu: 20.0, DiskIoStats: [6]float64{10.0, 10.0, 10.0, 5.0, 6.0, 8.0}, MaxDiskIoStats: [6]float64{20.0, 20.0, 20.0, 10.0, 12.0, 16.0}},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
// Current value from first record (95, 90, 85, 50, 60, 80) beats MaxDiskIoStats in both records
|
||||||
|
assert.Equal(t, 95.0, result.MaxDiskIoStats[0])
|
||||||
|
assert.Equal(t, 90.0, result.MaxDiskIoStats[1])
|
||||||
|
assert.Equal(t, 85.0, result.MaxDiskIoStats[2])
|
||||||
|
assert.Equal(t, 50.0, result.MaxDiskIoStats[3])
|
||||||
|
assert.Equal(t, 60.0, result.MaxDiskIoStats[4])
|
||||||
|
assert.Equal(t, 80.0, result.MaxDiskIoStats[5])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that current values are considered when computing peaks
|
||||||
|
// (i.e., current cpu > MaxCpu should still win).
|
||||||
|
func TestAverageSystemStatsSlice_PeakFromCurrentValues(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 95.0, MaxCpu: 80.0, MemUsed: 15.0, MaxMem: 10.0},
|
||||||
|
{Cpu: 10.0, MaxCpu: 20.0, MemUsed: 5.0, MaxMem: 8.0},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 95.0, result.MaxCpu)
|
||||||
|
assert.Equal(t, 15.0, result.MaxMem)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that records without temperature data are excluded from the temperature average.
|
||||||
|
func TestAverageSystemStatsSlice_Temperatures(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
Temperatures: map[string]float64{"cpu": 60.0, "gpu": 70.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
Temperatures: map[string]float64{"cpu": 80.0, "gpu": 90.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// No temperatures - should not affect temp averaging
|
||||||
|
Cpu: 30.0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.Temperatures)
|
||||||
|
// Average over 2 records that had temps, not 3
|
||||||
|
assert.Equal(t, 70.0, result.Temperatures["cpu"])
|
||||||
|
assert.Equal(t, 80.0, result.Temperatures["gpu"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_NetworkInterfaces(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
NetworkInterfaces: map[string][4]uint64{
|
||||||
|
"eth0": {100, 200, 150, 250},
|
||||||
|
"eth1": {50, 60, 70, 80},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
NetworkInterfaces: map[string][4]uint64{
|
||||||
|
"eth0": {200, 400, 300, 500},
|
||||||
|
"eth1": {150, 160, 170, 180},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.NetworkInterfaces)
|
||||||
|
// [0] and [1] are averaged, [2] and [3] are max
|
||||||
|
assert.Equal(t, [4]uint64{150, 300, 300, 500}, result.NetworkInterfaces["eth0"])
|
||||||
|
assert.Equal(t, [4]uint64{100, 110, 170, 180}, result.NetworkInterfaces["eth1"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_ExtraFs(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskTotal: 1000.0,
|
||||||
|
DiskUsed: 400.0,
|
||||||
|
DiskReadPs: 50.0,
|
||||||
|
DiskWritePs: 100.0,
|
||||||
|
MaxDiskReadPS: 60.0,
|
||||||
|
MaxDiskWritePS: 110.0,
|
||||||
|
DiskReadBytes: 5000,
|
||||||
|
DiskWriteBytes: 10000,
|
||||||
|
MaxDiskReadBytes: 6000,
|
||||||
|
MaxDiskWriteBytes: 11000,
|
||||||
|
DiskIoStats: [6]float64{10.0, 20.0, 30.0, 5.0, 8.0, 12.0},
|
||||||
|
MaxDiskIoStats: [6]float64{12.0, 22.0, 32.0, 6.0, 9.0, 13.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskTotal: 1000.0,
|
||||||
|
DiskUsed: 600.0,
|
||||||
|
DiskReadPs: 150.0,
|
||||||
|
DiskWritePs: 200.0,
|
||||||
|
MaxDiskReadPS: 160.0,
|
||||||
|
MaxDiskWritePS: 210.0,
|
||||||
|
DiskReadBytes: 15000,
|
||||||
|
DiskWriteBytes: 20000,
|
||||||
|
MaxDiskReadBytes: 16000,
|
||||||
|
MaxDiskWriteBytes: 21000,
|
||||||
|
DiskIoStats: [6]float64{50.0, 60.0, 70.0, 15.0, 18.0, 22.0},
|
||||||
|
MaxDiskIoStats: [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.ExtraFs)
|
||||||
|
require.NotNil(t, result.ExtraFs["/data"])
|
||||||
|
fs := result.ExtraFs["/data"]
|
||||||
|
assert.Equal(t, 1000.0, fs.DiskTotal)
|
||||||
|
assert.Equal(t, 500.0, fs.DiskUsed)
|
||||||
|
assert.Equal(t, 100.0, fs.DiskReadPs)
|
||||||
|
assert.Equal(t, 150.0, fs.DiskWritePs)
|
||||||
|
assert.Equal(t, 160.0, fs.MaxDiskReadPS)
|
||||||
|
assert.Equal(t, 210.0, fs.MaxDiskWritePS)
|
||||||
|
assert.Equal(t, uint64(10000), fs.DiskReadBytes)
|
||||||
|
assert.Equal(t, uint64(15000), fs.DiskWriteBytes)
|
||||||
|
assert.Equal(t, uint64(16000), fs.MaxDiskReadBytes)
|
||||||
|
assert.Equal(t, uint64(21000), fs.MaxDiskWriteBytes)
|
||||||
|
assert.Equal(t, [6]float64{30.0, 40.0, 50.0, 10.0, 13.0, 17.0}, fs.DiskIoStats)
|
||||||
|
assert.Equal(t, [6]float64{55.0, 65.0, 75.0, 16.0, 19.0, 23.0}, fs.MaxDiskIoStats)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that ExtraFs DiskIoStats peak considers current values, not just previous peaks.
|
||||||
|
func TestAverageSystemStatsSlice_ExtraFsDiskIoStatsPeakFromCurrentValues(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskIoStats: [6]float64{95.0, 90.0, 85.0, 50.0, 60.0, 80.0}, // exceeds MaxDiskIoStats
|
||||||
|
MaxDiskIoStats: [6]float64{80.0, 80.0, 80.0, 40.0, 50.0, 70.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskIoStats: [6]float64{10.0, 10.0, 10.0, 5.0, 6.0, 8.0},
|
||||||
|
MaxDiskIoStats: [6]float64{20.0, 20.0, 20.0, 10.0, 12.0, 16.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
fs := result.ExtraFs["/data"]
|
||||||
|
assert.Equal(t, 95.0, fs.MaxDiskIoStats[0])
|
||||||
|
assert.Equal(t, 90.0, fs.MaxDiskIoStats[1])
|
||||||
|
assert.Equal(t, 85.0, fs.MaxDiskIoStats[2])
|
||||||
|
assert.Equal(t, 50.0, fs.MaxDiskIoStats[3])
|
||||||
|
assert.Equal(t, 60.0, fs.MaxDiskIoStats[4])
|
||||||
|
assert.Equal(t, 80.0, fs.MaxDiskIoStats[5])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that extra FS peak values consider current values, not just previous peaks.
|
||||||
|
func TestAverageSystemStatsSlice_ExtraFsPeaksFromCurrentValues(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskReadPs: 500.0, // exceeds MaxDiskReadPS
|
||||||
|
MaxDiskReadPS: 100.0,
|
||||||
|
DiskReadBytes: 50000,
|
||||||
|
MaxDiskReadBytes: 10000,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
ExtraFs: map[string]*system.FsStats{
|
||||||
|
"/data": {
|
||||||
|
DiskReadPs: 50.0,
|
||||||
|
MaxDiskReadPS: 200.0,
|
||||||
|
DiskReadBytes: 5000,
|
||||||
|
MaxDiskReadBytes: 20000,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
fs := result.ExtraFs["/data"]
|
||||||
|
assert.Equal(t, 500.0, fs.MaxDiskReadPS)
|
||||||
|
assert.Equal(t, uint64(50000), fs.MaxDiskReadBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_GPUData(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
GPUData: map[string]system.GPUData{
|
||||||
|
"gpu0": {
|
||||||
|
Name: "RTX 4090",
|
||||||
|
Temperature: 60.0,
|
||||||
|
MemoryUsed: 4.0,
|
||||||
|
MemoryTotal: 24.0,
|
||||||
|
Usage: 30.0,
|
||||||
|
Power: 200.0,
|
||||||
|
Count: 1.0,
|
||||||
|
Engines: map[string]float64{
|
||||||
|
"3D": 50.0,
|
||||||
|
"Video": 10.0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
GPUData: map[string]system.GPUData{
|
||||||
|
"gpu0": {
|
||||||
|
Name: "RTX 4090",
|
||||||
|
Temperature: 80.0,
|
||||||
|
MemoryUsed: 8.0,
|
||||||
|
MemoryTotal: 24.0,
|
||||||
|
Usage: 70.0,
|
||||||
|
Power: 300.0,
|
||||||
|
Count: 1.0,
|
||||||
|
Engines: map[string]float64{
|
||||||
|
"3D": 90.0,
|
||||||
|
"Video": 30.0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.GPUData)
|
||||||
|
gpu := result.GPUData["gpu0"]
|
||||||
|
assert.Equal(t, "RTX 4090", gpu.Name)
|
||||||
|
assert.Equal(t, 70.0, gpu.Temperature)
|
||||||
|
assert.Equal(t, 6.0, gpu.MemoryUsed)
|
||||||
|
assert.Equal(t, 24.0, gpu.MemoryTotal)
|
||||||
|
assert.Equal(t, 50.0, gpu.Usage)
|
||||||
|
assert.Equal(t, 250.0, gpu.Power)
|
||||||
|
assert.Equal(t, 1.0, gpu.Count)
|
||||||
|
require.NotNil(t, gpu.Engines)
|
||||||
|
assert.Equal(t, 70.0, gpu.Engines["3D"])
|
||||||
|
assert.Equal(t, 20.0, gpu.Engines["Video"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_MultipleGPUs(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
GPUData: map[string]system.GPUData{
|
||||||
|
"gpu0": {Name: "GPU A", Usage: 20.0, Temperature: 50.0},
|
||||||
|
"gpu1": {Name: "GPU B", Usage: 60.0, Temperature: 70.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
GPUData: map[string]system.GPUData{
|
||||||
|
"gpu0": {Name: "GPU A", Usage: 40.0, Temperature: 60.0},
|
||||||
|
"gpu1": {Name: "GPU B", Usage: 80.0, Temperature: 80.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.GPUData)
|
||||||
|
assert.Equal(t, 30.0, result.GPUData["gpu0"].Usage)
|
||||||
|
assert.Equal(t, 55.0, result.GPUData["gpu0"].Temperature)
|
||||||
|
assert.Equal(t, 70.0, result.GPUData["gpu1"].Usage)
|
||||||
|
assert.Equal(t, 75.0, result.GPUData["gpu1"].Temperature)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_CpuCoresUsage(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, CpuCoresUsage: system.Uint8Slice{10, 20, 30, 40}},
|
||||||
|
{Cpu: 20.0, CpuCoresUsage: system.Uint8Slice{30, 40, 50, 60}},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.CpuCoresUsage)
|
||||||
|
assert.Equal(t, system.Uint8Slice{20, 30, 40, 50}, result.CpuCoresUsage)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that per-core usage rounds correctly (e.g., 15.5 -> 16 via math.Round).
|
||||||
|
func TestAverageSystemStatsSlice_CpuCoresUsageRounding(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, CpuCoresUsage: system.Uint8Slice{11}},
|
||||||
|
{Cpu: 20.0, CpuCoresUsage: system.Uint8Slice{20}},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.CpuCoresUsage)
|
||||||
|
// (11+20)/2 = 15.5, rounds to 16
|
||||||
|
assert.Equal(t, uint8(16), result.CpuCoresUsage[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_CpuBreakdown(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, CpuBreakdown: []float64{5.0, 3.0, 1.0, 0.5, 90.5}},
|
||||||
|
{Cpu: 20.0, CpuBreakdown: []float64{15.0, 7.0, 3.0, 1.5, 73.5}},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
require.NotNil(t, result.CpuBreakdown)
|
||||||
|
assert.Equal(t, []float64{10.0, 5.0, 2.0, 1.0, 82.0}, result.CpuBreakdown)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that Battery[1] (charge state) uses the last record's value.
|
||||||
|
func TestAverageSystemStatsSlice_BatteryLastChargeState(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, Battery: [2]uint8{100, 1}}, // charging
|
||||||
|
{Cpu: 20.0, Battery: [2]uint8{90, 0}}, // not charging
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, uint8(95), result.Battery[0])
|
||||||
|
assert.Equal(t, uint8(0), result.Battery[1]) // last record's charge state
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageSystemStatsSlice_ThreeRecordsRounding(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{Cpu: 10.0, Mem: 8.0},
|
||||||
|
{Cpu: 20.0, Mem: 8.0},
|
||||||
|
{Cpu: 30.0, Mem: 8.0},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 20.0, result.Cpu)
|
||||||
|
assert.Equal(t, 8.0, result.Mem)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests records where some have optional fields and others don't.
|
||||||
|
func TestAverageSystemStatsSlice_MixedOptionalFields(t *testing.T) {
|
||||||
|
input := []system.Stats{
|
||||||
|
{
|
||||||
|
Cpu: 10.0,
|
||||||
|
CpuCoresUsage: system.Uint8Slice{50, 60},
|
||||||
|
CpuBreakdown: []float64{5.0, 3.0, 1.0, 0.5, 90.5},
|
||||||
|
GPUData: map[string]system.GPUData{
|
||||||
|
"gpu0": {Name: "GPU", Usage: 40.0},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Cpu: 20.0,
|
||||||
|
// No CpuCoresUsage, CpuBreakdown, or GPUData
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 15.0, result.Cpu)
|
||||||
|
// CpuCoresUsage: only 1 record had it, so sum/2
|
||||||
|
require.NotNil(t, result.CpuCoresUsage)
|
||||||
|
assert.Equal(t, uint8(25), result.CpuCoresUsage[0])
|
||||||
|
assert.Equal(t, uint8(30), result.CpuCoresUsage[1])
|
||||||
|
// CpuBreakdown: only 1 record had it, so sum/2
|
||||||
|
require.NotNil(t, result.CpuBreakdown)
|
||||||
|
assert.Equal(t, 2.5, result.CpuBreakdown[0])
|
||||||
|
// GPUData: only 1 record had it, so sum/2
|
||||||
|
require.NotNil(t, result.GPUData)
|
||||||
|
assert.Equal(t, 20.0, result.GPUData["gpu0"].Usage)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests with 10 records matching the common real-world case (10 x 1m -> 1 x 10m).
|
||||||
|
func TestAverageSystemStatsSlice_TenRecords(t *testing.T) {
|
||||||
|
input := make([]system.Stats, 10)
|
||||||
|
for i := range input {
|
||||||
|
input[i] = system.Stats{
|
||||||
|
Cpu: float64(i * 10), // 0, 10, 20, ..., 90
|
||||||
|
Mem: 16.0,
|
||||||
|
MemUsed: float64(4 + i), // 4, 5, 6, ..., 13
|
||||||
|
MemPct: float64(25 + i), // 25, 26, ..., 34
|
||||||
|
DiskTotal: 500.0,
|
||||||
|
DiskUsed: 250.0,
|
||||||
|
DiskPct: 50.0,
|
||||||
|
NetworkSent: float64(i),
|
||||||
|
NetworkRecv: float64(i * 2),
|
||||||
|
Bandwidth: [2]uint64{uint64(i * 1000), uint64(i * 2000)},
|
||||||
|
LoadAvg: [3]float64{float64(i), float64(i) * 0.5, float64(i) * 0.25},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageSystemStatsSlice(input)
|
||||||
|
|
||||||
|
assert.Equal(t, 45.0, result.Cpu) // avg of 0..90
|
||||||
|
assert.Equal(t, 16.0, result.Mem) // constant
|
||||||
|
assert.Equal(t, 8.5, result.MemUsed) // avg of 4..13
|
||||||
|
assert.Equal(t, 29.5, result.MemPct) // avg of 25..34
|
||||||
|
assert.Equal(t, 500.0, result.DiskTotal)
|
||||||
|
assert.Equal(t, 250.0, result.DiskUsed)
|
||||||
|
assert.Equal(t, 50.0, result.DiskPct)
|
||||||
|
assert.Equal(t, 4.5, result.NetworkSent)
|
||||||
|
assert.Equal(t, 9.0, result.NetworkRecv)
|
||||||
|
assert.Equal(t, [2]uint64{4500, 9000}, result.Bandwidth)
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Container Stats Tests ---
|
||||||
|
|
||||||
|
func TestAverageContainerStatsSlice_Empty(t *testing.T) {
|
||||||
|
result := records.AverageContainerStatsSlice(nil)
|
||||||
|
assert.Equal(t, []container.Stats{}, result)
|
||||||
|
|
||||||
|
result = records.AverageContainerStatsSlice([][]container.Stats{})
|
||||||
|
assert.Equal(t, []container.Stats{}, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageContainerStatsSlice_SingleRecord(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 5.0, Mem: 128.0, Bandwidth: [2]uint64{1000, 2000}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
|
||||||
|
require.Len(t, result, 1)
|
||||||
|
assert.Equal(t, "nginx", result[0].Name)
|
||||||
|
assert.Equal(t, 5.0, result[0].Cpu)
|
||||||
|
assert.Equal(t, 128.0, result[0].Mem)
|
||||||
|
assert.Equal(t, [2]uint64{1000, 2000}, result[0].Bandwidth)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageContainerStatsSlice_BasicAveraging(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 10.0, Mem: 100.0, Bandwidth: [2]uint64{1000, 2000}},
|
||||||
|
{Name: "redis", Cpu: 5.0, Mem: 64.0, Bandwidth: [2]uint64{500, 1000}},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 20.0, Mem: 200.0, Bandwidth: [2]uint64{3000, 4000}},
|
||||||
|
{Name: "redis", Cpu: 15.0, Mem: 128.0, Bandwidth: [2]uint64{1500, 2000}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||||
|
|
||||||
|
require.Len(t, result, 2)
|
||||||
|
|
||||||
|
assert.Equal(t, "nginx", result[0].Name)
|
||||||
|
assert.Equal(t, 15.0, result[0].Cpu)
|
||||||
|
assert.Equal(t, 150.0, result[0].Mem)
|
||||||
|
assert.Equal(t, [2]uint64{2000, 3000}, result[0].Bandwidth)
|
||||||
|
|
||||||
|
assert.Equal(t, "redis", result[1].Name)
|
||||||
|
assert.Equal(t, 10.0, result[1].Cpu)
|
||||||
|
assert.Equal(t, 96.0, result[1].Mem)
|
||||||
|
assert.Equal(t, [2]uint64{1000, 1500}, result[1].Bandwidth)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests containers that appear in some records but not all.
|
||||||
|
func TestAverageContainerStatsSlice_ContainerAppearsInSomeRecords(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 10.0, Mem: 100.0},
|
||||||
|
{Name: "redis", Cpu: 5.0, Mem: 64.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 20.0, Mem: 200.0},
|
||||||
|
// redis not present
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||||
|
|
||||||
|
require.Len(t, result, 2)
|
||||||
|
|
||||||
|
assert.Equal(t, "nginx", result[0].Name)
|
||||||
|
assert.Equal(t, 15.0, result[0].Cpu)
|
||||||
|
assert.Equal(t, 150.0, result[0].Mem)
|
||||||
|
|
||||||
|
// redis: sum / count where count = total records (2), not records containing redis
|
||||||
|
assert.Equal(t, "redis", result[1].Name)
|
||||||
|
assert.Equal(t, 2.5, result[1].Cpu)
|
||||||
|
assert.Equal(t, 32.0, result[1].Mem)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests backward compatibility with deprecated NetworkSent/NetworkRecv (MB) when Bandwidth is zero.
|
||||||
|
func TestAverageContainerStatsSlice_DeprecatedNetworkFields(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 10.0, Mem: 100.0, NetworkSent: 1.0, NetworkRecv: 2.0}, // 1 MB, 2 MB
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 20.0, Mem: 200.0, NetworkSent: 3.0, NetworkRecv: 4.0}, // 3 MB, 4 MB
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
|
||||||
|
require.Len(t, result, 1)
|
||||||
|
assert.Equal(t, "nginx", result[0].Name)
|
||||||
|
// avg sent = (1*1048576 + 3*1048576) / 2 = 2*1048576
|
||||||
|
assert.Equal(t, uint64(2*1048576), result[0].Bandwidth[0])
|
||||||
|
// avg recv = (2*1048576 + 4*1048576) / 2 = 3*1048576
|
||||||
|
assert.Equal(t, uint64(3*1048576), result[0].Bandwidth[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Tests that when Bandwidth is set, deprecated NetworkSent/NetworkRecv are ignored.
|
||||||
|
func TestAverageContainerStatsSlice_MixedBandwidthAndDeprecated(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 10.0, Mem: 100.0, Bandwidth: [2]uint64{5000, 6000}, NetworkSent: 99.0, NetworkRecv: 99.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{Name: "nginx", Cpu: 20.0, Mem: 200.0, Bandwidth: [2]uint64{7000, 8000}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
|
||||||
|
require.Len(t, result, 1)
|
||||||
|
assert.Equal(t, uint64(6000), result[0].Bandwidth[0])
|
||||||
|
assert.Equal(t, uint64(7000), result[0].Bandwidth[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageContainerStatsSlice_ThreeRecords(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{{Name: "app", Cpu: 1.0, Mem: 100.0}},
|
||||||
|
{{Name: "app", Cpu: 2.0, Mem: 200.0}},
|
||||||
|
{{Name: "app", Cpu: 3.0, Mem: 300.0}},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
|
||||||
|
require.Len(t, result, 1)
|
||||||
|
assert.Equal(t, 2.0, result[0].Cpu)
|
||||||
|
assert.Equal(t, 200.0, result[0].Mem)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAverageContainerStatsSlice_ManyContainers(t *testing.T) {
|
||||||
|
input := [][]container.Stats{
|
||||||
|
{
|
||||||
|
{Name: "a", Cpu: 10.0, Mem: 100.0},
|
||||||
|
{Name: "b", Cpu: 20.0, Mem: 200.0},
|
||||||
|
{Name: "c", Cpu: 30.0, Mem: 300.0},
|
||||||
|
{Name: "d", Cpu: 40.0, Mem: 400.0},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
{Name: "a", Cpu: 20.0, Mem: 200.0},
|
||||||
|
{Name: "b", Cpu: 30.0, Mem: 300.0},
|
||||||
|
{Name: "c", Cpu: 40.0, Mem: 400.0},
|
||||||
|
{Name: "d", Cpu: 50.0, Mem: 500.0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := records.AverageContainerStatsSlice(input)
|
||||||
|
sort.Slice(result, func(i, j int) bool { return result[i].Name < result[j].Name })
|
||||||
|
|
||||||
|
require.Len(t, result, 4)
|
||||||
|
assert.Equal(t, 15.0, result[0].Cpu)
|
||||||
|
assert.Equal(t, 25.0, result[1].Cpu)
|
||||||
|
assert.Equal(t, 35.0, result[2].Cpu)
|
||||||
|
assert.Equal(t, 45.0, result[3].Cpu)
|
||||||
|
}
|
||||||
138
internal/records/records_deletion.go
Normal file
138
internal/records/records_deletion.go
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
package records
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Delete old records
|
||||||
|
func (rm *RecordManager) DeleteOldRecords() {
|
||||||
|
rm.app.RunInTransaction(func(txApp core.App) error {
|
||||||
|
err := deleteOldSystemStats(txApp)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Error deleting old system stats", "err", err)
|
||||||
|
}
|
||||||
|
err = deleteOldContainerRecords(txApp)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Error deleting old container records", "err", err)
|
||||||
|
}
|
||||||
|
err = deleteOldSystemdServiceRecords(txApp)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Error deleting old systemd service records", "err", err)
|
||||||
|
}
|
||||||
|
err = deleteOldAlertsHistory(txApp, 200, 250)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Error deleting old alerts history", "err", err)
|
||||||
|
}
|
||||||
|
err = deleteOldQuietHours(txApp)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Error deleting old quiet hours", "err", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete old alerts history records
|
||||||
|
func deleteOldAlertsHistory(app core.App, countToKeep, countBeforeDeletion int) error {
|
||||||
|
db := app.DB()
|
||||||
|
var users []struct {
|
||||||
|
Id string `db:"user"`
|
||||||
|
}
|
||||||
|
err := db.NewQuery("SELECT user, COUNT(*) as count FROM alerts_history GROUP BY user HAVING count > {:countBeforeDeletion}").Bind(dbx.Params{"countBeforeDeletion": countBeforeDeletion}).All(&users)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, user := range users {
|
||||||
|
_, err = db.NewQuery("DELETE FROM alerts_history WHERE user = {:user} AND id NOT IN (SELECT id FROM alerts_history WHERE user = {:user} ORDER BY created DESC LIMIT {:countToKeep})").Bind(dbx.Params{"user": user.Id, "countToKeep": countToKeep}).Execute()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deletes system_stats records older than what is displayed in the UI
|
||||||
|
func deleteOldSystemStats(app core.App) error {
|
||||||
|
// Collections to process
|
||||||
|
collections := [3]string{"system_stats", "container_stats", "network_probe_stats"}
|
||||||
|
|
||||||
|
// Record types and their retention periods
|
||||||
|
type RecordDeletionData struct {
|
||||||
|
recordType string
|
||||||
|
retention time.Duration
|
||||||
|
}
|
||||||
|
recordData := []RecordDeletionData{
|
||||||
|
{recordType: "1m", retention: time.Hour}, // 1 hour
|
||||||
|
{recordType: "10m", retention: 12 * time.Hour}, // 12 hours
|
||||||
|
{recordType: "20m", retention: 24 * time.Hour}, // 1 day
|
||||||
|
{recordType: "120m", retention: 7 * 24 * time.Hour}, // 7 days
|
||||||
|
{recordType: "480m", retention: 30 * 24 * time.Hour}, // 30 days
|
||||||
|
}
|
||||||
|
|
||||||
|
now := time.Now().UTC()
|
||||||
|
|
||||||
|
for _, collection := range collections {
|
||||||
|
// Build the WHERE clause
|
||||||
|
var conditionParts []string
|
||||||
|
var params dbx.Params = make(map[string]any)
|
||||||
|
for i := range recordData {
|
||||||
|
rd := recordData[i]
|
||||||
|
// Create parameterized condition for this record type
|
||||||
|
dateParam := fmt.Sprintf("date%d", i)
|
||||||
|
conditionParts = append(conditionParts, fmt.Sprintf("(type = '%s' AND created < {:%s})", rd.recordType, dateParam))
|
||||||
|
params[dateParam] = now.Add(-rd.retention)
|
||||||
|
}
|
||||||
|
// Combine conditions with OR
|
||||||
|
conditionStr := strings.Join(conditionParts, " OR ")
|
||||||
|
// Construct and execute the full raw query
|
||||||
|
rawQuery := fmt.Sprintf("DELETE FROM %s WHERE %s", collection, conditionStr)
|
||||||
|
if _, err := app.DB().NewQuery(rawQuery).Bind(params).Execute(); err != nil {
|
||||||
|
return fmt.Errorf("failed to delete from %s: %v", collection, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deletes systemd service records that haven't been updated in the last 20 minutes
|
||||||
|
func deleteOldSystemdServiceRecords(app core.App) error {
|
||||||
|
now := time.Now().UTC()
|
||||||
|
twentyMinutesAgo := now.Add(-20 * time.Minute)
|
||||||
|
|
||||||
|
// Delete systemd service records where updated < twentyMinutesAgo
|
||||||
|
_, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to delete old systemd service records: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deletes container records that haven't been updated in the last 10 minutes
|
||||||
|
func deleteOldContainerRecords(app core.App) error {
|
||||||
|
now := time.Now().UTC()
|
||||||
|
tenMinutesAgo := now.Add(-10 * time.Minute)
|
||||||
|
|
||||||
|
// Delete container records where updated < tenMinutesAgo
|
||||||
|
_, err := app.DB().NewQuery("DELETE FROM containers WHERE updated < {:updated}").Bind(dbx.Params{"updated": tenMinutesAgo.UnixMilli()}).Execute()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to delete old container records: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deletes old quiet hours records where end date has passed
|
||||||
|
func deleteOldQuietHours(app core.App) error {
|
||||||
|
now := time.Now().UTC()
|
||||||
|
_, err := app.DB().NewQuery("DELETE FROM quiet_hours WHERE type = 'one-time' AND end < {:now}").Bind(dbx.Params{"now": now}).Execute()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
428
internal/records/records_deletion_test.go
Normal file
428
internal/records/records_deletion_test.go
Normal file
@@ -0,0 +1,428 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package records_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/records"
|
||||||
|
"github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
"github.com/pocketbase/pocketbase/tools/types"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestDeleteOldRecords tests the main DeleteOldRecords function end to
// end against a real test hub: an expired system_stats record and an
// oversized alerts history must be pruned, while a recent record is kept.
func TestDeleteOldRecords(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	rm := records.NewRecordManager(hub)

	// Create test user for alerts history
	user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
	require.NoError(t, err)

	// Create test system
	system, err := tests.CreateRecord(hub, "systems", map[string]any{
		"name":   "test-system",
		"host":   "localhost",
		"port":   "45876",
		"status": "up",
		"users":  []string{user.Id},
	})
	require.NoError(t, err)

	now := time.Now()

	// Create old system_stats records that should be deleted
	// (2 hours old; 1m records are retained for only 1 hour)
	var record *core.Record
	record, err = tests.CreateRecord(hub, "system_stats", map[string]any{
		"system": system.Id,
		"type":   "1m",
		"stats":  `{"cpu": 50.0, "mem": 1024}`,
	})
	require.NoError(t, err)
	// created is autodate field, so we need to set it manually
	// (SaveNoValidate bypasses the autodate refresh)
	record.SetRaw("created", now.UTC().Add(-2*time.Hour).Format(types.DefaultDateLayout))
	err = hub.SaveNoValidate(record)
	require.NoError(t, err)
	require.NotNil(t, record)
	require.InDelta(t, record.GetDateTime("created").Time().UTC().Unix(), now.UTC().Add(-2*time.Hour).Unix(), 1)
	require.Equal(t, record.Get("system"), system.Id)
	require.Equal(t, record.Get("type"), "1m")

	// Create recent system_stats record that should be kept
	_, err = tests.CreateRecord(hub, "system_stats", map[string]any{
		"system": system.Id,
		"type":   "1m",
		"stats":  `{"cpu": 30.0, "mem": 512}`,
		"created": now.Add(-30 * time.Minute), // 30 minutes old, should be kept
	})
	require.NoError(t, err)

	// Create many alerts history records to trigger deletion
	for i := range 260 { // More than countBeforeDeletion (250)
		_, err = tests.CreateRecord(hub, "alerts_history", map[string]any{
			"user":    user.Id,
			"name":    "CPU",
			"value":   i + 1,
			"system":  system.Id,
			"created": now.Add(-time.Duration(i) * time.Minute),
		})
		require.NoError(t, err)
	}

	// Count records before deletion
	systemStatsCountBefore, err := hub.CountRecords("system_stats")
	require.NoError(t, err)
	alertsCountBefore, err := hub.CountRecords("alerts_history")
	require.NoError(t, err)

	// Run deletion
	rm.DeleteOldRecords()

	// Count records after deletion
	systemStatsCountAfter, err := hub.CountRecords("system_stats")
	require.NoError(t, err)
	alertsCountAfter, err := hub.CountRecords("alerts_history")
	require.NoError(t, err)

	// Verify old system stats were deleted
	assert.Less(t, systemStatsCountAfter, systemStatsCountBefore, "Old system stats should be deleted")

	// Verify alerts history was trimmed
	assert.Less(t, alertsCountAfter, alertsCountBefore, "Excessive alerts history should be deleted")
	assert.Equal(t, alertsCountAfter, int64(200), "Alerts count should be equal to countToKeep (200)")
}
|
||||||
|
|
||||||
|
// TestDeleteOldSystemStats tests the deleteOldSystemStats function
// across every record type's retention boundary, for both the
// system_stats and container_stats collections.
func TestDeleteOldSystemStats(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	// Create test system
	user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
	require.NoError(t, err)

	system, err := tests.CreateRecord(hub, "systems", map[string]any{
		"name":   "test-system",
		"host":   "localhost",
		"port":   "45876",
		"status": "up",
		"users":  []string{user.Id},
	})
	require.NoError(t, err)

	now := time.Now().UTC()

	// Test data for different record types and their retention periods.
	// One "kept" and one "deleted" case per type; the retention field is
	// documentation only (the implementation defines its own windows).
	testCases := []struct {
		recordType   string
		retention    time.Duration
		shouldBeKept bool
		ageFromNow   time.Duration
		description  string
	}{
		{"1m", time.Hour, true, 30 * time.Minute, "1m record within 1 hour should be kept"},
		{"1m", time.Hour, false, 2 * time.Hour, "1m record older than 1 hour should be deleted"},
		{"10m", 12 * time.Hour, true, 6 * time.Hour, "10m record within 12 hours should be kept"},
		{"10m", 12 * time.Hour, false, 24 * time.Hour, "10m record older than 12 hours should be deleted"},
		{"20m", 24 * time.Hour, true, 12 * time.Hour, "20m record within 24 hours should be kept"},
		{"20m", 24 * time.Hour, false, 48 * time.Hour, "20m record older than 24 hours should be deleted"},
		{"120m", 7 * 24 * time.Hour, true, 3 * 24 * time.Hour, "120m record within 7 days should be kept"},
		{"120m", 7 * 24 * time.Hour, false, 10 * 24 * time.Hour, "120m record older than 7 days should be deleted"},
		{"480m", 30 * 24 * time.Hour, true, 15 * 24 * time.Hour, "480m record within 30 days should be kept"},
		{"480m", 30 * 24 * time.Hour, false, 45 * 24 * time.Hour, "480m record older than 30 days should be deleted"},
	}

	// Create test records for both system_stats and container_stats
	collections := []string{"system_stats", "container_stats"}
	recordIds := make(map[string][]string)

	for _, collection := range collections {
		recordIds[collection] = make([]string, 0)

		for i, tc := range testCases {
			recordTime := now.Add(-tc.ageFromNow)

			// Stats payload shape differs per collection (object vs array).
			var stats string
			if collection == "system_stats" {
				stats = fmt.Sprintf(`{"cpu": %d.0, "mem": %d}`, i*10, i*100)
			} else {
				stats = fmt.Sprintf(`[{"name": "container%d", "cpu": %d.0, "mem": %d}]`, i, i*5, i*50)
			}

			record, err := tests.CreateRecord(hub, collection, map[string]any{
				"system": system.Id,
				"type":   tc.recordType,
				"stats":  stats,
			})
			require.NoError(t, err)
			// created is an autodate field; backdate it via SaveNoValidate
			record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
			err = hub.SaveNoValidate(record)
			require.NoError(t, err)
			recordIds[collection] = append(recordIds[collection], record.Id)
		}
	}

	// Run deletion
	err = records.DeleteOldSystemStats(hub)
	require.NoError(t, err)

	// Verify results
	for _, collection := range collections {
		for i, tc := range testCases {
			recordId := recordIds[collection][i]

			// Try to find the record
			_, err := hub.FindRecordById(collection, recordId)

			if tc.shouldBeKept {
				assert.NoError(t, err, "Record should exist: %s", tc.description)
			} else {
				assert.Error(t, err, "Record should be deleted: %s", tc.description)
			}
		}
	}
}
|
||||||
|
|
||||||
|
// TestDeleteOldAlertsHistory tests the deleteOldAlertsHistory function:
// a user below countBeforeDeletion keeps all alerts, a user above it is
// trimmed down to countToKeep newest alerts.
func TestDeleteOldAlertsHistory(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	// Create test users
	user1, err := tests.CreateUser(hub, "user1@example.com", "testtesttest")
	require.NoError(t, err)

	user2, err := tests.CreateUser(hub, "user2@example.com", "testtesttest")
	require.NoError(t, err)

	system, err := tests.CreateRecord(hub, "systems", map[string]any{
		"name":   "test-system",
		"host":   "localhost",
		"port":   "45876",
		"status": "up",
		"users":  []string{user1.Id, user2.Id},
	})
	require.NoError(t, err)
	now := time.Now().UTC()

	testCases := []struct {
		name                  string
		user                  *core.Record
		alertCount            int
		countToKeep           int
		countBeforeDeletion   int
		expectedAfterDeletion int
		description           string
	}{
		{
			name:                  "User with few alerts (below threshold)",
			user:                  user1,
			alertCount:            100,
			countToKeep:           50,
			countBeforeDeletion:   150,
			expectedAfterDeletion: 100, // No deletion because below threshold
			description:           "User with alerts below countBeforeDeletion should not have any deleted",
		},
		{
			name:                  "User with many alerts (above threshold)",
			user:                  user2,
			alertCount:            300,
			countToKeep:           100,
			countBeforeDeletion:   200,
			expectedAfterDeletion: 100, // Should be trimmed to countToKeep
			description:           "User with alerts above countBeforeDeletion should be trimmed to countToKeep",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Create alerts for this user, each one minute older than the last
			for i := 0; i < tc.alertCount; i++ {
				_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
					"user":    tc.user.Id,
					"name":    "CPU",
					"value":   i + 1,
					"system":  system.Id,
					"created": now.Add(-time.Duration(i) * time.Minute),
				})
				require.NoError(t, err)
			}

			// Count before deletion
			countBefore, err := hub.CountRecords("alerts_history",
				dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
			require.NoError(t, err)
			assert.Equal(t, int64(tc.alertCount), countBefore, "Initial count should match")

			// Run deletion
			err = records.DeleteOldAlertsHistory(hub, tc.countToKeep, tc.countBeforeDeletion)
			require.NoError(t, err)

			// Count after deletion
			countAfter, err := hub.CountRecords("alerts_history",
				dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
			require.NoError(t, err)

			assert.Equal(t, int64(tc.expectedAfterDeletion), countAfter, tc.description)

			// If deletion occurred, verify the most recent records were kept
			if tc.expectedAfterDeletion < tc.alertCount {
				// NOTE(review): this local shadows the imported records package
				// within this scope; consider renaming for clarity.
				records, err := hub.FindRecordsByFilter("alerts_history",
					"user = {:user}",
					"-created", // Order by created DESC
					tc.countToKeep,
					0,
					map[string]any{"user": tc.user.Id})
				require.NoError(t, err)
				assert.Len(t, records, tc.expectedAfterDeletion, "Should have exactly countToKeep records")

				// Verify records are in descending order by created time
				for i := 1; i < len(records); i++ {
					prev := records[i-1].GetDateTime("created").Time()
					curr := records[i].GetDateTime("created").Time()
					assert.True(t, prev.After(curr) || prev.Equal(curr),
						"Records should be ordered by created time (newest first)")
				}
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestDeleteOldAlertsHistoryEdgeCases tests edge cases for alerts history deletion
|
||||||
|
func TestDeleteOldAlertsHistoryEdgeCases(t *testing.T) {
|
||||||
|
hub, err := tests.NewTestHub(t.TempDir())
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
t.Run("No users with excessive alerts", func(t *testing.T) {
|
||||||
|
// Create user with few alerts
|
||||||
|
user, err := tests.CreateUser(hub, "few@example.com", "testtesttest")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "test-system",
|
||||||
|
"host": "localhost",
|
||||||
|
"port": "45876",
|
||||||
|
"status": "up",
|
||||||
|
"users": []string{user.Id},
|
||||||
|
})
|
||||||
|
|
||||||
|
// Create only 5 alerts (well below threshold)
|
||||||
|
for i := range 5 {
|
||||||
|
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
||||||
|
"user": user.Id,
|
||||||
|
"name": "CPU",
|
||||||
|
"value": i + 1,
|
||||||
|
"system": system.Id,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should not error and should not delete anything
|
||||||
|
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
count, err := hub.CountRecords("alerts_history")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, int64(5), count, "All alerts should remain")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Empty alerts_history table", func(t *testing.T) {
|
||||||
|
// Clear any existing alerts
|
||||||
|
_, err := hub.DB().NewQuery("DELETE FROM alerts_history").Execute()
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Should not error with empty table
|
||||||
|
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
||||||
|
require.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestDeleteOldSystemdServiceRecords tests systemd service cleanup via
// DeleteOldRecords: a record last updated over 20 minutes ago must be
// removed, one updated within 20 minutes must survive.
func TestDeleteOldSystemdServiceRecords(t *testing.T) {
	hub, err := tests.NewTestHub(t.TempDir())
	require.NoError(t, err)
	defer hub.Cleanup()

	rm := records.NewRecordManager(hub)

	// Create test user and system
	user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
	require.NoError(t, err)

	system, err := tests.CreateRecord(hub, "systems", map[string]any{
		"name":   "test-system",
		"host":   "localhost",
		"port":   "45876",
		"status": "up",
		"users":  []string{user.Id},
	})
	require.NoError(t, err)

	now := time.Now().UTC()

	// Create old systemd service records that should be deleted (older than 20 minutes)
	oldRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
		"system":  system.Id,
		"name":    "nginx.service",
		"state":   0, // Active
		"sub":     1, // Running
		"cpu":     5.0,
		"cpuPeak": 10.0,
		"memory":  1024000,
		"memPeak": 2048000,
	})
	require.NoError(t, err)
	// Set updated time to 25 minutes ago (should be deleted);
	// "updated" is stored as unix milliseconds in this collection
	oldRecord.SetRaw("updated", now.Add(-25*time.Minute).UnixMilli())
	err = hub.SaveNoValidate(oldRecord)
	require.NoError(t, err)

	// Create recent systemd service record that should be kept (within 20 minutes)
	recentRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
		"system":  system.Id,
		"name":    "apache.service",
		"state":   1, // Inactive
		"sub":     0, // Dead
		"cpu":     2.0,
		"cpuPeak": 3.0,
		"memory":  512000,
		"memPeak": 1024000,
	})
	require.NoError(t, err)
	// Set updated time to 10 minutes ago (should be kept)
	recentRecord.SetRaw("updated", now.Add(-10*time.Minute).UnixMilli())
	err = hub.SaveNoValidate(recentRecord)
	require.NoError(t, err)

	// Count records before deletion
	countBefore, err := hub.CountRecords("systemd_services")
	require.NoError(t, err)
	assert.Equal(t, int64(2), countBefore, "Should have 2 systemd service records initially")

	// Run deletion via RecordManager
	rm.DeleteOldRecords()

	// Count records after deletion
	countAfter, err := hub.CountRecords("systemd_services")
	require.NoError(t, err)
	assert.Equal(t, int64(1), countAfter, "Should have 1 systemd service record after deletion")

	// Verify the correct record was kept
	remainingRecords, err := hub.FindRecordsByFilter("systemd_services", "", "", 10, 0, nil)
	require.NoError(t, err)
	assert.Len(t, remainingRecords, 1, "Should have exactly 1 record remaining")
	assert.Equal(t, "apache.service", remainingRecords[0].Get("name"), "The recent record should be kept")
}
|
||||||
@@ -3,430 +3,15 @@
|
|||||||
package records_test
|
package records_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/records"
|
"github.com/henrygd/beszel/internal/records"
|
||||||
"github.com/henrygd/beszel/internal/tests"
|
"github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
"github.com/pocketbase/dbx"
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
|
||||||
"github.com/pocketbase/pocketbase/tools/types"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestDeleteOldRecords tests the main DeleteOldRecords function
|
|
||||||
func TestDeleteOldRecords(t *testing.T) {
|
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
rm := records.NewRecordManager(hub)
|
|
||||||
|
|
||||||
// Create test user for alerts history
|
|
||||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create test system
|
|
||||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"host": "localhost",
|
|
||||||
"port": "45876",
|
|
||||||
"status": "up",
|
|
||||||
"users": []string{user.Id},
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
|
|
||||||
// Create old system_stats records that should be deleted
|
|
||||||
var record *core.Record
|
|
||||||
record, err = tests.CreateRecord(hub, "system_stats", map[string]any{
|
|
||||||
"system": system.Id,
|
|
||||||
"type": "1m",
|
|
||||||
"stats": `{"cpu": 50.0, "mem": 1024}`,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
// created is autodate field, so we need to set it manually
|
|
||||||
record.SetRaw("created", now.UTC().Add(-2*time.Hour).Format(types.DefaultDateLayout))
|
|
||||||
err = hub.SaveNoValidate(record)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NotNil(t, record)
|
|
||||||
require.InDelta(t, record.GetDateTime("created").Time().UTC().Unix(), now.UTC().Add(-2*time.Hour).Unix(), 1)
|
|
||||||
require.Equal(t, record.Get("system"), system.Id)
|
|
||||||
require.Equal(t, record.Get("type"), "1m")
|
|
||||||
|
|
||||||
// Create recent system_stats record that should be kept
|
|
||||||
_, err = tests.CreateRecord(hub, "system_stats", map[string]any{
|
|
||||||
"system": system.Id,
|
|
||||||
"type": "1m",
|
|
||||||
"stats": `{"cpu": 30.0, "mem": 512}`,
|
|
||||||
"created": now.Add(-30 * time.Minute), // 30 minutes old, should be kept
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create many alerts history records to trigger deletion
|
|
||||||
for i := range 260 { // More than countBeforeDeletion (250)
|
|
||||||
_, err = tests.CreateRecord(hub, "alerts_history", map[string]any{
|
|
||||||
"user": user.Id,
|
|
||||||
"name": "CPU",
|
|
||||||
"value": i + 1,
|
|
||||||
"system": system.Id,
|
|
||||||
"created": now.Add(-time.Duration(i) * time.Minute),
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count records before deletion
|
|
||||||
systemStatsCountBefore, err := hub.CountRecords("system_stats")
|
|
||||||
require.NoError(t, err)
|
|
||||||
alertsCountBefore, err := hub.CountRecords("alerts_history")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Run deletion
|
|
||||||
rm.DeleteOldRecords()
|
|
||||||
|
|
||||||
// Count records after deletion
|
|
||||||
systemStatsCountAfter, err := hub.CountRecords("system_stats")
|
|
||||||
require.NoError(t, err)
|
|
||||||
alertsCountAfter, err := hub.CountRecords("alerts_history")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify old system stats were deleted
|
|
||||||
assert.Less(t, systemStatsCountAfter, systemStatsCountBefore, "Old system stats should be deleted")
|
|
||||||
|
|
||||||
// Verify alerts history was trimmed
|
|
||||||
assert.Less(t, alertsCountAfter, alertsCountBefore, "Excessive alerts history should be deleted")
|
|
||||||
assert.Equal(t, alertsCountAfter, int64(200), "Alerts count should be equal to countToKeep (200)")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDeleteOldSystemStats tests the deleteOldSystemStats function
|
|
||||||
func TestDeleteOldSystemStats(t *testing.T) {
|
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
// Create test system
|
|
||||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"host": "localhost",
|
|
||||||
"port": "45876",
|
|
||||||
"status": "up",
|
|
||||||
"users": []string{user.Id},
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
now := time.Now().UTC()
|
|
||||||
|
|
||||||
// Test data for different record types and their retention periods
|
|
||||||
testCases := []struct {
|
|
||||||
recordType string
|
|
||||||
retention time.Duration
|
|
||||||
shouldBeKept bool
|
|
||||||
ageFromNow time.Duration
|
|
||||||
description string
|
|
||||||
}{
|
|
||||||
{"1m", time.Hour, true, 30 * time.Minute, "1m record within 1 hour should be kept"},
|
|
||||||
{"1m", time.Hour, false, 2 * time.Hour, "1m record older than 1 hour should be deleted"},
|
|
||||||
{"10m", 12 * time.Hour, true, 6 * time.Hour, "10m record within 12 hours should be kept"},
|
|
||||||
{"10m", 12 * time.Hour, false, 24 * time.Hour, "10m record older than 12 hours should be deleted"},
|
|
||||||
{"20m", 24 * time.Hour, true, 12 * time.Hour, "20m record within 24 hours should be kept"},
|
|
||||||
{"20m", 24 * time.Hour, false, 48 * time.Hour, "20m record older than 24 hours should be deleted"},
|
|
||||||
{"120m", 7 * 24 * time.Hour, true, 3 * 24 * time.Hour, "120m record within 7 days should be kept"},
|
|
||||||
{"120m", 7 * 24 * time.Hour, false, 10 * 24 * time.Hour, "120m record older than 7 days should be deleted"},
|
|
||||||
{"480m", 30 * 24 * time.Hour, true, 15 * 24 * time.Hour, "480m record within 30 days should be kept"},
|
|
||||||
{"480m", 30 * 24 * time.Hour, false, 45 * 24 * time.Hour, "480m record older than 30 days should be deleted"},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create test records for both system_stats and container_stats
|
|
||||||
collections := []string{"system_stats", "container_stats"}
|
|
||||||
recordIds := make(map[string][]string)
|
|
||||||
|
|
||||||
for _, collection := range collections {
|
|
||||||
recordIds[collection] = make([]string, 0)
|
|
||||||
|
|
||||||
for i, tc := range testCases {
|
|
||||||
recordTime := now.Add(-tc.ageFromNow)
|
|
||||||
|
|
||||||
var stats string
|
|
||||||
if collection == "system_stats" {
|
|
||||||
stats = fmt.Sprintf(`{"cpu": %d.0, "mem": %d}`, i*10, i*100)
|
|
||||||
} else {
|
|
||||||
stats = fmt.Sprintf(`[{"name": "container%d", "cpu": %d.0, "mem": %d}]`, i, i*5, i*50)
|
|
||||||
}
|
|
||||||
|
|
||||||
record, err := tests.CreateRecord(hub, collection, map[string]any{
|
|
||||||
"system": system.Id,
|
|
||||||
"type": tc.recordType,
|
|
||||||
"stats": stats,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
|
||||||
err = hub.SaveNoValidate(record)
|
|
||||||
require.NoError(t, err)
|
|
||||||
recordIds[collection] = append(recordIds[collection], record.Id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run deletion
|
|
||||||
err = records.DeleteOldSystemStats(hub)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify results
|
|
||||||
for _, collection := range collections {
|
|
||||||
for i, tc := range testCases {
|
|
||||||
recordId := recordIds[collection][i]
|
|
||||||
|
|
||||||
// Try to find the record
|
|
||||||
_, err := hub.FindRecordById(collection, recordId)
|
|
||||||
|
|
||||||
if tc.shouldBeKept {
|
|
||||||
assert.NoError(t, err, "Record should exist: %s", tc.description)
|
|
||||||
} else {
|
|
||||||
assert.Error(t, err, "Record should be deleted: %s", tc.description)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDeleteOldAlertsHistory tests the deleteOldAlertsHistory function
|
|
||||||
func TestDeleteOldAlertsHistory(t *testing.T) {
|
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
// Create test users
|
|
||||||
user1, err := tests.CreateUser(hub, "user1@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
user2, err := tests.CreateUser(hub, "user2@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"host": "localhost",
|
|
||||||
"port": "45876",
|
|
||||||
"status": "up",
|
|
||||||
"users": []string{user1.Id, user2.Id},
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
now := time.Now().UTC()
|
|
||||||
|
|
||||||
testCases := []struct {
|
|
||||||
name string
|
|
||||||
user *core.Record
|
|
||||||
alertCount int
|
|
||||||
countToKeep int
|
|
||||||
countBeforeDeletion int
|
|
||||||
expectedAfterDeletion int
|
|
||||||
description string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "User with few alerts (below threshold)",
|
|
||||||
user: user1,
|
|
||||||
alertCount: 100,
|
|
||||||
countToKeep: 50,
|
|
||||||
countBeforeDeletion: 150,
|
|
||||||
expectedAfterDeletion: 100, // No deletion because below threshold
|
|
||||||
description: "User with alerts below countBeforeDeletion should not have any deleted",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "User with many alerts (above threshold)",
|
|
||||||
user: user2,
|
|
||||||
alertCount: 300,
|
|
||||||
countToKeep: 100,
|
|
||||||
countBeforeDeletion: 200,
|
|
||||||
expectedAfterDeletion: 100, // Should be trimmed to countToKeep
|
|
||||||
description: "User with alerts above countBeforeDeletion should be trimmed to countToKeep",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tc := range testCases {
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
// Create alerts for this user
|
|
||||||
for i := 0; i < tc.alertCount; i++ {
|
|
||||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
|
||||||
"user": tc.user.Id,
|
|
||||||
"name": "CPU",
|
|
||||||
"value": i + 1,
|
|
||||||
"system": system.Id,
|
|
||||||
"created": now.Add(-time.Duration(i) * time.Minute),
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count before deletion
|
|
||||||
countBefore, err := hub.CountRecords("alerts_history",
|
|
||||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(tc.alertCount), countBefore, "Initial count should match")
|
|
||||||
|
|
||||||
// Run deletion
|
|
||||||
err = records.DeleteOldAlertsHistory(hub, tc.countToKeep, tc.countBeforeDeletion)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Count after deletion
|
|
||||||
countAfter, err := hub.CountRecords("alerts_history",
|
|
||||||
dbx.NewExp("user = {:user}", dbx.Params{"user": tc.user.Id}))
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
assert.Equal(t, int64(tc.expectedAfterDeletion), countAfter, tc.description)
|
|
||||||
|
|
||||||
// If deletion occurred, verify the most recent records were kept
|
|
||||||
if tc.expectedAfterDeletion < tc.alertCount {
|
|
||||||
records, err := hub.FindRecordsByFilter("alerts_history",
|
|
||||||
"user = {:user}",
|
|
||||||
"-created", // Order by created DESC
|
|
||||||
tc.countToKeep,
|
|
||||||
0,
|
|
||||||
map[string]any{"user": tc.user.Id})
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, records, tc.expectedAfterDeletion, "Should have exactly countToKeep records")
|
|
||||||
|
|
||||||
// Verify records are in descending order by created time
|
|
||||||
for i := 1; i < len(records); i++ {
|
|
||||||
prev := records[i-1].GetDateTime("created").Time()
|
|
||||||
curr := records[i].GetDateTime("created").Time()
|
|
||||||
assert.True(t, prev.After(curr) || prev.Equal(curr),
|
|
||||||
"Records should be ordered by created time (newest first)")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDeleteOldAlertsHistoryEdgeCases tests edge cases for alerts history deletion
|
|
||||||
func TestDeleteOldAlertsHistoryEdgeCases(t *testing.T) {
|
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
t.Run("No users with excessive alerts", func(t *testing.T) {
|
|
||||||
// Create user with few alerts
|
|
||||||
user, err := tests.CreateUser(hub, "few@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"host": "localhost",
|
|
||||||
"port": "45876",
|
|
||||||
"status": "up",
|
|
||||||
"users": []string{user.Id},
|
|
||||||
})
|
|
||||||
|
|
||||||
// Create only 5 alerts (well below threshold)
|
|
||||||
for i := range 5 {
|
|
||||||
_, err := tests.CreateRecord(hub, "alerts_history", map[string]any{
|
|
||||||
"user": user.Id,
|
|
||||||
"name": "CPU",
|
|
||||||
"value": i + 1,
|
|
||||||
"system": system.Id,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Should not error and should not delete anything
|
|
||||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
count, err := hub.CountRecords("alerts_history")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(5), count, "All alerts should remain")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("Empty alerts_history table", func(t *testing.T) {
|
|
||||||
// Clear any existing alerts
|
|
||||||
_, err := hub.DB().NewQuery("DELETE FROM alerts_history").Execute()
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Should not error with empty table
|
|
||||||
err = records.DeleteOldAlertsHistory(hub, 10, 20)
|
|
||||||
require.NoError(t, err)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestDeleteOldSystemdServiceRecords tests systemd service cleanup via DeleteOldRecords
|
|
||||||
func TestDeleteOldSystemdServiceRecords(t *testing.T) {
|
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
rm := records.NewRecordManager(hub)
|
|
||||||
|
|
||||||
// Create test user and system
|
|
||||||
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"host": "localhost",
|
|
||||||
"port": "45876",
|
|
||||||
"status": "up",
|
|
||||||
"users": []string{user.Id},
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
now := time.Now().UTC()
|
|
||||||
|
|
||||||
// Create old systemd service records that should be deleted (older than 20 minutes)
|
|
||||||
oldRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
|
||||||
"system": system.Id,
|
|
||||||
"name": "nginx.service",
|
|
||||||
"state": 0, // Active
|
|
||||||
"sub": 1, // Running
|
|
||||||
"cpu": 5.0,
|
|
||||||
"cpuPeak": 10.0,
|
|
||||||
"memory": 1024000,
|
|
||||||
"memPeak": 2048000,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
// Set updated time to 25 minutes ago (should be deleted)
|
|
||||||
oldRecord.SetRaw("updated", now.Add(-25*time.Minute).UnixMilli())
|
|
||||||
err = hub.SaveNoValidate(oldRecord)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Create recent systemd service record that should be kept (within 20 minutes)
|
|
||||||
recentRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
|
||||||
"system": system.Id,
|
|
||||||
"name": "apache.service",
|
|
||||||
"state": 1, // Inactive
|
|
||||||
"sub": 0, // Dead
|
|
||||||
"cpu": 2.0,
|
|
||||||
"cpuPeak": 3.0,
|
|
||||||
"memory": 512000,
|
|
||||||
"memPeak": 1024000,
|
|
||||||
})
|
|
||||||
require.NoError(t, err)
|
|
||||||
// Set updated time to 10 minutes ago (should be kept)
|
|
||||||
recentRecord.SetRaw("updated", now.Add(-10*time.Minute).UnixMilli())
|
|
||||||
err = hub.SaveNoValidate(recentRecord)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
// Count records before deletion
|
|
||||||
countBefore, err := hub.CountRecords("systemd_services")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(2), countBefore, "Should have 2 systemd service records initially")
|
|
||||||
|
|
||||||
// Run deletion via RecordManager
|
|
||||||
rm.DeleteOldRecords()
|
|
||||||
|
|
||||||
// Count records after deletion
|
|
||||||
countAfter, err := hub.CountRecords("systemd_services")
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, int64(1), countAfter, "Should have 1 systemd service record after deletion")
|
|
||||||
|
|
||||||
// Verify the correct record was kept
|
|
||||||
remainingRecords, err := hub.FindRecordsByFilter("systemd_services", "", "", 10, 0, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Len(t, remainingRecords, 1, "Should have exactly 1 record remaining")
|
|
||||||
assert.Equal(t, "apache.service", remainingRecords[0].Get("name"), "The recent record should be kept")
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestRecordManagerCreation tests RecordManager creation
|
// TestRecordManagerCreation tests RecordManager creation
|
||||||
func TestRecordManagerCreation(t *testing.T) {
|
func TestRecordManagerCreation(t *testing.T) {
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
hub, err := tests.NewTestHub(t.TempDir())
|
||||||
|
|||||||
@@ -22,11 +22,7 @@
|
|||||||
})();
|
})();
|
||||||
</script>
|
</script>
|
||||||
<script>
|
<script>
|
||||||
globalThis.BESZEL = {
|
globalThis.BESZEL = "{info}"
|
||||||
BASE_PATH: "%BASE_URL%",
|
|
||||||
HUB_VERSION: "{{V}}",
|
|
||||||
HUB_URL: "{{HUB_URL}}"
|
|
||||||
}
|
|
||||||
</script>
|
</script>
|
||||||
</head>
|
</head>
|
||||||
<body>
|
<body>
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ import { Label } from "@/components/ui/label"
|
|||||||
import { pb } from "@/lib/api"
|
import { pb } from "@/lib/api"
|
||||||
import { $authenticated } from "@/lib/stores"
|
import { $authenticated } from "@/lib/stores"
|
||||||
import { cn } from "@/lib/utils"
|
import { cn } from "@/lib/utils"
|
||||||
import { $router, Link, prependBasePath } from "../router"
|
import { $router, Link, basePath, prependBasePath } from "../router"
|
||||||
import { toast } from "../ui/use-toast"
|
import { toast } from "../ui/use-toast"
|
||||||
import { OtpInputForm } from "./otp-forms"
|
import { OtpInputForm } from "./otp-forms"
|
||||||
|
|
||||||
@@ -37,8 +37,7 @@ const RegisterSchema = v.looseObject({
|
|||||||
passwordConfirm: passwordSchema,
|
passwordConfirm: passwordSchema,
|
||||||
})
|
})
|
||||||
|
|
||||||
export const showLoginFaliedToast = (description?: string) => {
|
export const showLoginFaliedToast = (description = t`Please check your credentials and try again`) => {
|
||||||
description ||= t`Please check your credentials and try again`
|
|
||||||
toast({
|
toast({
|
||||||
title: t`Login attempt failed`,
|
title: t`Login attempt failed`,
|
||||||
description,
|
description,
|
||||||
@@ -130,10 +129,6 @@ export function UserAuthForm({
|
|||||||
[isFirstRun]
|
[isFirstRun]
|
||||||
)
|
)
|
||||||
|
|
||||||
if (!authMethods) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
const authProviders = authMethods.oauth2.providers ?? []
|
const authProviders = authMethods.oauth2.providers ?? []
|
||||||
const oauthEnabled = authMethods.oauth2.enabled && authProviders.length > 0
|
const oauthEnabled = authMethods.oauth2.enabled && authProviders.length > 0
|
||||||
const passwordEnabled = authMethods.password.enabled
|
const passwordEnabled = authMethods.password.enabled
|
||||||
@@ -142,6 +137,12 @@ export function UserAuthForm({
|
|||||||
|
|
||||||
function loginWithOauth(provider: AuthProviderInfo, forcePopup = false) {
|
function loginWithOauth(provider: AuthProviderInfo, forcePopup = false) {
|
||||||
setIsOauthLoading(true)
|
setIsOauthLoading(true)
|
||||||
|
|
||||||
|
if (globalThis.BESZEL.OAUTH_DISABLE_POPUP) {
|
||||||
|
redirectToOauthProvider(provider)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
const oAuthOpts: OAuth2AuthConfig = {
|
const oAuthOpts: OAuth2AuthConfig = {
|
||||||
provider: provider.name,
|
provider: provider.name,
|
||||||
}
|
}
|
||||||
@@ -150,10 +151,7 @@ export function UserAuthForm({
|
|||||||
const authWindow = window.open()
|
const authWindow = window.open()
|
||||||
if (!authWindow) {
|
if (!authWindow) {
|
||||||
setIsOauthLoading(false)
|
setIsOauthLoading(false)
|
||||||
toast({
|
showLoginFaliedToast(t`Please enable pop-ups for this site`)
|
||||||
title: t`Error`,
|
|
||||||
description: t`Please enable pop-ups for this site`,
|
|
||||||
})
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
oAuthOpts.urlCallback = (url) => {
|
oAuthOpts.urlCallback = (url) => {
|
||||||
@@ -171,16 +169,57 @@ export function UserAuthForm({
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Redirects the user to the OAuth provider's authentication page in the same window.
|
||||||
|
* Requires the app's base URL to be registered as a redirect URI with the OAuth provider.
|
||||||
|
*/
|
||||||
|
function redirectToOauthProvider(provider: AuthProviderInfo) {
|
||||||
|
const url = new URL(provider.authURL)
|
||||||
|
// url.searchParams.set("redirect_uri", `${window.location.origin}${basePath}`)
|
||||||
|
sessionStorage.setItem("provider", JSON.stringify(provider))
|
||||||
|
window.location.href = url.toString()
|
||||||
|
}
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
// auto login if password disabled and only one auth provider
|
// handle redirect-based OAuth callback if we have a code
|
||||||
if (!passwordEnabled && authProviders.length === 1 && !sessionStorage.getItem("lo")) {
|
const params = new URLSearchParams(window.location.search)
|
||||||
// Add a small timeout to ensure browser is ready to handle popups
|
const code = params.get("code")
|
||||||
setTimeout(() => {
|
if (code) {
|
||||||
loginWithOauth(authProviders[0], true)
|
const state = params.get("state")
|
||||||
}, 300)
|
const provider: AuthProviderInfo = JSON.parse(sessionStorage.getItem("provider") ?? "{}")
|
||||||
|
if (!state || provider.state !== state) {
|
||||||
|
showLoginFaliedToast()
|
||||||
|
} else {
|
||||||
|
setIsOauthLoading(true)
|
||||||
|
window.history.replaceState({}, "", window.location.pathname)
|
||||||
|
pb.collection("users")
|
||||||
|
.authWithOAuth2Code(provider.name, code, provider.codeVerifier, `${window.location.origin}${basePath}`)
|
||||||
|
.then(() => $authenticated.set(pb.authStore.isValid))
|
||||||
|
.catch((e: unknown) => showLoginFaliedToast((e as Error).message))
|
||||||
|
.finally(() => setIsOauthLoading(false))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// auto login if password disabled and only one auth provider
|
||||||
|
if (!code && !passwordEnabled && authProviders.length === 1 && !sessionStorage.getItem("lo")) {
|
||||||
|
// Add a small timeout to ensure browser is ready to handle popups
|
||||||
|
setTimeout(() => loginWithOauth(authProviders[0], false), 300)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// refresh auth if not in above states (required for trusted auth header)
|
||||||
|
pb.collection("users")
|
||||||
|
.authRefresh()
|
||||||
|
.then((res) => {
|
||||||
|
pb.authStore.save(res.token, res.record)
|
||||||
|
$authenticated.set(!!pb.authStore.isValid)
|
||||||
|
})
|
||||||
}, [])
|
}, [])
|
||||||
|
|
||||||
|
if (!authMethods) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
if (otpId && mfaId) {
|
if (otpId && mfaId) {
|
||||||
return <OtpInputForm otpId={otpId} mfaId={mfaId} />
|
return <OtpInputForm otpId={otpId} mfaId={mfaId} />
|
||||||
}
|
}
|
||||||
@@ -248,7 +287,7 @@ export function UserAuthForm({
|
|||||||
)}
|
)}
|
||||||
<div className="sr-only">
|
<div className="sr-only">
|
||||||
{/* honeypot */}
|
{/* honeypot */}
|
||||||
<label htmlFor="website"></label>
|
<label htmlFor="website">Website</label>
|
||||||
<input
|
<input
|
||||||
id="website"
|
id="website"
|
||||||
type="text"
|
type="text"
|
||||||
|
|||||||
@@ -110,20 +110,23 @@ export function SystemsTableColumns(viewMode: "table" | "grid"): ColumnDef<Syste
|
|||||||
|
|
||||||
// match filter value against name or translated status
|
// match filter value against name or translated status
|
||||||
return (row, _, newFilterInput) => {
|
return (row, _, newFilterInput) => {
|
||||||
const { name, status } = row.original
|
const sys = row.original
|
||||||
|
if (sys.host.includes(newFilterInput) || sys.info.v?.includes(newFilterInput)) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
if (newFilterInput !== filterInput) {
|
if (newFilterInput !== filterInput) {
|
||||||
filterInput = newFilterInput
|
filterInput = newFilterInput
|
||||||
filterInputLower = newFilterInput.toLowerCase()
|
filterInputLower = newFilterInput.toLowerCase()
|
||||||
}
|
}
|
||||||
let nameLower = nameCache.get(name)
|
let nameLower = nameCache.get(sys.name)
|
||||||
if (nameLower === undefined) {
|
if (nameLower === undefined) {
|
||||||
nameLower = name.toLowerCase()
|
nameLower = sys.name.toLowerCase()
|
||||||
nameCache.set(name, nameLower)
|
nameCache.set(sys.name, nameLower)
|
||||||
}
|
}
|
||||||
if (nameLower.includes(filterInputLower)) {
|
if (nameLower.includes(filterInputLower)) {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
const statusLower = statusTranslations[status as keyof typeof statusTranslations]
|
const statusLower = statusTranslations[sys.status as keyof typeof statusTranslations]
|
||||||
return statusLower?.includes(filterInputLower) || false
|
return statusLower?.includes(filterInputLower) || false
|
||||||
}
|
}
|
||||||
})(),
|
})(),
|
||||||
|
|||||||
@@ -94,18 +94,6 @@ const Layout = () => {
|
|||||||
document.documentElement.dir = direction
|
document.documentElement.dir = direction
|
||||||
}, [direction])
|
}, [direction])
|
||||||
|
|
||||||
useEffect(() => {
|
|
||||||
// refresh auth if not authenticated (required for trusted auth header)
|
|
||||||
if (!authenticated) {
|
|
||||||
pb.collection("users")
|
|
||||||
.authRefresh()
|
|
||||||
.then((res) => {
|
|
||||||
pb.authStore.save(res.token, res.record)
|
|
||||||
$authenticated.set(!!pb.authStore.isValid)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}, [])
|
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<DirectionProvider dir={direction}>
|
<DirectionProvider dir={direction}>
|
||||||
{!authenticated ? (
|
{!authenticated ? (
|
||||||
|
|||||||
1
internal/site/src/types.d.ts
vendored
1
internal/site/src/types.d.ts
vendored
@@ -7,6 +7,7 @@ declare global {
|
|||||||
BASE_PATH: string
|
BASE_PATH: string
|
||||||
HUB_VERSION: string
|
HUB_VERSION: string
|
||||||
HUB_URL: string
|
HUB_URL: string
|
||||||
|
OAUTH_DISABLE_POPUP: boolean
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -7,7 +7,7 @@
|
|||||||
"module": "ESNext",
|
"module": "ESNext",
|
||||||
"lib": ["ES2022", "DOM", "DOM.Iterable"],
|
"lib": ["ES2022", "DOM", "DOM.Iterable"],
|
||||||
"skipLibCheck": true,
|
"skipLibCheck": true,
|
||||||
"baseUrl": ".",
|
// "baseUrl": ".",
|
||||||
"paths": {
|
"paths": {
|
||||||
"@/*": ["./src/*"]
|
"@/*": ["./src/*"]
|
||||||
},
|
},
|
||||||
|
|||||||
Reference in New Issue
Block a user