mirror of
https://github.com/henrygd/beszel.git
synced 2026-03-28 16:36:16 +01:00
Compare commits
43 Commits
ec7ad632a9
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9840b99327 | ||
|
|
f7b5a505e8 | ||
|
|
3cb32ac046 | ||
|
|
e610d9bfc8 | ||
|
|
b53fdbe0ef | ||
|
|
c7261b56f1 | ||
|
|
3f4c3d51b6 | ||
|
|
ad21cab457 | ||
|
|
f04684b30a | ||
|
|
4d4e4fba9b | ||
|
|
62587919f4 | ||
|
|
35528332fd | ||
|
|
e3e453140e | ||
|
|
7a64da9f65 | ||
|
|
8e71c8ad97 | ||
|
|
97f3b8c61f | ||
|
|
0b0b5d16d7 | ||
|
|
b2fd50211e | ||
|
|
c159eaacd1 | ||
|
|
441bdd2ec5 | ||
|
|
ff36138229 | ||
|
|
be70840609 | ||
|
|
565162ef5f | ||
|
|
adbfe7cfb7 | ||
|
|
1ff7762c80 | ||
|
|
0ab8a606e0 | ||
|
|
e4e0affbc1 | ||
|
|
c3a0e645ee | ||
|
|
c6c3950fb0 | ||
|
|
48ddc96a0d | ||
|
|
704cb86de8 | ||
|
|
2854ce882f | ||
|
|
ed50367f70 | ||
|
|
4ebe869591 | ||
|
|
c9bbbe91f2 | ||
|
|
5bfe4f6970 | ||
|
|
380d2b1091 | ||
|
|
a7f99e7a8c | ||
|
|
bd94a9d142 | ||
|
|
8e2316f845 | ||
|
|
0d3dfcb207 | ||
|
|
b386ce5190 | ||
|
|
e527534016 |
6
.github/workflows/vulncheck.yml
vendored
6
.github/workflows/vulncheck.yml
vendored
@@ -19,11 +19,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version: 1.25.x
|
go-version: 1.26.x
|
||||||
# cached: false
|
# cached: false
|
||||||
- name: Get official govulncheck
|
- name: Get official govulncheck
|
||||||
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
|||||||
@@ -70,19 +70,11 @@ func TestNewWebSocketClient(t *testing.T) {
|
|||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
// Set up environment
|
// Set up environment
|
||||||
if tc.hubURL != "" {
|
if tc.hubURL != "" {
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", tc.hubURL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", tc.hubURL)
|
||||||
} else {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
}
|
}
|
||||||
if tc.token != "" {
|
if tc.token != "" {
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", tc.token)
|
t.Setenv("BESZEL_AGENT_TOKEN", tc.token)
|
||||||
} else {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}
|
}
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
|
|
||||||
@@ -138,12 +130,8 @@ func TestWebSocketClient_GetOptions(t *testing.T) {
|
|||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
// Set up environment
|
// Set up environment
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", tc.inputURL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", tc.inputURL)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -185,12 +173,8 @@ func TestWebSocketClient_VerifySignature(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Set up environment
|
// Set up environment
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -258,12 +242,8 @@ func TestWebSocketClient_HandleHubRequest(t *testing.T) {
|
|||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
|
|
||||||
// Set up environment
|
// Set up environment
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -350,13 +330,8 @@ func TestGetUserAgent(t *testing.T) {
|
|||||||
func TestWebSocketClient_Close(t *testing.T) {
|
func TestWebSocketClient_Close(t *testing.T) {
|
||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
|
|
||||||
// Set up environment
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -371,13 +346,8 @@ func TestWebSocketClient_Close(t *testing.T) {
|
|||||||
func TestWebSocketClient_ConnectRateLimit(t *testing.T) {
|
func TestWebSocketClient_ConnectRateLimit(t *testing.T) {
|
||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
|
|
||||||
// Set up environment
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -393,20 +363,10 @@ func TestWebSocketClient_ConnectRateLimit(t *testing.T) {
|
|||||||
|
|
||||||
// TestGetToken tests the getToken function with various scenarios
|
// TestGetToken tests the getToken function with various scenarios
|
||||||
func TestGetToken(t *testing.T) {
|
func TestGetToken(t *testing.T) {
|
||||||
unsetEnvVars := func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
os.Unsetenv("TOKEN")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN_FILE")
|
|
||||||
os.Unsetenv("TOKEN_FILE")
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("token from TOKEN environment variable", func(t *testing.T) {
|
t.Run("token from TOKEN environment variable", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Set TOKEN env var
|
// Set TOKEN env var
|
||||||
expectedToken := "test-token-from-env"
|
expectedToken := "test-token-from-env"
|
||||||
os.Setenv("TOKEN", expectedToken)
|
t.Setenv("TOKEN", expectedToken)
|
||||||
defer os.Unsetenv("TOKEN")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -414,12 +374,9 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("token from BESZEL_AGENT_TOKEN environment variable", func(t *testing.T) {
|
t.Run("token from BESZEL_AGENT_TOKEN environment variable", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Set BESZEL_AGENT_TOKEN env var (should take precedence)
|
// Set BESZEL_AGENT_TOKEN env var (should take precedence)
|
||||||
expectedToken := "test-token-from-beszel-env"
|
expectedToken := "test-token-from-beszel-env"
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", expectedToken)
|
t.Setenv("BESZEL_AGENT_TOKEN", expectedToken)
|
||||||
defer os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -427,8 +384,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("token from TOKEN_FILE", func(t *testing.T) {
|
t.Run("token from TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create a temporary token file
|
// Create a temporary token file
|
||||||
expectedToken := "test-token-from-file"
|
expectedToken := "test-token-from-file"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -440,8 +395,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
// Set TOKEN_FILE env var
|
// Set TOKEN_FILE env var
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -449,8 +403,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("token from BESZEL_AGENT_TOKEN_FILE", func(t *testing.T) {
|
t.Run("token from BESZEL_AGENT_TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create a temporary token file
|
// Create a temporary token file
|
||||||
expectedToken := "test-token-from-beszel-file"
|
expectedToken := "test-token-from-beszel-file"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -462,8 +414,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
// Set BESZEL_AGENT_TOKEN_FILE env var (should take precedence)
|
// Set BESZEL_AGENT_TOKEN_FILE env var (should take precedence)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN_FILE", tokenFile.Name())
|
t.Setenv("BESZEL_AGENT_TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("BESZEL_AGENT_TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -471,8 +422,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("TOKEN takes precedence over TOKEN_FILE", func(t *testing.T) {
|
t.Run("TOKEN takes precedence over TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create a temporary token file
|
// Create a temporary token file
|
||||||
fileToken := "token-from-file"
|
fileToken := "token-from-file"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -485,12 +434,8 @@ func TestGetToken(t *testing.T) {
|
|||||||
|
|
||||||
// Set both TOKEN and TOKEN_FILE
|
// Set both TOKEN and TOKEN_FILE
|
||||||
envToken := "token-from-env"
|
envToken := "token-from-env"
|
||||||
os.Setenv("TOKEN", envToken)
|
t.Setenv("TOKEN", envToken)
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("TOKEN")
|
|
||||||
os.Unsetenv("TOKEN_FILE")
|
|
||||||
}()
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -498,7 +443,10 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("error when neither TOKEN nor TOKEN_FILE is set", func(t *testing.T) {
|
t.Run("error when neither TOKEN nor TOKEN_FILE is set", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
t.Setenv("BESZEL_AGENT_TOKEN", "")
|
||||||
|
t.Setenv("TOKEN", "")
|
||||||
|
t.Setenv("BESZEL_AGENT_TOKEN_FILE", "")
|
||||||
|
t.Setenv("TOKEN_FILE", "")
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
@@ -507,11 +455,8 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("error when TOKEN_FILE points to non-existent file", func(t *testing.T) {
|
t.Run("error when TOKEN_FILE points to non-existent file", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Set TOKEN_FILE to a non-existent file
|
// Set TOKEN_FILE to a non-existent file
|
||||||
os.Setenv("TOKEN_FILE", "/non/existent/file.txt")
|
t.Setenv("TOKEN_FILE", "/non/existent/file.txt")
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
@@ -520,8 +465,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("handles empty token file", func(t *testing.T) {
|
t.Run("handles empty token file", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create an empty token file
|
// Create an empty token file
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -529,8 +472,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
// Set TOKEN_FILE env var
|
// Set TOKEN_FILE env var
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -538,8 +480,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("strips whitespace from TOKEN_FILE", func(t *testing.T) {
|
t.Run("strips whitespace from TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
tokenWithWhitespace := " test-token-with-whitespace \n\t"
|
tokenWithWhitespace := " test-token-with-whitespace \n\t"
|
||||||
expectedToken := "test-token-with-whitespace"
|
expectedToken := "test-token-with-whitespace"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -550,8 +490,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -183,10 +182,6 @@ func TestConnectionManager_TickerManagement(t *testing.T) {
|
|||||||
|
|
||||||
// TestConnectionManager_WebSocketConnectionFlow tests WebSocket connection logic
|
// TestConnectionManager_WebSocketConnectionFlow tests WebSocket connection logic
|
||||||
func TestConnectionManager_WebSocketConnectionFlow(t *testing.T) {
|
func TestConnectionManager_WebSocketConnectionFlow(t *testing.T) {
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("Skipping WebSocket connection test in short mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
cm := agent.connectionManager
|
cm := agent.connectionManager
|
||||||
|
|
||||||
@@ -196,19 +191,18 @@ func TestConnectionManager_WebSocketConnectionFlow(t *testing.T) {
|
|||||||
assert.Equal(t, Disconnected, cm.State, "State should remain Disconnected after failed connection")
|
assert.Equal(t, Disconnected, cm.State, "State should remain Disconnected after failed connection")
|
||||||
|
|
||||||
// Test with invalid URL
|
// Test with invalid URL
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "invalid-url")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "1,33%")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Test with missing token
|
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
|
|
||||||
_, err2 := newWebSocketClient(agent)
|
_, err2 := newWebSocketClient(agent)
|
||||||
assert.Error(t, err2, "WebSocket client creation should fail without token")
|
assert.Error(t, err2, "WebSocket client creation should fail with invalid URL")
|
||||||
|
|
||||||
|
// Test with missing token
|
||||||
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
|
t.Setenv("BESZEL_AGENT_TOKEN", "")
|
||||||
|
|
||||||
|
_, err3 := newWebSocketClient(agent)
|
||||||
|
assert.Error(t, err3, "WebSocket client creation should fail without token")
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestConnectionManager_ReconnectionLogic tests reconnection prevention logic
|
// TestConnectionManager_ReconnectionLogic tests reconnection prevention logic
|
||||||
@@ -234,12 +228,8 @@ func TestConnectionManager_ConnectWithRateLimit(t *testing.T) {
|
|||||||
cm := agent.connectionManager
|
cm := agent.connectionManager
|
||||||
|
|
||||||
// Set up environment for WebSocket client creation
|
// Set up environment for WebSocket client creation
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Create WebSocket client
|
// Create WebSocket client
|
||||||
wsClient, err := newWebSocketClient(agent)
|
wsClient, err := newWebSocketClient(agent)
|
||||||
@@ -285,12 +275,8 @@ func TestConnectionManager_CloseWebSocket(t *testing.T) {
|
|||||||
}, "Should not panic when closing nil WebSocket client")
|
}, "Should not panic when closing nil WebSocket client")
|
||||||
|
|
||||||
// Set up environment and create WebSocket client
|
// Set up environment and create WebSocket client
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
wsClient, err := newWebSocketClient(agent)
|
wsClient, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|||||||
@@ -39,17 +39,7 @@ func TestGetDataDir(t *testing.T) {
|
|||||||
t.Run("DATA_DIR environment variable", func(t *testing.T) {
|
t.Run("DATA_DIR environment variable", func(t *testing.T) {
|
||||||
tempDir := t.TempDir()
|
tempDir := t.TempDir()
|
||||||
|
|
||||||
// Set environment variable
|
t.Setenv("BESZEL_AGENT_DATA_DIR", tempDir)
|
||||||
oldValue := os.Getenv("DATA_DIR")
|
|
||||||
defer func() {
|
|
||||||
if oldValue == "" {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_DATA_DIR")
|
|
||||||
} else {
|
|
||||||
os.Setenv("BESZEL_AGENT_DATA_DIR", oldValue)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
os.Setenv("BESZEL_AGENT_DATA_DIR", tempDir)
|
|
||||||
|
|
||||||
result, err := GetDataDir()
|
result, err := GetDataDir()
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -65,17 +55,6 @@ func TestGetDataDir(t *testing.T) {
|
|||||||
|
|
||||||
// Test fallback behavior (empty dataDir, no env var)
|
// Test fallback behavior (empty dataDir, no env var)
|
||||||
t.Run("fallback to default directories", func(t *testing.T) {
|
t.Run("fallback to default directories", func(t *testing.T) {
|
||||||
// Clear DATA_DIR environment variable
|
|
||||||
oldValue := os.Getenv("DATA_DIR")
|
|
||||||
defer func() {
|
|
||||||
if oldValue == "" {
|
|
||||||
os.Unsetenv("DATA_DIR")
|
|
||||||
} else {
|
|
||||||
os.Setenv("DATA_DIR", oldValue)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
os.Unsetenv("DATA_DIR")
|
|
||||||
|
|
||||||
// This will try platform-specific defaults, which may or may not work
|
// This will try platform-specific defaults, which may or may not work
|
||||||
// We're mainly testing that it doesn't panic and returns some result
|
// We're mainly testing that it doesn't panic and returns some result
|
||||||
result, err := GetDataDir()
|
result, err := GetDataDir()
|
||||||
|
|||||||
392
agent/disk.go
392
agent/disk.go
@@ -14,6 +14,25 @@ import (
|
|||||||
"github.com/shirou/gopsutil/v4/disk"
|
"github.com/shirou/gopsutil/v4/disk"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// fsRegistrationContext holds the shared lookup state needed to resolve a
|
||||||
|
// filesystem into the tracked fsStats key and metadata.
|
||||||
|
type fsRegistrationContext struct {
|
||||||
|
filesystem string // value of optional FILESYSTEM env var
|
||||||
|
isWindows bool
|
||||||
|
efPath string // path to extra filesystems (default "/extra-filesystems")
|
||||||
|
diskIoCounters map[string]disk.IOCountersStat
|
||||||
|
}
|
||||||
|
|
||||||
|
// diskDiscovery groups the transient state for a single initializeDiskInfo run so
|
||||||
|
// helper methods can share the same partitions, mount paths, and lookup functions
|
||||||
|
type diskDiscovery struct {
|
||||||
|
agent *Agent
|
||||||
|
rootMountPoint string
|
||||||
|
partitions []disk.PartitionStat
|
||||||
|
usageFn func(string) (*disk.UsageStat, error)
|
||||||
|
ctx fsRegistrationContext
|
||||||
|
}
|
||||||
|
|
||||||
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
||||||
// Returns the device/filesystem part and the custom name part
|
// Returns the device/filesystem part and the custom name part
|
||||||
func parseFilesystemEntry(entry string) (device, customName string) {
|
func parseFilesystemEntry(entry string) (device, customName string) {
|
||||||
@@ -27,19 +46,230 @@ func parseFilesystemEntry(entry string) (device, customName string) {
|
|||||||
return device, customName
|
return device, customName
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// extraFilesystemPartitionInfo derives the I/O device and optional display name
|
||||||
|
// for a mounted /extra-filesystems partition. Prefer the partition device reported
|
||||||
|
// by the system and only use the folder name for custom naming metadata.
|
||||||
|
func extraFilesystemPartitionInfo(p disk.PartitionStat) (device, customName string) {
|
||||||
|
device = strings.TrimSpace(p.Device)
|
||||||
|
folderDevice, customName := parseFilesystemEntry(filepath.Base(p.Mountpoint))
|
||||||
|
if device == "" {
|
||||||
|
device = folderDevice
|
||||||
|
}
|
||||||
|
return device, customName
|
||||||
|
}
|
||||||
|
|
||||||
func isDockerSpecialMountpoint(mountpoint string) bool {
|
func isDockerSpecialMountpoint(mountpoint string) bool {
|
||||||
switch mountpoint {
|
switch mountpoint {
|
||||||
case "/etc/hosts", "/etc/resolv.conf", "/etc/hostname":
|
case "/etc/hosts", "/etc/resolv.conf", "/etc/hostname":
|
||||||
return true
|
return true
|
||||||
default:
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// registerFilesystemStats resolves the tracked key and stats payload for a
|
||||||
|
// filesystem before it is inserted into fsStats.
|
||||||
|
func registerFilesystemStats(existing map[string]*system.FsStats, device, mountpoint string, root bool, customName string, ctx fsRegistrationContext) (string, *system.FsStats, bool) {
|
||||||
|
key := device
|
||||||
|
if !ctx.isWindows {
|
||||||
|
key = filepath.Base(device)
|
||||||
|
}
|
||||||
|
|
||||||
|
if root {
|
||||||
|
// Try to map root device to a diskIoCounters entry. First checks for an
|
||||||
|
// exact key match, then uses findIoDevice for normalized / prefix-based
|
||||||
|
// matching (e.g. nda0p2 -> nda0), and finally falls back to FILESYSTEM.
|
||||||
|
if _, ioMatch := ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
if matchedKey, match := findIoDevice(key, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
} else if ctx.filesystem != "" {
|
||||||
|
if matchedKey, match := findIoDevice(ctx.filesystem, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ioMatch = ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
slog.Warn("Root I/O unmapped; set FILESYSTEM", "device", device, "mountpoint", mountpoint)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Check if non-root has diskstats and prefer the folder device for
|
||||||
|
// /extra-filesystems mounts when the discovered partition device is a
|
||||||
|
// mapper path (e.g. luks UUID) that obscures the underlying block device.
|
||||||
|
if _, ioMatch := ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
if strings.HasPrefix(mountpoint, ctx.efPath) {
|
||||||
|
folderDevice, _ := parseFilesystemEntry(filepath.Base(mountpoint))
|
||||||
|
if folderDevice != "" {
|
||||||
|
if matchedKey, match := findIoDevice(folderDevice, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ioMatch = ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
if matchedKey, match := findIoDevice(key, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, exists := existing[key]; exists {
|
||||||
|
return "", nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
fsStats := &system.FsStats{Root: root, Mountpoint: mountpoint}
|
||||||
|
if customName != "" {
|
||||||
|
fsStats.Name = customName
|
||||||
|
}
|
||||||
|
return key, fsStats, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// addFsStat inserts a discovered filesystem if it resolves to a new tracking
|
||||||
|
// key. The key selection itself lives in buildFsStatRegistration so that logic
|
||||||
|
// can stay directly unit-tested.
|
||||||
|
func (d *diskDiscovery) addFsStat(device, mountpoint string, root bool, customName string) {
|
||||||
|
key, fsStats, ok := registerFilesystemStats(d.agent.fsStats, device, mountpoint, root, customName, d.ctx)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.agent.fsStats[key] = fsStats
|
||||||
|
name := key
|
||||||
|
if customName != "" {
|
||||||
|
name = customName
|
||||||
|
}
|
||||||
|
slog.Info("Detected disk", "name", name, "device", device, "mount", mountpoint, "io", key, "root", root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// addConfiguredRootFs resolves FILESYSTEM against partitions first, then falls
|
||||||
|
// back to direct diskstats matching for setups like ZFS where partitions do not
|
||||||
|
// expose the physical device name.
|
||||||
|
func (d *diskDiscovery) addConfiguredRootFs() bool {
|
||||||
|
if d.ctx.filesystem == "" {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, p := range d.partitions {
|
||||||
|
if filesystemMatchesPartitionSetting(d.ctx.filesystem, p) {
|
||||||
|
d.addFsStat(p.Device, p.Mountpoint, true, "")
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FILESYSTEM may name a physical disk absent from partitions (e.g. ZFS lists
|
||||||
|
// dataset paths like zroot/ROOT/default, not block devices).
|
||||||
|
if ioKey, match := findIoDevice(d.ctx.filesystem, d.ctx.diskIoCounters); match {
|
||||||
|
d.agent.fsStats[ioKey] = &system.FsStats{Root: true, Mountpoint: d.rootMountPoint}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Warn("Partition details not found", "filesystem", d.ctx.filesystem)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isRootFallbackPartition(p disk.PartitionStat, rootMountPoint string) bool {
|
||||||
|
return p.Mountpoint == rootMountPoint ||
|
||||||
|
(isDockerSpecialMountpoint(p.Mountpoint) && strings.HasPrefix(p.Device, "/dev"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// addPartitionRootFs handles the non-configured root fallback path when a
|
||||||
|
// partition looks like the active root mount but still needs translating to an
|
||||||
|
// I/O device key.
|
||||||
|
func (d *diskDiscovery) addPartitionRootFs(device, mountpoint string) bool {
|
||||||
|
fs, match := findIoDevice(filepath.Base(device), d.ctx.diskIoCounters)
|
||||||
|
if !match {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// The resolved I/O device is already known here, so use it directly to avoid
|
||||||
|
// a second fallback search inside buildFsStatRegistration.
|
||||||
|
d.addFsStat(fs, mountpoint, true, "")
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// addLastResortRootFs is only used when neither FILESYSTEM nor partition-based
|
||||||
|
// heuristics can identify root, so it picks the busiest I/O device as a final
|
||||||
|
// fallback and preserves the root mountpoint for usage collection.
|
||||||
|
func (d *diskDiscovery) addLastResortRootFs() {
|
||||||
|
rootKey := mostActiveIoDevice(d.ctx.diskIoCounters)
|
||||||
|
if rootKey != "" {
|
||||||
|
slog.Warn("Using most active device for root I/O; set FILESYSTEM to override", "device", rootKey)
|
||||||
|
} else {
|
||||||
|
rootKey = filepath.Base(d.rootMountPoint)
|
||||||
|
if _, exists := d.agent.fsStats[rootKey]; exists {
|
||||||
|
rootKey = "root"
|
||||||
|
}
|
||||||
|
slog.Warn("Root I/O device not detected; set FILESYSTEM to override")
|
||||||
|
}
|
||||||
|
d.agent.fsStats[rootKey] = &system.FsStats{Root: true, Mountpoint: d.rootMountPoint}
|
||||||
|
}
|
||||||
|
|
||||||
|
// findPartitionByFilesystemSetting matches an EXTRA_FILESYSTEMS entry against a
|
||||||
|
// discovered partition either by mountpoint or by device suffix.
|
||||||
|
func findPartitionByFilesystemSetting(filesystem string, partitions []disk.PartitionStat) (disk.PartitionStat, bool) {
|
||||||
|
for _, p := range partitions {
|
||||||
|
if strings.HasSuffix(p.Device, filesystem) || p.Mountpoint == filesystem {
|
||||||
|
return p, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return disk.PartitionStat{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// addConfiguredExtraFsEntry resolves one EXTRA_FILESYSTEMS entry, preferring a
|
||||||
|
// discovered partition and falling back to any path that disk.Usage accepts.
|
||||||
|
func (d *diskDiscovery) addConfiguredExtraFsEntry(filesystem, customName string) {
|
||||||
|
if p, found := findPartitionByFilesystemSetting(filesystem, d.partitions); found {
|
||||||
|
d.addFsStat(p.Device, p.Mountpoint, false, customName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := d.usageFn(filesystem); err == nil {
|
||||||
|
d.addFsStat(filepath.Base(filesystem), filesystem, false, customName)
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
slog.Error("Invalid filesystem", "name", filesystem, "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addConfiguredExtraFilesystems parses and registers the comma-separated
|
||||||
|
// EXTRA_FILESYSTEMS env var entries.
|
||||||
|
func (d *diskDiscovery) addConfiguredExtraFilesystems(extraFilesystems string) {
|
||||||
|
for fsEntry := range strings.SplitSeq(extraFilesystems, ",") {
|
||||||
|
filesystem, customName := parseFilesystemEntry(fsEntry)
|
||||||
|
d.addConfiguredExtraFsEntry(filesystem, customName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addPartitionExtraFs registers partitions mounted under /extra-filesystems so
|
||||||
|
// their display names can come from the folder name while their I/O keys still
|
||||||
|
// prefer the underlying partition device.
|
||||||
|
func (d *diskDiscovery) addPartitionExtraFs(p disk.PartitionStat) {
|
||||||
|
if !strings.HasPrefix(p.Mountpoint, d.ctx.efPath) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
device, customName := extraFilesystemPartitionInfo(p)
|
||||||
|
d.addFsStat(device, p.Mountpoint, false, customName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// addExtraFilesystemFolders handles bare directories under /extra-filesystems
|
||||||
|
// that may not appear in partition discovery, while skipping mountpoints that
|
||||||
|
// were already registered from higher-fidelity sources.
|
||||||
|
func (d *diskDiscovery) addExtraFilesystemFolders(folderNames []string) {
|
||||||
|
existingMountpoints := make(map[string]bool, len(d.agent.fsStats))
|
||||||
|
for _, stats := range d.agent.fsStats {
|
||||||
|
existingMountpoints[stats.Mountpoint] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, folderName := range folderNames {
|
||||||
|
mountpoint := filepath.Join(d.ctx.efPath, folderName)
|
||||||
|
slog.Debug("/extra-filesystems", "mountpoint", mountpoint)
|
||||||
|
if existingMountpoints[mountpoint] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
device, customName := parseFilesystemEntry(folderName)
|
||||||
|
d.addFsStat(device, mountpoint, false, customName)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sets up the filesystems to monitor for disk usage and I/O.
|
// Sets up the filesystems to monitor for disk usage and I/O.
|
||||||
func (a *Agent) initializeDiskInfo() {
|
func (a *Agent) initializeDiskInfo() {
|
||||||
filesystem, _ := utils.GetEnv("FILESYSTEM")
|
filesystem, _ := utils.GetEnv("FILESYSTEM")
|
||||||
efPath := "/extra-filesystems"
|
|
||||||
hasRoot := false
|
hasRoot := false
|
||||||
isWindows := runtime.GOOS == "windows"
|
isWindows := runtime.GOOS == "windows"
|
||||||
|
|
||||||
@@ -56,167 +286,57 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ioContext := context.WithValue(a.sensorsContext,
|
|
||||||
// common.EnvKey, common.EnvMap{common.HostProcEnvKey: "/tmp/testproc"},
|
|
||||||
// )
|
|
||||||
// diskIoCounters, err := disk.IOCountersWithContext(ioContext)
|
|
||||||
|
|
||||||
diskIoCounters, err := disk.IOCounters()
|
diskIoCounters, err := disk.IOCounters()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error("Error getting diskstats", "err", err)
|
slog.Error("Error getting diskstats", "err", err)
|
||||||
}
|
}
|
||||||
slog.Debug("Disk I/O", "diskstats", diskIoCounters)
|
slog.Debug("Disk I/O", "diskstats", diskIoCounters)
|
||||||
|
ctx := fsRegistrationContext{
|
||||||
// Helper function to add a filesystem to fsStats if it doesn't exist
|
filesystem: filesystem,
|
||||||
addFsStat := func(device, mountpoint string, root bool, customName ...string) {
|
isWindows: isWindows,
|
||||||
var key string
|
diskIoCounters: diskIoCounters,
|
||||||
if isWindows {
|
efPath: "/extra-filesystems",
|
||||||
key = device
|
|
||||||
} else {
|
|
||||||
key = filepath.Base(device)
|
|
||||||
}
|
|
||||||
var ioMatch bool
|
|
||||||
if _, exists := a.fsStats[key]; !exists {
|
|
||||||
if root {
|
|
||||||
slog.Info("Detected root device", "name", key)
|
|
||||||
// Try to map root device to a diskIoCounters entry. First
|
|
||||||
// checks for an exact key match, then uses findIoDevice for
|
|
||||||
// normalized / prefix-based matching (e.g. nda0p2 → nda0),
|
|
||||||
// and finally falls back to the FILESYSTEM env var.
|
|
||||||
if _, ioMatch = diskIoCounters[key]; !ioMatch {
|
|
||||||
if matchedKey, match := findIoDevice(key, diskIoCounters); match {
|
|
||||||
key = matchedKey
|
|
||||||
ioMatch = true
|
|
||||||
} else if filesystem != "" {
|
|
||||||
if matchedKey, match := findIoDevice(filesystem, diskIoCounters); match {
|
|
||||||
key = matchedKey
|
|
||||||
ioMatch = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !ioMatch {
|
|
||||||
slog.Warn("Root I/O unmapped; set FILESYSTEM", "device", device, "mountpoint", mountpoint)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Check if non-root has diskstats and fall back to folder name if not
|
|
||||||
// Scenario: device is encrypted and named luks-2bcb02be-999d-4417-8d18-5c61e660fb6e - not in /proc/diskstats.
|
|
||||||
// However, the device can be specified by mounting folder from luks device at /extra-filesystems/sda1
|
|
||||||
if _, ioMatch = diskIoCounters[key]; !ioMatch {
|
|
||||||
efBase := filepath.Base(mountpoint)
|
|
||||||
if _, ioMatch = diskIoCounters[efBase]; ioMatch {
|
|
||||||
key = efBase
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fsStats := &system.FsStats{Root: root, Mountpoint: mountpoint}
|
|
||||||
if len(customName) > 0 && customName[0] != "" {
|
|
||||||
fsStats.Name = customName[0]
|
|
||||||
}
|
|
||||||
a.fsStats[key] = fsStats
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the appropriate root mount point for this system
|
// Get the appropriate root mount point for this system
|
||||||
rootMountPoint := a.getRootMountPoint()
|
discovery := diskDiscovery{
|
||||||
|
agent: a,
|
||||||
// Use FILESYSTEM env var to find root filesystem
|
rootMountPoint: a.getRootMountPoint(),
|
||||||
if filesystem != "" {
|
partitions: partitions,
|
||||||
for _, p := range partitions {
|
usageFn: disk.Usage,
|
||||||
if filesystemMatchesPartitionSetting(filesystem, p) {
|
ctx: ctx,
|
||||||
addFsStat(p.Device, p.Mountpoint, true)
|
|
||||||
hasRoot = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !hasRoot {
|
|
||||||
// FILESYSTEM may name a physical disk absent from partitions (e.g.
|
|
||||||
// ZFS lists dataset paths like zroot/ROOT/default, not block devices).
|
|
||||||
// Try matching directly against diskIoCounters.
|
|
||||||
if ioKey, match := findIoDevice(filesystem, diskIoCounters); match {
|
|
||||||
a.fsStats[ioKey] = &system.FsStats{Root: true, Mountpoint: rootMountPoint}
|
|
||||||
hasRoot = true
|
|
||||||
} else {
|
|
||||||
slog.Warn("Partition details not found", "filesystem", filesystem)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
hasRoot = discovery.addConfiguredRootFs()
|
||||||
|
|
||||||
// Add EXTRA_FILESYSTEMS env var values to fsStats
|
// Add EXTRA_FILESYSTEMS env var values to fsStats
|
||||||
if extraFilesystems, exists := utils.GetEnv("EXTRA_FILESYSTEMS"); exists {
|
if extraFilesystems, exists := utils.GetEnv("EXTRA_FILESYSTEMS"); exists {
|
||||||
for fsEntry := range strings.SplitSeq(extraFilesystems, ",") {
|
discovery.addConfiguredExtraFilesystems(extraFilesystems)
|
||||||
// Parse custom name from format: device__customname
|
|
||||||
fs, customName := parseFilesystemEntry(fsEntry)
|
|
||||||
|
|
||||||
found := false
|
|
||||||
for _, p := range partitions {
|
|
||||||
if strings.HasSuffix(p.Device, fs) || p.Mountpoint == fs {
|
|
||||||
addFsStat(p.Device, p.Mountpoint, false, customName)
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// if not in partitions, test if we can get disk usage
|
|
||||||
if !found {
|
|
||||||
if _, err := disk.Usage(fs); err == nil {
|
|
||||||
addFsStat(filepath.Base(fs), fs, false, customName)
|
|
||||||
} else {
|
|
||||||
slog.Error("Invalid filesystem", "name", fs, "err", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process partitions for various mount points
|
// Process partitions for various mount points
|
||||||
for _, p := range partitions {
|
for _, p := range partitions {
|
||||||
// fmt.Println(p.Device, p.Mountpoint)
|
if !hasRoot && isRootFallbackPartition(p, discovery.rootMountPoint) {
|
||||||
// Binary root fallback or docker root fallback
|
hasRoot = discovery.addPartitionRootFs(p.Device, p.Mountpoint)
|
||||||
if !hasRoot && (p.Mountpoint == rootMountPoint || (isDockerSpecialMountpoint(p.Mountpoint) && strings.HasPrefix(p.Device, "/dev"))) {
|
|
||||||
fs, match := findIoDevice(filepath.Base(p.Device), diskIoCounters)
|
|
||||||
if match {
|
|
||||||
addFsStat(fs, p.Mountpoint, true)
|
|
||||||
hasRoot = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if device is in /extra-filesystems
|
|
||||||
if strings.HasPrefix(p.Mountpoint, efPath) {
|
|
||||||
device, customName := parseFilesystemEntry(p.Mountpoint)
|
|
||||||
addFsStat(device, p.Mountpoint, false, customName)
|
|
||||||
}
|
}
|
||||||
|
discovery.addPartitionExtraFs(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check all folders in /extra-filesystems and add them if not already present
|
// Check all folders in /extra-filesystems and add them if not already present
|
||||||
if folders, err := os.ReadDir(efPath); err == nil {
|
if folders, err := os.ReadDir(discovery.ctx.efPath); err == nil {
|
||||||
existingMountpoints := make(map[string]bool)
|
folderNames := make([]string, 0, len(folders))
|
||||||
for _, stats := range a.fsStats {
|
|
||||||
existingMountpoints[stats.Mountpoint] = true
|
|
||||||
}
|
|
||||||
for _, folder := range folders {
|
for _, folder := range folders {
|
||||||
if folder.IsDir() {
|
if folder.IsDir() {
|
||||||
mountpoint := filepath.Join(efPath, folder.Name())
|
folderNames = append(folderNames, folder.Name())
|
||||||
slog.Debug("/extra-filesystems", "mountpoint", mountpoint)
|
|
||||||
if !existingMountpoints[mountpoint] {
|
|
||||||
device, customName := parseFilesystemEntry(folder.Name())
|
|
||||||
addFsStat(device, mountpoint, false, customName)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
discovery.addExtraFilesystemFolders(folderNames)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If no root filesystem set, try the most active I/O device as a last
|
// If no root filesystem set, try the most active I/O device as a last
|
||||||
// resort (e.g. ZFS where dataset names are unrelated to disk names).
|
// resort (e.g. ZFS where dataset names are unrelated to disk names).
|
||||||
if !hasRoot {
|
if !hasRoot {
|
||||||
rootKey := mostActiveIoDevice(diskIoCounters)
|
discovery.addLastResortRootFs()
|
||||||
if rootKey != "" {
|
|
||||||
slog.Warn("Using most active device for root I/O; set FILESYSTEM to override", "device", rootKey)
|
|
||||||
} else {
|
|
||||||
rootKey = filepath.Base(rootMountPoint)
|
|
||||||
if _, exists := a.fsStats[rootKey]; exists {
|
|
||||||
rootKey = "root"
|
|
||||||
}
|
|
||||||
slog.Warn("Root I/O device not detected; set FILESYSTEM to override")
|
|
||||||
}
|
|
||||||
a.fsStats[rootKey] = &system.FsStats{Root: true, Mountpoint: rootMountPoint}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
a.pruneDuplicateRootExtraFilesystems()
|
a.pruneDuplicateRootExtraFilesystems()
|
||||||
@@ -381,6 +501,8 @@ func normalizeDeviceName(value string) string {
|
|||||||
|
|
||||||
// Sets start values for disk I/O stats.
|
// Sets start values for disk I/O stats.
|
||||||
func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersStat) {
|
func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersStat) {
|
||||||
|
a.fsNames = a.fsNames[:0]
|
||||||
|
now := time.Now()
|
||||||
for device, stats := range a.fsStats {
|
for device, stats := range a.fsStats {
|
||||||
// skip if not in diskIoCounters
|
// skip if not in diskIoCounters
|
||||||
d, exists := diskIoCounters[device]
|
d, exists := diskIoCounters[device]
|
||||||
@@ -389,7 +511,7 @@ func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersS
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// populate initial values
|
// populate initial values
|
||||||
stats.Time = time.Now()
|
stats.Time = now
|
||||||
stats.TotalRead = d.ReadBytes
|
stats.TotalRead = d.ReadBytes
|
||||||
stats.TotalWrite = d.WriteBytes
|
stats.TotalWrite = d.WriteBytes
|
||||||
// add to list of valid io device names
|
// add to list of valid io device names
|
||||||
|
|||||||
@@ -93,6 +93,443 @@ func TestParseFilesystemEntry(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestExtraFilesystemPartitionInfo(t *testing.T) {
|
||||||
|
t.Run("uses partition device for label-only mountpoint", func(t *testing.T) {
|
||||||
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
|
Device: "/dev/sdc",
|
||||||
|
Mountpoint: "/extra-filesystems/Share",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, "/dev/sdc", device)
|
||||||
|
assert.Equal(t, "", customName)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses custom name from mountpoint suffix", func(t *testing.T) {
|
||||||
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
|
Device: "/dev/sdc",
|
||||||
|
Mountpoint: "/extra-filesystems/sdc__Share",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, "/dev/sdc", device)
|
||||||
|
assert.Equal(t, "Share", customName)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to folder device when partition device is unavailable", func(t *testing.T) {
|
||||||
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
|
Mountpoint: "/extra-filesystems/sdc__Share",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, "sdc", device)
|
||||||
|
assert.Equal(t, "Share", customName)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("supports custom name without folder device prefix", func(t *testing.T) {
|
||||||
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
|
Device: "/dev/sdc",
|
||||||
|
Mountpoint: "/extra-filesystems/__Share",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, "/dev/sdc", device)
|
||||||
|
assert.Equal(t, "Share", customName)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuildFsStatRegistration(t *testing.T) {
|
||||||
|
t.Run("uses basename for non-windows exact io match", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/sda1",
|
||||||
|
"/mnt/data",
|
||||||
|
false,
|
||||||
|
"archive",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda1": {Name: "sda1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "sda1", key)
|
||||||
|
assert.Equal(t, "/mnt/data", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "archive", stats.Name)
|
||||||
|
assert.False(t, stats.Root)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("maps root partition to io device by prefix", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/ada0p2",
|
||||||
|
"/",
|
||||||
|
true,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"ada0": {Name: "ada0", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "ada0", key)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses filesystem setting as root fallback", func(t *testing.T) {
|
||||||
|
key, _, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"overlay",
|
||||||
|
"/",
|
||||||
|
true,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
filesystem: "nvme0n1p2",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nvme0n1": {Name: "nvme0n1", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "nvme0n1", key)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("prefers parsed extra-filesystems device over mapper device", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/mapper/luks-2bcb02be-999d-4417-8d18-5c61e660fb6e",
|
||||||
|
"/extra-filesystems/nvme0n1p2__Archive",
|
||||||
|
false,
|
||||||
|
"Archive",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"dm-1": {Name: "dm-1", Label: "luks-2bcb02be-999d-4417-8d18-5c61e660fb6e"},
|
||||||
|
"nvme0n1p2": {Name: "nvme0n1p2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "nvme0n1p2", key)
|
||||||
|
assert.Equal(t, "Archive", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to mapper io device when folder device cannot be resolved", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/mapper/luks-2bcb02be-999d-4417-8d18-5c61e660fb6e",
|
||||||
|
"/extra-filesystems/Archive",
|
||||||
|
false,
|
||||||
|
"Archive",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"dm-1": {Name: "dm-1", Label: "luks-2bcb02be-999d-4417-8d18-5c61e660fb6e"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "dm-1", key)
|
||||||
|
assert.Equal(t, "Archive", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses full device name on windows", func(t *testing.T) {
|
||||||
|
key, _, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
`C:`,
|
||||||
|
`C:\\`,
|
||||||
|
false,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: true,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
`C:`: {Name: `C:`},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, `C:`, key)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("skips existing key", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{"sda1": {Mountpoint: "/existing"}},
|
||||||
|
"/dev/sda1",
|
||||||
|
"/mnt/data",
|
||||||
|
false,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda1": {Name: "sda1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Empty(t, key)
|
||||||
|
assert.Nil(t, stats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddConfiguredRootFs(t *testing.T) {
|
||||||
|
t.Run("adds root from matching partition", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
rootMountPoint: "/",
|
||||||
|
partitions: []disk.PartitionStat{{Device: "/dev/ada0p2", Mountpoint: "/"}},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
filesystem: "/dev/ada0p2",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"ada0": {Name: "ada0", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addConfiguredRootFs()
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
stats, exists := agent.fsStats["ada0"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("adds root from io device when partition is missing", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
rootMountPoint: "/sysroot",
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
filesystem: "zroot",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nda0": {Name: "nda0", Label: "zroot", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addConfiguredRootFs()
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
stats, exists := agent.fsStats["nda0"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/sysroot", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns false when filesystem cannot be resolved", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
rootMountPoint: "/",
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
filesystem: "missing-disk",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addConfiguredRootFs()
|
||||||
|
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddPartitionRootFs(t *testing.T) {
|
||||||
|
t.Run("adds root from fallback partition candidate", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nvme0n1": {Name: "nvme0n1", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addPartitionRootFs("/dev/nvme0n1p2", "/")
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
stats, exists := agent.fsStats["nvme0n1"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns false when no io device matches", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{agent: agent, ctx: fsRegistrationContext{diskIoCounters: map[string]disk.IOCountersStat{}}}
|
||||||
|
|
||||||
|
ok := discovery.addPartitionRootFs("/dev/mapper/root", "/")
|
||||||
|
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddLastResortRootFs(t *testing.T) {
|
||||||
|
t.Run("uses most active io device when available", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{agent: agent, rootMountPoint: "/", ctx: fsRegistrationContext{diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda", ReadBytes: 5000, WriteBytes: 5000},
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
}}}
|
||||||
|
|
||||||
|
discovery.addLastResortRootFs()
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["sda"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to root key when mountpoint basename collides", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: map[string]*system.FsStats{
|
||||||
|
"sysroot": {Mountpoint: "/extra-filesystems/sysroot"},
|
||||||
|
}}
|
||||||
|
discovery := diskDiscovery{agent: agent, rootMountPoint: "/sysroot", ctx: fsRegistrationContext{diskIoCounters: map[string]disk.IOCountersStat{}}}
|
||||||
|
|
||||||
|
discovery.addLastResortRootFs()
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["root"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/sysroot", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddConfiguredExtraFsEntry(t *testing.T) {
|
||||||
|
t.Run("uses matching partition when present", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
partitions: []disk.PartitionStat{{Device: "/dev/sdb1", Mountpoint: "/mnt/backup"}},
|
||||||
|
usageFn: func(string) (*disk.UsageStat, error) {
|
||||||
|
t.Fatal("usage fallback should not be called when partition matches")
|
||||||
|
return nil, nil
|
||||||
|
},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sdb1": {Name: "sdb1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFsEntry("sdb1", "backup")
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["sdb1"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/mnt/backup", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "backup", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to usage-validated path", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
usageFn: func(path string) (*disk.UsageStat, error) {
|
||||||
|
assert.Equal(t, "/srv/archive", path)
|
||||||
|
return &disk.UsageStat{}, nil
|
||||||
|
},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"archive": {Name: "archive"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFsEntry("/srv/archive", "archive")
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["archive"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/srv/archive", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "archive", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ignores invalid filesystem entry", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
usageFn: func(string) (*disk.UsageStat, error) {
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFsEntry("/missing/archive", "")
|
||||||
|
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddConfiguredExtraFilesystems(t *testing.T) {
|
||||||
|
t.Run("parses and registers multiple configured filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
partitions: []disk.PartitionStat{{Device: "/dev/sda1", Mountpoint: "/mnt/fast"}},
|
||||||
|
usageFn: func(path string) (*disk.UsageStat, error) {
|
||||||
|
if path == "/srv/archive" {
|
||||||
|
return &disk.UsageStat{}, nil
|
||||||
|
}
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda1": {Name: "sda1"},
|
||||||
|
"archive": {Name: "archive"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFilesystems("sda1__fast,/srv/archive__cold")
|
||||||
|
|
||||||
|
assert.Contains(t, agent.fsStats, "sda1")
|
||||||
|
assert.Equal(t, "fast", agent.fsStats["sda1"].Name)
|
||||||
|
assert.Contains(t, agent.fsStats, "archive")
|
||||||
|
assert.Equal(t, "cold", agent.fsStats["archive"].Name)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddExtraFilesystemFolders(t *testing.T) {
|
||||||
|
t.Run("adds missing folders and skips existing mountpoints", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: map[string]*system.FsStats{
|
||||||
|
"existing": {Mountpoint: "/extra-filesystems/existing"},
|
||||||
|
}}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
efPath: "/extra-filesystems",
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"newdisk": {Name: "newdisk"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addExtraFilesystemFolders([]string{"existing", "newdisk__Archive"})
|
||||||
|
|
||||||
|
assert.Len(t, agent.fsStats, 2)
|
||||||
|
stats, exists := agent.fsStats["newdisk"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/extra-filesystems/newdisk__Archive", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "Archive", stats.Name)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestFindIoDevice(t *testing.T) {
|
func TestFindIoDevice(t *testing.T) {
|
||||||
t.Run("matches by device name", func(t *testing.T) {
|
t.Run("matches by device name", func(t *testing.T) {
|
||||||
ioCounters := map[string]disk.IOCountersStat{
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
@@ -250,18 +687,8 @@ func TestIsDockerSpecialMountpoint(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
||||||
// Set up environment variables
|
|
||||||
oldEnv := os.Getenv("EXTRA_FILESYSTEMS")
|
|
||||||
defer func() {
|
|
||||||
if oldEnv != "" {
|
|
||||||
os.Setenv("EXTRA_FILESYSTEMS", oldEnv)
|
|
||||||
} else {
|
|
||||||
os.Unsetenv("EXTRA_FILESYSTEMS")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Test with custom names
|
// Test with custom names
|
||||||
os.Setenv("EXTRA_FILESYSTEMS", "sda1__my-storage,/dev/sdb1__backup-drive,nvme0n1p2")
|
t.Setenv("EXTRA_FILESYSTEMS", "sda1__my-storage,/dev/sdb1__backup-drive,nvme0n1p2")
|
||||||
|
|
||||||
// Mock disk partitions (we'll just test the parsing logic)
|
// Mock disk partitions (we'll just test the parsing logic)
|
||||||
// Since the actual disk operations are system-dependent, we'll focus on the parsing
|
// Since the actual disk operations are system-dependent, we'll focus on the parsing
|
||||||
@@ -289,7 +716,7 @@ func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
|||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run("env_"+tc.envValue, func(t *testing.T) {
|
t.Run("env_"+tc.envValue, func(t *testing.T) {
|
||||||
os.Setenv("EXTRA_FILESYSTEMS", tc.envValue)
|
t.Setenv("EXTRA_FILESYSTEMS", tc.envValue)
|
||||||
|
|
||||||
// Create mock partitions that would match our test cases
|
// Create mock partitions that would match our test cases
|
||||||
partitions := []disk.PartitionStat{}
|
partitions := []disk.PartitionStat{}
|
||||||
@@ -310,7 +737,7 @@ func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
|||||||
// Test the parsing logic by calling the relevant part
|
// Test the parsing logic by calling the relevant part
|
||||||
// We'll create a simplified version to test just the parsing
|
// We'll create a simplified version to test just the parsing
|
||||||
extraFilesystems := tc.envValue
|
extraFilesystems := tc.envValue
|
||||||
for _, fsEntry := range strings.Split(extraFilesystems, ",") {
|
for fsEntry := range strings.SplitSeq(extraFilesystems, ",") {
|
||||||
// Parse the entry
|
// Parse the entry
|
||||||
fsEntry = strings.TrimSpace(fsEntry)
|
fsEntry = strings.TrimSpace(fsEntry)
|
||||||
var fs, customName string
|
var fs, customName string
|
||||||
@@ -506,3 +933,33 @@ func TestHasSameDiskUsage(t *testing.T) {
|
|||||||
assert.False(t, hasSameDiskUsage(&disk.UsageStat{Total: 0, Used: 0}, &disk.UsageStat{Total: 1, Used: 1}))
|
assert.False(t, hasSameDiskUsage(&disk.UsageStat{Total: 0, Used: 0}, &disk.UsageStat{Total: 1, Used: 1}))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestInitializeDiskIoStatsResetsTrackedDevices(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
fsStats: map[string]*system.FsStats{
|
||||||
|
"sda": {},
|
||||||
|
"sdb": {},
|
||||||
|
},
|
||||||
|
fsNames: []string{"stale", "sda"},
|
||||||
|
}
|
||||||
|
|
||||||
|
agent.initializeDiskIoStats(map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda", ReadBytes: 10, WriteBytes: 20},
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 30, WriteBytes: 40},
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.ElementsMatch(t, []string{"sda", "sdb"}, agent.fsNames)
|
||||||
|
assert.Len(t, agent.fsNames, 2)
|
||||||
|
assert.Equal(t, uint64(10), agent.fsStats["sda"].TotalRead)
|
||||||
|
assert.Equal(t, uint64(20), agent.fsStats["sda"].TotalWrite)
|
||||||
|
assert.False(t, agent.fsStats["sda"].Time.IsZero())
|
||||||
|
assert.False(t, agent.fsStats["sdb"].Time.IsZero())
|
||||||
|
|
||||||
|
agent.initializeDiskIoStats(map[string]disk.IOCountersStat{
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 50, WriteBytes: 60},
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, []string{"sdb"}, agent.fsNames)
|
||||||
|
assert.Equal(t, uint64(50), agent.fsStats["sdb"].TotalRead)
|
||||||
|
assert.Equal(t, uint64(60), agent.fsStats["sdb"].TotalWrite)
|
||||||
|
}
|
||||||
|
|||||||
122
agent/docker.go
122
agent/docker.go
@@ -16,6 +16,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@@ -75,6 +77,7 @@ type dockerManager struct {
|
|||||||
// cacheTimeMs -> DeltaTracker for network bytes sent/received
|
// cacheTimeMs -> DeltaTracker for network bytes sent/received
|
||||||
networkSentTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
networkSentTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||||
networkRecvTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
networkRecvTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||||
|
lastNetworkReadTime map[uint16]map[string]time.Time // cacheTimeMs -> containerId -> last network read time
|
||||||
retrySleep func(time.Duration)
|
retrySleep func(time.Duration)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -283,7 +286,7 @@ func (dm *dockerManager) cycleNetworkDeltasForCacheTime(cacheTimeMs uint16) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// calculateNetworkStats calculates network sent/receive deltas using DeltaTracker
|
// calculateNetworkStats calculates network sent/receive deltas using DeltaTracker
|
||||||
func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats *container.ApiStats, stats *container.Stats, initialized bool, name string, cacheTimeMs uint16) (uint64, uint64) {
|
func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats *container.ApiStats, name string, cacheTimeMs uint16) (uint64, uint64) {
|
||||||
var total_sent, total_recv uint64
|
var total_sent, total_recv uint64
|
||||||
for _, v := range apiStats.Networks {
|
for _, v := range apiStats.Networks {
|
||||||
total_sent += v.TxBytes
|
total_sent += v.TxBytes
|
||||||
@@ -302,10 +305,11 @@ func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats
|
|||||||
sent_delta_raw := sentTracker.Delta(ctr.IdShort)
|
sent_delta_raw := sentTracker.Delta(ctr.IdShort)
|
||||||
recv_delta_raw := recvTracker.Delta(ctr.IdShort)
|
recv_delta_raw := recvTracker.Delta(ctr.IdShort)
|
||||||
|
|
||||||
// Calculate bytes per second independently for Tx and Rx if we have previous data
|
// Calculate bytes per second using per-cache-time read time to avoid
|
||||||
|
// interference between different cache intervals (e.g. 1000ms vs 60000ms)
|
||||||
var sent_delta, recv_delta uint64
|
var sent_delta, recv_delta uint64
|
||||||
if initialized {
|
if prevReadTime, ok := dm.lastNetworkReadTime[cacheTimeMs][ctr.IdShort]; ok {
|
||||||
millisecondsElapsed := uint64(time.Since(stats.PrevReadTime).Milliseconds())
|
millisecondsElapsed := uint64(time.Since(prevReadTime).Milliseconds())
|
||||||
if millisecondsElapsed > 0 {
|
if millisecondsElapsed > 0 {
|
||||||
if sent_delta_raw > 0 {
|
if sent_delta_raw > 0 {
|
||||||
sent_delta = sent_delta_raw * 1000 / millisecondsElapsed
|
sent_delta = sent_delta_raw * 1000 / millisecondsElapsed
|
||||||
@@ -346,6 +350,39 @@ func updateContainerStatsValues(stats *container.Stats, cpuPct float64, usedMemo
|
|||||||
stats.PrevReadTime = readTime
|
stats.PrevReadTime = readTime
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// convertContainerPortsToString formats the ports of a container into a sorted, deduplicated string.
|
||||||
|
// ctr.Ports is nilled out after processing so the slice is not accidentally reused.
|
||||||
|
func convertContainerPortsToString(ctr *container.ApiInfo) string {
|
||||||
|
if len(ctr.Ports) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
sort.Slice(ctr.Ports, func(i, j int) bool {
|
||||||
|
return ctr.Ports[i].PublicPort < ctr.Ports[j].PublicPort
|
||||||
|
})
|
||||||
|
var builder strings.Builder
|
||||||
|
seenPorts := make(map[uint16]struct{})
|
||||||
|
for _, p := range ctr.Ports {
|
||||||
|
_, ok := seenPorts[p.PublicPort]
|
||||||
|
if p.PublicPort == 0 || ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seenPorts[p.PublicPort] = struct{}{}
|
||||||
|
if builder.Len() > 0 {
|
||||||
|
builder.WriteString(", ")
|
||||||
|
}
|
||||||
|
switch p.IP {
|
||||||
|
case "0.0.0.0", "::":
|
||||||
|
default:
|
||||||
|
builder.WriteString(p.IP)
|
||||||
|
builder.WriteByte(':')
|
||||||
|
}
|
||||||
|
builder.WriteString(strconv.Itoa(int(p.PublicPort)))
|
||||||
|
}
|
||||||
|
// clear ports slice so it doesn't get reused and blend into next response
|
||||||
|
ctr.Ports = nil
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
func parseDockerStatus(status string) (string, container.DockerHealth) {
|
func parseDockerStatus(status string) (string, container.DockerHealth) {
|
||||||
trimmed := strings.TrimSpace(status)
|
trimmed := strings.TrimSpace(status)
|
||||||
if trimmed == "" {
|
if trimmed == "" {
|
||||||
@@ -365,22 +402,60 @@ func parseDockerStatus(status string) (string, container.DockerHealth) {
|
|||||||
statusText = trimmed
|
statusText = trimmed
|
||||||
}
|
}
|
||||||
|
|
||||||
healthText := strings.ToLower(strings.TrimSpace(strings.TrimSuffix(trimmed[openIdx+1:], ")")))
|
healthText := strings.TrimSpace(strings.TrimSuffix(trimmed[openIdx+1:], ")"))
|
||||||
// Some Docker statuses include a "health:" prefix inside the parentheses.
|
// Some Docker statuses include a "health:" prefix inside the parentheses.
|
||||||
// Strip it so it maps correctly to the known health states.
|
// Strip it so it maps correctly to the known health states.
|
||||||
if colonIdx := strings.IndexRune(healthText, ':'); colonIdx != -1 {
|
if colonIdx := strings.IndexRune(healthText, ':'); colonIdx != -1 {
|
||||||
prefix := strings.TrimSpace(healthText[:colonIdx])
|
prefix := strings.ToLower(strings.TrimSpace(healthText[:colonIdx]))
|
||||||
if prefix == "health" || prefix == "health status" {
|
if prefix == "health" || prefix == "health status" {
|
||||||
healthText = strings.TrimSpace(healthText[colonIdx+1:])
|
healthText = strings.TrimSpace(healthText[colonIdx+1:])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if health, ok := container.DockerHealthStrings[healthText]; ok {
|
if health, ok := parseDockerHealthStatus(healthText); ok {
|
||||||
return statusText, health
|
return statusText, health
|
||||||
}
|
}
|
||||||
|
|
||||||
return trimmed, container.DockerHealthNone
|
return trimmed, container.DockerHealthNone
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// parseDockerHealthStatus maps Docker health status strings to container.DockerHealth values
|
||||||
|
func parseDockerHealthStatus(status string) (container.DockerHealth, bool) {
|
||||||
|
health, ok := container.DockerHealthStrings[strings.ToLower(strings.TrimSpace(status))]
|
||||||
|
return health, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// getPodmanContainerHealth fetches container health status from the container inspect endpoint.
|
||||||
|
// Used for Podman which doesn't provide health status in the /containers/json endpoint as of March 2026.
|
||||||
|
// https://github.com/containers/podman/issues/27786
|
||||||
|
func (dm *dockerManager) getPodmanContainerHealth(containerID string) (container.DockerHealth, error) {
|
||||||
|
resp, err := dm.client.Get(fmt.Sprintf("http://localhost/containers/%s/json", url.PathEscape(containerID)))
|
||||||
|
if err != nil {
|
||||||
|
return container.DockerHealthNone, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return container.DockerHealthNone, fmt.Errorf("container inspect request failed: %s", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var inspectInfo struct {
|
||||||
|
State struct {
|
||||||
|
Health struct {
|
||||||
|
Status string
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&inspectInfo); err != nil {
|
||||||
|
return container.DockerHealthNone, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if health, ok := parseDockerHealthStatus(inspectInfo.State.Health.Status); ok {
|
||||||
|
return health, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return container.DockerHealthNone, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Updates stats for individual container with cache-time-aware delta tracking
|
// Updates stats for individual container with cache-time-aware delta tracking
|
||||||
func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeMs uint16) error {
|
func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeMs uint16) error {
|
||||||
name := ctr.Names[0][1:]
|
name := ctr.Names[0][1:]
|
||||||
@@ -390,6 +465,21 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeM
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
statusText, health := parseDockerStatus(ctr.Status)
|
||||||
|
|
||||||
|
// Docker exposes Health.Status on /containers/json in API 1.52+.
|
||||||
|
// Podman currently requires falling back to the inspect endpoint as of March 2026.
|
||||||
|
// https://github.com/containers/podman/issues/27786
|
||||||
|
if ctr.Health.Status != "" {
|
||||||
|
if h, ok := parseDockerHealthStatus(ctr.Health.Status); ok {
|
||||||
|
health = h
|
||||||
|
}
|
||||||
|
} else if dm.usingPodman {
|
||||||
|
if podmanHealth, err := dm.getPodmanContainerHealth(ctr.IdShort); err == nil {
|
||||||
|
health = podmanHealth
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
dm.containerStatsMutex.Lock()
|
dm.containerStatsMutex.Lock()
|
||||||
defer dm.containerStatsMutex.Unlock()
|
defer dm.containerStatsMutex.Unlock()
|
||||||
|
|
||||||
@@ -401,11 +491,13 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeM
|
|||||||
}
|
}
|
||||||
|
|
||||||
stats.Id = ctr.IdShort
|
stats.Id = ctr.IdShort
|
||||||
|
|
||||||
statusText, health := parseDockerStatus(ctr.Status)
|
|
||||||
stats.Status = statusText
|
stats.Status = statusText
|
||||||
stats.Health = health
|
stats.Health = health
|
||||||
|
|
||||||
|
if len(ctr.Ports) > 0 {
|
||||||
|
stats.Ports = convertContainerPortsToString(ctr)
|
||||||
|
}
|
||||||
|
|
||||||
// reset current stats
|
// reset current stats
|
||||||
stats.Cpu = 0
|
stats.Cpu = 0
|
||||||
stats.Mem = 0
|
stats.Mem = 0
|
||||||
@@ -452,7 +544,13 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeM
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Calculate network stats using DeltaTracker
|
// Calculate network stats using DeltaTracker
|
||||||
sent_delta, recv_delta := dm.calculateNetworkStats(ctr, res, stats, initialized, name, cacheTimeMs)
|
sent_delta, recv_delta := dm.calculateNetworkStats(ctr, res, name, cacheTimeMs)
|
||||||
|
|
||||||
|
// Store per-cache-time network read time for next rate calculation
|
||||||
|
if dm.lastNetworkReadTime[cacheTimeMs] == nil {
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs] = make(map[string]time.Time)
|
||||||
|
}
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs][ctr.IdShort] = time.Now()
|
||||||
|
|
||||||
// Store current network values for legacy compatibility
|
// Store current network values for legacy compatibility
|
||||||
var total_sent, total_recv uint64
|
var total_sent, total_recv uint64
|
||||||
@@ -484,6 +582,9 @@ func (dm *dockerManager) deleteContainerStatsSync(id string) {
|
|||||||
for ct := range dm.lastCpuReadTime {
|
for ct := range dm.lastCpuReadTime {
|
||||||
delete(dm.lastCpuReadTime[ct], id)
|
delete(dm.lastCpuReadTime[ct], id)
|
||||||
}
|
}
|
||||||
|
for ct := range dm.lastNetworkReadTime {
|
||||||
|
delete(dm.lastNetworkReadTime[ct], id)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates a new http client for Docker or Podman API
|
// Creates a new http client for Docker or Podman API
|
||||||
@@ -569,6 +670,7 @@ func newDockerManager() *dockerManager {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
retrySleep: time.Sleep,
|
retrySleep: time.Sleep,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -35,6 +35,12 @@ type recordingRoundTripper struct {
|
|||||||
lastQuery map[string]string
|
lastQuery map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type roundTripFunc func(*http.Request) (*http.Response, error)
|
||||||
|
|
||||||
|
func (fn roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
|
return fn(req)
|
||||||
|
}
|
||||||
|
|
||||||
func (rt *recordingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
func (rt *recordingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
rt.called = true
|
rt.called = true
|
||||||
rt.lastPath = req.URL.EscapedPath()
|
rt.lastPath = req.URL.EscapedPath()
|
||||||
@@ -214,6 +220,28 @@ func TestContainerDetailsRequestsUseExpectedDockerPaths(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGetPodmanContainerHealth(t *testing.T) {
|
||||||
|
called := false
|
||||||
|
dm := &dockerManager{
|
||||||
|
client: &http.Client{Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
|
||||||
|
called = true
|
||||||
|
assert.Equal(t, "/containers/0123456789ab/json", req.URL.EscapedPath())
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Status: "200 OK",
|
||||||
|
Header: make(http.Header),
|
||||||
|
Body: io.NopCloser(strings.NewReader(`{"State":{"Health":{"Status":"healthy"}}}`)),
|
||||||
|
Request: req,
|
||||||
|
}, nil
|
||||||
|
})},
|
||||||
|
}
|
||||||
|
|
||||||
|
health, err := dm.getPodmanContainerHealth("0123456789ab")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, called)
|
||||||
|
assert.Equal(t, container.DockerHealthHealthy, health)
|
||||||
|
}
|
||||||
|
|
||||||
func TestValidateCpuPercentage(t *testing.T) {
|
func TestValidateCpuPercentage(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -380,6 +408,7 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
cacheTimeMs := uint16(30000)
|
cacheTimeMs := uint16(30000)
|
||||||
@@ -395,6 +424,11 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
dm.networkSentTrackers[cacheTimeMs] = sentTracker
|
dm.networkSentTrackers[cacheTimeMs] = sentTracker
|
||||||
dm.networkRecvTrackers[cacheTimeMs] = recvTracker
|
dm.networkRecvTrackers[cacheTimeMs] = recvTracker
|
||||||
|
|
||||||
|
// Set per-cache-time network read time (1 second ago)
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs] = map[string]time.Time{
|
||||||
|
"container1": time.Now().Add(-time.Second),
|
||||||
|
}
|
||||||
|
|
||||||
ctr := &container.ApiInfo{
|
ctr := &container.ApiInfo{
|
||||||
IdShort: "container1",
|
IdShort: "container1",
|
||||||
}
|
}
|
||||||
@@ -405,12 +439,8 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
stats := &container.Stats{
|
|
||||||
PrevReadTime: time.Now().Add(-time.Second), // 1 second ago
|
|
||||||
}
|
|
||||||
|
|
||||||
// Test with initialized container
|
// Test with initialized container
|
||||||
sent, recv := dm.calculateNetworkStats(ctr, apiStats, stats, true, "test-container", cacheTimeMs)
|
sent, recv := dm.calculateNetworkStats(ctr, apiStats, "test-container", cacheTimeMs)
|
||||||
|
|
||||||
// Should return calculated byte rates per second
|
// Should return calculated byte rates per second
|
||||||
assert.GreaterOrEqual(t, sent, uint64(0))
|
assert.GreaterOrEqual(t, sent, uint64(0))
|
||||||
@@ -418,12 +448,76 @@ func TestCalculateNetworkStats(t *testing.T) {
|
|||||||
|
|
||||||
// Cycle and test one-direction change (Tx only) is reflected independently
|
// Cycle and test one-direction change (Tx only) is reflected independently
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs]["container1"] = time.Now().Add(-time.Second)
|
||||||
apiStats.Networks["eth0"] = container.NetworkStats{TxBytes: 2500, RxBytes: 1800} // +500 Tx only
|
apiStats.Networks["eth0"] = container.NetworkStats{TxBytes: 2500, RxBytes: 1800} // +500 Tx only
|
||||||
sent, recv = dm.calculateNetworkStats(ctr, apiStats, stats, true, "test-container", cacheTimeMs)
|
sent, recv = dm.calculateNetworkStats(ctr, apiStats, "test-container", cacheTimeMs)
|
||||||
assert.Greater(t, sent, uint64(0))
|
assert.Greater(t, sent, uint64(0))
|
||||||
assert.Equal(t, uint64(0), recv)
|
assert.Equal(t, uint64(0), recv)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestNetworkStatsCacheTimeIsolation verifies that frequent collections at one cache time
|
||||||
|
// (e.g. 1000ms) don't cause inflated rates at another cache time (e.g. 60000ms).
|
||||||
|
// This was a bug where PrevReadTime was shared, so the 60000ms tracker would see a
|
||||||
|
// large byte delta divided by a tiny elapsed time (set by the 1000ms path).
|
||||||
|
func TestNetworkStatsCacheTimeIsolation(t *testing.T) {
|
||||||
|
dm := &dockerManager{
|
||||||
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
}
|
||||||
|
|
||||||
|
ctr := &container.ApiInfo{IdShort: "container1"}
|
||||||
|
fastCache := uint16(1000)
|
||||||
|
slowCache := uint16(60000)
|
||||||
|
|
||||||
|
// Baseline for both cache times at T=0 with 100 bytes total
|
||||||
|
baseline := &container.ApiStats{
|
||||||
|
Networks: map[string]container.NetworkStats{
|
||||||
|
"eth0": {TxBytes: 100, RxBytes: 100},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
dm.calculateNetworkStats(ctr, baseline, "test", fastCache)
|
||||||
|
dm.calculateNetworkStats(ctr, baseline, "test", slowCache)
|
||||||
|
|
||||||
|
// Record read times and cycle both
|
||||||
|
now := time.Now()
|
||||||
|
dm.lastNetworkReadTime[fastCache] = map[string]time.Time{"container1": now}
|
||||||
|
dm.lastNetworkReadTime[slowCache] = map[string]time.Time{"container1": now}
|
||||||
|
dm.cycleNetworkDeltasForCacheTime(fastCache)
|
||||||
|
dm.cycleNetworkDeltasForCacheTime(slowCache)
|
||||||
|
|
||||||
|
// Simulate many fast (1000ms) collections over ~5 seconds, each adding 10 bytes
|
||||||
|
totalBytes := uint64(100)
|
||||||
|
for i := 0; i < 5; i++ {
|
||||||
|
totalBytes += 10
|
||||||
|
stats := &container.ApiStats{
|
||||||
|
Networks: map[string]container.NetworkStats{
|
||||||
|
"eth0": {TxBytes: totalBytes, RxBytes: totalBytes},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// Set fast cache read time to 1 second ago
|
||||||
|
dm.lastNetworkReadTime[fastCache]["container1"] = time.Now().Add(-time.Second)
|
||||||
|
sent, _ := dm.calculateNetworkStats(ctr, stats, "test", fastCache)
|
||||||
|
// Fast cache should see ~10 bytes/sec per interval
|
||||||
|
assert.LessOrEqual(t, sent, uint64(100), "fast cache rate should be reasonable")
|
||||||
|
dm.cycleNetworkDeltasForCacheTime(fastCache)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now do slow cache collection — total delta is 50 bytes over ~5 seconds
|
||||||
|
// Set slow cache read time to 5 seconds ago (the actual elapsed time)
|
||||||
|
dm.lastNetworkReadTime[slowCache]["container1"] = time.Now().Add(-5 * time.Second)
|
||||||
|
finalStats := &container.ApiStats{
|
||||||
|
Networks: map[string]container.NetworkStats{
|
||||||
|
"eth0": {TxBytes: totalBytes, RxBytes: totalBytes},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
sent, _ := dm.calculateNetworkStats(ctr, finalStats, "test", slowCache)
|
||||||
|
|
||||||
|
// Slow cache rate should be ~10 bytes/sec (50 bytes / 5 seconds), NOT 100x inflated
|
||||||
|
assert.LessOrEqual(t, sent, uint64(100), "slow cache rate should NOT be inflated by fast cache collections")
|
||||||
|
assert.GreaterOrEqual(t, sent, uint64(1), "slow cache should still report some traffic")
|
||||||
|
}
|
||||||
|
|
||||||
func TestDockerManagerCreation(t *testing.T) {
|
func TestDockerManagerCreation(t *testing.T) {
|
||||||
// Test that dockerManager can be created without panicking
|
// Test that dockerManager can be created without panicking
|
||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
@@ -432,6 +526,7 @@ func TestDockerManagerCreation(t *testing.T) {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.NotNil(t, dm)
|
assert.NotNil(t, dm)
|
||||||
@@ -439,6 +534,7 @@ func TestDockerManagerCreation(t *testing.T) {
|
|||||||
assert.NotNil(t, dm.lastCpuSystem)
|
assert.NotNil(t, dm.lastCpuSystem)
|
||||||
assert.NotNil(t, dm.networkSentTrackers)
|
assert.NotNil(t, dm.networkSentTrackers)
|
||||||
assert.NotNil(t, dm.networkRecvTrackers)
|
assert.NotNil(t, dm.networkRecvTrackers)
|
||||||
|
assert.NotNil(t, dm.lastNetworkReadTime)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCheckDockerVersion(t *testing.T) {
|
func TestCheckDockerVersion(t *testing.T) {
|
||||||
@@ -623,6 +719,7 @@ func TestDockerStatsWithMockData(t *testing.T) {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
containerStatsMap: make(map[string]*container.Stats),
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -768,23 +865,22 @@ func TestNetworkStatsCalculationWithRealData(t *testing.T) {
|
|||||||
dm := &dockerManager{
|
dm := &dockerManager{
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
ctr := &container.ApiInfo{IdShort: "test-container"}
|
ctr := &container.ApiInfo{IdShort: "test-container"}
|
||||||
cacheTimeMs := uint16(30000) // Test with 30 second cache
|
cacheTimeMs := uint16(30000) // Test with 30 second cache
|
||||||
|
|
||||||
// Use exact timing for deterministic results
|
// First call sets baseline (no previous read time, so rates should be 0)
|
||||||
exactly1000msAgo := time.Now().Add(-1000 * time.Millisecond)
|
sent1, recv1 := dm.calculateNetworkStats(ctr, apiStats1, "test", cacheTimeMs)
|
||||||
stats := &container.Stats{
|
|
||||||
PrevReadTime: exactly1000msAgo,
|
|
||||||
}
|
|
||||||
|
|
||||||
// First call sets baseline
|
|
||||||
sent1, recv1 := dm.calculateNetworkStats(ctr, apiStats1, stats, true, "test", cacheTimeMs)
|
|
||||||
assert.Equal(t, uint64(0), sent1)
|
assert.Equal(t, uint64(0), sent1)
|
||||||
assert.Equal(t, uint64(0), recv1)
|
assert.Equal(t, uint64(0), recv1)
|
||||||
|
|
||||||
// Cycle to establish baseline for this cache time
|
// Record read time and cycle to establish baseline for this cache time
|
||||||
|
exactly1000msAgo := time.Now().Add(-1000 * time.Millisecond)
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs] = map[string]time.Time{
|
||||||
|
"test-container": exactly1000msAgo,
|
||||||
|
}
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
|
|
||||||
// Calculate expected results precisely
|
// Calculate expected results precisely
|
||||||
@@ -795,7 +891,7 @@ func TestNetworkStatsCalculationWithRealData(t *testing.T) {
|
|||||||
expectedRecvRate := deltaRecv * 1000 / expectedElapsedMs // Should be exactly 1000000
|
expectedRecvRate := deltaRecv * 1000 / expectedElapsedMs // Should be exactly 1000000
|
||||||
|
|
||||||
// Second call with changed data
|
// Second call with changed data
|
||||||
sent2, recv2 := dm.calculateNetworkStats(ctr, apiStats2, stats, true, "test", cacheTimeMs)
|
sent2, recv2 := dm.calculateNetworkStats(ctr, apiStats2, "test", cacheTimeMs)
|
||||||
|
|
||||||
// Should be exactly the expected rates (no tolerance needed)
|
// Should be exactly the expected rates (no tolerance needed)
|
||||||
assert.Equal(t, expectedSentRate, sent2)
|
assert.Equal(t, expectedSentRate, sent2)
|
||||||
@@ -803,12 +899,13 @@ func TestNetworkStatsCalculationWithRealData(t *testing.T) {
|
|||||||
|
|
||||||
// Bad speed cap: set absurd delta over 1ms and expect 0 due to cap
|
// Bad speed cap: set absurd delta over 1ms and expect 0 due to cap
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
stats.PrevReadTime = time.Now().Add(-1 * time.Millisecond)
|
dm.lastNetworkReadTime[cacheTimeMs]["test-container"] = time.Now().Add(-1 * time.Millisecond)
|
||||||
apiStats1.Networks["eth0"] = container.NetworkStats{TxBytes: 0, RxBytes: 0}
|
apiStats1.Networks["eth0"] = container.NetworkStats{TxBytes: 0, RxBytes: 0}
|
||||||
apiStats2.Networks["eth0"] = container.NetworkStats{TxBytes: 10 * 1024 * 1024 * 1024, RxBytes: 0} // 10GB delta
|
apiStats2.Networks["eth0"] = container.NetworkStats{TxBytes: 10 * 1024 * 1024 * 1024, RxBytes: 0} // 10GB delta
|
||||||
_, _ = dm.calculateNetworkStats(ctr, apiStats1, stats, true, "test", cacheTimeMs) // baseline
|
_, _ = dm.calculateNetworkStats(ctr, apiStats1, "test", cacheTimeMs) // baseline
|
||||||
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
sent3, recv3 := dm.calculateNetworkStats(ctr, apiStats2, stats, true, "test", cacheTimeMs)
|
dm.lastNetworkReadTime[cacheTimeMs]["test-container"] = time.Now().Add(-1 * time.Millisecond)
|
||||||
|
sent3, recv3 := dm.calculateNetworkStats(ctr, apiStats2, "test", cacheTimeMs)
|
||||||
assert.Equal(t, uint64(0), sent3)
|
assert.Equal(t, uint64(0), sent3)
|
||||||
assert.Equal(t, uint64(0), recv3)
|
assert.Equal(t, uint64(0), recv3)
|
||||||
}
|
}
|
||||||
@@ -829,6 +926,7 @@ func TestContainerStatsEndToEndWithRealData(t *testing.T) {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
containerStatsMap: make(map[string]*container.Stats),
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -950,6 +1048,7 @@ func TestDockerStatsWorkflow(t *testing.T) {
|
|||||||
lastCpuSystem: make(map[uint16]map[string]uint64),
|
lastCpuSystem: make(map[uint16]map[string]uint64),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
containerStatsMap: make(map[string]*container.Stats),
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1129,6 +1228,18 @@ func TestParseDockerStatus(t *testing.T) {
|
|||||||
expectedStatus: "",
|
expectedStatus: "",
|
||||||
expectedHealth: container.DockerHealthNone,
|
expectedHealth: container.DockerHealthNone,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "status health with health: prefix",
|
||||||
|
input: "Up 5 minutes (health: starting)",
|
||||||
|
expectedStatus: "Up 5 minutes",
|
||||||
|
expectedHealth: container.DockerHealthStarting,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "status health with health status: prefix",
|
||||||
|
input: "Up 10 minutes (health status: unhealthy)",
|
||||||
|
expectedStatus: "Up 10 minutes",
|
||||||
|
expectedHealth: container.DockerHealthUnhealthy,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
@@ -1140,6 +1251,85 @@ func TestParseDockerStatus(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParseDockerHealthStatus(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
input string
|
||||||
|
expectedHealth container.DockerHealth
|
||||||
|
expectedOk bool
|
||||||
|
}{
|
||||||
|
{"healthy", container.DockerHealthHealthy, true},
|
||||||
|
{"unhealthy", container.DockerHealthUnhealthy, true},
|
||||||
|
{"starting", container.DockerHealthStarting, true},
|
||||||
|
{"none", container.DockerHealthNone, true},
|
||||||
|
{" Healthy ", container.DockerHealthHealthy, true},
|
||||||
|
{"unknown", container.DockerHealthNone, false},
|
||||||
|
{"", container.DockerHealthNone, false},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.input, func(t *testing.T) {
|
||||||
|
health, ok := parseDockerHealthStatus(tt.input)
|
||||||
|
assert.Equal(t, tt.expectedHealth, health)
|
||||||
|
assert.Equal(t, tt.expectedOk, ok)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateContainerStatsUsesPodmanInspectHealthFallback(t *testing.T) {
|
||||||
|
var requestedPaths []string
|
||||||
|
dm := &dockerManager{
|
||||||
|
client: &http.Client{Transport: roundTripFunc(func(req *http.Request) (*http.Response, error) {
|
||||||
|
requestedPaths = append(requestedPaths, req.URL.EscapedPath())
|
||||||
|
switch req.URL.EscapedPath() {
|
||||||
|
case "/containers/0123456789ab/stats":
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Status: "200 OK",
|
||||||
|
Header: make(http.Header),
|
||||||
|
Body: io.NopCloser(strings.NewReader(`{
|
||||||
|
"read":"2026-03-15T21:26:59Z",
|
||||||
|
"cpu_stats":{"cpu_usage":{"total_usage":1000},"system_cpu_usage":2000},
|
||||||
|
"memory_stats":{"usage":1048576,"stats":{"inactive_file":262144}},
|
||||||
|
"networks":{"eth0":{"rx_bytes":0,"tx_bytes":0}}
|
||||||
|
}`)),
|
||||||
|
Request: req,
|
||||||
|
}, nil
|
||||||
|
case "/containers/0123456789ab/json":
|
||||||
|
return &http.Response{
|
||||||
|
StatusCode: http.StatusOK,
|
||||||
|
Status: "200 OK",
|
||||||
|
Header: make(http.Header),
|
||||||
|
Body: io.NopCloser(strings.NewReader(`{"State":{"Health":{"Status":"healthy"}}}`)),
|
||||||
|
Request: req,
|
||||||
|
}, nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unexpected path: %s", req.URL.EscapedPath())
|
||||||
|
}
|
||||||
|
})},
|
||||||
|
containerStatsMap: make(map[string]*container.Stats),
|
||||||
|
apiStats: &container.ApiStats{},
|
||||||
|
usingPodman: true,
|
||||||
|
lastCpuContainer: make(map[uint16]map[string]uint64),
|
||||||
|
lastCpuSystem: make(map[uint16]map[string]uint64),
|
||||||
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
}
|
||||||
|
|
||||||
|
ctr := &container.ApiInfo{
|
||||||
|
IdShort: "0123456789ab",
|
||||||
|
Names: []string{"/beszel"},
|
||||||
|
Status: "Up 2 minutes",
|
||||||
|
Image: "beszel:latest",
|
||||||
|
}
|
||||||
|
|
||||||
|
err := dm.updateContainerStats(ctr, defaultCacheTimeMs)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, []string{"/containers/0123456789ab/stats", "/containers/0123456789ab/json"}, requestedPaths)
|
||||||
|
assert.Equal(t, container.DockerHealthHealthy, dm.containerStatsMap[ctr.IdShort].Health)
|
||||||
|
assert.Equal(t, "Up 2 minutes", dm.containerStatsMap[ctr.IdShort].Status)
|
||||||
|
}
|
||||||
|
|
||||||
func TestConstantsAndUtilityFunctions(t *testing.T) {
|
func TestConstantsAndUtilityFunctions(t *testing.T) {
|
||||||
// Test constants are properly defined
|
// Test constants are properly defined
|
||||||
assert.Equal(t, uint16(60000), defaultCacheTimeMs)
|
assert.Equal(t, uint16(60000), defaultCacheTimeMs)
|
||||||
@@ -1455,3 +1645,99 @@ func TestAnsiEscapePattern(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestConvertContainerPortsToString(t *testing.T) {
|
||||||
|
type port = struct {
|
||||||
|
PublicPort uint16
|
||||||
|
IP string
|
||||||
|
}
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
ports []port
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "empty ports",
|
||||||
|
ports: nil,
|
||||||
|
expected: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "single port",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
},
|
||||||
|
expected: "80",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "single port with non-default IP",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 80, IP: "1.2.3.4"},
|
||||||
|
},
|
||||||
|
expected: "1.2.3.4:80",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ipv6 default ip",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 80, IP: "::"},
|
||||||
|
},
|
||||||
|
expected: "80",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "zero PublicPort is skipped",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 0, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
},
|
||||||
|
expected: "80",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ports sorted ascending by PublicPort",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 443, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 8080, IP: "0.0.0.0"},
|
||||||
|
},
|
||||||
|
expected: "80, 443, 8080",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "duplicates are deduplicated",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 443, IP: "0.0.0.0"},
|
||||||
|
},
|
||||||
|
expected: "80, 443",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "multiple ports with different IPs",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 80, IP: "0.0.0.0"},
|
||||||
|
{PublicPort: 443, IP: "1.2.3.4"},
|
||||||
|
},
|
||||||
|
expected: "80, 1.2.3.4:443",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "ports slice is nilled after call",
|
||||||
|
ports: []port{
|
||||||
|
{PublicPort: 8080, IP: "0.0.0.0"},
|
||||||
|
},
|
||||||
|
expected: "8080",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
ctr := &container.ApiInfo{}
|
||||||
|
for _, p := range tt.ports {
|
||||||
|
ctr.Ports = append(ctr.Ports, struct {
|
||||||
|
PublicPort uint16
|
||||||
|
IP string
|
||||||
|
}{PublicPort: p.PublicPort, IP: p.IP})
|
||||||
|
}
|
||||||
|
result := convertContainerPortsToString(ctr)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
// Ports slice must be cleared to prevent bleed-over into the next response
|
||||||
|
assert.Nil(t, ctr.Ports, "ctr.Ports should be nil after formatContainerPorts")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -461,7 +461,7 @@ func (gm *GPUManager) discoverGpuCapabilities() gpuCapabilities {
|
|||||||
caps.hasNvtop = true
|
caps.hasNvtop = true
|
||||||
}
|
}
|
||||||
if runtime.GOOS == "darwin" {
|
if runtime.GOOS == "darwin" {
|
||||||
if _, err := exec.LookPath(macmonCmd); err == nil {
|
if _, err := utils.LookPathHomebrew(macmonCmd); err == nil {
|
||||||
caps.hasMacmon = true
|
caps.hasMacmon = true
|
||||||
}
|
}
|
||||||
if _, err := exec.LookPath(powermetricsCmd); err == nil {
|
if _, err := exec.LookPath(powermetricsCmd); err == nil {
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -171,7 +172,11 @@ type macmonSample struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (gm *GPUManager) collectMacmonPipe() (err error) {
|
func (gm *GPUManager) collectMacmonPipe() (err error) {
|
||||||
cmd := exec.Command(macmonCmd, "pipe", "-i", strconv.Itoa(macmonIntervalMs))
|
macmonPath, err := utils.LookPathHomebrew(macmonCmd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cmd := exec.Command(macmonPath, "pipe", "-i", strconv.Itoa(macmonIntervalMs))
|
||||||
// Avoid blocking if macmon writes to stderr.
|
// Avoid blocking if macmon writes to stderr.
|
||||||
cmd.Stderr = io.Discard
|
cmd.Stderr = io.Discard
|
||||||
stdout, err := cmd.StdoutPipe()
|
stdout, err := cmd.StdoutPipe()
|
||||||
|
|||||||
@@ -1083,8 +1083,6 @@ func TestCalculateGPUAverage(t *testing.T) {
|
|||||||
|
|
||||||
func TestGPUCapabilitiesAndLegacyPriority(t *testing.T) {
|
func TestGPUCapabilitiesAndLegacyPriority(t *testing.T) {
|
||||||
// Save original PATH
|
// Save original PATH
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
hasAmdSysfs := (&GPUManager{}).hasAmdSysfs()
|
hasAmdSysfs := (&GPUManager{}).hasAmdSysfs()
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
@@ -1178,7 +1176,7 @@ echo "[]"`
|
|||||||
{
|
{
|
||||||
name: "no gpu tools available",
|
name: "no gpu tools available",
|
||||||
setupCommands: func(_ string) error {
|
setupCommands: func(_ string) error {
|
||||||
os.Setenv("PATH", "")
|
t.Setenv("PATH", "")
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
wantErr: true,
|
wantErr: true,
|
||||||
@@ -1188,7 +1186,7 @@ echo "[]"`
|
|||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
tempDir := t.TempDir()
|
tempDir := t.TempDir()
|
||||||
os.Setenv("PATH", tempDir)
|
t.Setenv("PATH", tempDir)
|
||||||
if err := tt.setupCommands(tempDir); err != nil {
|
if err := tt.setupCommands(tempDir); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -1234,13 +1232,9 @@ echo "[]"`
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCollectorStartHelpers(t *testing.T) {
|
func TestCollectorStartHelpers(t *testing.T) {
|
||||||
// Save original PATH
|
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
// Set up temp dir with the commands
|
// Set up temp dir with the commands
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -1370,11 +1364,8 @@ echo '[{"device_name":"NVIDIA Test GPU","temp":"52C","power_draw":"31W","gpu_uti
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewGPUManagerPriorityNvtopFallback(t *testing.T) {
|
func TestNewGPUManagerPriorityNvtopFallback(t *testing.T) {
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvtop,nvidia-smi")
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvtop,nvidia-smi")
|
||||||
|
|
||||||
nvtopPath := filepath.Join(dir, "nvtop")
|
nvtopPath := filepath.Join(dir, "nvtop")
|
||||||
@@ -1399,11 +1390,8 @@ echo "0, NVIDIA Priority GPU, 45, 512, 2048, 12, 25"`
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewGPUManagerPriorityMixedCollectors(t *testing.T) {
|
func TestNewGPUManagerPriorityMixedCollectors(t *testing.T) {
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "intel_gpu_top,rocm-smi")
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "intel_gpu_top,rocm-smi")
|
||||||
|
|
||||||
intelPath := filepath.Join(dir, "intel_gpu_top")
|
intelPath := filepath.Join(dir, "intel_gpu_top")
|
||||||
@@ -1433,11 +1421,8 @@ echo '{"card0": {"Temperature (Sensor edge) (C)": "49.0", "Current Socket Graphi
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewGPUManagerPriorityNvmlFallbackToNvidiaSmi(t *testing.T) {
|
func TestNewGPUManagerPriorityNvmlFallbackToNvidiaSmi(t *testing.T) {
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvml,nvidia-smi")
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvml,nvidia-smi")
|
||||||
|
|
||||||
nvidiaPath := filepath.Join(dir, "nvidia-smi")
|
nvidiaPath := filepath.Join(dir, "nvidia-smi")
|
||||||
@@ -1456,11 +1441,8 @@ echo "0, NVIDIA Fallback GPU, 41, 256, 1024, 8, 14"`
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewGPUManagerConfiguredCollectorsMustStart(t *testing.T) {
|
func TestNewGPUManagerConfiguredCollectorsMustStart(t *testing.T) {
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
|
|
||||||
t.Run("configured valid collector unavailable", func(t *testing.T) {
|
t.Run("configured valid collector unavailable", func(t *testing.T) {
|
||||||
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvidia-smi")
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvidia-smi")
|
||||||
@@ -1480,11 +1462,8 @@ func TestNewGPUManagerConfiguredCollectorsMustStart(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewGPUManagerJetsonIgnoresCollectorConfig(t *testing.T) {
|
func TestNewGPUManagerJetsonIgnoresCollectorConfig(t *testing.T) {
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvidia-smi")
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvidia-smi")
|
||||||
|
|
||||||
tegraPath := filepath.Join(dir, "tegrastats")
|
tegraPath := filepath.Join(dir, "tegrastats")
|
||||||
@@ -1719,12 +1698,8 @@ func TestIntelUpdateFromStats(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestIntelCollectorStreaming(t *testing.T) {
|
func TestIntelCollectorStreaming(t *testing.T) {
|
||||||
// Save and override PATH
|
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
|
|
||||||
// Create a fake intel_gpu_top that prints -l format with four samples (first will be skipped) and exits
|
// Create a fake intel_gpu_top that prints -l format with four samples (first will be skipped) and exits
|
||||||
scriptPath := filepath.Join(dir, "intel_gpu_top")
|
scriptPath := filepath.Join(dir, "intel_gpu_top")
|
||||||
|
|||||||
@@ -213,10 +213,8 @@ func (a *Agent) applyNetworkTotals(
|
|||||||
totalBytesSent, totalBytesRecv uint64,
|
totalBytesSent, totalBytesRecv uint64,
|
||||||
bytesSentPerSecond, bytesRecvPerSecond uint64,
|
bytesSentPerSecond, bytesRecvPerSecond uint64,
|
||||||
) {
|
) {
|
||||||
networkSentPs := utils.BytesToMegabytes(float64(bytesSentPerSecond))
|
if bytesSentPerSecond > 10_000_000_000 || bytesRecvPerSecond > 10_000_000_000 {
|
||||||
networkRecvPs := utils.BytesToMegabytes(float64(bytesRecvPerSecond))
|
slog.Warn("Invalid net stats. Resetting.", "sent", bytesSentPerSecond, "recv", bytesRecvPerSecond)
|
||||||
if networkSentPs > 10_000 || networkRecvPs > 10_000 {
|
|
||||||
slog.Warn("Invalid net stats. Resetting.", "sent", networkSentPs, "recv", networkRecvPs)
|
|
||||||
for _, v := range netIO {
|
for _, v := range netIO {
|
||||||
if _, exists := a.netInterfaces[v.Name]; !exists {
|
if _, exists := a.netInterfaces[v.Name]; !exists {
|
||||||
continue
|
continue
|
||||||
@@ -226,14 +224,10 @@ func (a *Agent) applyNetworkTotals(
|
|||||||
a.initializeNetIoStats()
|
a.initializeNetIoStats()
|
||||||
delete(a.netIoStats, cacheTimeMs)
|
delete(a.netIoStats, cacheTimeMs)
|
||||||
delete(a.netInterfaceDeltaTrackers, cacheTimeMs)
|
delete(a.netInterfaceDeltaTrackers, cacheTimeMs)
|
||||||
systemStats.NetworkSent = 0
|
|
||||||
systemStats.NetworkRecv = 0
|
|
||||||
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = 0, 0
|
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = 0, 0
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
systemStats.NetworkSent = networkSentPs
|
|
||||||
systemStats.NetworkRecv = networkRecvPs
|
|
||||||
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = bytesSentPerSecond, bytesRecvPerSecond
|
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = bytesSentPerSecond, bytesRecvPerSecond
|
||||||
nis.BytesSent = totalBytesSent
|
nis.BytesSent = totalBytesSent
|
||||||
nis.BytesRecv = totalBytesRecv
|
nis.BytesRecv = totalBytesRecv
|
||||||
|
|||||||
@@ -416,8 +416,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesSent uint64
|
totalBytesSent uint64
|
||||||
totalBytesRecv uint64
|
totalBytesRecv uint64
|
||||||
expectReset bool
|
expectReset bool
|
||||||
expectedNetworkSent float64
|
|
||||||
expectedNetworkRecv float64
|
|
||||||
expectedBandwidthSent uint64
|
expectedBandwidthSent uint64
|
||||||
expectedBandwidthRecv uint64
|
expectedBandwidthRecv uint64
|
||||||
}{
|
}{
|
||||||
@@ -428,8 +426,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesSent: 10000000,
|
totalBytesSent: 10000000,
|
||||||
totalBytesRecv: 20000000,
|
totalBytesRecv: 20000000,
|
||||||
expectReset: false,
|
expectReset: false,
|
||||||
expectedNetworkSent: 0.95, // ~1 MB/s rounded to 2 decimals
|
|
||||||
expectedNetworkRecv: 1.91, // ~2 MB/s rounded to 2 decimals
|
|
||||||
expectedBandwidthSent: 1000000,
|
expectedBandwidthSent: 1000000,
|
||||||
expectedBandwidthRecv: 2000000,
|
expectedBandwidthRecv: 2000000,
|
||||||
},
|
},
|
||||||
@@ -457,18 +453,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesRecv: 20000000,
|
totalBytesRecv: 20000000,
|
||||||
expectReset: true,
|
expectReset: true,
|
||||||
},
|
},
|
||||||
{
|
|
||||||
name: "Valid network stats - at threshold boundary",
|
|
||||||
bytesSentPerSecond: 10485750000, // ~9999.99 MB/s (rounds to 9999.99)
|
|
||||||
bytesRecvPerSecond: 10485750000, // ~9999.99 MB/s (rounds to 9999.99)
|
|
||||||
totalBytesSent: 10000000,
|
|
||||||
totalBytesRecv: 20000000,
|
|
||||||
expectReset: false,
|
|
||||||
expectedNetworkSent: 9999.99,
|
|
||||||
expectedNetworkRecv: 9999.99,
|
|
||||||
expectedBandwidthSent: 10485750000,
|
|
||||||
expectedBandwidthRecv: 10485750000,
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
name: "Zero values",
|
name: "Zero values",
|
||||||
bytesSentPerSecond: 0,
|
bytesSentPerSecond: 0,
|
||||||
@@ -476,8 +460,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesSent: 0,
|
totalBytesSent: 0,
|
||||||
totalBytesRecv: 0,
|
totalBytesRecv: 0,
|
||||||
expectReset: false,
|
expectReset: false,
|
||||||
expectedNetworkSent: 0.0,
|
|
||||||
expectedNetworkRecv: 0.0,
|
|
||||||
expectedBandwidthSent: 0,
|
expectedBandwidthSent: 0,
|
||||||
expectedBandwidthRecv: 0,
|
expectedBandwidthRecv: 0,
|
||||||
},
|
},
|
||||||
@@ -514,14 +496,10 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
// Should have reset network tracking state - maps cleared and stats zeroed
|
// Should have reset network tracking state - maps cleared and stats zeroed
|
||||||
assert.NotContains(t, a.netIoStats, cacheTimeMs, "cache entry should be cleared after reset")
|
assert.NotContains(t, a.netIoStats, cacheTimeMs, "cache entry should be cleared after reset")
|
||||||
assert.NotContains(t, a.netInterfaceDeltaTrackers, cacheTimeMs, "tracker should be cleared on reset")
|
assert.NotContains(t, a.netInterfaceDeltaTrackers, cacheTimeMs, "tracker should be cleared on reset")
|
||||||
assert.Zero(t, systemStats.NetworkSent)
|
|
||||||
assert.Zero(t, systemStats.NetworkRecv)
|
|
||||||
assert.Zero(t, systemStats.Bandwidth[0])
|
assert.Zero(t, systemStats.Bandwidth[0])
|
||||||
assert.Zero(t, systemStats.Bandwidth[1])
|
assert.Zero(t, systemStats.Bandwidth[1])
|
||||||
} else {
|
} else {
|
||||||
// Should have applied stats
|
// Should have applied stats
|
||||||
assert.Equal(t, tt.expectedNetworkSent, systemStats.NetworkSent)
|
|
||||||
assert.Equal(t, tt.expectedNetworkRecv, systemStats.NetworkRecv)
|
|
||||||
assert.Equal(t, tt.expectedBandwidthSent, systemStats.Bandwidth[0])
|
assert.Equal(t, tt.expectedBandwidthSent, systemStats.Bandwidth[0])
|
||||||
assert.Equal(t, tt.expectedBandwidthRecv, systemStats.Bandwidth[1])
|
assert.Equal(t, tt.expectedBandwidthRecv, systemStats.Bandwidth[1])
|
||||||
|
|
||||||
|
|||||||
@@ -2,12 +2,14 @@ package agent
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"path"
|
"path"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/agent/utils"
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
@@ -38,6 +40,11 @@ func (a *Agent) newSensorConfig() *SensorConfig {
|
|||||||
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
||||||
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errTemperatureFetchTimeout = errors.New("temperature collection timed out")
|
||||||
|
temperatureFetchTimeout = 2 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
// newSensorConfigWithEnv creates a SensorConfig with the provided environment variables
|
// newSensorConfigWithEnv creates a SensorConfig with the provided environment variables
|
||||||
// sensorsSet indicates if the SENSORS environment variable was explicitly set (even to empty string)
|
// sensorsSet indicates if the SENSORS environment variable was explicitly set (even to empty string)
|
||||||
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal string, skipCollection bool) *SensorConfig {
|
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal string, skipCollection bool) *SensorConfig {
|
||||||
@@ -86,10 +93,12 @@ func (a *Agent) updateTemperatures(systemStats *system.Stats) {
|
|||||||
// reset high temp
|
// reset high temp
|
||||||
a.systemInfo.DashboardTemp = 0
|
a.systemInfo.DashboardTemp = 0
|
||||||
|
|
||||||
temps, err := a.getTempsWithPanicRecovery(getSensorTemps)
|
temps, err := a.getTempsWithTimeout(getSensorTemps)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// retry once on panic (gopsutil/issues/1832)
|
// retry once on panic (gopsutil/issues/1832)
|
||||||
temps, err = a.getTempsWithPanicRecovery(getSensorTemps)
|
if !errors.Is(err, errTemperatureFetchTimeout) {
|
||||||
|
temps, err = a.getTempsWithTimeout(getSensorTemps)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("Error updating temperatures", "err", err)
|
slog.Warn("Error updating temperatures", "err", err)
|
||||||
if len(systemStats.Temperatures) > 0 {
|
if len(systemStats.Temperatures) > 0 {
|
||||||
@@ -152,6 +161,26 @@ func (a *Agent) getTempsWithPanicRecovery(getTemps getTempsFn) (temps []sensors.
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (a *Agent) getTempsWithTimeout(getTemps getTempsFn) ([]sensors.TemperatureStat, error) {
|
||||||
|
type result struct {
|
||||||
|
temps []sensors.TemperatureStat
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
resultCh := make(chan result, 1)
|
||||||
|
go func() {
|
||||||
|
temps, err := a.getTempsWithPanicRecovery(getTemps)
|
||||||
|
resultCh <- result{temps: temps, err: err}
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case res := <-resultCh:
|
||||||
|
return res.temps, res.err
|
||||||
|
case <-time.After(temperatureFetchTimeout):
|
||||||
|
return nil, errTemperatureFetchTimeout
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// isValidSensor checks if a sensor is valid based on the sensor name and the sensor config
|
// isValidSensor checks if a sensor is valid based on the sensor name and the sensor config
|
||||||
func isValidSensor(sensorName string, config *SensorConfig) bool {
|
func isValidSensor(sensorName string, config *SensorConfig) bool {
|
||||||
// if no sensors configured, everything is valid
|
// if no sensors configured, everything is valid
|
||||||
|
|||||||
@@ -5,8 +5,8 @@ package agent
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
@@ -329,34 +329,10 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewSensorConfig(t *testing.T) {
|
func TestNewSensorConfig(t *testing.T) {
|
||||||
// Save original environment variables
|
|
||||||
originalPrimary, hasPrimary := os.LookupEnv("BESZEL_AGENT_PRIMARY_SENSOR")
|
|
||||||
originalSys, hasSys := os.LookupEnv("BESZEL_AGENT_SYS_SENSORS")
|
|
||||||
originalSensors, hasSensors := os.LookupEnv("BESZEL_AGENT_SENSORS")
|
|
||||||
|
|
||||||
// Restore environment variables after the test
|
|
||||||
defer func() {
|
|
||||||
// Clean up test environment variables
|
|
||||||
os.Unsetenv("BESZEL_AGENT_PRIMARY_SENSOR")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SYS_SENSORS")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SENSORS")
|
|
||||||
|
|
||||||
// Restore original values if they existed
|
|
||||||
if hasPrimary {
|
|
||||||
os.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", originalPrimary)
|
|
||||||
}
|
|
||||||
if hasSys {
|
|
||||||
os.Setenv("BESZEL_AGENT_SYS_SENSORS", originalSys)
|
|
||||||
}
|
|
||||||
if hasSensors {
|
|
||||||
os.Setenv("BESZEL_AGENT_SENSORS", originalSensors)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Set test environment variables
|
// Set test environment variables
|
||||||
os.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", "test_primary")
|
t.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", "test_primary")
|
||||||
os.Setenv("BESZEL_AGENT_SYS_SENSORS", "/test/path")
|
t.Setenv("BESZEL_AGENT_SYS_SENSORS", "/test/path")
|
||||||
os.Setenv("BESZEL_AGENT_SENSORS", "test_sensor1,test_*,test_sensor3")
|
t.Setenv("BESZEL_AGENT_SENSORS", "test_sensor1,test_*,test_sensor3")
|
||||||
|
|
||||||
agent := &Agent{}
|
agent := &Agent{}
|
||||||
result := agent.newSensorConfig()
|
result := agent.newSensorConfig()
|
||||||
@@ -551,3 +527,66 @@ func TestGetTempsWithPanicRecovery(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGetTempsWithTimeout(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
sensorConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
originalTimeout := temperatureFetchTimeout
|
||||||
|
t.Cleanup(func() {
|
||||||
|
temperatureFetchTimeout = originalTimeout
|
||||||
|
})
|
||||||
|
temperatureFetchTimeout = 10 * time.Millisecond
|
||||||
|
|
||||||
|
t.Run("returns temperatures before timeout", func(t *testing.T) {
|
||||||
|
temps, err := agent.getTempsWithTimeout(func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
return []sensors.TemperatureStat{{SensorKey: "cpu_temp", Temperature: 42}}, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, temps, 1)
|
||||||
|
assert.Equal(t, "cpu_temp", temps[0].SensorKey)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns timeout error when collector hangs", func(t *testing.T) {
|
||||||
|
temps, err := agent.getTempsWithTimeout(func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
return []sensors.TemperatureStat{{SensorKey: "cpu_temp", Temperature: 42}}, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Nil(t, temps)
|
||||||
|
assert.ErrorIs(t, err, errTemperatureFetchTimeout)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateTemperaturesSkipsOnTimeout(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
systemInfo: system.Info{DashboardTemp: 99},
|
||||||
|
sensorConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
originalTimeout := temperatureFetchTimeout
|
||||||
|
t.Cleanup(func() {
|
||||||
|
temperatureFetchTimeout = originalTimeout
|
||||||
|
getSensorTemps = sensors.TemperaturesWithContext
|
||||||
|
})
|
||||||
|
temperatureFetchTimeout = 10 * time.Millisecond
|
||||||
|
getSensorTemps = func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
stats := &system.Stats{
|
||||||
|
Temperatures: map[string]float64{"stale": 50},
|
||||||
|
}
|
||||||
|
|
||||||
|
agent.updateTemperatures(stats)
|
||||||
|
|
||||||
|
assert.Equal(t, 0.0, agent.systemInfo.DashboardTemp)
|
||||||
|
assert.Equal(t, map[string]float64{}, stats.Temperatures)
|
||||||
|
}
|
||||||
|
|||||||
@@ -183,8 +183,7 @@ func TestStartServer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestStartServerDisableSSH(t *testing.T) {
|
func TestStartServerDisableSSH(t *testing.T) {
|
||||||
os.Setenv("BESZEL_AGENT_DISABLE_SSH", "true")
|
t.Setenv("BESZEL_AGENT_DISABLE_SSH", "true")
|
||||||
defer os.Unsetenv("BESZEL_AGENT_DISABLE_SSH")
|
|
||||||
|
|
||||||
agent, err := NewAgent("")
|
agent, err := NewAgent("")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|||||||
@@ -1104,32 +1104,21 @@ func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
|||||||
|
|
||||||
// detectSmartctl checks if smartctl is installed, returns an error if not
|
// detectSmartctl checks if smartctl is installed, returns an error if not
|
||||||
func (sm *SmartManager) detectSmartctl() (string, error) {
|
func (sm *SmartManager) detectSmartctl() (string, error) {
|
||||||
isWindows := runtime.GOOS == "windows"
|
if runtime.GOOS == "windows" {
|
||||||
|
// Load embedded smartctl.exe for Windows amd64 builds.
|
||||||
// Load embedded smartctl.exe for Windows amd64 builds.
|
if runtime.GOARCH == "amd64" {
|
||||||
if isWindows && runtime.GOARCH == "amd64" {
|
if path, err := ensureEmbeddedSmartctl(); err == nil {
|
||||||
if path, err := ensureEmbeddedSmartctl(); err == nil {
|
return path, nil
|
||||||
return path, nil
|
}
|
||||||
}
|
}
|
||||||
}
|
// Try to find smartctl in the default installation location
|
||||||
|
const location = "C:\\Program Files\\smartmontools\\bin\\smartctl.exe"
|
||||||
if path, err := exec.LookPath("smartctl"); err == nil {
|
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
locations := []string{}
|
|
||||||
if isWindows {
|
|
||||||
locations = append(locations,
|
|
||||||
"C:\\Program Files\\smartmontools\\bin\\smartctl.exe",
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
locations = append(locations, "/opt/homebrew/bin/smartctl")
|
|
||||||
}
|
|
||||||
for _, location := range locations {
|
|
||||||
if _, err := os.Stat(location); err == nil {
|
if _, err := os.Stat(location); err == nil {
|
||||||
return location, nil
|
return location, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return "", errors.New("smartctl not found")
|
|
||||||
|
return utils.LookPathHomebrew("smartctl")
|
||||||
}
|
}
|
||||||
|
|
||||||
// isNvmeControllerPath checks if the path matches an NVMe controller pattern
|
// isNvmeControllerPath checks if the path matches an NVMe controller pattern
|
||||||
|
|||||||
@@ -1035,7 +1035,7 @@ func TestRefreshExcludedDevices(t *testing.T) {
|
|||||||
t.Setenv("EXCLUDE_SMART", tt.envValue)
|
t.Setenv("EXCLUDE_SMART", tt.envValue)
|
||||||
} else {
|
} else {
|
||||||
// Ensure env var is not set for empty test
|
// Ensure env var is not set for empty test
|
||||||
os.Unsetenv("EXCLUDE_SMART")
|
t.Setenv("EXCLUDE_SMART", "")
|
||||||
}
|
}
|
||||||
|
|
||||||
sm := &SmartManager{}
|
sm := &SmartManager{}
|
||||||
|
|||||||
@@ -301,7 +301,7 @@ func getServicePatterns() []string {
|
|||||||
if pattern == "" {
|
if pattern == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !strings.HasSuffix(pattern, ".service") {
|
if !strings.HasSuffix(pattern, "timer") && !strings.HasSuffix(pattern, ".service") {
|
||||||
pattern += ".service"
|
pattern += ".service"
|
||||||
}
|
}
|
||||||
patterns = append(patterns, pattern)
|
patterns = append(patterns, pattern)
|
||||||
|
|||||||
@@ -156,20 +156,23 @@ func TestGetServicePatterns(t *testing.T) {
|
|||||||
expected: []string{"*nginx*.service", "*apache*.service"},
|
expected: []string{"*nginx*.service", "*apache*.service"},
|
||||||
cleanupEnvVars: true,
|
cleanupEnvVars: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "opt into timer monitoring",
|
||||||
|
prefixedEnv: "nginx.service,docker,apache.timer",
|
||||||
|
unprefixedEnv: "",
|
||||||
|
expected: []string{"nginx.service", "docker.service", "apache.timer"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
// Clean up any existing env vars
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SERVICE_PATTERNS")
|
|
||||||
os.Unsetenv("SERVICE_PATTERNS")
|
|
||||||
|
|
||||||
// Set up environment variables
|
// Set up environment variables
|
||||||
if tt.prefixedEnv != "" {
|
if tt.prefixedEnv != "" {
|
||||||
os.Setenv("BESZEL_AGENT_SERVICE_PATTERNS", tt.prefixedEnv)
|
t.Setenv("BESZEL_AGENT_SERVICE_PATTERNS", tt.prefixedEnv)
|
||||||
}
|
}
|
||||||
if tt.unprefixedEnv != "" {
|
if tt.unprefixedEnv != "" {
|
||||||
os.Setenv("SERVICE_PATTERNS", tt.unprefixedEnv)
|
t.Setenv("SERVICE_PATTERNS", tt.unprefixedEnv)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run the function
|
// Run the function
|
||||||
@@ -177,12 +180,6 @@ func TestGetServicePatterns(t *testing.T) {
|
|||||||
|
|
||||||
// Verify results
|
// Verify results
|
||||||
assert.Equal(t, tt.expected, result, "Patterns should match expected values")
|
assert.Equal(t, tt.expected, result, "Patterns should match expected values")
|
||||||
|
|
||||||
// Cleanup
|
|
||||||
if tt.cleanupEnvVars {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SERVICE_PATTERNS")
|
|
||||||
os.Unsetenv("SERVICE_PATTERNS")
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,6 +4,9 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
@@ -86,3 +89,24 @@ func ReadUintFile(path string) (uint64, bool) {
|
|||||||
}
|
}
|
||||||
return parsed, true
|
return parsed, true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LookPathHomebrew is like exec.LookPath but also checks Homebrew paths.
|
||||||
|
func LookPathHomebrew(file string) (string, error) {
|
||||||
|
foundPath, lookPathErr := exec.LookPath(file)
|
||||||
|
if lookPathErr == nil {
|
||||||
|
return foundPath, nil
|
||||||
|
}
|
||||||
|
var homebrewPath string
|
||||||
|
switch runtime.GOOS {
|
||||||
|
case "darwin":
|
||||||
|
homebrewPath = filepath.Join("/opt", "homebrew", "bin", file)
|
||||||
|
case "linux":
|
||||||
|
homebrewPath = filepath.Join("/home", "linuxbrew", ".linuxbrew", "bin", file)
|
||||||
|
}
|
||||||
|
if homebrewPath != "" {
|
||||||
|
if _, err := os.Stat(homebrewPath); err == nil {
|
||||||
|
return homebrewPath, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", lookPathErr
|
||||||
|
}
|
||||||
|
|||||||
@@ -134,10 +134,8 @@ func TestGetEnv(t *testing.T) {
|
|||||||
prefixedKey := "BESZEL_AGENT_" + key
|
prefixedKey := "BESZEL_AGENT_" + key
|
||||||
|
|
||||||
t.Run("prefixed variable exists", func(t *testing.T) {
|
t.Run("prefixed variable exists", func(t *testing.T) {
|
||||||
os.Setenv(prefixedKey, "prefixed_val")
|
t.Setenv(prefixedKey, "prefixed_val")
|
||||||
os.Setenv(key, "unprefixed_val")
|
t.Setenv(key, "unprefixed_val")
|
||||||
defer os.Unsetenv(prefixedKey)
|
|
||||||
defer os.Unsetenv(key)
|
|
||||||
|
|
||||||
val, exists := GetEnv(key)
|
val, exists := GetEnv(key)
|
||||||
assert.True(t, exists)
|
assert.True(t, exists)
|
||||||
@@ -145,9 +143,7 @@ func TestGetEnv(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("only unprefixed variable exists", func(t *testing.T) {
|
t.Run("only unprefixed variable exists", func(t *testing.T) {
|
||||||
os.Unsetenv(prefixedKey)
|
t.Setenv(key, "unprefixed_val")
|
||||||
os.Setenv(key, "unprefixed_val")
|
|
||||||
defer os.Unsetenv(key)
|
|
||||||
|
|
||||||
val, exists := GetEnv(key)
|
val, exists := GetEnv(key)
|
||||||
assert.True(t, exists)
|
assert.True(t, exists)
|
||||||
@@ -155,9 +151,6 @@ func TestGetEnv(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("neither variable exists", func(t *testing.T) {
|
t.Run("neither variable exists", func(t *testing.T) {
|
||||||
os.Unsetenv(prefixedKey)
|
|
||||||
os.Unsetenv(key)
|
|
||||||
|
|
||||||
val, exists := GetEnv(key)
|
val, exists := GetEnv(key)
|
||||||
assert.False(t, exists)
|
assert.False(t, exists)
|
||||||
assert.Empty(t, val)
|
assert.Empty(t, val)
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import "github.com/blang/semver"
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// Version is the current version of the application.
|
// Version is the current version of the application.
|
||||||
Version = "0.18.4"
|
Version = "0.18.5"
|
||||||
// AppName is the name of the application.
|
// AppName is the name of the application.
|
||||||
AppName = "beszel"
|
AppName = "beszel"
|
||||||
)
|
)
|
||||||
|
|||||||
41
go.mod
41
go.mod
@@ -6,22 +6,22 @@ require (
|
|||||||
github.com/blang/semver v3.5.1+incompatible
|
github.com/blang/semver v3.5.1+incompatible
|
||||||
github.com/coreos/go-systemd/v22 v22.7.0
|
github.com/coreos/go-systemd/v22 v22.7.0
|
||||||
github.com/distatus/battery v0.11.0
|
github.com/distatus/battery v0.11.0
|
||||||
github.com/ebitengine/purego v0.9.1
|
github.com/ebitengine/purego v0.10.0
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0
|
github.com/fxamacker/cbor/v2 v2.9.0
|
||||||
github.com/gliderlabs/ssh v0.3.8
|
github.com/gliderlabs/ssh v0.3.8
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/lxzan/gws v1.8.9
|
github.com/lxzan/gws v1.9.1
|
||||||
github.com/nicholas-fedor/shoutrrr v0.13.2
|
github.com/nicholas-fedor/shoutrrr v0.14.1
|
||||||
github.com/pocketbase/dbx v1.12.0
|
github.com/pocketbase/dbx v1.12.0
|
||||||
github.com/pocketbase/pocketbase v0.36.4
|
github.com/pocketbase/pocketbase v0.36.7
|
||||||
github.com/shirou/gopsutil/v4 v4.26.1
|
github.com/shirou/gopsutil/v4 v4.26.2
|
||||||
github.com/spf13/cast v1.10.0
|
github.com/spf13/cast v1.10.0
|
||||||
github.com/spf13/cobra v1.10.2
|
github.com/spf13/cobra v1.10.2
|
||||||
github.com/spf13/pflag v1.0.10
|
github.com/spf13/pflag v1.0.10
|
||||||
github.com/stretchr/testify v1.11.1
|
github.com/stretchr/testify v1.11.1
|
||||||
golang.org/x/crypto v0.48.0
|
golang.org/x/crypto v0.49.0
|
||||||
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90
|
||||||
golang.org/x/sys v0.41.0
|
golang.org/x/sys v0.42.0
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -30,10 +30,10 @@ require (
|
|||||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
github.com/disintegration/imaging v1.6.2 // indirect
|
github.com/disintegration/imaging v1.6.2 // indirect
|
||||||
github.com/dolthub/maphash v0.1.0 // indirect
|
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
|
github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
|
||||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/fatih/color v1.18.0 // indirect
|
github.com/eclipse/paho.golang v0.23.0 // indirect
|
||||||
|
github.com/fatih/color v1.19.0 // indirect
|
||||||
github.com/gabriel-vasile/mimetype v1.4.13 // indirect
|
github.com/gabriel-vasile/mimetype v1.4.13 // indirect
|
||||||
github.com/ganigeorgiev/fexpr v0.5.0 // indirect
|
github.com/ganigeorgiev/fexpr v0.5.0 // indirect
|
||||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||||
@@ -41,9 +41,10 @@ require (
|
|||||||
github.com/go-sql-driver/mysql v1.9.1 // indirect
|
github.com/go-sql-driver/mysql v1.9.1 // indirect
|
||||||
github.com/godbus/dbus/v5 v5.2.2 // indirect
|
github.com/godbus/dbus/v5 v5.2.2 // indirect
|
||||||
github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
|
github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
|
||||||
|
github.com/gorilla/websocket v1.5.3 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/klauspost/compress v1.18.4 // indirect
|
github.com/klauspost/compress v1.18.5 // indirect
|
||||||
github.com/lufia/plan9stats v0.0.0-20260216142805-b3301c5f2a88 // indirect
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749 // indirect
|
||||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||||
@@ -54,15 +55,15 @@ require (
|
|||||||
github.com/tklauser/numcpus v0.11.0 // indirect
|
github.com/tklauser/numcpus v0.11.0 // indirect
|
||||||
github.com/x448/float16 v0.8.4 // indirect
|
github.com/x448/float16 v0.8.4 // indirect
|
||||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||||
golang.org/x/image v0.36.0 // indirect
|
golang.org/x/image v0.38.0 // indirect
|
||||||
golang.org/x/net v0.50.0 // indirect
|
golang.org/x/net v0.52.0 // indirect
|
||||||
golang.org/x/oauth2 v0.35.0 // indirect
|
golang.org/x/oauth2 v0.36.0 // indirect
|
||||||
golang.org/x/sync v0.19.0 // indirect
|
golang.org/x/sync v0.20.0 // indirect
|
||||||
golang.org/x/term v0.40.0 // indirect
|
golang.org/x/term v0.41.0 // indirect
|
||||||
golang.org/x/text v0.34.0 // indirect
|
golang.org/x/text v0.35.0 // indirect
|
||||||
howett.net/plist v1.0.1 // indirect
|
howett.net/plist v1.0.1 // indirect
|
||||||
modernc.org/libc v1.67.6 // indirect
|
modernc.org/libc v1.70.0 // indirect
|
||||||
modernc.org/mathutil v1.7.1 // indirect
|
modernc.org/mathutil v1.7.1 // indirect
|
||||||
modernc.org/memory v1.11.0 // indirect
|
modernc.org/memory v1.11.0 // indirect
|
||||||
modernc.org/sqlite v1.45.0 // indirect
|
modernc.org/sqlite v1.46.2 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
112
go.sum
112
go.sum
@@ -19,16 +19,16 @@ github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1
|
|||||||
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
||||||
github.com/distatus/battery v0.11.0 h1:KJk89gz90Iq/wJtbjjM9yUzBXV+ASV/EG2WOOL7N8lc=
|
github.com/distatus/battery v0.11.0 h1:KJk89gz90Iq/wJtbjjM9yUzBXV+ASV/EG2WOOL7N8lc=
|
||||||
github.com/distatus/battery v0.11.0/go.mod h1:KmVkE8A8hpIX4T78QRdMktYpEp35QfOL8A8dwZBxq2k=
|
github.com/distatus/battery v0.11.0/go.mod h1:KmVkE8A8hpIX4T78QRdMktYpEp35QfOL8A8dwZBxq2k=
|
||||||
github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
|
|
||||||
github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
|
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8=
|
github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8=
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
|
github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
|
||||||
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
github.com/eclipse/paho.golang v0.23.0 h1:KHgl2wz6EJo7cMBmkuhpt7C576vP+kpPv7jjvSyR6Mk=
|
||||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
github.com/eclipse/paho.golang v0.23.0/go.mod h1:nQRhTkoZv8EAiNs5UU0/WdQIx2NrnWUpL9nsGJTQN04=
|
||||||
|
github.com/fatih/color v1.19.0 h1:Zp3PiM21/9Ld6FzSKyL5c/BULoe/ONr9KlbYVOfG8+w=
|
||||||
|
github.com/fatih/color v1.19.0/go.mod h1:zNk67I0ZUT1bEGsSGyCZYZNrHuTkJJB+r6Q9VuMi0LE=
|
||||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||||
@@ -58,10 +58,12 @@ github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArs
|
|||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc=
|
github.com/google/pprof v0.0.0-20260302011040-a15ffb7f9dcc h1:VBbFa1lDYWEeV5FZKUiYKYT0VxCp9twUmmaq9eb8sXw=
|
||||||
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
|
github.com/google/pprof v0.0.0-20260302011040-a15ffb7f9dcc/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||||
|
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
@@ -69,24 +71,24 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf
|
|||||||
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
|
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
|
||||||
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
|
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
|
||||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||||
github.com/klauspost/compress v1.18.4 h1:RPhnKRAQ4Fh8zU2FY/6ZFDwTVTxgJ/EMydqSTzE9a2c=
|
github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=
|
||||||
github.com/klauspost/compress v1.18.4/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/lufia/plan9stats v0.0.0-20260216142805-b3301c5f2a88 h1:PTw+yKnXcOFCR6+8hHTyWBeQ/P4Nb7dd4/0ohEcWQuM=
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749 h1:Qj3hTcdWH8uMZDI41HNuTuJN525C7NBrbtH5kSO6fPk=
|
||||||
github.com/lufia/plan9stats v0.0.0-20260216142805-b3301c5f2a88/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||||
github.com/lxzan/gws v1.8.9 h1:VU3SGUeWlQrEwfUSfokcZep8mdg/BrUF+y73YYshdBM=
|
github.com/lxzan/gws v1.9.1 h1:4lbIp4cW0hOLP3ejFHR/uWRy741AURx7oKkNNi2OT9o=
|
||||||
github.com/lxzan/gws v1.8.9/go.mod h1:d9yHaR1eDTBHagQC6KY7ycUOaz5KWeqQtP3xu7aMK8Y=
|
github.com/lxzan/gws v1.9.1/go.mod h1:gXHSCPmTGryWJ4icuqy8Yho32E4YIMHH0fkDRYJRbdc=
|
||||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.13.2 h1:hfsYBIqSFYGg92pZP5CXk/g7/OJIkLYmiUnRl+AD1IA=
|
github.com/nicholas-fedor/shoutrrr v0.14.1 h1:6sx4cJNfNuUtD6ygGlB0dqcCQ+abfsUh+b+6jgujf6A=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.13.2/go.mod h1:ZqzV3gY/Wj6AvWs1etlO7+yKbh4iptSbeL8avBpMQbA=
|
github.com/nicholas-fedor/shoutrrr v0.14.1/go.mod h1:U7IywBkLpBV7rgn8iLbQ9/LklJG1gm24bFv5cXXsDKs=
|
||||||
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
|
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
|
||||||
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
|
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
|
||||||
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
|
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
|
||||||
@@ -96,8 +98,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
|||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pocketbase/dbx v1.12.0 h1:/oLErM+A0b4xI0PWTGPqSDVjzix48PqI/bng2l0PzoA=
|
github.com/pocketbase/dbx v1.12.0 h1:/oLErM+A0b4xI0PWTGPqSDVjzix48PqI/bng2l0PzoA=
|
||||||
github.com/pocketbase/dbx v1.12.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
github.com/pocketbase/dbx v1.12.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
||||||
github.com/pocketbase/pocketbase v0.36.4 h1:zTjRZbp2WfTOJJfb+pFRWa200UaQwxZYt8RzkFMlAZ4=
|
github.com/pocketbase/pocketbase v0.36.7 h1:MrViB7BptPYrf2Nt25pJEYBqUdFjuhRKu1p5GTrkvPA=
|
||||||
github.com/pocketbase/pocketbase v0.36.4/go.mod h1:9CiezhRudd9FZGa5xZa53QZBTNxc5vvw/FGG+diAECI=
|
github.com/pocketbase/pocketbase v0.36.7/go.mod h1:qX4HuVjoKXtEg41fSJVM0JLfGWXbBmHxVv/FaE446r4=
|
||||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||||
@@ -105,8 +107,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
|
|||||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/shirou/gopsutil/v4 v4.26.1 h1:TOkEyriIXk2HX9d4isZJtbjXbEjf5qyKPAzbzY0JWSo=
|
github.com/shirou/gopsutil/v4 v4.26.2 h1:X8i6sicvUFih4BmYIGT1m2wwgw2VG9YgrDTi7cIRGUI=
|
||||||
github.com/shirou/gopsutil/v4 v4.26.1/go.mod h1:medLI9/UNAb0dOI9Q3/7yWSqKkj00u+1tgY8nvv41pc=
|
github.com/shirou/gopsutil/v4 v4.26.2/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||||
@@ -115,6 +117,8 @@ github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
|||||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4=
|
||||||
|
github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
@@ -126,44 +130,44 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
|||||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||||
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
|
||||||
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
|
||||||
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa h1:Zt3DZoOFFYkKhDT3v7Lm9FDMEV06GpzjG2jrqW+QTE0=
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA=
|
||||||
golang.org/x/exp v0.0.0-20260218203240-3dfff04db8fa/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA=
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ=
|
||||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/image v0.36.0 h1:Iknbfm1afbgtwPTmHnS2gTM/6PPZfH+z2EFuOkSbqwc=
|
golang.org/x/image v0.38.0 h1:5l+q+Y9JDC7mBOMjo4/aPhMDcxEptsX+Tt3GgRQRPuE=
|
||||||
golang.org/x/image v0.36.0/go.mod h1:YsWD2TyyGKiIX1kZlu9QfKIsQ4nAAK9bdgdrIsE7xy4=
|
golang.org/x/image v0.38.0/go.mod h1:/3f6vaXC+6CEanU4KJxbcUZyEePbyKbaLoDOe4ehFYY=
|
||||||
golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8=
|
golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI=
|
||||||
golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w=
|
golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY=
|
||||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60=
|
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
|
||||||
golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM=
|
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
|
||||||
golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ=
|
golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs=
|
||||||
golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q=
|
||||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
|
||||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
|
||||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||||
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
|
golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU=
|
||||||
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
|
||||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
|
golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s=
|
||||||
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0=
|
||||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
|
||||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
@@ -175,18 +179,18 @@ howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM=
|
|||||||
howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||||
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||||
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
modernc.org/ccgo/v4 v4.32.0 h1:hjG66bI/kqIPX1b2yT6fr/jt+QedtP2fqojG2VrFuVw=
|
||||||
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
modernc.org/ccgo/v4 v4.32.0/go.mod h1:6F08EBCx5uQc38kMGl+0Nm0oWczoo1c7cgpzEry7Uc0=
|
||||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
modernc.org/fileutil v1.4.0 h1:j6ZzNTftVS054gi281TyLjHPp6CPHr2KCxEXjEbD6SM=
|
||||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
modernc.org/fileutil v1.4.0/go.mod h1:EqdKFDxiByqxLk8ozOxObDSfcVOv/54xDs/DUHdvCUU=
|
||||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||||
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
modernc.org/gc/v3 v3.1.2 h1:ZtDCnhonXSZexk/AYsegNRV1lJGgaNZJuKjJSWKyEqo=
|
||||||
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
modernc.org/gc/v3 v3.1.2/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||||
modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI=
|
modernc.org/libc v1.70.0 h1:U58NawXqXbgpZ/dcdS9kMshu08aiA6b7gusEusqzNkw=
|
||||||
modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE=
|
modernc.org/libc v1.70.0/go.mod h1:OVmxFGP1CI/Z4L3E0Q3Mf1PDE0BucwMkcXjjLntvHJo=
|
||||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||||
@@ -195,8 +199,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
|||||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||||
modernc.org/sqlite v1.45.0 h1:r51cSGzKpbptxnby+EIIz5fop4VuE4qFoVEjNvWoObs=
|
modernc.org/sqlite v1.46.2 h1:gkXQ6R0+AjxFC/fTDaeIVLbNLNrRoOK7YYVz5BKhTcE=
|
||||||
modernc.org/sqlite v1.45.0/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA=
|
modernc.org/sqlite v1.46.2/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
|
||||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||||
|
|||||||
@@ -21,9 +21,9 @@ type hubLike interface {
|
|||||||
|
|
||||||
type AlertManager struct {
|
type AlertManager struct {
|
||||||
hub hubLike
|
hub hubLike
|
||||||
alertQueue chan alertTask
|
stopOnce sync.Once
|
||||||
stopChan chan struct{}
|
|
||||||
pendingAlerts sync.Map
|
pendingAlerts sync.Map
|
||||||
|
alertsCache *AlertsCache
|
||||||
}
|
}
|
||||||
|
|
||||||
type AlertMessageData struct {
|
type AlertMessageData struct {
|
||||||
@@ -45,17 +45,17 @@ type SystemAlertFsStats struct {
|
|||||||
DiskUsed float64 `json:"du"`
|
DiskUsed float64 `json:"du"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Values pulled from system_stats.stats that are relevant to alerts.
|
||||||
type SystemAlertStats struct {
|
type SystemAlertStats struct {
|
||||||
Cpu float64 `json:"cpu"`
|
Cpu float64 `json:"cpu"`
|
||||||
Mem float64 `json:"mp"`
|
Mem float64 `json:"mp"`
|
||||||
Disk float64 `json:"dp"`
|
Disk float64 `json:"dp"`
|
||||||
NetSent float64 `json:"ns"`
|
Bandwidth [2]uint64 `json:"b"`
|
||||||
NetRecv float64 `json:"nr"`
|
GPU map[string]SystemAlertGPUData `json:"g"`
|
||||||
GPU map[string]SystemAlertGPUData `json:"g"`
|
Temperatures map[string]float32 `json:"t"`
|
||||||
Temperatures map[string]float32 `json:"t"`
|
LoadAvg [3]float64 `json:"la"`
|
||||||
LoadAvg [3]float64 `json:"la"`
|
Battery [2]uint8 `json:"bat"`
|
||||||
Battery [2]uint8 `json:"bat"`
|
ExtraFs map[string]SystemAlertFsStats `json:"efs"`
|
||||||
ExtraFs map[string]SystemAlertFsStats `json:"efs"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type SystemAlertGPUData struct {
|
type SystemAlertGPUData struct {
|
||||||
@@ -64,7 +64,7 @@ type SystemAlertGPUData struct {
|
|||||||
|
|
||||||
type SystemAlertData struct {
|
type SystemAlertData struct {
|
||||||
systemRecord *core.Record
|
systemRecord *core.Record
|
||||||
alertRecord *core.Record
|
alertData CachedAlertData
|
||||||
name string
|
name string
|
||||||
unit string
|
unit string
|
||||||
val float64
|
val float64
|
||||||
@@ -98,12 +98,10 @@ var supportsTitle = map[string]struct{}{
|
|||||||
// NewAlertManager creates a new AlertManager instance.
|
// NewAlertManager creates a new AlertManager instance.
|
||||||
func NewAlertManager(app hubLike) *AlertManager {
|
func NewAlertManager(app hubLike) *AlertManager {
|
||||||
am := &AlertManager{
|
am := &AlertManager{
|
||||||
hub: app,
|
hub: app,
|
||||||
alertQueue: make(chan alertTask, 5),
|
alertsCache: NewAlertsCache(app),
|
||||||
stopChan: make(chan struct{}),
|
|
||||||
}
|
}
|
||||||
am.bindEvents()
|
am.bindEvents()
|
||||||
go am.startWorker()
|
|
||||||
return am
|
return am
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -112,6 +110,19 @@ func (am *AlertManager) bindEvents() {
|
|||||||
am.hub.OnRecordAfterUpdateSuccess("alerts").BindFunc(updateHistoryOnAlertUpdate)
|
am.hub.OnRecordAfterUpdateSuccess("alerts").BindFunc(updateHistoryOnAlertUpdate)
|
||||||
am.hub.OnRecordAfterDeleteSuccess("alerts").BindFunc(resolveHistoryOnAlertDelete)
|
am.hub.OnRecordAfterDeleteSuccess("alerts").BindFunc(resolveHistoryOnAlertDelete)
|
||||||
am.hub.OnRecordAfterUpdateSuccess("smart_devices").BindFunc(am.handleSmartDeviceAlert)
|
am.hub.OnRecordAfterUpdateSuccess("smart_devices").BindFunc(am.handleSmartDeviceAlert)
|
||||||
|
|
||||||
|
am.hub.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
||||||
|
// Populate all alerts into cache on startup
|
||||||
|
_ = am.alertsCache.PopulateFromDB(true)
|
||||||
|
|
||||||
|
if err := resolveStatusAlerts(e.App); err != nil {
|
||||||
|
e.App.Logger().Error("Failed to resolve stale status alerts", "err", err)
|
||||||
|
}
|
||||||
|
if err := am.restorePendingStatusAlerts(); err != nil {
|
||||||
|
e.App.Logger().Error("Failed to restore pending status alerts", "err", err)
|
||||||
|
}
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsNotificationSilenced checks if a notification should be silenced based on configured quiet hours
|
// IsNotificationSilenced checks if a notification should be silenced based on configured quiet hours
|
||||||
@@ -265,13 +276,14 @@ func (am *AlertManager) SendShoutrrrAlert(notificationUrl, title, message, link,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add link
|
// Add link
|
||||||
if scheme == "ntfy" {
|
switch scheme {
|
||||||
|
case "ntfy":
|
||||||
queryParams.Add("Actions", fmt.Sprintf("view, %s, %s", linkText, link))
|
queryParams.Add("Actions", fmt.Sprintf("view, %s, %s", linkText, link))
|
||||||
} else if scheme == "lark" {
|
case "lark":
|
||||||
queryParams.Add("link", link)
|
queryParams.Add("link", link)
|
||||||
} else if scheme == "bark" {
|
case "bark":
|
||||||
queryParams.Add("url", link)
|
queryParams.Add("url", link)
|
||||||
} else {
|
default:
|
||||||
message += "\n\n" + link
|
message += "\n\n" + link
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -304,3 +316,13 @@ func (am *AlertManager) SendTestNotification(e *core.RequestEvent) error {
|
|||||||
}
|
}
|
||||||
return e.JSON(200, map[string]bool{"err": false})
|
return e.JSON(200, map[string]bool{"err": false})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// setAlertTriggered updates the "triggered" status of an alert record in the database
|
||||||
|
func (am *AlertManager) setAlertTriggered(alert CachedAlertData, triggered bool) error {
|
||||||
|
alertRecord, err := am.hub.FindRecordById("alerts", alert.Id)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
alertRecord.Set("triggered", triggered)
|
||||||
|
return am.hub.Save(alertRecord)
|
||||||
|
}
|
||||||
|
|||||||
177
internal/alerts/alerts_cache.go
Normal file
177
internal/alerts/alerts_cache.go
Normal file
@@ -0,0 +1,177 @@
|
|||||||
|
package alerts
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
"github.com/pocketbase/pocketbase/tools/store"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CachedAlertData represents the relevant fields of an alert record for status checking and updates.
|
||||||
|
type CachedAlertData struct {
|
||||||
|
Id string
|
||||||
|
SystemID string
|
||||||
|
UserID string
|
||||||
|
Name string
|
||||||
|
Value float64
|
||||||
|
Triggered bool
|
||||||
|
Min uint8
|
||||||
|
// Created types.DateTime
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *CachedAlertData) PopulateFromRecord(record *core.Record) {
|
||||||
|
a.Id = record.Id
|
||||||
|
a.SystemID = record.GetString("system")
|
||||||
|
a.UserID = record.GetString("user")
|
||||||
|
a.Name = record.GetString("name")
|
||||||
|
a.Value = record.GetFloat("value")
|
||||||
|
a.Triggered = record.GetBool("triggered")
|
||||||
|
a.Min = uint8(record.GetInt("min"))
|
||||||
|
// a.Created = record.GetDateTime("created")
|
||||||
|
}
|
||||||
|
|
||||||
|
// AlertsCache provides an in-memory cache for system alerts.
|
||||||
|
type AlertsCache struct {
|
||||||
|
app core.App
|
||||||
|
store *store.Store[string, *store.Store[string, CachedAlertData]]
|
||||||
|
populated bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAlertsCache creates a new instance of SystemAlertsCache.
|
||||||
|
func NewAlertsCache(app core.App) *AlertsCache {
|
||||||
|
c := AlertsCache{
|
||||||
|
app: app,
|
||||||
|
store: store.New(map[string]*store.Store[string, CachedAlertData]{}),
|
||||||
|
}
|
||||||
|
return c.bindEvents()
|
||||||
|
}
|
||||||
|
|
||||||
|
// bindEvents sets up event listeners to keep the cache in sync with database changes.
|
||||||
|
func (c *AlertsCache) bindEvents() *AlertsCache {
|
||||||
|
c.app.OnRecordAfterUpdateSuccess("alerts").BindFunc(func(e *core.RecordEvent) error {
|
||||||
|
// c.Delete(e.Record.Original()) // this would be needed if the system field on an existing alert was changed, however we don't currently allow that in the UI so we'll leave it commented out
|
||||||
|
c.Update(e.Record)
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
c.app.OnRecordAfterDeleteSuccess("alerts").BindFunc(func(e *core.RecordEvent) error {
|
||||||
|
c.Delete(e.Record)
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
c.app.OnRecordAfterCreateSuccess("alerts").BindFunc(func(e *core.RecordEvent) error {
|
||||||
|
c.Update(e.Record)
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// PopulateFromDB clears current entries and loads all alerts from the database into the cache.
|
||||||
|
func (c *AlertsCache) PopulateFromDB(force bool) error {
|
||||||
|
if !force && c.populated {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
records, err := c.app.FindAllRecords("alerts")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.store.RemoveAll()
|
||||||
|
for _, record := range records {
|
||||||
|
c.Update(record)
|
||||||
|
}
|
||||||
|
c.populated = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update adds or updates an alert record in the cache.
|
||||||
|
func (c *AlertsCache) Update(record *core.Record) {
|
||||||
|
systemID := record.GetString("system")
|
||||||
|
if systemID == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
systemStore, ok := c.store.GetOk(systemID)
|
||||||
|
if !ok {
|
||||||
|
systemStore = store.New(map[string]CachedAlertData{})
|
||||||
|
c.store.Set(systemID, systemStore)
|
||||||
|
}
|
||||||
|
var ca CachedAlertData
|
||||||
|
ca.PopulateFromRecord(record)
|
||||||
|
systemStore.Set(record.Id, ca)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes an alert record from the cache.
|
||||||
|
func (c *AlertsCache) Delete(record *core.Record) {
|
||||||
|
systemID := record.GetString("system")
|
||||||
|
if systemID == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if systemStore, ok := c.store.GetOk(systemID); ok {
|
||||||
|
systemStore.Remove(record.Id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSystemAlerts returns all alerts for the specified system, lazy-loading if necessary.
|
||||||
|
func (c *AlertsCache) GetSystemAlerts(systemID string) []CachedAlertData {
|
||||||
|
systemStore, ok := c.store.GetOk(systemID)
|
||||||
|
if !ok {
|
||||||
|
// Populate cache for this system
|
||||||
|
records, err := c.app.FindAllRecords("alerts", dbx.NewExp("system={:system}", dbx.Params{"system": systemID}))
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
systemStore = store.New(map[string]CachedAlertData{})
|
||||||
|
for _, record := range records {
|
||||||
|
var ca CachedAlertData
|
||||||
|
ca.PopulateFromRecord(record)
|
||||||
|
systemStore.Set(record.Id, ca)
|
||||||
|
}
|
||||||
|
c.store.Set(systemID, systemStore)
|
||||||
|
}
|
||||||
|
all := systemStore.GetAll()
|
||||||
|
alerts := make([]CachedAlertData, 0, len(all))
|
||||||
|
for _, alert := range all {
|
||||||
|
alerts = append(alerts, alert)
|
||||||
|
}
|
||||||
|
return alerts
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAlert returns a specific alert by its ID from the cache.
|
||||||
|
func (c *AlertsCache) GetAlert(systemID, alertID string) (CachedAlertData, bool) {
|
||||||
|
if systemStore, ok := c.store.GetOk(systemID); ok {
|
||||||
|
return systemStore.GetOk(alertID)
|
||||||
|
}
|
||||||
|
return CachedAlertData{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAlertsByName returns all alerts of a specific type for the specified system.
|
||||||
|
func (c *AlertsCache) GetAlertsByName(systemID, alertName string) []CachedAlertData {
|
||||||
|
allAlerts := c.GetSystemAlerts(systemID)
|
||||||
|
var alerts []CachedAlertData
|
||||||
|
for _, record := range allAlerts {
|
||||||
|
if record.Name == alertName {
|
||||||
|
alerts = append(alerts, record)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return alerts
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAlertsExcludingNames returns all alerts for the specified system excluding the given types.
|
||||||
|
func (c *AlertsCache) GetAlertsExcludingNames(systemID string, excludedNames ...string) []CachedAlertData {
|
||||||
|
excludeMap := make(map[string]struct{})
|
||||||
|
for _, name := range excludedNames {
|
||||||
|
excludeMap[name] = struct{}{}
|
||||||
|
}
|
||||||
|
allAlerts := c.GetSystemAlerts(systemID)
|
||||||
|
var alerts []CachedAlertData
|
||||||
|
for _, record := range allAlerts {
|
||||||
|
if _, excluded := excludeMap[record.Name]; !excluded {
|
||||||
|
alerts = append(alerts, record)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return alerts
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refresh returns the latest cached copy for an alert snapshot if it still exists.
|
||||||
|
func (c *AlertsCache) Refresh(alert CachedAlertData) (CachedAlertData, bool) {
|
||||||
|
if alert.Id == "" {
|
||||||
|
return CachedAlertData{}, false
|
||||||
|
}
|
||||||
|
return c.GetAlert(alert.SystemID, alert.Id)
|
||||||
|
}
|
||||||
215
internal/alerts/alerts_cache_test.go
Normal file
215
internal/alerts/alerts_cache_test.go
Normal file
@@ -0,0 +1,215 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSystemAlertsCachePopulateAndFilter(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 2, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system1 := systems[0]
|
||||||
|
system2 := systems[1]
|
||||||
|
|
||||||
|
statusAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system1.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cpuAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system1.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
memoryAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Memory",
|
||||||
|
"system": system2.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 90,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cache := alerts.NewAlertsCache(hub)
|
||||||
|
cache.PopulateFromDB(false)
|
||||||
|
|
||||||
|
statusAlerts := cache.GetAlertsByName(system1.Id, "Status")
|
||||||
|
require.Len(t, statusAlerts, 1)
|
||||||
|
assert.Equal(t, statusAlert.Id, statusAlerts[0].Id)
|
||||||
|
|
||||||
|
nonStatusAlerts := cache.GetAlertsExcludingNames(system1.Id, "Status")
|
||||||
|
require.Len(t, nonStatusAlerts, 1)
|
||||||
|
assert.Equal(t, cpuAlert.Id, nonStatusAlerts[0].Id)
|
||||||
|
|
||||||
|
system2Alerts := cache.GetSystemAlerts(system2.Id)
|
||||||
|
require.Len(t, system2Alerts, 1)
|
||||||
|
assert.Equal(t, memoryAlert.Id, system2Alerts[0].Id)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemAlertsCacheLazyLoadUpdateAndDelete(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
systemRecord := systems[0]
|
||||||
|
|
||||||
|
statusAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cache := alerts.NewAlertsCache(hub)
|
||||||
|
require.Len(t, cache.GetSystemAlerts(systemRecord.Id), 1, "first lookup should lazy-load alerts for the system")
|
||||||
|
|
||||||
|
cpuAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cache.Update(cpuAlert)
|
||||||
|
|
||||||
|
nonStatusAlerts := cache.GetAlertsExcludingNames(systemRecord.Id, "Status")
|
||||||
|
require.Len(t, nonStatusAlerts, 1)
|
||||||
|
assert.Equal(t, cpuAlert.Id, nonStatusAlerts[0].Id)
|
||||||
|
|
||||||
|
cache.Delete(statusAlert)
|
||||||
|
assert.Empty(t, cache.GetAlertsByName(systemRecord.Id, "Status"), "deleted alerts should be removed from the in-memory cache")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemAlertsCacheRefreshReturnsLatestCopy(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := systems[0]
|
||||||
|
|
||||||
|
alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
"triggered": false,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
cache := alerts.NewAlertsCache(hub)
|
||||||
|
snapshot := cache.GetSystemAlerts(system.Id)[0]
|
||||||
|
assert.False(t, snapshot.Triggered)
|
||||||
|
|
||||||
|
alert.Set("triggered", true)
|
||||||
|
require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
refreshed, ok := cache.Refresh(snapshot)
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, snapshot.Id, refreshed.Id)
|
||||||
|
assert.True(t, refreshed.Triggered, "refresh should return the updated cached value rather than the stale snapshot")
|
||||||
|
|
||||||
|
require.NoError(t, hub.Delete(alert))
|
||||||
|
_, ok = cache.Refresh(snapshot)
|
||||||
|
assert.False(t, ok, "refresh should report false when the cached alert no longer exists")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAlertManagerCacheLifecycle(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := systems[0]
|
||||||
|
|
||||||
|
// Create an alert
|
||||||
|
alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
am := hub.AlertManager
|
||||||
|
cache := am.GetSystemAlertsCache()
|
||||||
|
|
||||||
|
// Verify it's in cache (it should be since CreateRecord triggers the event)
|
||||||
|
assert.Len(t, cache.GetSystemAlerts(system.Id), 1)
|
||||||
|
assert.Equal(t, alert.Id, cache.GetSystemAlerts(system.Id)[0].Id)
|
||||||
|
assert.EqualValues(t, 80, cache.GetSystemAlerts(system.Id)[0].Value)
|
||||||
|
|
||||||
|
// Update the alert through PocketBase to trigger events
|
||||||
|
alert.Set("value", 85)
|
||||||
|
require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
// Check if updated value is reflected (or at least that it's still there)
|
||||||
|
cachedAlerts := cache.GetSystemAlerts(system.Id)
|
||||||
|
assert.Len(t, cachedAlerts, 1)
|
||||||
|
assert.EqualValues(t, 85, cachedAlerts[0].Value)
|
||||||
|
|
||||||
|
// Delete the alert through PocketBase to trigger events
|
||||||
|
require.NoError(t, hub.Delete(alert))
|
||||||
|
|
||||||
|
// Verify it's removed from cache
|
||||||
|
assert.Empty(t, cache.GetSystemAlerts(system.Id), "alert should be removed from cache after PocketBase delete")
|
||||||
|
}
|
||||||
|
|
||||||
|
// func TestAlertManagerCacheMovesAlertToNewSystemOnUpdate(t *testing.T) {
|
||||||
|
// hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
// defer hub.Cleanup()
|
||||||
|
|
||||||
|
// systems, err := beszelTests.CreateSystems(hub, 2, user.Id, "up")
|
||||||
|
// require.NoError(t, err)
|
||||||
|
// system1 := systems[0]
|
||||||
|
// system2 := systems[1]
|
||||||
|
|
||||||
|
// alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
// "name": "CPU",
|
||||||
|
// "system": system1.Id,
|
||||||
|
// "user": user.Id,
|
||||||
|
// "value": 80,
|
||||||
|
// "min": 1,
|
||||||
|
// })
|
||||||
|
// require.NoError(t, err)
|
||||||
|
|
||||||
|
// am := hub.AlertManager
|
||||||
|
// cache := am.GetSystemAlertsCache()
|
||||||
|
|
||||||
|
// // Initially in system1 cache
|
||||||
|
// assert.Len(t, cache.Get(system1.Id), 1)
|
||||||
|
// assert.Empty(t, cache.Get(system2.Id))
|
||||||
|
|
||||||
|
// // Move alert to system2
|
||||||
|
// alert.Set("system", system2.Id)
|
||||||
|
// require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
// // DEBUG: print if it is found
|
||||||
|
// // fmt.Printf("system1 alerts after update: %v\n", cache.Get(system1.Id))
|
||||||
|
|
||||||
|
// // Should be removed from system1 and present in system2
|
||||||
|
// assert.Empty(t, cache.GetType(system1.Id, "CPU"), "updated alerts should be evicted from the previous system cache")
|
||||||
|
// require.Len(t, cache.Get(system2.Id), 1)
|
||||||
|
// assert.Equal(t, alert.Id, cache.Get(system2.Id)[0].Id)
|
||||||
|
// }
|
||||||
@@ -49,7 +49,7 @@ func TestAlertSilencedOneTime(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Test that alert is silenced
|
// Test that alert is silenced
|
||||||
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
||||||
@@ -106,7 +106,7 @@ func TestAlertSilencedDaily(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Get current hour and create a window that includes current time
|
// Get current hour and create a window that includes current time
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -170,7 +170,7 @@ func TestAlertSilencedDailyMidnightCrossing(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a window that crosses midnight: 22:00 - 02:00
|
// Create a window that crosses midnight: 22:00 - 02:00
|
||||||
startTime := time.Date(2000, 1, 1, 22, 0, 0, 0, time.UTC)
|
startTime := time.Date(2000, 1, 1, 22, 0, 0, 0, time.UTC)
|
||||||
@@ -211,7 +211,7 @@ func TestAlertSilencedGlobal(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a global quiet hours window (no system specified)
|
// Create a global quiet hours window (no system specified)
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -250,7 +250,7 @@ func TestAlertSilencedSystemSpecific(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a system-specific quiet hours window for system1 only
|
// Create a system-specific quiet hours window for system1 only
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -296,7 +296,7 @@ func TestAlertSilencedMultiUser(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a quiet hours window for user1 only
|
// Create a quiet hours window for user1 only
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -417,7 +417,7 @@ func TestAlertSilencedNoWindows(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Without any quiet hours windows, alert should NOT be silenced
|
// Without any quiet hours windows, alert should NOT be silenced
|
||||||
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
||||||
|
|||||||
@@ -5,67 +5,28 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pocketbase/dbx"
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
type alertTask struct {
|
|
||||||
action string // "schedule" or "cancel"
|
|
||||||
systemName string
|
|
||||||
alertRecord *core.Record
|
|
||||||
delay time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
type alertInfo struct {
|
type alertInfo struct {
|
||||||
systemName string
|
systemName string
|
||||||
alertRecord *core.Record
|
alertData CachedAlertData
|
||||||
expireTime time.Time
|
expireTime time.Time
|
||||||
|
timer *time.Timer
|
||||||
}
|
}
|
||||||
|
|
||||||
// startWorker is a long-running goroutine that processes alert tasks
|
// Stop cancels all pending status alert timers.
|
||||||
// every x seconds. It must be running to process status alerts.
|
func (am *AlertManager) Stop() {
|
||||||
func (am *AlertManager) startWorker() {
|
am.stopOnce.Do(func() {
|
||||||
processPendingAlerts := time.Tick(15 * time.Second)
|
am.pendingAlerts.Range(func(key, value any) bool {
|
||||||
|
info := value.(*alertInfo)
|
||||||
// check for status alerts that are not resolved when system comes up
|
if info.timer != nil {
|
||||||
// (can be removed if we figure out core bug in #1052)
|
info.timer.Stop()
|
||||||
checkStatusAlerts := time.Tick(561 * time.Second)
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-am.stopChan:
|
|
||||||
return
|
|
||||||
case task := <-am.alertQueue:
|
|
||||||
switch task.action {
|
|
||||||
case "schedule":
|
|
||||||
am.pendingAlerts.Store(task.alertRecord.Id, &alertInfo{
|
|
||||||
systemName: task.systemName,
|
|
||||||
alertRecord: task.alertRecord,
|
|
||||||
expireTime: time.Now().Add(task.delay),
|
|
||||||
})
|
|
||||||
case "cancel":
|
|
||||||
am.pendingAlerts.Delete(task.alertRecord.Id)
|
|
||||||
}
|
}
|
||||||
case <-checkStatusAlerts:
|
am.pendingAlerts.Delete(key)
|
||||||
resolveStatusAlerts(am.hub)
|
return true
|
||||||
case <-processPendingAlerts:
|
})
|
||||||
// Check for expired alerts every tick
|
})
|
||||||
now := time.Now()
|
|
||||||
for key, value := range am.pendingAlerts.Range {
|
|
||||||
info := value.(*alertInfo)
|
|
||||||
if now.After(info.expireTime) {
|
|
||||||
// Downtime delay has passed, process alert
|
|
||||||
am.sendStatusAlert("down", info.systemName, info.alertRecord)
|
|
||||||
am.pendingAlerts.Delete(key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// StopWorker shuts down the AlertManager.worker goroutine
|
|
||||||
func (am *AlertManager) StopWorker() {
|
|
||||||
close(am.stopChan)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HandleStatusAlerts manages the logic when system status changes.
|
// HandleStatusAlerts manages the logic when system status changes.
|
||||||
@@ -74,82 +35,104 @@ func (am *AlertManager) HandleStatusAlerts(newStatus string, systemRecord *core.
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
alertRecords, err := am.getSystemStatusAlerts(systemRecord.Id)
|
alerts := am.alertsCache.GetAlertsByName(systemRecord.Id, "Status")
|
||||||
if err != nil {
|
if len(alerts) == 0 {
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(alertRecords) == 0 {
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
systemName := systemRecord.GetString("name")
|
systemName := systemRecord.GetString("name")
|
||||||
if newStatus == "down" {
|
if newStatus == "down" {
|
||||||
am.handleSystemDown(systemName, alertRecords)
|
am.handleSystemDown(systemName, alerts)
|
||||||
} else {
|
} else {
|
||||||
am.handleSystemUp(systemName, alertRecords)
|
am.handleSystemUp(systemName, alerts)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getSystemStatusAlerts retrieves all "Status" alert records for a given system ID.
|
// handleSystemDown manages the logic when a system status changes to "down". It schedules pending alerts for each alert record.
|
||||||
func (am *AlertManager) getSystemStatusAlerts(systemID string) ([]*core.Record, error) {
|
func (am *AlertManager) handleSystemDown(systemName string, alerts []CachedAlertData) {
|
||||||
alertRecords, err := am.hub.FindAllRecords("alerts", dbx.HashExp{
|
for _, alertData := range alerts {
|
||||||
"system": systemID,
|
min := max(1, int(alertData.Min))
|
||||||
"name": "Status",
|
am.schedulePendingStatusAlert(systemName, alertData, time.Duration(min)*time.Minute)
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
return alertRecords, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Schedules delayed "down" alerts for each alert record.
|
// schedulePendingStatusAlert sets up a timer to send a "down" alert after the specified delay if the system is still down.
|
||||||
func (am *AlertManager) handleSystemDown(systemName string, alertRecords []*core.Record) {
|
// It returns true if the alert was scheduled, or false if an alert was already pending for the given alert record.
|
||||||
for _, alertRecord := range alertRecords {
|
func (am *AlertManager) schedulePendingStatusAlert(systemName string, alertData CachedAlertData, delay time.Duration) bool {
|
||||||
// Continue if alert is already scheduled
|
alert := &alertInfo{
|
||||||
if _, exists := am.pendingAlerts.Load(alertRecord.Id); exists {
|
systemName: systemName,
|
||||||
continue
|
alertData: alertData,
|
||||||
}
|
expireTime: time.Now().Add(delay),
|
||||||
// Schedule by adding to queue
|
|
||||||
min := max(1, alertRecord.GetInt("min"))
|
|
||||||
am.alertQueue <- alertTask{
|
|
||||||
action: "schedule",
|
|
||||||
systemName: systemName,
|
|
||||||
alertRecord: alertRecord,
|
|
||||||
delay: time.Duration(min) * time.Minute,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
storedAlert, loaded := am.pendingAlerts.LoadOrStore(alertData.Id, alert)
|
||||||
|
if loaded {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
stored := storedAlert.(*alertInfo)
|
||||||
|
stored.timer = time.AfterFunc(time.Until(stored.expireTime), func() {
|
||||||
|
am.processPendingAlert(alertData.Id)
|
||||||
|
})
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleSystemUp manages the logic when a system status changes to "up".
|
// handleSystemUp manages the logic when a system status changes to "up".
|
||||||
// It cancels any pending alerts and sends "up" alerts.
|
// It cancels any pending alerts and sends "up" alerts.
|
||||||
func (am *AlertManager) handleSystemUp(systemName string, alertRecords []*core.Record) {
|
func (am *AlertManager) handleSystemUp(systemName string, alerts []CachedAlertData) {
|
||||||
for _, alertRecord := range alertRecords {
|
for _, alertData := range alerts {
|
||||||
alertRecordID := alertRecord.Id
|
|
||||||
// If alert exists for record, delete and continue (down alert not sent)
|
// If alert exists for record, delete and continue (down alert not sent)
|
||||||
if _, exists := am.pendingAlerts.Load(alertRecordID); exists {
|
if am.cancelPendingAlert(alertData.Id) {
|
||||||
am.alertQueue <- alertTask{
|
|
||||||
action: "cancel",
|
|
||||||
alertRecord: alertRecord,
|
|
||||||
}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// No alert scheduled for this record, send "up" alert
|
if !alertData.Triggered {
|
||||||
if err := am.sendStatusAlert("up", systemName, alertRecord); err != nil {
|
continue
|
||||||
|
}
|
||||||
|
if err := am.sendStatusAlert("up", systemName, alertData); err != nil {
|
||||||
am.hub.Logger().Error("Failed to send alert", "err", err)
|
am.hub.Logger().Error("Failed to send alert", "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// sendStatusAlert sends a status alert ("up" or "down") to the users associated with the alert records.
|
// cancelPendingAlert stops the timer and removes the pending alert for the given alert ID. Returns true if a pending alert was found and cancelled.
|
||||||
func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, alertRecord *core.Record) error {
|
func (am *AlertManager) cancelPendingAlert(alertID string) bool {
|
||||||
switch alertStatus {
|
value, loaded := am.pendingAlerts.LoadAndDelete(alertID)
|
||||||
case "up":
|
if !loaded {
|
||||||
alertRecord.Set("triggered", false)
|
return false
|
||||||
case "down":
|
}
|
||||||
alertRecord.Set("triggered", true)
|
|
||||||
|
info := value.(*alertInfo)
|
||||||
|
if info.timer != nil {
|
||||||
|
info.timer.Stop()
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// processPendingAlert sends a "down" alert if the pending alert has expired and the system is still down.
|
||||||
|
func (am *AlertManager) processPendingAlert(alertID string) {
|
||||||
|
value, loaded := am.pendingAlerts.LoadAndDelete(alertID)
|
||||||
|
if !loaded {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
info := value.(*alertInfo)
|
||||||
|
refreshedAlertData, ok := am.alertsCache.Refresh(info.alertData)
|
||||||
|
if !ok || refreshedAlertData.Triggered {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := am.sendStatusAlert("down", info.systemName, refreshedAlertData); err != nil {
|
||||||
|
am.hub.Logger().Error("Failed to send alert", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendStatusAlert sends a status alert ("up" or "down") to the users associated with the alert records.
|
||||||
|
func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, alertData CachedAlertData) error {
|
||||||
|
// Update trigger state for alert record before sending alert
|
||||||
|
triggered := alertStatus == "down"
|
||||||
|
if err := am.setAlertTriggered(alertData, triggered); err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
am.hub.Save(alertRecord)
|
|
||||||
|
|
||||||
var emoji string
|
var emoji string
|
||||||
if alertStatus == "up" {
|
if alertStatus == "up" {
|
||||||
@@ -162,10 +145,10 @@ func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, a
|
|||||||
message := strings.TrimSuffix(title, emoji)
|
message := strings.TrimSuffix(title, emoji)
|
||||||
|
|
||||||
// Get system ID for the link
|
// Get system ID for the link
|
||||||
systemID := alertRecord.GetString("system")
|
systemID := alertData.SystemID
|
||||||
|
|
||||||
return am.SendAlert(AlertMessageData{
|
return am.SendAlert(AlertMessageData{
|
||||||
UserID: alertRecord.GetString("user"),
|
UserID: alertData.UserID,
|
||||||
SystemID: systemID,
|
SystemID: systemID,
|
||||||
Title: title,
|
Title: title,
|
||||||
Message: message,
|
Message: message,
|
||||||
@@ -174,8 +157,8 @@ func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, a
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// resolveStatusAlerts resolves any status alerts that weren't resolved
|
// resolveStatusAlerts resolves any triggered status alerts that weren't resolved
|
||||||
// when system came up (https://github.com/henrygd/beszel/issues/1052)
|
// when system came up (https://github.com/henrygd/beszel/issues/1052).
|
||||||
func resolveStatusAlerts(app core.App) error {
|
func resolveStatusAlerts(app core.App) error {
|
||||||
db := app.DB()
|
db := app.DB()
|
||||||
// Find all active status alerts where the system is actually up
|
// Find all active status alerts where the system is actually up
|
||||||
@@ -205,3 +188,40 @@ func resolveStatusAlerts(app core.App) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// restorePendingStatusAlerts re-queues untriggered status alerts for systems that
|
||||||
|
// are still down after a hub restart. This rebuilds the lost in-memory timer state.
|
||||||
|
func (am *AlertManager) restorePendingStatusAlerts() error {
|
||||||
|
type pendingStatusAlert struct {
|
||||||
|
AlertID string `db:"alert_id"`
|
||||||
|
SystemID string `db:"system_id"`
|
||||||
|
SystemName string `db:"system_name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var pending []pendingStatusAlert
|
||||||
|
err := am.hub.DB().NewQuery(`
|
||||||
|
SELECT a.id AS alert_id, a.system AS system_id, s.name AS system_name
|
||||||
|
FROM alerts a
|
||||||
|
JOIN systems s ON a.system = s.id
|
||||||
|
WHERE a.name = 'Status'
|
||||||
|
AND a.triggered = false
|
||||||
|
AND s.status = 'down'
|
||||||
|
`).All(&pending)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure cache is populated before trying to restore pending alerts
|
||||||
|
_ = am.alertsCache.PopulateFromDB(false)
|
||||||
|
|
||||||
|
for _, item := range pending {
|
||||||
|
alertData, ok := am.alertsCache.GetAlert(item.SystemID, item.AlertID)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
min := max(1, int(alertData.Min))
|
||||||
|
am.schedulePendingStatusAlert(item.SystemName, alertData, time.Duration(min)*time.Minute)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
943
internal/alerts/alerts_status_test.go
Normal file
943
internal/alerts/alerts_status_test.go
Normal file
@@ -0,0 +1,943 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"testing/synctest"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func setStatusAlertEmail(t *testing.T, hub core.App, userID, email string) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": userID})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
userSettings.Set("settings", map[string]any{
|
||||||
|
"emails": []string{email},
|
||||||
|
"webhooks": []string{},
|
||||||
|
})
|
||||||
|
require.NoError(t, hub.Save(userSettings))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatusAlerts(t *testing.T) {
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 4, user.Id, "paused")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
var alerts []*core.Record
|
||||||
|
for i, system := range systems {
|
||||||
|
alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": i + 1,
|
||||||
|
})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
alerts = append(alerts, alert)
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
|
||||||
|
for _, alert := range alerts {
|
||||||
|
assert.False(t, alert.GetBool("triggered"), "Alert should not be triggered immediately")
|
||||||
|
}
|
||||||
|
if hub.TestMailer.TotalSend() != 0 {
|
||||||
|
assert.Zero(t, hub.TestMailer.TotalSend(), "Expected 0 messages, got %d", hub.TestMailer.TotalSend())
|
||||||
|
}
|
||||||
|
for _, system := range systems {
|
||||||
|
assert.EqualValues(t, "paused", system.GetString("status"), "System should be paused")
|
||||||
|
}
|
||||||
|
for _, system := range systems {
|
||||||
|
system.Set("status", "up")
|
||||||
|
err = hub.SaveNoValidate(system)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
assert.EqualValues(t, 0, hub.GetPendingAlertsCount(), "should have 0 alerts in the pendingAlerts map")
|
||||||
|
for _, system := range systems {
|
||||||
|
system.Set("status", "down")
|
||||||
|
err = hub.SaveNoValidate(system)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
// after 30 seconds, should have 4 alerts in the pendingAlerts map, no triggered alerts
|
||||||
|
time.Sleep(time.Second * 30)
|
||||||
|
assert.EqualValues(t, 4, hub.GetPendingAlertsCount(), "should have 4 alerts in the pendingAlerts map")
|
||||||
|
triggeredCount, err := hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.EqualValues(t, 0, triggeredCount, "should have 0 alert triggered")
|
||||||
|
assert.EqualValues(t, 0, hub.TestMailer.TotalSend(), "should have 0 messages sent")
|
||||||
|
// after 1:30 seconds, should have 1 triggered alert and 3 pending alerts
|
||||||
|
time.Sleep(time.Second * 60)
|
||||||
|
assert.EqualValues(t, 3, hub.GetPendingAlertsCount(), "should have 3 alerts in the pendingAlerts map")
|
||||||
|
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.EqualValues(t, 1, triggeredCount, "should have 1 alert triggered")
|
||||||
|
assert.EqualValues(t, 1, hub.TestMailer.TotalSend(), "should have 1 messages sent")
|
||||||
|
// after 2:30 seconds, should have 2 triggered alerts and 2 pending alerts
|
||||||
|
time.Sleep(time.Second * 60)
|
||||||
|
assert.EqualValues(t, 2, hub.GetPendingAlertsCount(), "should have 2 alerts in the pendingAlerts map")
|
||||||
|
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.EqualValues(t, 2, triggeredCount, "should have 2 alert triggered")
|
||||||
|
assert.EqualValues(t, 2, hub.TestMailer.TotalSend(), "should have 2 messages sent")
|
||||||
|
// now we will bring the remaning systems back up
|
||||||
|
for _, system := range systems {
|
||||||
|
system.Set("status", "up")
|
||||||
|
err = hub.SaveNoValidate(system)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
// should have 0 alerts in the pendingAlerts map and 0 alerts triggered
|
||||||
|
assert.EqualValues(t, 0, hub.GetPendingAlertsCount(), "should have 0 alerts in the pendingAlerts map")
|
||||||
|
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Zero(t, triggeredCount, "should have 0 alert triggered")
|
||||||
|
// 4 messages sent, 2 down alerts and 2 up alerts for first 2 systems
|
||||||
|
assert.EqualValues(t, 4, hub.TestMailer.TotalSend(), "should have 4 messages sent")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
func TestStatusAlertRecoveryBeforeDeadline(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// Ensure user settings have an email
|
||||||
|
userSettings, _ := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||||
|
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||||
|
hub.Save(userSettings)
|
||||||
|
|
||||||
|
// Initial email count
|
||||||
|
initialEmailCount := hub.TestMailer.TotalSend()
|
||||||
|
|
||||||
|
systemCollection, _ := hub.FindCollectionByNameOrId("systems")
|
||||||
|
system := core.NewRecord(systemCollection)
|
||||||
|
system.Set("name", "test-system")
|
||||||
|
system.Set("status", "up")
|
||||||
|
system.Set("host", "127.0.0.1")
|
||||||
|
system.Set("users", []string{user.Id})
|
||||||
|
hub.Save(system)
|
||||||
|
|
||||||
|
alertCollection, _ := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
alert := core.NewRecord(alertCollection)
|
||||||
|
alert.Set("user", user.Id)
|
||||||
|
alert.Set("system", system.Id)
|
||||||
|
alert.Set("name", "Status")
|
||||||
|
alert.Set("triggered", false)
|
||||||
|
alert.Set("min", 1)
|
||||||
|
hub.Save(alert)
|
||||||
|
|
||||||
|
am := hub.AlertManager
|
||||||
|
|
||||||
|
// 1. System goes down
|
||||||
|
am.HandleStatusAlerts("down", system)
|
||||||
|
assert.Equal(t, 1, am.GetPendingAlertsCount(), "Alert should be scheduled")
|
||||||
|
|
||||||
|
// 2. System goes up BEFORE delay expires
|
||||||
|
// Triggering HandleStatusAlerts("up") SHOULD NOT send an alert.
|
||||||
|
am.HandleStatusAlerts("up", system)
|
||||||
|
|
||||||
|
assert.Equal(t, 0, am.GetPendingAlertsCount(), "Alert should be canceled if system recovers before delay expires")
|
||||||
|
|
||||||
|
// Verify that NO email was sent.
|
||||||
|
assert.Equal(t, initialEmailCount, hub.TestMailer.TotalSend(), "Recovery notification should not be sent if system never went down")
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatusAlertNormalRecovery(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// Ensure user settings have an email
|
||||||
|
userSettings, _ := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||||
|
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||||
|
hub.Save(userSettings)
|
||||||
|
|
||||||
|
systemCollection, _ := hub.FindCollectionByNameOrId("systems")
|
||||||
|
system := core.NewRecord(systemCollection)
|
||||||
|
system.Set("name", "test-system")
|
||||||
|
system.Set("status", "up")
|
||||||
|
system.Set("host", "127.0.0.1")
|
||||||
|
system.Set("users", []string{user.Id})
|
||||||
|
hub.Save(system)
|
||||||
|
|
||||||
|
alertCollection, _ := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
alert := core.NewRecord(alertCollection)
|
||||||
|
alert.Set("user", user.Id)
|
||||||
|
alert.Set("system", system.Id)
|
||||||
|
alert.Set("name", "Status")
|
||||||
|
alert.Set("triggered", true) // System was confirmed DOWN
|
||||||
|
hub.Save(alert)
|
||||||
|
|
||||||
|
am := hub.AlertManager
|
||||||
|
initialEmailCount := hub.TestMailer.TotalSend()
|
||||||
|
|
||||||
|
// System goes up
|
||||||
|
am.HandleStatusAlerts("up", system)
|
||||||
|
|
||||||
|
// Verify that an email WAS sent (normal recovery).
|
||||||
|
assert.Equal(t, initialEmailCount+1, hub.TestMailer.TotalSend(), "Recovery notification should be sent if system was triggered as down")
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHandleStatusAlertsDoesNotSendRecoveryWhileDownIsOnlyPending(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||||
|
require.NoError(t, hub.Save(userSettings))
|
||||||
|
|
||||||
|
systemCollection, err := hub.FindCollectionByNameOrId("systems")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := core.NewRecord(systemCollection)
|
||||||
|
system.Set("name", "test-system")
|
||||||
|
system.Set("status", "up")
|
||||||
|
system.Set("host", "127.0.0.1")
|
||||||
|
system.Set("users", []string{user.Id})
|
||||||
|
require.NoError(t, hub.Save(system))
|
||||||
|
|
||||||
|
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
require.NoError(t, err)
|
||||||
|
alert := core.NewRecord(alertCollection)
|
||||||
|
alert.Set("user", user.Id)
|
||||||
|
alert.Set("system", system.Id)
|
||||||
|
alert.Set("name", "Status")
|
||||||
|
alert.Set("triggered", false)
|
||||||
|
alert.Set("min", 1)
|
||||||
|
require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
initialEmailCount := hub.TestMailer.TotalSend()
|
||||||
|
am := alerts.NewTestAlertManagerWithoutWorker(hub)
|
||||||
|
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("down", system))
|
||||||
|
assert.Equal(t, 1, am.GetPendingAlertsCount(), "down transition should register a pending alert immediately")
|
||||||
|
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("up", system))
|
||||||
|
assert.Zero(t, am.GetPendingAlertsCount(), "recovery should cancel the pending down alert")
|
||||||
|
assert.Equal(t, initialEmailCount, hub.TestMailer.TotalSend(), "recovery notification should not be sent before a down alert triggers")
|
||||||
|
|
||||||
|
alertRecord, err := hub.FindRecordById("alerts", alert.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, alertRecord.GetBool("triggered"), "alert should remain untriggered when downtime never matured")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatusAlertTimerCancellationPreventsBoundaryDelivery(t *testing.T) {
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||||
|
require.NoError(t, hub.Save(userSettings))
|
||||||
|
|
||||||
|
systemCollection, err := hub.FindCollectionByNameOrId("systems")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := core.NewRecord(systemCollection)
|
||||||
|
system.Set("name", "test-system")
|
||||||
|
system.Set("status", "up")
|
||||||
|
system.Set("host", "127.0.0.1")
|
||||||
|
system.Set("users", []string{user.Id})
|
||||||
|
require.NoError(t, hub.Save(system))
|
||||||
|
|
||||||
|
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
require.NoError(t, err)
|
||||||
|
alert := core.NewRecord(alertCollection)
|
||||||
|
alert.Set("user", user.Id)
|
||||||
|
alert.Set("system", system.Id)
|
||||||
|
alert.Set("name", "Status")
|
||||||
|
alert.Set("triggered", false)
|
||||||
|
alert.Set("min", 1)
|
||||||
|
require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
initialEmailCount := hub.TestMailer.TotalSend()
|
||||||
|
am := alerts.NewTestAlertManagerWithoutWorker(hub)
|
||||||
|
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("down", system))
|
||||||
|
assert.Equal(t, 1, am.GetPendingAlertsCount(), "down transition should register a pending alert immediately")
|
||||||
|
require.True(t, am.ResetPendingAlertTimer(alert.Id, 25*time.Millisecond), "test should shorten the pending alert timer")
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("up", system))
|
||||||
|
assert.Zero(t, am.GetPendingAlertsCount(), "recovery should remove the pending alert before the timer callback runs")
|
||||||
|
|
||||||
|
time.Sleep(40 * time.Millisecond)
|
||||||
|
assert.Equal(t, initialEmailCount, hub.TestMailer.TotalSend(), "timer callback should not deliver after recovery cancels the pending alert")
|
||||||
|
|
||||||
|
alertRecord, err := hub.FindRecordById("alerts", alert.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, alertRecord.GetBool("triggered"), "alert should remain untriggered when cancellation wins the timer race")
|
||||||
|
|
||||||
|
time.Sleep(time.Minute)
|
||||||
|
synctest.Wait()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatusAlertDownFiresAfterDelayExpires(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||||
|
require.NoError(t, hub.Save(userSettings))
|
||||||
|
|
||||||
|
systemCollection, err := hub.FindCollectionByNameOrId("systems")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := core.NewRecord(systemCollection)
|
||||||
|
system.Set("name", "test-system")
|
||||||
|
system.Set("status", "up")
|
||||||
|
system.Set("host", "127.0.0.1")
|
||||||
|
system.Set("users", []string{user.Id})
|
||||||
|
require.NoError(t, hub.Save(system))
|
||||||
|
|
||||||
|
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
require.NoError(t, err)
|
||||||
|
alert := core.NewRecord(alertCollection)
|
||||||
|
alert.Set("user", user.Id)
|
||||||
|
alert.Set("system", system.Id)
|
||||||
|
alert.Set("name", "Status")
|
||||||
|
alert.Set("triggered", false)
|
||||||
|
alert.Set("min", 1)
|
||||||
|
require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
initialEmailCount := hub.TestMailer.TotalSend()
|
||||||
|
am := alerts.NewTestAlertManagerWithoutWorker(hub)
|
||||||
|
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("down", system))
|
||||||
|
assert.Equal(t, 1, am.GetPendingAlertsCount(), "alert should be pending after system goes down")
|
||||||
|
|
||||||
|
// Expire the pending alert and process it
|
||||||
|
am.ForceExpirePendingAlerts()
|
||||||
|
processed, err := am.ProcessPendingAlerts()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Len(t, processed, 1, "one alert should have been processed")
|
||||||
|
assert.Equal(t, 0, am.GetPendingAlertsCount(), "pending alert should be consumed after processing")
|
||||||
|
|
||||||
|
// Verify down email was sent
|
||||||
|
assert.Equal(t, initialEmailCount+1, hub.TestMailer.TotalSend(), "down notification should be sent after delay expires")
|
||||||
|
|
||||||
|
// Verify triggered flag is set in the DB
|
||||||
|
alertRecord, err := hub.FindRecordById("alerts", alert.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, alertRecord.GetBool("triggered"), "alert should be marked triggered after downtime matures")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatusAlertMultipleUsersRespectDifferentMinutes(t *testing.T) {
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
hub, user1 := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
setStatusAlertEmail(t, hub, user1.Id, "user1@example.com")
|
||||||
|
|
||||||
|
user2, err := beszelTests.CreateUser(hub, "user2@example.com", "password")
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = beszelTests.CreateRecord(hub, "user_settings", map[string]any{
|
||||||
|
"user": user2.Id,
|
||||||
|
"settings": map[string]any{
|
||||||
|
"emails": []string{"user2@example.com"},
|
||||||
|
"webhooks": []string{},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "shared-system",
|
||||||
|
"users": []string{user1.Id, user2.Id},
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
system.Set("status", "up")
|
||||||
|
require.NoError(t, hub.SaveNoValidate(system))
|
||||||
|
|
||||||
|
alertUser1, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user1.Id,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
alertUser2, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user2.Id,
|
||||||
|
"min": 2,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
|
||||||
|
system.Set("status", "down")
|
||||||
|
require.NoError(t, hub.SaveNoValidate(system))
|
||||||
|
|
||||||
|
assert.Equal(t, 2, hub.GetPendingAlertsCount(), "both user alerts should be pending after the system goes down")
|
||||||
|
|
||||||
|
time.Sleep(59 * time.Second)
|
||||||
|
synctest.Wait()
|
||||||
|
assert.Zero(t, hub.TestMailer.TotalSend(), "no messages should be sent before the earliest alert minute elapses")
|
||||||
|
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
synctest.Wait()
|
||||||
|
|
||||||
|
messages := hub.TestMailer.Messages()
|
||||||
|
require.Len(t, messages, 1, "only the first user's alert should send after one minute")
|
||||||
|
require.Len(t, messages[0].To, 1)
|
||||||
|
assert.Equal(t, "user1@example.com", messages[0].To[0].Address)
|
||||||
|
assert.Contains(t, messages[0].Subject, "Connection to shared-system is down")
|
||||||
|
assert.Equal(t, 1, hub.GetPendingAlertsCount(), "the later user alert should still be pending")
|
||||||
|
|
||||||
|
time.Sleep(58 * time.Second)
|
||||||
|
synctest.Wait()
|
||||||
|
assert.Equal(t, 1, hub.TestMailer.TotalSend(), "the second user's alert should still be waiting before two minutes")
|
||||||
|
|
||||||
|
time.Sleep(2 * time.Second)
|
||||||
|
synctest.Wait()
|
||||||
|
|
||||||
|
messages = hub.TestMailer.Messages()
|
||||||
|
require.Len(t, messages, 2, "both users should eventually receive their own status alert")
|
||||||
|
require.Len(t, messages[1].To, 1)
|
||||||
|
assert.Equal(t, "user2@example.com", messages[1].To[0].Address)
|
||||||
|
assert.Contains(t, messages[1].Subject, "Connection to shared-system is down")
|
||||||
|
assert.Zero(t, hub.GetPendingAlertsCount(), "all pending alerts should be consumed after both timers fire")
|
||||||
|
|
||||||
|
alertUser1, err = hub.FindRecordById("alerts", alertUser1.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, alertUser1.GetBool("triggered"), "user1 alert should be marked triggered after delivery")
|
||||||
|
|
||||||
|
alertUser2, err = hub.FindRecordById("alerts", alertUser2.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, alertUser2.GetBool("triggered"), "user2 alert should be marked triggered after delivery")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatusAlertMultipleUsersRecoveryBetweenMinutesOnlyAlertsEarlierUser(t *testing.T) {
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
hub, user1 := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
setStatusAlertEmail(t, hub, user1.Id, "user1@example.com")
|
||||||
|
|
||||||
|
user2, err := beszelTests.CreateUser(hub, "user2@example.com", "password")
|
||||||
|
require.NoError(t, err)
|
||||||
|
_, err = beszelTests.CreateRecord(hub, "user_settings", map[string]any{
|
||||||
|
"user": user2.Id,
|
||||||
|
"settings": map[string]any{
|
||||||
|
"emails": []string{"user2@example.com"},
|
||||||
|
"webhooks": []string{},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "shared-system",
|
||||||
|
"users": []string{user1.Id, user2.Id},
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
system.Set("status", "up")
|
||||||
|
require.NoError(t, hub.SaveNoValidate(system))
|
||||||
|
|
||||||
|
alertUser1, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user1.Id,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
alertUser2, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user2.Id,
|
||||||
|
"min": 2,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
|
||||||
|
system.Set("status", "down")
|
||||||
|
require.NoError(t, hub.SaveNoValidate(system))
|
||||||
|
|
||||||
|
time.Sleep(61 * time.Second)
|
||||||
|
synctest.Wait()
|
||||||
|
|
||||||
|
messages := hub.TestMailer.Messages()
|
||||||
|
require.Len(t, messages, 1, "the first user's down alert should send before recovery")
|
||||||
|
require.Len(t, messages[0].To, 1)
|
||||||
|
assert.Equal(t, "user1@example.com", messages[0].To[0].Address)
|
||||||
|
assert.Contains(t, messages[0].Subject, "Connection to shared-system is down")
|
||||||
|
assert.Equal(t, 1, hub.GetPendingAlertsCount(), "the second user's alert should still be pending")
|
||||||
|
|
||||||
|
system.Set("status", "up")
|
||||||
|
require.NoError(t, hub.SaveNoValidate(system))
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
synctest.Wait()
|
||||||
|
|
||||||
|
messages = hub.TestMailer.Messages()
|
||||||
|
require.Len(t, messages, 2, "recovery should notify only the user whose down alert had already triggered")
|
||||||
|
for _, message := range messages {
|
||||||
|
require.Len(t, message.To, 1)
|
||||||
|
assert.Equal(t, "user1@example.com", message.To[0].Address)
|
||||||
|
}
|
||||||
|
assert.Contains(t, messages[1].Subject, "Connection to shared-system is up")
|
||||||
|
assert.Zero(t, hub.GetPendingAlertsCount(), "recovery should cancel the later user's pending alert")
|
||||||
|
|
||||||
|
time.Sleep(61 * time.Second)
|
||||||
|
synctest.Wait()
|
||||||
|
|
||||||
|
messages = hub.TestMailer.Messages()
|
||||||
|
require.Len(t, messages, 2, "user2 should never receive a down alert once recovery cancels the pending timer")
|
||||||
|
|
||||||
|
alertUser1, err = hub.FindRecordById("alerts", alertUser1.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, alertUser1.GetBool("triggered"), "user1 alert should be cleared after recovery")
|
||||||
|
|
||||||
|
alertUser2, err = hub.FindRecordById("alerts", alertUser2.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, alertUser2.GetBool("triggered"), "user2 alert should remain untriggered because it never fired")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatusAlertDuplicateDownCallIsIdempotent(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||||
|
require.NoError(t, hub.Save(userSettings))
|
||||||
|
|
||||||
|
systemCollection, err := hub.FindCollectionByNameOrId("systems")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := core.NewRecord(systemCollection)
|
||||||
|
system.Set("name", "test-system")
|
||||||
|
system.Set("status", "up")
|
||||||
|
system.Set("host", "127.0.0.1")
|
||||||
|
system.Set("users", []string{user.Id})
|
||||||
|
require.NoError(t, hub.Save(system))
|
||||||
|
|
||||||
|
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
require.NoError(t, err)
|
||||||
|
alert := core.NewRecord(alertCollection)
|
||||||
|
alert.Set("user", user.Id)
|
||||||
|
alert.Set("system", system.Id)
|
||||||
|
alert.Set("name", "Status")
|
||||||
|
alert.Set("triggered", false)
|
||||||
|
alert.Set("min", 5)
|
||||||
|
require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
am := alerts.NewTestAlertManagerWithoutWorker(hub)
|
||||||
|
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("down", system))
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("down", system))
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("down", system))
|
||||||
|
|
||||||
|
assert.Equal(t, 1, am.GetPendingAlertsCount(), "repeated down calls should not schedule duplicate pending alerts")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatusAlertNoAlertRecord(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systemCollection, err := hub.FindCollectionByNameOrId("systems")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := core.NewRecord(systemCollection)
|
||||||
|
system.Set("name", "test-system")
|
||||||
|
system.Set("status", "up")
|
||||||
|
system.Set("host", "127.0.0.1")
|
||||||
|
system.Set("users", []string{user.Id})
|
||||||
|
require.NoError(t, hub.Save(system))
|
||||||
|
|
||||||
|
// No Status alert record created for this system
|
||||||
|
initialEmailCount := hub.TestMailer.TotalSend()
|
||||||
|
am := alerts.NewTestAlertManagerWithoutWorker(hub)
|
||||||
|
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("down", system))
|
||||||
|
assert.Equal(t, 0, am.GetPendingAlertsCount(), "no pending alert when no alert record exists")
|
||||||
|
|
||||||
|
require.NoError(t, am.HandleStatusAlerts("up", system))
|
||||||
|
assert.Equal(t, initialEmailCount, hub.TestMailer.TotalSend(), "no email when no alert record exists")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRestorePendingStatusAlertsRequeuesDownSystemsAfterRestart(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||||
|
require.NoError(t, hub.Save(userSettings))
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "down")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := systems[0]
|
||||||
|
|
||||||
|
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
require.NoError(t, err)
|
||||||
|
alert := core.NewRecord(alertCollection)
|
||||||
|
alert.Set("user", user.Id)
|
||||||
|
alert.Set("system", system.Id)
|
||||||
|
alert.Set("name", "Status")
|
||||||
|
alert.Set("triggered", false)
|
||||||
|
alert.Set("min", 1)
|
||||||
|
require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
initialEmailCount := hub.TestMailer.TotalSend()
|
||||||
|
am := alerts.NewTestAlertManagerWithoutWorker(hub)
|
||||||
|
|
||||||
|
require.NoError(t, am.RestorePendingStatusAlerts())
|
||||||
|
assert.Equal(t, 1, am.GetPendingAlertsCount(), "startup restore should requeue a pending down alert for a system still marked down")
|
||||||
|
|
||||||
|
am.ForceExpirePendingAlerts()
|
||||||
|
processed, err := am.ProcessPendingAlerts()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Len(t, processed, 1, "restored pending alert should be processable after the delay expires")
|
||||||
|
assert.Equal(t, initialEmailCount+1, hub.TestMailer.TotalSend(), "restored pending alert should send the down notification")
|
||||||
|
|
||||||
|
alertRecord, err := hub.FindRecordById("alerts", alert.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, alertRecord.GetBool("triggered"), "restored pending alert should mark the alert as triggered once delivered")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRestorePendingStatusAlertsSkipsNonDownOrAlreadyTriggeredAlerts(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systemsDown, err := beszelTests.CreateSystems(hub, 2, user.Id, "down")
|
||||||
|
require.NoError(t, err)
|
||||||
|
systemDownPending := systemsDown[0]
|
||||||
|
systemDownTriggered := systemsDown[1]
|
||||||
|
|
||||||
|
systemUp, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "up-system",
|
||||||
|
"users": []string{user.Id},
|
||||||
|
"host": "127.0.0.2",
|
||||||
|
"status": "up",
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
_, err = beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": systemDownPending.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
"triggered": false,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
_, err = beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": systemUp.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
"triggered": false,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
_, err = beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": systemDownTriggered.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
"triggered": true,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
am := alerts.NewTestAlertManagerWithoutWorker(hub)
|
||||||
|
require.NoError(t, am.RestorePendingStatusAlerts())
|
||||||
|
assert.Equal(t, 1, am.GetPendingAlertsCount(), "only untriggered alerts for currently down systems should be restored")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRestorePendingStatusAlertsIsIdempotent(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "down")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := systems[0]
|
||||||
|
|
||||||
|
_, err = beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
"triggered": false,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
am := alerts.NewTestAlertManagerWithoutWorker(hub)
|
||||||
|
require.NoError(t, am.RestorePendingStatusAlerts())
|
||||||
|
require.NoError(t, am.RestorePendingStatusAlerts())
|
||||||
|
|
||||||
|
assert.Equal(t, 1, am.GetPendingAlertsCount(), "restoring twice should not create duplicate pending alerts")
|
||||||
|
am.ForceExpirePendingAlerts()
|
||||||
|
processed, err := am.ProcessPendingAlerts()
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Len(t, processed, 1, "restored alert should still be processable exactly once")
|
||||||
|
assert.Zero(t, am.GetPendingAlertsCount(), "processing the restored alert should empty the pending map")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestResolveStatusAlertsFixesStaleTriggered(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// CreateSystems uses SaveNoValidate after initial save to bypass the
|
||||||
|
// onRecordCreate hook that forces status = "pending".
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
system := systems[0]
|
||||||
|
|
||||||
|
alertCollection, err := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
require.NoError(t, err)
|
||||||
|
alert := core.NewRecord(alertCollection)
|
||||||
|
alert.Set("user", user.Id)
|
||||||
|
alert.Set("system", system.Id)
|
||||||
|
alert.Set("name", "Status")
|
||||||
|
alert.Set("triggered", true) // Stale: system is up but alert still says triggered
|
||||||
|
require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
// resolveStatusAlerts should clear the stale triggered flag
|
||||||
|
require.NoError(t, alerts.ResolveStatusAlerts(hub))
|
||||||
|
|
||||||
|
alertRecord, err := hub.FindRecordById("alerts", alert.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, alertRecord.GetBool("triggered"), "stale triggered flag should be cleared when system is up")
|
||||||
|
}
|
||||||
|
func TestResolveStatusAlerts(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// Create a systemUp
|
||||||
|
systemUp, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "test-system",
|
||||||
|
"users": []string{user.Id},
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
"status": "up",
|
||||||
|
})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
systemDown, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "test-system-2",
|
||||||
|
"users": []string{user.Id},
|
||||||
|
"host": "127.0.0.2",
|
||||||
|
"status": "up",
|
||||||
|
})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Create a status alertUp for the system
|
||||||
|
alertUp, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": systemUp.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
alertDown, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": systemDown.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify alert is not triggered initially
|
||||||
|
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered initially")
|
||||||
|
|
||||||
|
// Set the system to 'up' (this should not trigger the alert)
|
||||||
|
systemUp.Set("status", "up")
|
||||||
|
err = hub.SaveNoValidate(systemUp)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
systemDown.Set("status", "down")
|
||||||
|
err = hub.SaveNoValidate(systemDown)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Wait a moment for any processing
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
|
||||||
|
// Verify alertUp is still not triggered after setting system to up
|
||||||
|
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered when system is up")
|
||||||
|
|
||||||
|
// Manually set both alerts triggered to true
|
||||||
|
alertUp.Set("triggered", true)
|
||||||
|
err = hub.SaveNoValidate(alertUp)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
alertDown.Set("triggered", true)
|
||||||
|
err = hub.SaveNoValidate(alertDown)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify we have exactly one alert with triggered true
|
||||||
|
triggeredCount, err := hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.EqualValues(t, 2, triggeredCount, "Should have exactly two alerts with triggered true")
|
||||||
|
|
||||||
|
// Verify the specific alertUp is triggered
|
||||||
|
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, alertUp.GetBool("triggered"), "Alert should be triggered")
|
||||||
|
|
||||||
|
// Verify we have two unresolved alert history records
|
||||||
|
alertHistoryCount, err := hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.EqualValues(t, 2, alertHistoryCount, "Should have exactly two unresolved alert history records")
|
||||||
|
|
||||||
|
err = alerts.ResolveStatusAlerts(hub)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify alertUp is not triggered after resolving
|
||||||
|
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered after resolving")
|
||||||
|
// Verify alertDown is still triggered
|
||||||
|
alertDown, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertDown.Id})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, alertDown.GetBool("triggered"), "Alert should still be triggered after resolving")
|
||||||
|
|
||||||
|
// Verify we have one unresolved alert history record
|
||||||
|
alertHistoryCount, err = hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.EqualValues(t, 1, alertHistoryCount, "Should have exactly one unresolved alert history record")
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAlertsHistoryStatus(t *testing.T) {
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// Create a system
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
system := systems[0]
|
||||||
|
|
||||||
|
// Create a status alertRecord for the system
|
||||||
|
alertRecord, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify alert is not triggered initially
|
||||||
|
assert.False(t, alertRecord.GetBool("triggered"), "Alert should not be triggered initially")
|
||||||
|
|
||||||
|
// Set the system to 'down' (this should trigger the alert)
|
||||||
|
system.Set("status", "down")
|
||||||
|
err = hub.Save(system)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
time.Sleep(time.Second * 30)
|
||||||
|
synctest.Wait()
|
||||||
|
|
||||||
|
alertFresh, _ := hub.FindRecordById("alerts", alertRecord.Id)
|
||||||
|
assert.False(t, alertFresh.GetBool("triggered"), "Alert should not be triggered after 30 seconds")
|
||||||
|
|
||||||
|
time.Sleep(time.Minute)
|
||||||
|
synctest.Wait()
|
||||||
|
|
||||||
|
// Verify alert is triggered after setting system to down
|
||||||
|
alertFresh, err = hub.FindRecordById("alerts", alertRecord.Id)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, alertFresh.GetBool("triggered"), "Alert should be triggered after one minute")
|
||||||
|
|
||||||
|
// Verify we have one unresolved alert history record
|
||||||
|
alertHistoryCount, err := hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.EqualValues(t, 1, alertHistoryCount, "Should have exactly one unresolved alert history record")
|
||||||
|
|
||||||
|
// Set the system back to 'up' (this should resolve the alert)
|
||||||
|
system.Set("status", "up")
|
||||||
|
err = hub.Save(system)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
synctest.Wait()
|
||||||
|
|
||||||
|
// Verify alert is not triggered after setting system back to up
|
||||||
|
alertFresh, err = hub.FindRecordById("alerts", alertRecord.Id)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.False(t, alertFresh.GetBool("triggered"), "Alert should not be triggered after system recovers")
|
||||||
|
|
||||||
|
// Verify the alert history record is resolved
|
||||||
|
alertHistoryCount, err = hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.EqualValues(t, 0, alertHistoryCount, "Should have no unresolved alert history records")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStatusAlertClearedBeforeSend(t *testing.T) {
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// Create a system
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
system := systems[0]
|
||||||
|
|
||||||
|
// Ensure user settings have an email
|
||||||
|
userSettings, _ := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||||
|
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||||
|
hub.Save(userSettings)
|
||||||
|
|
||||||
|
// Initial email count
|
||||||
|
initialEmailCount := hub.TestMailer.TotalSend()
|
||||||
|
|
||||||
|
// Create a status alertRecord for the system
|
||||||
|
alertRecord, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Status",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify alert is not triggered initially
|
||||||
|
assert.False(t, alertRecord.GetBool("triggered"), "Alert should not be triggered initially")
|
||||||
|
|
||||||
|
// Set the system to 'down' (this should trigger the alert)
|
||||||
|
system.Set("status", "down")
|
||||||
|
err = hub.Save(system)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
time.Sleep(time.Second * 30)
|
||||||
|
synctest.Wait()
|
||||||
|
|
||||||
|
// Set system back up to clear the pending alert before it triggers
|
||||||
|
system.Set("status", "up")
|
||||||
|
err = hub.Save(system)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
time.Sleep(time.Minute)
|
||||||
|
synctest.Wait()
|
||||||
|
|
||||||
|
// Verify that we have not sent any emails since the system recovered before the alert triggered
|
||||||
|
assert.Equal(t, initialEmailCount, hub.TestMailer.TotalSend(), "No email should be sent if system recovers before alert triggers")
|
||||||
|
|
||||||
|
// Verify alert is not triggered after setting system back to up
|
||||||
|
alertFresh, err := hub.FindRecordById("alerts", alertRecord.Id)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.False(t, alertFresh.GetBool("triggered"), "Alert should not be triggered after system recovers")
|
||||||
|
|
||||||
|
// Verify that no alert history record was created since the alert never triggered
|
||||||
|
alertHistoryCount, err := hub.CountRecords("alerts_history")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.EqualValues(t, 0, alertHistoryCount, "Should have no unresolved alert history records since alert never triggered")
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -11,15 +11,11 @@ import (
|
|||||||
"github.com/pocketbase/dbx"
|
"github.com/pocketbase/dbx"
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
"github.com/pocketbase/pocketbase/tools/types"
|
"github.com/pocketbase/pocketbase/tools/types"
|
||||||
"github.com/spf13/cast"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *system.CombinedData) error {
|
func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *system.CombinedData) error {
|
||||||
alertRecords, err := am.hub.FindAllRecords("alerts",
|
alerts := am.alertsCache.GetAlertsExcludingNames(systemRecord.Id, "Status")
|
||||||
dbx.NewExp("system={:system} AND name!='Status'", dbx.Params{"system": systemRecord.Id}),
|
if len(alerts) == 0 {
|
||||||
)
|
|
||||||
if err != nil || len(alertRecords) == 0 {
|
|
||||||
// log.Println("no alerts found for system")
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -27,8 +23,8 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
now := systemRecord.GetDateTime("updated").Time().UTC()
|
now := systemRecord.GetDateTime("updated").Time().UTC()
|
||||||
oldestTime := now
|
oldestTime := now
|
||||||
|
|
||||||
for _, alertRecord := range alertRecords {
|
for _, alertData := range alerts {
|
||||||
name := alertRecord.GetString("name")
|
name := alertData.Name
|
||||||
var val float64
|
var val float64
|
||||||
unit := "%"
|
unit := "%"
|
||||||
|
|
||||||
@@ -73,8 +69,8 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
val = float64(data.Stats.Battery[0])
|
val = float64(data.Stats.Battery[0])
|
||||||
}
|
}
|
||||||
|
|
||||||
triggered := alertRecord.GetBool("triggered")
|
triggered := alertData.Triggered
|
||||||
threshold := alertRecord.GetFloat("value")
|
threshold := alertData.Value
|
||||||
|
|
||||||
// Battery alert has inverted logic: trigger when value is BELOW threshold
|
// Battery alert has inverted logic: trigger when value is BELOW threshold
|
||||||
lowAlert := isLowAlert(name)
|
lowAlert := isLowAlert(name)
|
||||||
@@ -92,11 +88,11 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
min := max(1, cast.ToUint8(alertRecord.Get("min")))
|
min := max(1, alertData.Min)
|
||||||
|
|
||||||
alert := SystemAlertData{
|
alert := SystemAlertData{
|
||||||
systemRecord: systemRecord,
|
systemRecord: systemRecord,
|
||||||
alertRecord: alertRecord,
|
alertData: alertData,
|
||||||
name: name,
|
name: name,
|
||||||
unit: unit,
|
unit: unit,
|
||||||
val: val,
|
val: val,
|
||||||
@@ -129,7 +125,7 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
Created types.DateTime `db:"created"`
|
Created types.DateTime `db:"created"`
|
||||||
}{}
|
}{}
|
||||||
|
|
||||||
err = am.hub.DB().
|
err := am.hub.DB().
|
||||||
Select("stats", "created").
|
Select("stats", "created").
|
||||||
From("system_stats").
|
From("system_stats").
|
||||||
Where(dbx.NewExp(
|
Where(dbx.NewExp(
|
||||||
@@ -192,7 +188,7 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
case "Memory":
|
case "Memory":
|
||||||
alert.val += stats.Mem
|
alert.val += stats.Mem
|
||||||
case "Bandwidth":
|
case "Bandwidth":
|
||||||
alert.val += stats.NetSent + stats.NetRecv
|
alert.val += float64(stats.Bandwidth[0]+stats.Bandwidth[1]) / (1024 * 1024)
|
||||||
case "Disk":
|
case "Disk":
|
||||||
if alert.mapSums == nil {
|
if alert.mapSums == nil {
|
||||||
alert.mapSums = make(map[string]float32, len(stats.ExtraFs)+1)
|
alert.mapSums = make(map[string]float32, len(stats.ExtraFs)+1)
|
||||||
@@ -344,13 +340,12 @@ func (am *AlertManager) sendSystemAlert(alert SystemAlertData) {
|
|||||||
}
|
}
|
||||||
body := fmt.Sprintf("%s averaged %.2f%s for the previous %v %s.", alert.descriptor, alert.val, alert.unit, alert.min, minutesLabel)
|
body := fmt.Sprintf("%s averaged %.2f%s for the previous %v %s.", alert.descriptor, alert.val, alert.unit, alert.min, minutesLabel)
|
||||||
|
|
||||||
alert.alertRecord.Set("triggered", alert.triggered)
|
if err := am.setAlertTriggered(alert.alertData, alert.triggered); err != nil {
|
||||||
if err := am.hub.Save(alert.alertRecord); err != nil {
|
|
||||||
// app.Logger().Error("failed to save alert record", "err", err)
|
// app.Logger().Error("failed to save alert record", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
am.SendAlert(AlertMessageData{
|
am.SendAlert(AlertMessageData{
|
||||||
UserID: alert.alertRecord.GetString("user"),
|
UserID: alert.alertData.UserID,
|
||||||
SystemID: alert.systemRecord.Id,
|
SystemID: alert.systemRecord.Id,
|
||||||
Title: subject,
|
Title: subject,
|
||||||
Message: body,
|
Message: body,
|
||||||
|
|||||||
218
internal/alerts/alerts_system_test.go
Normal file
218
internal/alerts/alerts_system_test.go
Normal file
@@ -0,0 +1,218 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"testing/synctest"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
type systemAlertValueSetter[T any] func(info *system.Info, stats *system.Stats, value T)
|
||||||
|
|
||||||
|
type systemAlertTestFixture struct {
|
||||||
|
hub *beszelTests.TestHub
|
||||||
|
alertID string
|
||||||
|
submit func(*system.CombinedData) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func createCombinedData[T any](value T, setValue systemAlertValueSetter[T]) *system.CombinedData {
|
||||||
|
var data system.CombinedData
|
||||||
|
setValue(&data.Info, &data.Stats, value)
|
||||||
|
return &data
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSystemAlertTestFixture(t *testing.T, alertName string, min int, threshold float64) *systemAlertTestFixture {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
systemRecord := systems[0]
|
||||||
|
|
||||||
|
sysManagerSystem, err := hub.GetSystemManager().GetSystemFromStore(systemRecord.Id)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, sysManagerSystem)
|
||||||
|
sysManagerSystem.StopUpdater()
|
||||||
|
|
||||||
|
userSettings, err := hub.FindFirstRecordByFilter("user_settings", "user={:user}", map[string]any{"user": user.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
userSettings.Set("settings", `{"emails":["test@example.com"],"webhooks":[]}`)
|
||||||
|
require.NoError(t, hub.Save(userSettings))
|
||||||
|
|
||||||
|
alertRecord, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": alertName,
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"min": min,
|
||||||
|
"value": threshold,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
assert.False(t, alertRecord.GetBool("triggered"), "Alert should not be triggered initially")
|
||||||
|
|
||||||
|
alertsCache := hub.GetAlertManager().GetSystemAlertsCache()
|
||||||
|
cachedAlerts := alertsCache.GetAlertsExcludingNames(systemRecord.Id, "Status")
|
||||||
|
assert.Len(t, cachedAlerts, 1, "Alert should be in cache")
|
||||||
|
|
||||||
|
return &systemAlertTestFixture{
|
||||||
|
hub: hub,
|
||||||
|
alertID: alertRecord.Id,
|
||||||
|
submit: func(data *system.CombinedData) error {
|
||||||
|
_, err := sysManagerSystem.CreateRecords(data)
|
||||||
|
return err
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fixture *systemAlertTestFixture) cleanup() {
|
||||||
|
fixture.hub.Cleanup()
|
||||||
|
}
|
||||||
|
|
||||||
|
func submitValue[T any](fixture *systemAlertTestFixture, t *testing.T, value T, setValue systemAlertValueSetter[T]) {
|
||||||
|
t.Helper()
|
||||||
|
require.NoError(t, fixture.submit(createCombinedData(value, setValue)))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fixture *systemAlertTestFixture) assertTriggered(t *testing.T, triggered bool, message string) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
alertRecord, err := fixture.hub.FindRecordById("alerts", fixture.alertID)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, triggered, alertRecord.GetBool("triggered"), message)
|
||||||
|
}
|
||||||
|
|
||||||
|
func waitForSystemAlert(d time.Duration) {
|
||||||
|
time.Sleep(d)
|
||||||
|
synctest.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
func testOneMinuteSystemAlert[T any](t *testing.T, alertName string, threshold float64, setValue systemAlertValueSetter[T], triggerValue, resolveValue T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
fixture := newSystemAlertTestFixture(t, alertName, 1, threshold)
|
||||||
|
defer fixture.cleanup()
|
||||||
|
|
||||||
|
submitValue(fixture, t, triggerValue, setValue)
|
||||||
|
waitForSystemAlert(time.Second)
|
||||||
|
|
||||||
|
fixture.assertTriggered(t, true, "Alert should be triggered")
|
||||||
|
assert.Equal(t, 1, fixture.hub.TestMailer.TotalSend(), "An email should have been sent")
|
||||||
|
|
||||||
|
submitValue(fixture, t, resolveValue, setValue)
|
||||||
|
waitForSystemAlert(time.Second)
|
||||||
|
|
||||||
|
fixture.assertTriggered(t, false, "Alert should be untriggered")
|
||||||
|
assert.Equal(t, 2, fixture.hub.TestMailer.TotalSend(), "A second email should have been sent for untriggering the alert")
|
||||||
|
|
||||||
|
waitForSystemAlert(time.Minute)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func testMultiMinuteSystemAlert[T any](t *testing.T, alertName string, threshold float64, min int, setValue systemAlertValueSetter[T], baselineValue, triggerValue, resolveValue T) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
fixture := newSystemAlertTestFixture(t, alertName, min, threshold)
|
||||||
|
defer fixture.cleanup()
|
||||||
|
|
||||||
|
submitValue(fixture, t, baselineValue, setValue)
|
||||||
|
waitForSystemAlert(time.Minute + time.Second)
|
||||||
|
fixture.assertTriggered(t, false, "Alert should not be triggered yet")
|
||||||
|
|
||||||
|
submitValue(fixture, t, triggerValue, setValue)
|
||||||
|
waitForSystemAlert(time.Minute)
|
||||||
|
fixture.assertTriggered(t, false, "Alert should not be triggered until the history window is full")
|
||||||
|
|
||||||
|
submitValue(fixture, t, triggerValue, setValue)
|
||||||
|
waitForSystemAlert(time.Second)
|
||||||
|
fixture.assertTriggered(t, true, "Alert should be triggered")
|
||||||
|
assert.Equal(t, 1, fixture.hub.TestMailer.TotalSend(), "An email should have been sent")
|
||||||
|
|
||||||
|
submitValue(fixture, t, resolveValue, setValue)
|
||||||
|
waitForSystemAlert(time.Second)
|
||||||
|
fixture.assertTriggered(t, false, "Alert should be untriggered")
|
||||||
|
assert.Equal(t, 2, fixture.hub.TestMailer.TotalSend(), "A second email should have been sent for untriggering the alert")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func setCPUAlertValue(info *system.Info, stats *system.Stats, value float64) {
|
||||||
|
info.Cpu = value
|
||||||
|
stats.Cpu = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func setMemoryAlertValue(info *system.Info, stats *system.Stats, value float64) {
|
||||||
|
info.MemPct = value
|
||||||
|
stats.MemPct = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func setDiskAlertValue(info *system.Info, stats *system.Stats, value float64) {
|
||||||
|
info.DiskPct = value
|
||||||
|
stats.DiskPct = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func setBandwidthAlertValue(info *system.Info, stats *system.Stats, value [2]uint64) {
|
||||||
|
info.BandwidthBytes = value[0] + value[1]
|
||||||
|
stats.Bandwidth = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func megabytesToBytes(mb uint64) uint64 {
|
||||||
|
return mb * 1024 * 1024
|
||||||
|
}
|
||||||
|
|
||||||
|
func setGPUAlertValue(info *system.Info, stats *system.Stats, value float64) {
|
||||||
|
info.GpuPct = value
|
||||||
|
stats.GPUData = map[string]system.GPUData{
|
||||||
|
"GPU0": {Usage: value},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setTemperatureAlertValue(info *system.Info, stats *system.Stats, value float64) {
|
||||||
|
info.DashboardTemp = value
|
||||||
|
stats.Temperatures = map[string]float64{
|
||||||
|
"Temp0": value,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setLoadAvgAlertValue(info *system.Info, stats *system.Stats, value [3]float64) {
|
||||||
|
info.LoadAvg = value
|
||||||
|
stats.LoadAvg = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func setBatteryAlertValue(info *system.Info, stats *system.Stats, value [2]uint8) {
|
||||||
|
info.Battery = value
|
||||||
|
stats.Battery = value
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemAlertsOneMin(t *testing.T) {
|
||||||
|
testOneMinuteSystemAlert(t, "CPU", 50, setCPUAlertValue, 51, 49)
|
||||||
|
testOneMinuteSystemAlert(t, "Memory", 50, setMemoryAlertValue, 51, 49)
|
||||||
|
testOneMinuteSystemAlert(t, "Disk", 50, setDiskAlertValue, 51, 49)
|
||||||
|
testOneMinuteSystemAlert(t, "Bandwidth", 50, setBandwidthAlertValue, [2]uint64{megabytesToBytes(26), megabytesToBytes(25)}, [2]uint64{megabytesToBytes(25), megabytesToBytes(24)})
|
||||||
|
testOneMinuteSystemAlert(t, "GPU", 50, setGPUAlertValue, 51, 49)
|
||||||
|
testOneMinuteSystemAlert(t, "Temperature", 70, setTemperatureAlertValue, 71, 69)
|
||||||
|
testOneMinuteSystemAlert(t, "LoadAvg1", 4, setLoadAvgAlertValue, [3]float64{4.1, 0, 0}, [3]float64{3.9, 0, 0})
|
||||||
|
testOneMinuteSystemAlert(t, "LoadAvg5", 4, setLoadAvgAlertValue, [3]float64{0, 4.1, 0}, [3]float64{0, 3.9, 0})
|
||||||
|
testOneMinuteSystemAlert(t, "LoadAvg15", 4, setLoadAvgAlertValue, [3]float64{0, 0, 4.1}, [3]float64{0, 0, 3.9})
|
||||||
|
testOneMinuteSystemAlert(t, "Battery", 20, setBatteryAlertValue, [2]uint8{19, 0}, [2]uint8{21, 0})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemAlertsTwoMin(t *testing.T) {
|
||||||
|
testMultiMinuteSystemAlert(t, "CPU", 50, 2, setCPUAlertValue, 10, 51, 48)
|
||||||
|
testMultiMinuteSystemAlert(t, "Memory", 50, 2, setMemoryAlertValue, 10, 51, 48)
|
||||||
|
testMultiMinuteSystemAlert(t, "Disk", 50, 2, setDiskAlertValue, 10, 51, 48)
|
||||||
|
testMultiMinuteSystemAlert(t, "Bandwidth", 50, 2, setBandwidthAlertValue, [2]uint64{megabytesToBytes(10), megabytesToBytes(10)}, [2]uint64{megabytesToBytes(26), megabytesToBytes(25)}, [2]uint64{megabytesToBytes(10), megabytesToBytes(10)})
|
||||||
|
testMultiMinuteSystemAlert(t, "GPU", 50, 2, setGPUAlertValue, 10, 51, 48)
|
||||||
|
testMultiMinuteSystemAlert(t, "Temperature", 70, 2, setTemperatureAlertValue, 10, 71, 67)
|
||||||
|
testMultiMinuteSystemAlert(t, "LoadAvg1", 4, 2, setLoadAvgAlertValue, [3]float64{0, 0, 0}, [3]float64{4.1, 0, 0}, [3]float64{3.5, 0, 0})
|
||||||
|
testMultiMinuteSystemAlert(t, "LoadAvg5", 4, 2, setLoadAvgAlertValue, [3]float64{0, 2, 0}, [3]float64{0, 4.1, 0}, [3]float64{0, 3.5, 0})
|
||||||
|
testMultiMinuteSystemAlert(t, "LoadAvg15", 4, 2, setLoadAvgAlertValue, [3]float64{0, 0, 2}, [3]float64{0, 0, 4.1}, [3]float64{0, 0, 3.5})
|
||||||
|
testMultiMinuteSystemAlert(t, "Battery", 20, 2, setBatteryAlertValue, [2]uint8{21, 0}, [2]uint8{19, 0}, [2]uint8{25, 1})
|
||||||
|
}
|
||||||
@@ -12,9 +12,9 @@ import (
|
|||||||
"testing/synctest"
|
"testing/synctest"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/alerts"
|
|
||||||
beszelTests "github.com/henrygd/beszel/internal/tests"
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
"github.com/pocketbase/dbx"
|
"github.com/pocketbase/dbx"
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
pbTests "github.com/pocketbase/pocketbase/tests"
|
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||||
@@ -369,87 +369,6 @@ func TestUserAlertsApi(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStatusAlerts(t *testing.T) {
|
|
||||||
synctest.Test(t, func(t *testing.T) {
|
|
||||||
hub, user := beszelTests.GetHubWithUser(t)
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
systems, err := beszelTests.CreateSystems(hub, 4, user.Id, "paused")
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
var alerts []*core.Record
|
|
||||||
for i, system := range systems {
|
|
||||||
alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
|
||||||
"name": "Status",
|
|
||||||
"system": system.Id,
|
|
||||||
"user": user.Id,
|
|
||||||
"min": i + 1,
|
|
||||||
})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
alerts = append(alerts, alert)
|
|
||||||
}
|
|
||||||
|
|
||||||
time.Sleep(10 * time.Millisecond)
|
|
||||||
|
|
||||||
for _, alert := range alerts {
|
|
||||||
assert.False(t, alert.GetBool("triggered"), "Alert should not be triggered immediately")
|
|
||||||
}
|
|
||||||
if hub.TestMailer.TotalSend() != 0 {
|
|
||||||
assert.Zero(t, hub.TestMailer.TotalSend(), "Expected 0 messages, got %d", hub.TestMailer.TotalSend())
|
|
||||||
}
|
|
||||||
for _, system := range systems {
|
|
||||||
assert.EqualValues(t, "paused", system.GetString("status"), "System should be paused")
|
|
||||||
}
|
|
||||||
for _, system := range systems {
|
|
||||||
system.Set("status", "up")
|
|
||||||
err = hub.SaveNoValidate(system)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
assert.EqualValues(t, 0, hub.GetPendingAlertsCount(), "should have 0 alerts in the pendingAlerts map")
|
|
||||||
for _, system := range systems {
|
|
||||||
system.Set("status", "down")
|
|
||||||
err = hub.SaveNoValidate(system)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
// after 30 seconds, should have 4 alerts in the pendingAlerts map, no triggered alerts
|
|
||||||
time.Sleep(time.Second * 30)
|
|
||||||
assert.EqualValues(t, 4, hub.GetPendingAlertsCount(), "should have 4 alerts in the pendingAlerts map")
|
|
||||||
triggeredCount, err := hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.EqualValues(t, 0, triggeredCount, "should have 0 alert triggered")
|
|
||||||
assert.EqualValues(t, 0, hub.TestMailer.TotalSend(), "should have 0 messages sent")
|
|
||||||
// after 1:30 seconds, should have 1 triggered alert and 3 pending alerts
|
|
||||||
time.Sleep(time.Second * 60)
|
|
||||||
assert.EqualValues(t, 3, hub.GetPendingAlertsCount(), "should have 3 alerts in the pendingAlerts map")
|
|
||||||
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.EqualValues(t, 1, triggeredCount, "should have 1 alert triggered")
|
|
||||||
assert.EqualValues(t, 1, hub.TestMailer.TotalSend(), "should have 1 messages sent")
|
|
||||||
// after 2:30 seconds, should have 2 triggered alerts and 2 pending alerts
|
|
||||||
time.Sleep(time.Second * 60)
|
|
||||||
assert.EqualValues(t, 2, hub.GetPendingAlertsCount(), "should have 2 alerts in the pendingAlerts map")
|
|
||||||
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.EqualValues(t, 2, triggeredCount, "should have 2 alert triggered")
|
|
||||||
assert.EqualValues(t, 2, hub.TestMailer.TotalSend(), "should have 2 messages sent")
|
|
||||||
// now we will bring the remaning systems back up
|
|
||||||
for _, system := range systems {
|
|
||||||
system.Set("status", "up")
|
|
||||||
err = hub.SaveNoValidate(system)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
}
|
|
||||||
time.Sleep(time.Second)
|
|
||||||
// should have 0 alerts in the pendingAlerts map and 0 alerts triggered
|
|
||||||
assert.EqualValues(t, 0, hub.GetPendingAlertsCount(), "should have 0 alerts in the pendingAlerts map")
|
|
||||||
triggeredCount, err = hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Zero(t, triggeredCount, "should have 0 alert triggered")
|
|
||||||
// 4 messages sent, 2 down alerts and 2 up alerts for first 2 systems
|
|
||||||
assert.EqualValues(t, 4, hub.TestMailer.TotalSend(), "should have 4 messages sent")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAlertsHistory(t *testing.T) {
|
func TestAlertsHistory(t *testing.T) {
|
||||||
synctest.Test(t, func(t *testing.T) {
|
synctest.Test(t, func(t *testing.T) {
|
||||||
hub, user := beszelTests.GetHubWithUser(t)
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
@@ -578,102 +497,46 @@ func TestAlertsHistory(t *testing.T) {
|
|||||||
assert.EqualValues(t, 2, totalHistoryCount, "Should have 2 total alert history records")
|
assert.EqualValues(t, 2, totalHistoryCount, "Should have 2 total alert history records")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
func TestResolveStatusAlerts(t *testing.T) {
|
|
||||||
hub, user := beszelTests.GetHubWithUser(t)
|
func TestSetAlertTriggered(t *testing.T) {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
defer hub.Cleanup()
|
defer hub.Cleanup()
|
||||||
|
|
||||||
// Create a systemUp
|
hub.StartHub()
|
||||||
systemUp, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
user, _ := beszelTests.CreateUser(hub, "test@example.com", "password")
|
||||||
"users": []string{user.Id},
|
system, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
"host": "127.0.0.1",
|
"name": "test-system",
|
||||||
"status": "up",
|
"users": []string{user.Id},
|
||||||
|
"host": "127.0.0.1",
|
||||||
})
|
})
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
systemDown, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
alertRecord, _ := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
"name": "test-system-2",
|
"name": "CPU",
|
||||||
"users": []string{user.Id},
|
"system": system.Id,
|
||||||
"host": "127.0.0.2",
|
"user": user.Id,
|
||||||
"status": "up",
|
"value": 80,
|
||||||
|
"triggered": false,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
am := alerts.NewAlertManager(hub)
|
||||||
|
|
||||||
|
var alert alerts.CachedAlertData
|
||||||
|
alert.PopulateFromRecord(alertRecord)
|
||||||
|
|
||||||
|
// Test triggering the alert
|
||||||
|
err := am.SetAlertTriggered(alert, true)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
// Create a status alertUp for the system
|
updatedRecord, err := hub.FindRecordById("alerts", alert.Id)
|
||||||
alertUp, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
assert.NoError(t, err)
|
||||||
"name": "Status",
|
assert.True(t, updatedRecord.GetBool("triggered"))
|
||||||
"system": systemUp.Id,
|
|
||||||
"user": user.Id,
|
// Test un-triggering the alert
|
||||||
"min": 1,
|
err = am.SetAlertTriggered(alert, false)
|
||||||
})
|
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
alertDown, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
updatedRecord, err = hub.FindRecordById("alerts", alert.Id)
|
||||||
"name": "Status",
|
|
||||||
"system": systemDown.Id,
|
|
||||||
"user": user.Id,
|
|
||||||
"min": 1,
|
|
||||||
})
|
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
assert.False(t, updatedRecord.GetBool("triggered"))
|
||||||
// Verify alert is not triggered initially
|
|
||||||
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered initially")
|
|
||||||
|
|
||||||
// Set the system to 'up' (this should not trigger the alert)
|
|
||||||
systemUp.Set("status", "up")
|
|
||||||
err = hub.SaveNoValidate(systemUp)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
systemDown.Set("status", "down")
|
|
||||||
err = hub.SaveNoValidate(systemDown)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Wait a moment for any processing
|
|
||||||
time.Sleep(10 * time.Millisecond)
|
|
||||||
|
|
||||||
// Verify alertUp is still not triggered after setting system to up
|
|
||||||
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered when system is up")
|
|
||||||
|
|
||||||
// Manually set both alerts triggered to true
|
|
||||||
alertUp.Set("triggered", true)
|
|
||||||
err = hub.SaveNoValidate(alertUp)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
alertDown.Set("triggered", true)
|
|
||||||
err = hub.SaveNoValidate(alertDown)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify we have exactly one alert with triggered true
|
|
||||||
triggeredCount, err := hub.CountRecords("alerts", dbx.HashExp{"triggered": true})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.EqualValues(t, 2, triggeredCount, "Should have exactly two alerts with triggered true")
|
|
||||||
|
|
||||||
// Verify the specific alertUp is triggered
|
|
||||||
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, alertUp.GetBool("triggered"), "Alert should be triggered")
|
|
||||||
|
|
||||||
// Verify we have two unresolved alert history records
|
|
||||||
alertHistoryCount, err := hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.EqualValues(t, 2, alertHistoryCount, "Should have exactly two unresolved alert history records")
|
|
||||||
|
|
||||||
err = alerts.ResolveStatusAlerts(hub)
|
|
||||||
assert.NoError(t, err)
|
|
||||||
|
|
||||||
// Verify alertUp is not triggered after resolving
|
|
||||||
alertUp, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertUp.Id})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.False(t, alertUp.GetBool("triggered"), "Alert should not be triggered after resolving")
|
|
||||||
// Verify alertDown is still triggered
|
|
||||||
alertDown, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": alertDown.Id})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.True(t, alertDown.GetBool("triggered"), "Alert should still be triggered after resolving")
|
|
||||||
|
|
||||||
// Verify we have one unresolved alert history record
|
|
||||||
alertHistoryCount, err = hub.CountRecords("alerts_history", dbx.HashExp{"resolved": ""})
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.EqualValues(t, 1, alertHistoryCount, "Should have exactly one unresolved alert history record")
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -9,6 +9,18 @@ import (
|
|||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func NewTestAlertManagerWithoutWorker(app hubLike) *AlertManager {
|
||||||
|
return &AlertManager{
|
||||||
|
hub: app,
|
||||||
|
alertsCache: NewAlertsCache(app),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSystemAlertsCache returns the internal system alerts cache.
|
||||||
|
func (am *AlertManager) GetSystemAlertsCache() *AlertsCache {
|
||||||
|
return am.alertsCache
|
||||||
|
}
|
||||||
|
|
||||||
func (am *AlertManager) GetAlertManager() *AlertManager {
|
func (am *AlertManager) GetAlertManager() *AlertManager {
|
||||||
return am
|
return am
|
||||||
}
|
}
|
||||||
@@ -27,19 +39,18 @@ func (am *AlertManager) GetPendingAlertsCount() int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ProcessPendingAlerts manually processes all expired alerts (for testing)
|
// ProcessPendingAlerts manually processes all expired alerts (for testing)
|
||||||
func (am *AlertManager) ProcessPendingAlerts() ([]*core.Record, error) {
|
func (am *AlertManager) ProcessPendingAlerts() ([]CachedAlertData, error) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
var lastErr error
|
var lastErr error
|
||||||
var processedAlerts []*core.Record
|
var processedAlerts []CachedAlertData
|
||||||
am.pendingAlerts.Range(func(key, value any) bool {
|
am.pendingAlerts.Range(func(key, value any) bool {
|
||||||
info := value.(*alertInfo)
|
info := value.(*alertInfo)
|
||||||
if now.After(info.expireTime) {
|
if now.After(info.expireTime) {
|
||||||
// Downtime delay has passed, process alert
|
if info.timer != nil {
|
||||||
if err := am.sendStatusAlert("down", info.systemName, info.alertRecord); err != nil {
|
info.timer.Stop()
|
||||||
lastErr = err
|
|
||||||
}
|
}
|
||||||
processedAlerts = append(processedAlerts, info.alertRecord)
|
am.processPendingAlert(key.(string))
|
||||||
am.pendingAlerts.Delete(key)
|
processedAlerts = append(processedAlerts, info.alertData)
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
})
|
})
|
||||||
@@ -56,6 +67,31 @@ func (am *AlertManager) ForceExpirePendingAlerts() {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (am *AlertManager) ResetPendingAlertTimer(alertID string, delay time.Duration) bool {
|
||||||
|
value, loaded := am.pendingAlerts.Load(alertID)
|
||||||
|
if !loaded {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
info := value.(*alertInfo)
|
||||||
|
if info.timer != nil {
|
||||||
|
info.timer.Stop()
|
||||||
|
}
|
||||||
|
info.expireTime = time.Now().Add(delay)
|
||||||
|
info.timer = time.AfterFunc(delay, func() {
|
||||||
|
am.processPendingAlert(alertID)
|
||||||
|
})
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
func ResolveStatusAlerts(app core.App) error {
|
func ResolveStatusAlerts(app core.App) error {
|
||||||
return resolveStatusAlerts(app)
|
return resolveStatusAlerts(app)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (am *AlertManager) RestorePendingStatusAlerts() error {
|
||||||
|
return am.restorePendingStatusAlerts()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (am *AlertManager) SetAlertTriggered(alert CachedAlertData, triggered bool) error {
|
||||||
|
return am.setAlertTriggered(alert, triggered)
|
||||||
|
}
|
||||||
|
|||||||
@@ -28,8 +28,8 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
baseApp := getBaseApp()
|
baseApp := getBaseApp()
|
||||||
h := hub.NewHub(baseApp)
|
hub := hub.NewHub(baseApp)
|
||||||
if err := h.StartHub(); err != nil {
|
if err := hub.StartHub(); err != nil {
|
||||||
log.Fatal(err)
|
log.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,10 +10,19 @@ type ApiInfo struct {
|
|||||||
Status string
|
Status string
|
||||||
State string
|
State string
|
||||||
Image string
|
Image string
|
||||||
|
Health struct {
|
||||||
|
Status string
|
||||||
|
// FailingStreak int
|
||||||
|
}
|
||||||
|
Ports []struct {
|
||||||
|
// PrivatePort uint16
|
||||||
|
PublicPort uint16
|
||||||
|
IP string
|
||||||
|
// Type string
|
||||||
|
}
|
||||||
// ImageID string
|
// ImageID string
|
||||||
// Command string
|
// Command string
|
||||||
// Created int64
|
// Created int64
|
||||||
// Ports []Port
|
|
||||||
// SizeRw int64 `json:",omitempty"`
|
// SizeRw int64 `json:",omitempty"`
|
||||||
// SizeRootFs int64 `json:",omitempty"`
|
// SizeRootFs int64 `json:",omitempty"`
|
||||||
// Labels map[string]string
|
// Labels map[string]string
|
||||||
@@ -140,6 +149,7 @@ type Stats struct {
|
|||||||
Status string `json:"-" cbor:"6,keyasint"`
|
Status string `json:"-" cbor:"6,keyasint"`
|
||||||
Id string `json:"-" cbor:"7,keyasint"`
|
Id string `json:"-" cbor:"7,keyasint"`
|
||||||
Image string `json:"-" cbor:"8,keyasint"`
|
Image string `json:"-" cbor:"8,keyasint"`
|
||||||
|
Ports string `json:"-" cbor:"10,keyasint"`
|
||||||
// PrevCpu [2]uint64 `json:"-"`
|
// PrevCpu [2]uint64 `json:"-"`
|
||||||
CpuSystem uint64 `json:"-"`
|
CpuSystem uint64 `json:"-"`
|
||||||
CpuContainer uint64 `json:"-"`
|
CpuContainer uint64 `json:"-"`
|
||||||
|
|||||||
@@ -12,8 +12,9 @@ import (
|
|||||||
|
|
||||||
type Stats struct {
|
type Stats struct {
|
||||||
Cpu float64 `json:"cpu" cbor:"0,keyasint"`
|
Cpu float64 `json:"cpu" cbor:"0,keyasint"`
|
||||||
MaxCpu float64 `json:"cpum,omitempty" cbor:"1,keyasint,omitempty"`
|
MaxCpu float64 `json:"cpum,omitempty" cbor:"-"`
|
||||||
Mem float64 `json:"m" cbor:"2,keyasint"`
|
Mem float64 `json:"m" cbor:"2,keyasint"`
|
||||||
|
MaxMem float64 `json:"mm,omitempty" cbor:"-"`
|
||||||
MemUsed float64 `json:"mu" cbor:"3,keyasint"`
|
MemUsed float64 `json:"mu" cbor:"3,keyasint"`
|
||||||
MemPct float64 `json:"mp" cbor:"4,keyasint"`
|
MemPct float64 `json:"mp" cbor:"4,keyasint"`
|
||||||
MemBuffCache float64 `json:"mb" cbor:"5,keyasint"`
|
MemBuffCache float64 `json:"mb" cbor:"5,keyasint"`
|
||||||
@@ -23,26 +24,25 @@ type Stats struct {
|
|||||||
DiskTotal float64 `json:"d" cbor:"9,keyasint"`
|
DiskTotal float64 `json:"d" cbor:"9,keyasint"`
|
||||||
DiskUsed float64 `json:"du" cbor:"10,keyasint"`
|
DiskUsed float64 `json:"du" cbor:"10,keyasint"`
|
||||||
DiskPct float64 `json:"dp" cbor:"11,keyasint"`
|
DiskPct float64 `json:"dp" cbor:"11,keyasint"`
|
||||||
DiskReadPs float64 `json:"dr" cbor:"12,keyasint"`
|
DiskReadPs float64 `json:"dr,omitzero" cbor:"12,keyasint,omitzero"`
|
||||||
DiskWritePs float64 `json:"dw" cbor:"13,keyasint"`
|
DiskWritePs float64 `json:"dw,omitzero" cbor:"13,keyasint,omitzero"`
|
||||||
MaxDiskReadPs float64 `json:"drm,omitempty" cbor:"14,keyasint,omitempty"`
|
MaxDiskReadPs float64 `json:"drm,omitempty" cbor:"-"`
|
||||||
MaxDiskWritePs float64 `json:"dwm,omitempty" cbor:"15,keyasint,omitempty"`
|
MaxDiskWritePs float64 `json:"dwm,omitempty" cbor:"-"`
|
||||||
NetworkSent float64 `json:"ns,omitzero" cbor:"16,keyasint,omitzero"`
|
NetworkSent float64 `json:"ns,omitzero" cbor:"16,keyasint,omitzero"`
|
||||||
NetworkRecv float64 `json:"nr,omitzero" cbor:"17,keyasint,omitzero"`
|
NetworkRecv float64 `json:"nr,omitzero" cbor:"17,keyasint,omitzero"`
|
||||||
MaxNetworkSent float64 `json:"nsm,omitempty" cbor:"18,keyasint,omitempty"`
|
MaxNetworkSent float64 `json:"nsm,omitempty" cbor:"-"`
|
||||||
MaxNetworkRecv float64 `json:"nrm,omitempty" cbor:"19,keyasint,omitempty"`
|
MaxNetworkRecv float64 `json:"nrm,omitempty" cbor:"-"`
|
||||||
Temperatures map[string]float64 `json:"t,omitempty" cbor:"20,keyasint,omitempty"`
|
Temperatures map[string]float64 `json:"t,omitempty" cbor:"20,keyasint,omitempty"`
|
||||||
ExtraFs map[string]*FsStats `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
|
ExtraFs map[string]*FsStats `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
|
||||||
GPUData map[string]GPUData `json:"g,omitempty" cbor:"22,keyasint,omitempty"`
|
GPUData map[string]GPUData `json:"g,omitempty" cbor:"22,keyasint,omitempty"`
|
||||||
LoadAvg1 float64 `json:"l1,omitempty" cbor:"23,keyasint,omitempty"`
|
// LoadAvg1 float64 `json:"l1,omitempty" cbor:"23,keyasint,omitempty"`
|
||||||
LoadAvg5 float64 `json:"l5,omitempty" cbor:"24,keyasint,omitempty"`
|
// LoadAvg5 float64 `json:"l5,omitempty" cbor:"24,keyasint,omitempty"`
|
||||||
LoadAvg15 float64 `json:"l15,omitempty" cbor:"25,keyasint,omitempty"`
|
// LoadAvg15 float64 `json:"l15,omitempty" cbor:"25,keyasint,omitempty"`
|
||||||
Bandwidth [2]uint64 `json:"b,omitzero" cbor:"26,keyasint,omitzero"` // [sent bytes, recv bytes]
|
Bandwidth [2]uint64 `json:"b,omitzero" cbor:"26,keyasint,omitzero"` // [sent bytes, recv bytes]
|
||||||
MaxBandwidth [2]uint64 `json:"bm,omitzero" cbor:"27,keyasint,omitzero"` // [sent bytes, recv bytes]
|
MaxBandwidth [2]uint64 `json:"bm,omitzero" cbor:"-"` // [sent bytes, recv bytes]
|
||||||
// TODO: remove other load fields in future release in favor of load avg array
|
// TODO: remove other load fields in future release in favor of load avg array
|
||||||
LoadAvg [3]float64 `json:"la,omitempty" cbor:"28,keyasint"`
|
LoadAvg [3]float64 `json:"la,omitempty" cbor:"28,keyasint"`
|
||||||
Battery [2]uint8 `json:"bat,omitzero" cbor:"29,keyasint,omitzero"` // [percent, charge state, current]
|
Battery [2]uint8 `json:"bat,omitzero" cbor:"29,keyasint,omitzero"` // [percent, charge state, current]
|
||||||
MaxMem float64 `json:"mm,omitempty" cbor:"30,keyasint,omitempty"`
|
|
||||||
NetworkInterfaces map[string][4]uint64 `json:"ni,omitempty" cbor:"31,keyasint,omitempty"` // [upload bytes, download bytes, total upload, total download]
|
NetworkInterfaces map[string][4]uint64 `json:"ni,omitempty" cbor:"31,keyasint,omitempty"` // [upload bytes, download bytes, total upload, total download]
|
||||||
DiskIO [2]uint64 `json:"dio,omitzero" cbor:"32,keyasint,omitzero"` // [read bytes, write bytes]
|
DiskIO [2]uint64 `json:"dio,omitzero" cbor:"32,keyasint,omitzero"` // [read bytes, write bytes]
|
||||||
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
|
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
|
||||||
@@ -90,8 +90,8 @@ type FsStats struct {
|
|||||||
TotalWrite uint64 `json:"-"`
|
TotalWrite uint64 `json:"-"`
|
||||||
DiskReadPs float64 `json:"r" cbor:"2,keyasint"`
|
DiskReadPs float64 `json:"r" cbor:"2,keyasint"`
|
||||||
DiskWritePs float64 `json:"w" cbor:"3,keyasint"`
|
DiskWritePs float64 `json:"w" cbor:"3,keyasint"`
|
||||||
MaxDiskReadPS float64 `json:"rm,omitempty" cbor:"4,keyasint,omitempty"`
|
MaxDiskReadPS float64 `json:"rm,omitempty" cbor:"-"`
|
||||||
MaxDiskWritePS float64 `json:"wm,omitempty" cbor:"5,keyasint,omitempty"`
|
MaxDiskWritePS float64 `json:"wm,omitempty" cbor:"-"`
|
||||||
// TODO: remove DiskReadPs and DiskWritePs in future release in favor of DiskReadBytes and DiskWriteBytes
|
// TODO: remove DiskReadPs and DiskWritePs in future release in favor of DiskReadBytes and DiskWriteBytes
|
||||||
DiskReadBytes uint64 `json:"rb" cbor:"6,keyasint,omitempty"`
|
DiskReadBytes uint64 `json:"rb" cbor:"6,keyasint,omitempty"`
|
||||||
DiskWriteBytes uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
|
DiskWriteBytes uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
|
||||||
@@ -129,23 +129,23 @@ type Info struct {
|
|||||||
KernelVersion string `json:"k,omitempty" cbor:"1,keyasint,omitempty"` // deprecated - moved to Details struct
|
KernelVersion string `json:"k,omitempty" cbor:"1,keyasint,omitempty"` // deprecated - moved to Details struct
|
||||||
Cores int `json:"c,omitzero" cbor:"2,keyasint,omitzero"` // deprecated - moved to Details struct
|
Cores int `json:"c,omitzero" cbor:"2,keyasint,omitzero"` // deprecated - moved to Details struct
|
||||||
// Threads is needed in Info struct to calculate load average thresholds
|
// Threads is needed in Info struct to calculate load average thresholds
|
||||||
Threads int `json:"t,omitempty" cbor:"3,keyasint,omitempty"`
|
Threads int `json:"t,omitempty" cbor:"3,keyasint,omitempty"`
|
||||||
CpuModel string `json:"m,omitempty" cbor:"4,keyasint,omitempty"` // deprecated - moved to Details struct
|
CpuModel string `json:"m,omitempty" cbor:"4,keyasint,omitempty"` // deprecated - moved to Details struct
|
||||||
Uptime uint64 `json:"u" cbor:"5,keyasint"`
|
Uptime uint64 `json:"u" cbor:"5,keyasint"`
|
||||||
Cpu float64 `json:"cpu" cbor:"6,keyasint"`
|
Cpu float64 `json:"cpu" cbor:"6,keyasint"`
|
||||||
MemPct float64 `json:"mp" cbor:"7,keyasint"`
|
MemPct float64 `json:"mp" cbor:"7,keyasint"`
|
||||||
DiskPct float64 `json:"dp" cbor:"8,keyasint"`
|
DiskPct float64 `json:"dp" cbor:"8,keyasint"`
|
||||||
Bandwidth float64 `json:"b" cbor:"9,keyasint"`
|
Bandwidth float64 `json:"b,omitzero" cbor:"9,keyasint"` // deprecated in favor of BandwidthBytes
|
||||||
AgentVersion string `json:"v" cbor:"10,keyasint"`
|
AgentVersion string `json:"v" cbor:"10,keyasint"`
|
||||||
Podman bool `json:"p,omitempty" cbor:"11,keyasint,omitempty"` // deprecated - moved to Details struct
|
Podman bool `json:"p,omitempty" cbor:"11,keyasint,omitempty"` // deprecated - moved to Details struct
|
||||||
GpuPct float64 `json:"g,omitempty" cbor:"12,keyasint,omitempty"`
|
GpuPct float64 `json:"g,omitempty" cbor:"12,keyasint,omitempty"`
|
||||||
DashboardTemp float64 `json:"dt,omitempty" cbor:"13,keyasint,omitempty"`
|
DashboardTemp float64 `json:"dt,omitempty" cbor:"13,keyasint,omitempty"`
|
||||||
Os Os `json:"os,omitempty" cbor:"14,keyasint,omitempty"` // deprecated - moved to Details struct
|
Os Os `json:"os,omitempty" cbor:"14,keyasint,omitempty"` // deprecated - moved to Details struct
|
||||||
LoadAvg1 float64 `json:"l1,omitempty" cbor:"15,keyasint,omitempty"` // deprecated - use `la` array instead
|
// LoadAvg1 float64 `json:"l1,omitempty" cbor:"15,keyasint,omitempty"` // deprecated - use `la` array instead
|
||||||
LoadAvg5 float64 `json:"l5,omitempty" cbor:"16,keyasint,omitempty"` // deprecated - use `la` array instead
|
// LoadAvg5 float64 `json:"l5,omitempty" cbor:"16,keyasint,omitempty"` // deprecated - use `la` array instead
|
||||||
LoadAvg15 float64 `json:"l15,omitempty" cbor:"17,keyasint,omitempty"` // deprecated - use `la` array instead
|
// LoadAvg15 float64 `json:"l15,omitempty" cbor:"17,keyasint,omitempty"` // deprecated - use `la` array instead
|
||||||
BandwidthBytes uint64 `json:"bb" cbor:"18,keyasint"`
|
|
||||||
|
|
||||||
|
BandwidthBytes uint64 `json:"bb" cbor:"18,keyasint"`
|
||||||
LoadAvg [3]float64 `json:"la,omitempty" cbor:"19,keyasint"`
|
LoadAvg [3]float64 `json:"la,omitempty" cbor:"19,keyasint"`
|
||||||
ConnectionType ConnectionType `json:"ct,omitempty" cbor:"20,keyasint,omitempty,omitzero"`
|
ConnectionType ConnectionType `json:"ct,omitempty" cbor:"20,keyasint,omitempty,omitzero"`
|
||||||
ExtraFsPct map[string]float64 `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
|
ExtraFsPct map[string]float64 `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
|
||||||
|
|||||||
@@ -110,21 +110,13 @@ func (p *updater) update() (updated bool, err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var latest *release
|
var latest *release
|
||||||
var useMirror bool
|
|
||||||
|
|
||||||
// Determine the API endpoint based on UseMirror flag
|
apiURL := getApiURL(p.config.UseMirror, p.config.Owner, p.config.Repo)
|
||||||
apiURL := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", p.config.Owner, p.config.Repo)
|
|
||||||
if p.config.UseMirror {
|
if p.config.UseMirror {
|
||||||
useMirror = true
|
|
||||||
apiURL = fmt.Sprintf("https://gh.beszel.dev/repos/%s/%s/releases/latest?api=true", p.config.Owner, p.config.Repo)
|
|
||||||
ColorPrint(ColorYellow, "Using mirror for update.")
|
ColorPrint(ColorYellow, "Using mirror for update.")
|
||||||
}
|
}
|
||||||
|
|
||||||
latest, err = fetchLatestRelease(
|
latest, err = FetchLatestRelease(p.config.Context, p.config.HttpClient, apiURL)
|
||||||
p.config.Context,
|
|
||||||
p.config.HttpClient,
|
|
||||||
apiURL,
|
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@@ -150,7 +142,7 @@ func (p *updater) update() (updated bool, err error) {
|
|||||||
|
|
||||||
// download the release asset
|
// download the release asset
|
||||||
assetPath := filepath.Join(releaseDir, asset.Name)
|
assetPath := filepath.Join(releaseDir, asset.Name)
|
||||||
if err := downloadFile(p.config.Context, p.config.HttpClient, asset.DownloadUrl, assetPath, useMirror); err != nil {
|
if err := downloadFile(p.config.Context, p.config.HttpClient, asset.DownloadUrl, assetPath, p.config.UseMirror); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -226,11 +218,11 @@ func (p *updater) update() (updated bool, err error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func fetchLatestRelease(
|
func FetchLatestRelease(ctx context.Context, client HttpClient, url string) (*release, error) {
|
||||||
ctx context.Context,
|
if url == "" {
|
||||||
client HttpClient,
|
url = getApiURL(false, "henrygd", "beszel")
|
||||||
url string,
|
}
|
||||||
) (*release, error) {
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -375,3 +367,10 @@ func isGlibc() bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getApiURL(useMirror bool, owner, repo string) string {
|
||||||
|
if useMirror {
|
||||||
|
return fmt.Sprintf("https://gh.beszel.dev/repos/%s/%s/releases/latest?api=true", owner, repo)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", owner, repo)
|
||||||
|
}
|
||||||
|
|||||||
@@ -32,7 +32,7 @@ func createTestHub(t testing.TB) (*Hub, *pbtests.TestApp, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
return NewHub(testApp), testApp, nil
|
return NewHub(testApp), testApp, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// cleanupTestHub stops background system goroutines before tearing down the app.
|
// cleanupTestHub stops background system goroutines before tearing down the app.
|
||||||
@@ -897,12 +897,8 @@ func TestAgentWebSocketIntegration(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Set up environment variables for the agent
|
// Set up environment variables for the agent
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", tc.agentToken)
|
t.Setenv("BESZEL_AGENT_TOKEN", tc.agentToken)
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Start agent in background
|
// Start agent in background
|
||||||
done := make(chan error, 1)
|
done := make(chan error, 1)
|
||||||
@@ -1080,12 +1076,8 @@ func TestMultipleSystemsWithSameUniversalToken(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Set up environment variables for the agent
|
// Set up environment variables for the agent
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", universalToken)
|
t.Setenv("BESZEL_AGENT_TOKEN", universalToken)
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Count systems before connection
|
// Count systems before connection
|
||||||
systemsBefore, err := testApp.FindRecordsByFilter("systems", "users ~ {:userId}", "", -1, 0, map[string]any{"userId": userRecord.Id})
|
systemsBefore, err := testApp.FindRecordsByFilter("systems", "users ~ {:userId}", "", -1, 0, map[string]any{"userId": userRecord.Id})
|
||||||
@@ -1243,12 +1235,8 @@ func TestPermanentUniversalTokenFromDB(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Set up environment variables for the agent
|
// Set up environment variables for the agent
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", ts.URL)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", universalToken)
|
t.Setenv("BESZEL_AGENT_TOKEN", universalToken)
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Start agent in background
|
// Start agent in background
|
||||||
done := make(chan error, 1)
|
done := make(chan error, 1)
|
||||||
|
|||||||
361
internal/hub/api.go
Normal file
361
internal/hub/api.go
Normal file
@@ -0,0 +1,361 @@
|
|||||||
|
package hub
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/blang/semver"
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
|
"github.com/henrygd/beszel/internal/ghupdate"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/config"
|
||||||
|
"github.com/henrygd/beszel/internal/hub/systems"
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/apis"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UpdateInfo holds information about the latest update check
|
||||||
|
type UpdateInfo struct {
|
||||||
|
lastCheck time.Time
|
||||||
|
Version string `json:"v"`
|
||||||
|
Url string `json:"url"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// registerMiddlewares registers custom middlewares
|
||||||
|
func (h *Hub) registerMiddlewares(se *core.ServeEvent) {
|
||||||
|
// authorizes request with user matching the provided email
|
||||||
|
authorizeRequestWithEmail := func(e *core.RequestEvent, email string) (err error) {
|
||||||
|
if e.Auth != nil || email == "" {
|
||||||
|
return e.Next()
|
||||||
|
}
|
||||||
|
isAuthRefresh := e.Request.URL.Path == "/api/collections/users/auth-refresh" && e.Request.Method == http.MethodPost
|
||||||
|
e.Auth, err = e.App.FindFirstRecordByData("users", "email", email)
|
||||||
|
if err != nil || !isAuthRefresh {
|
||||||
|
return e.Next()
|
||||||
|
}
|
||||||
|
// auth refresh endpoint, make sure token is set in header
|
||||||
|
token, _ := e.Auth.NewAuthToken()
|
||||||
|
e.Request.Header.Set("Authorization", token)
|
||||||
|
return e.Next()
|
||||||
|
}
|
||||||
|
// authenticate with trusted header
|
||||||
|
if autoLogin, _ := GetEnv("AUTO_LOGIN"); autoLogin != "" {
|
||||||
|
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
||||||
|
return authorizeRequestWithEmail(e, autoLogin)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
// authenticate with trusted header
|
||||||
|
if trustedHeader, _ := GetEnv("TRUSTED_AUTH_HEADER"); trustedHeader != "" {
|
||||||
|
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
||||||
|
return authorizeRequestWithEmail(e, e.Request.Header.Get(trustedHeader))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// registerApiRoutes registers custom API routes
|
||||||
|
func (h *Hub) registerApiRoutes(se *core.ServeEvent) error {
|
||||||
|
// auth protected routes
|
||||||
|
apiAuth := se.Router.Group("/api/beszel")
|
||||||
|
apiAuth.Bind(apis.RequireAuth())
|
||||||
|
// auth optional routes
|
||||||
|
apiNoAuth := se.Router.Group("/api/beszel")
|
||||||
|
|
||||||
|
// create first user endpoint only needed if no users exist
|
||||||
|
if totalUsers, _ := se.App.CountRecords("users"); totalUsers == 0 {
|
||||||
|
apiNoAuth.POST("/create-user", h.um.CreateFirstUser)
|
||||||
|
}
|
||||||
|
// check if first time setup on login page
|
||||||
|
apiNoAuth.GET("/first-run", func(e *core.RequestEvent) error {
|
||||||
|
total, err := e.App.CountRecords("users")
|
||||||
|
return e.JSON(http.StatusOK, map[string]bool{"firstRun": err == nil && total == 0})
|
||||||
|
})
|
||||||
|
// get public key and version
|
||||||
|
apiAuth.GET("/info", h.getInfo)
|
||||||
|
apiAuth.GET("/getkey", h.getInfo) // deprecated - keep for compatibility w/ integrations
|
||||||
|
// check for updates
|
||||||
|
if optIn, _ := GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||||
|
var updateInfo UpdateInfo
|
||||||
|
apiAuth.GET("/update", updateInfo.getUpdate)
|
||||||
|
}
|
||||||
|
// send test notification
|
||||||
|
apiAuth.POST("/test-notification", h.SendTestNotification)
|
||||||
|
// heartbeat status and test
|
||||||
|
apiAuth.GET("/heartbeat-status", h.getHeartbeatStatus)
|
||||||
|
apiAuth.POST("/test-heartbeat", h.testHeartbeat)
|
||||||
|
// get config.yml content
|
||||||
|
apiAuth.GET("/config-yaml", config.GetYamlConfig)
|
||||||
|
// handle agent websocket connection
|
||||||
|
apiNoAuth.GET("/agent-connect", h.handleAgentConnect)
|
||||||
|
// get or create universal tokens
|
||||||
|
apiAuth.GET("/universal-token", h.getUniversalToken)
|
||||||
|
// update / delete user alerts
|
||||||
|
apiAuth.POST("/user-alerts", alerts.UpsertUserAlerts)
|
||||||
|
apiAuth.DELETE("/user-alerts", alerts.DeleteUserAlerts)
|
||||||
|
// refresh SMART devices for a system
|
||||||
|
apiAuth.POST("/smart/refresh", h.refreshSmartData)
|
||||||
|
// get systemd service details
|
||||||
|
apiAuth.GET("/systemd/info", h.getSystemdInfo)
|
||||||
|
// /containers routes
|
||||||
|
if enabled, _ := GetEnv("CONTAINER_DETAILS"); enabled != "false" {
|
||||||
|
// get container logs
|
||||||
|
apiAuth.GET("/containers/logs", h.getContainerLogs)
|
||||||
|
// get container info
|
||||||
|
apiAuth.GET("/containers/info", h.getContainerInfo)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getInfo returns data needed by authenticated users, such as the public key and current version
|
||||||
|
func (h *Hub) getInfo(e *core.RequestEvent) error {
|
||||||
|
type infoResponse struct {
|
||||||
|
Key string `json:"key"`
|
||||||
|
Version string `json:"v"`
|
||||||
|
CheckUpdate bool `json:"cu"`
|
||||||
|
}
|
||||||
|
info := infoResponse{
|
||||||
|
Key: h.pubKey,
|
||||||
|
Version: beszel.Version,
|
||||||
|
}
|
||||||
|
if optIn, _ := GetEnv("CHECK_UPDATES"); optIn == "true" {
|
||||||
|
info.CheckUpdate = true
|
||||||
|
}
|
||||||
|
return e.JSON(http.StatusOK, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getUpdate checks for the latest release on GitHub and returns update info if a newer version is available
|
||||||
|
func (info *UpdateInfo) getUpdate(e *core.RequestEvent) error {
|
||||||
|
if time.Since(info.lastCheck) < 6*time.Hour {
|
||||||
|
return e.JSON(http.StatusOK, info)
|
||||||
|
}
|
||||||
|
info.lastCheck = time.Now()
|
||||||
|
latestRelease, err := ghupdate.FetchLatestRelease(context.Background(), http.DefaultClient, "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
currentVersion, err := semver.Parse(strings.TrimPrefix(beszel.Version, "v"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
latestVersion, err := semver.Parse(strings.TrimPrefix(latestRelease.Tag, "v"))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if latestVersion.GT(currentVersion) {
|
||||||
|
info.Version = strings.TrimPrefix(latestRelease.Tag, "v")
|
||||||
|
info.Url = latestRelease.Url
|
||||||
|
}
|
||||||
|
return e.JSON(http.StatusOK, info)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUniversalToken handles the universal token API endpoint (create, read, delete)
|
||||||
|
func (h *Hub) getUniversalToken(e *core.RequestEvent) error {
|
||||||
|
tokenMap := universalTokenMap.GetMap()
|
||||||
|
userID := e.Auth.Id
|
||||||
|
query := e.Request.URL.Query()
|
||||||
|
token := query.Get("token")
|
||||||
|
enable := query.Get("enable")
|
||||||
|
permanent := query.Get("permanent")
|
||||||
|
|
||||||
|
// helper for deleting any existing permanent token record for this user
|
||||||
|
deletePermanent := func() error {
|
||||||
|
rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
|
||||||
|
if err != nil {
|
||||||
|
return nil // no record
|
||||||
|
}
|
||||||
|
return h.Delete(rec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// helper for upserting a permanent token record for this user
|
||||||
|
upsertPermanent := func(token string) error {
|
||||||
|
rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
|
||||||
|
if err == nil {
|
||||||
|
rec.Set("token", token)
|
||||||
|
return h.Save(rec)
|
||||||
|
}
|
||||||
|
|
||||||
|
col, err := h.FindCachedCollectionByNameOrId("universal_tokens")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
newRec := core.NewRecord(col)
|
||||||
|
newRec.Set("user", userID)
|
||||||
|
newRec.Set("token", token)
|
||||||
|
return h.Save(newRec)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Disable universal tokens (both ephemeral and permanent)
|
||||||
|
if enable == "0" {
|
||||||
|
tokenMap.RemovebyValue(userID)
|
||||||
|
_ = deletePermanent()
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enable universal token (ephemeral or permanent)
|
||||||
|
if enable == "1" {
|
||||||
|
if token == "" {
|
||||||
|
token = uuid.New().String()
|
||||||
|
}
|
||||||
|
|
||||||
|
if permanent == "1" {
|
||||||
|
// make token permanent (persist across restarts)
|
||||||
|
tokenMap.RemovebyValue(userID)
|
||||||
|
if err := upsertPermanent(token); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": true})
|
||||||
|
}
|
||||||
|
|
||||||
|
// default: ephemeral mode (1 hour)
|
||||||
|
_ = deletePermanent()
|
||||||
|
tokenMap.Set(token, userID, time.Hour)
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read current state
|
||||||
|
// Prefer permanent token if it exists.
|
||||||
|
if rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID}); err == nil {
|
||||||
|
dbToken := rec.GetString("token")
|
||||||
|
// If no token was provided, or the caller is asking about their permanent token, return it.
|
||||||
|
if token == "" || token == dbToken {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": dbToken, "active": true, "permanent": true})
|
||||||
|
}
|
||||||
|
// Token doesn't match their permanent token (avoid leaking other info)
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
|
||||||
|
}
|
||||||
|
|
||||||
|
// No permanent token; fall back to ephemeral token map.
|
||||||
|
if token == "" {
|
||||||
|
// return existing token if it exists
|
||||||
|
if token, _, ok := tokenMap.GetByValue(userID); ok {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
|
||||||
|
}
|
||||||
|
// if no token is provided, generate a new one
|
||||||
|
token = uuid.New().String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Token is considered active only if it belongs to the current user.
|
||||||
|
activeUser, ok := tokenMap.GetOk(token)
|
||||||
|
active := ok && activeUser == userID
|
||||||
|
response := map[string]any{"token": token, "active": active, "permanent": false}
|
||||||
|
return e.JSON(http.StatusOK, response)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getHeartbeatStatus returns current heartbeat configuration and whether it's enabled
|
||||||
|
func (h *Hub) getHeartbeatStatus(e *core.RequestEvent) error {
|
||||||
|
if e.Auth.GetString("role") != "admin" {
|
||||||
|
return e.ForbiddenError("Requires admin role", nil)
|
||||||
|
}
|
||||||
|
if h.hb == nil {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{
|
||||||
|
"enabled": false,
|
||||||
|
"msg": "Set HEARTBEAT_URL to enable outbound heartbeat monitoring",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
cfg := h.hb.GetConfig()
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{
|
||||||
|
"enabled": true,
|
||||||
|
"url": cfg.URL,
|
||||||
|
"interval": cfg.Interval,
|
||||||
|
"method": cfg.Method,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// testHeartbeat triggers a single heartbeat ping and returns the result
|
||||||
|
func (h *Hub) testHeartbeat(e *core.RequestEvent) error {
|
||||||
|
if e.Auth.GetString("role") != "admin" {
|
||||||
|
return e.ForbiddenError("Requires admin role", nil)
|
||||||
|
}
|
||||||
|
if h.hb == nil {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{
|
||||||
|
"err": "Heartbeat not configured. Set HEARTBEAT_URL environment variable.",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if err := h.hb.Send(); err != nil {
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"err": err.Error()})
|
||||||
|
}
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"err": false})
|
||||||
|
}
|
||||||
|
|
||||||
|
// containerRequestHandler handles both container logs and info requests
|
||||||
|
func (h *Hub) containerRequestHandler(e *core.RequestEvent, fetchFunc func(*systems.System, string) (string, error), responseKey string) error {
|
||||||
|
systemID := e.Request.URL.Query().Get("system")
|
||||||
|
containerID := e.Request.URL.Query().Get("container")
|
||||||
|
|
||||||
|
if systemID == "" || containerID == "" {
|
||||||
|
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and container parameters are required"})
|
||||||
|
}
|
||||||
|
if !containerIDPattern.MatchString(containerID) {
|
||||||
|
return e.JSON(http.StatusBadRequest, map[string]string{"error": "invalid container parameter"})
|
||||||
|
}
|
||||||
|
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := fetchFunc(system, containerID)
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.JSON(http.StatusOK, map[string]string{responseKey: data})
|
||||||
|
}
|
||||||
|
|
||||||
|
// getContainerLogs handles GET /api/beszel/containers/logs requests
|
||||||
|
func (h *Hub) getContainerLogs(e *core.RequestEvent) error {
|
||||||
|
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
||||||
|
return system.FetchContainerLogsFromAgent(containerID)
|
||||||
|
}, "logs")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Hub) getContainerInfo(e *core.RequestEvent) error {
|
||||||
|
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
||||||
|
return system.FetchContainerInfoFromAgent(containerID)
|
||||||
|
}, "info")
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSystemdInfo handles GET /api/beszel/systemd/info requests
|
||||||
|
func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
||||||
|
query := e.Request.URL.Query()
|
||||||
|
systemID := query.Get("system")
|
||||||
|
serviceName := query.Get("service")
|
||||||
|
|
||||||
|
if systemID == "" || serviceName == "" {
|
||||||
|
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and service parameters are required"})
|
||||||
|
}
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||||
|
}
|
||||||
|
details, err := system.FetchSystemdInfoFromAgent(serviceName)
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
||||||
|
}
|
||||||
|
e.Response.Header().Set("Cache-Control", "public, max-age=60")
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"details": details})
|
||||||
|
}
|
||||||
|
|
||||||
|
// refreshSmartData handles POST /api/beszel/smart/refresh requests
|
||||||
|
// Fetches fresh SMART data from the agent and updates the collection
|
||||||
|
func (h *Hub) refreshSmartData(e *core.RequestEvent) error {
|
||||||
|
systemID := e.Request.URL.Query().Get("system")
|
||||||
|
if systemID == "" {
|
||||||
|
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system parameter is required"})
|
||||||
|
}
|
||||||
|
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch and save SMART devices
|
||||||
|
if err := system.FetchAndSaveSmartDevices(); err != nil {
|
||||||
|
return e.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.JSON(http.StatusOK, map[string]string{"status": "ok"})
|
||||||
|
}
|
||||||
780
internal/hub/api_test.go
Normal file
780
internal/hub/api_test.go
Normal file
@@ -0,0 +1,780 @@
|
|||||||
|
package hub_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/migrations"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
||||||
|
func jsonReader(v any) io.Reader {
|
||||||
|
data, err := json.Marshal(v)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return bytes.NewReader(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestApiRoutesAuthentication(t *testing.T) {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
// Create test user and get auth token
|
||||||
|
user, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
||||||
|
require.NoError(t, err, "Failed to create test user")
|
||||||
|
|
||||||
|
adminUser, err := beszelTests.CreateRecord(hub, "users", map[string]any{
|
||||||
|
"email": "admin@example.com",
|
||||||
|
"password": "password123",
|
||||||
|
"role": "admin",
|
||||||
|
})
|
||||||
|
require.NoError(t, err, "Failed to create admin user")
|
||||||
|
adminUserToken, err := adminUser.NewAuthToken()
|
||||||
|
|
||||||
|
// superUser, err := beszelTests.CreateRecord(hub, core.CollectionNameSuperusers, map[string]any{
|
||||||
|
// "email": "superuser@example.com",
|
||||||
|
// "password": "password123",
|
||||||
|
// })
|
||||||
|
// require.NoError(t, err, "Failed to create superuser")
|
||||||
|
|
||||||
|
userToken, err := user.NewAuthToken()
|
||||||
|
require.NoError(t, err, "Failed to create auth token")
|
||||||
|
|
||||||
|
// Create test system for user-alerts endpoints
|
||||||
|
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "test-system",
|
||||||
|
"users": []string{user.Id},
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
})
|
||||||
|
require.NoError(t, err, "Failed to create test system")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
// Auth Protected Routes - Should require authentication
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - no auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://127.0.0.1",
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - with auth should succeed",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://127.0.0.1",
|
||||||
|
}),
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"sending message"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /config-yaml - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/config-yaml",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /config-yaml - with user auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/config-yaml",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"Requires admin"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /config-yaml - with admin auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/config-yaml",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": adminUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"test-system"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /heartbeat-status - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/heartbeat-status",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /heartbeat-status - with user auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/heartbeat-status",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"Requires admin role"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /heartbeat-status - with admin auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/heartbeat-status",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": adminUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{`"enabled":false`},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-heartbeat - with user auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-heartbeat",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 403,
|
||||||
|
ExpectedContent: []string{"Requires admin role"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-heartbeat - with admin auth should report disabled state",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-heartbeat",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": adminUserToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"Heartbeat not configured"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - with auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"active", "token", "permanent"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /universal-token - enable permanent should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/universal-token?enable=1&permanent=1&token=permanent-token-123",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"permanent\":true", "permanent-token-123"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /user-alerts - no auth should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /user-alerts - with auth should succeed",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE /user-alerts - no auth should fail",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DELETE /user-alerts - with auth should succeed",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"success\":true"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
// Create an alert to delete
|
||||||
|
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"system": system.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
})
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=test-system&container=test-container",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but missing system param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?container=test-container",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"system and container parameters are required"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but missing container param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=test-system",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"system and container parameters are required"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but invalid system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=invalid-system&container=0123456789ab",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"system not found"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - traversal container should fail validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=" + system.Id + "&container=..%2F..%2Fversion",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"invalid container parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - traversal container should fail validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=../../version?x=",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"invalid container parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/info - non-hex container should fail validation",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=container_name",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"invalid container parameter"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
|
||||||
|
// Auth Optional Routes - Should work without authentication
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /getkey - with auth should also succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/getkey",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /info - should return the same as /getkey",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/info",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /first-run - no auth should succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/first-run",
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"firstRun\":false"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /first-run - with auth should also succeed",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/first-run",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{"\"firstRun\":false"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /agent-connect - no auth should succeed (websocket upgrade fails but route is accessible)",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/agent-connect",
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /test-notification - invalid auth token should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/test-notification",
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"url": "generic://127.0.0.1",
|
||||||
|
}),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "invalid-token",
|
||||||
|
},
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "POST /user-alerts - invalid auth token should fail",
|
||||||
|
Method: http.MethodPost,
|
||||||
|
URL: "/api/beszel/user-alerts",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": "invalid-token",
|
||||||
|
},
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
Body: jsonReader(map[string]any{
|
||||||
|
"name": "CPU",
|
||||||
|
"value": 80,
|
||||||
|
"min": 10,
|
||||||
|
"systems": []string{system.Id},
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /update - shouldn't exist without CHECK_UPDATES env var",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/update",
|
||||||
|
ExpectedStatus: 502,
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestFirstUserCreation exercises the /api/beszel/create-user endpoint:
// it is served only while the users collection is empty, and it is not
// registered at all when BESZEL_HUB_USER_EMAIL / BESZEL_HUB_USER_PASSWORD
// pre-provision the first user at startup.
func TestFirstUserCreation(t *testing.T) {
	t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
		hub, _ := beszelTests.NewTestHub(t.TempDir())
		defer hub.Cleanup()

		hub.StartHub()

		// All scenarios run against the same hub instance so the second
		// scenario observes the user created by the first.
		testAppFactoryExisting := func(t testing.TB) *pbTests.TestApp {
			return hub.TestApp
		}

		scenarios := []beszelTests.ApiScenario{
			{
				Name:   "POST /create-user - should be available when no users exist",
				Method: http.MethodPost,
				URL:    "/api/beszel/create-user",
				Body: jsonReader(map[string]any{
					"email":    "firstuser@example.com",
					"password": "password123",
				}),
				ExpectedStatus:  200,
				ExpectedContent: []string{"User created"},
				TestAppFactory:  testAppFactoryExisting,
				// Before the request: no users, only the temporary superuser
				// created by the migrations.
				BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
					userCount, err := hub.CountRecords("users")
					require.NoError(t, err)
					require.Zero(t, userCount, "Should start with no users")
					superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
					require.NoError(t, err)
					require.EqualValues(t, 1, len(superusers), "Should start with one temporary superuser")
					require.EqualValues(t, migrations.TempAdminEmail, superusers[0].GetString("email"), "Should have created one temporary superuser")
				},
				// After the request: one user exists and the temporary
				// superuser has been replaced by the new account's email.
				AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
					userCount, err := hub.CountRecords("users")
					require.NoError(t, err)
					require.EqualValues(t, 1, userCount, "Should have created one user")
					superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
					require.NoError(t, err)
					require.EqualValues(t, 1, len(superusers), "Should have created one superuser")
					require.EqualValues(t, "firstuser@example.com", superusers[0].GetString("email"), "Should have created one superuser")
				},
			},
			{
				// Second call against the same hub: a user now exists, so the
				// route should no longer be registered.
				Name:   "POST /create-user - should not be available when users exist",
				Method: http.MethodPost,
				URL:    "/api/beszel/create-user",
				Body: jsonReader(map[string]any{
					"email":    "firstuser@example.com",
					"password": "password123",
				}),
				ExpectedStatus:  404,
				ExpectedContent: []string{"wasn't found"},
				TestAppFactory:  testAppFactoryExisting,
			},
		}

		for _, scenario := range scenarios {
			scenario.Test(t)
		}
	})

	t.Run("CreateUserEndpoint not available when USER_EMAIL, USER_PASSWORD are set", func(t *testing.T) {
		// Pre-provision the first user via env vars before the hub starts.
		t.Setenv("BESZEL_HUB_USER_EMAIL", "me@example.com")
		t.Setenv("BESZEL_HUB_USER_PASSWORD", "password123")

		hub, _ := beszelTests.NewTestHub(t.TempDir())
		defer hub.Cleanup()

		hub.StartHub()

		testAppFactory := func(t testing.TB) *pbTests.TestApp {
			return hub.TestApp
		}

		scenario := beszelTests.ApiScenario{
			Name:            "POST /create-user - should not be available when USER_EMAIL, USER_PASSWORD are set",
			Method:          http.MethodPost,
			URL:             "/api/beszel/create-user",
			ExpectedStatus:  404,
			ExpectedContent: []string{"wasn't found"},
			TestAppFactory:  testAppFactory,
			// The env-provisioned account should exist both as a user and as
			// a superuser before the request is made.
			BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
				users, err := hub.FindAllRecords("users")
				require.NoError(t, err)
				require.EqualValues(t, 1, len(users), "Should start with one user")
				require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
				superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
				require.NoError(t, err)
				require.EqualValues(t, 1, len(superusers), "Should start with one superuser")
				require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
			},
			// The 404 must not have changed anything.
			AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
				users, err := hub.FindAllRecords("users")
				require.NoError(t, err)
				require.EqualValues(t, 1, len(users), "Should still have one user")
				require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
				superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
				require.NoError(t, err)
				require.EqualValues(t, 1, len(superusers), "Should still have one superuser")
				require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
			},
		}

		scenario.Test(t)
	})
}
|
||||||
|
|
||||||
|
// TestCreateUserEndpointAvailability verifies that /api/beszel/create-user is
// registered only while the users collection is empty: it succeeds on a fresh
// hub and returns 404 once any user exists.
func TestCreateUserEndpointAvailability(t *testing.T) {
	t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
		hub, _ := beszelTests.NewTestHub(t.TempDir())
		defer hub.Cleanup()

		// Ensure no users exist before the hub starts.
		userCount, err := hub.CountRecords("users")
		require.NoError(t, err)
		require.Zero(t, userCount, "Should start with no users")

		hub.StartHub()

		testAppFactory := func(t testing.TB) *pbTests.TestApp {
			return hub.TestApp
		}

		scenario := beszelTests.ApiScenario{
			Name:   "POST /create-user - should be available when no users exist",
			Method: http.MethodPost,
			URL:    "/api/beszel/create-user",
			Body: jsonReader(map[string]any{
				"email":    "firstuser@example.com",
				"password": "password123",
			}),
			ExpectedStatus:  200,
			ExpectedContent: []string{"User created"},
			TestAppFactory:  testAppFactory,
		}

		scenario.Test(t)

		// Verify the request actually created the user record.
		userCount, err = hub.CountRecords("users")
		require.NoError(t, err)
		require.EqualValues(t, 1, userCount, "Should have created one user")
	})

	t.Run("CreateUserEndpoint not available when users exist", func(t *testing.T) {
		hub, _ := beszelTests.NewTestHub(t.TempDir())
		defer hub.Cleanup()

		// Create a user first so the route is never registered.
		_, err := beszelTests.CreateUser(hub, "existing@example.com", "password")
		require.NoError(t, err)

		hub.StartHub()

		testAppFactory := func(t testing.TB) *pbTests.TestApp {
			return hub.TestApp
		}

		scenario := beszelTests.ApiScenario{
			Name:   "POST /create-user - should not be available when users exist",
			Method: http.MethodPost,
			URL:    "/api/beszel/create-user",
			Body: jsonReader(map[string]any{
				"email":    "another@example.com",
				"password": "password123",
			}),
			ExpectedStatus:  404,
			ExpectedContent: []string{"wasn't found"},
			TestAppFactory:  testAppFactory,
		}

		scenario.Test(t)
	})
}
|
||||||
|
|
||||||
|
// TestAutoLoginMiddleware verifies AUTO_LOGIN behavior on an auth-required
// route: requests are only authenticated when a user matching the configured
// email actually exists.
func TestAutoLoginMiddleware(t *testing.T) {
	// Each scenario creates its own hub via the factory; collect them all so
	// they can be cleaned up after the test finishes.
	var hubs []*beszelTests.TestHub

	defer func() {
		for _, hub := range hubs {
			hub.Cleanup()
		}
	}()

	t.Setenv("AUTO_LOGIN", "user@test.com")

	testAppFactory := func(t testing.TB) *pbTests.TestApp {
		hub, _ := beszelTests.NewTestHub(t.TempDir())
		hubs = append(hubs, hub)
		hub.StartHub()
		return hub.TestApp
	}

	scenarios := []beszelTests.ApiScenario{
		{
			Name:            "GET /getkey - without auto login should fail",
			Method:          http.MethodGet,
			URL:             "/api/beszel/getkey",
			ExpectedStatus:  401,
			ExpectedContent: []string{"requires valid"},
			TestAppFactory:  testAppFactory,
		},
		{
			// AUTO_LOGIN is set, but no user with that email exists in this
			// fresh hub, so the request must still be rejected.
			Name:            "GET /getkey - with auto login should fail if no matching user",
			Method:          http.MethodGet,
			URL:             "/api/beszel/getkey",
			ExpectedStatus:  401,
			ExpectedContent: []string{"requires valid"},
			TestAppFactory:  testAppFactory,
		},
		{
			Name:            "GET /getkey - with auto login should succeed",
			Method:          http.MethodGet,
			URL:             "/api/beszel/getkey",
			ExpectedStatus:  200,
			ExpectedContent: []string{"\"key\":", "\"v\":"},
			TestAppFactory:  testAppFactory,
			// Create the matching user so auto login can authenticate.
			BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
				beszelTests.CreateUser(app, "user@test.com", "password123")
			},
		},
	}

	for _, scenario := range scenarios {
		scenario.Test(t)
	}
}
|
||||||
|
|
||||||
|
// TestTrustedHeaderMiddleware verifies TRUSTED_AUTH_HEADER behavior on an
// auth-required route: the header only authenticates the request when a user
// matching the header's email exists.
func TestTrustedHeaderMiddleware(t *testing.T) {
	// Each scenario creates its own hub via the factory; collect them all so
	// they can be cleaned up after the test finishes.
	var hubs []*beszelTests.TestHub

	defer func() {
		for _, hub := range hubs {
			hub.Cleanup()
		}
	}()

	t.Setenv("TRUSTED_AUTH_HEADER", "X-Beszel-Trusted")

	testAppFactory := func(t testing.TB) *pbTests.TestApp {
		hub, _ := beszelTests.NewTestHub(t.TempDir())
		hubs = append(hubs, hub)
		hub.StartHub()
		return hub.TestApp
	}

	scenarios := []beszelTests.ApiScenario{
		{
			Name:            "GET /getkey - without trusted header should fail",
			Method:          http.MethodGet,
			URL:             "/api/beszel/getkey",
			ExpectedStatus:  401,
			ExpectedContent: []string{"requires valid"},
			TestAppFactory:  testAppFactory,
		},
		{
			// Header is present but no user with that email exists in this
			// fresh hub, so the request must still be rejected.
			Name:   "GET /getkey - with trusted header should fail if no matching user",
			Method: http.MethodGet,
			URL:    "/api/beszel/getkey",
			Headers: map[string]string{
				"X-Beszel-Trusted": "user@test.com",
			},
			ExpectedStatus:  401,
			ExpectedContent: []string{"requires valid"},
			TestAppFactory:  testAppFactory,
		},
		{
			Name:   "GET /getkey - with trusted header should succeed",
			Method: http.MethodGet,
			URL:    "/api/beszel/getkey",
			Headers: map[string]string{
				"X-Beszel-Trusted": "user@test.com",
			},
			ExpectedStatus:  200,
			ExpectedContent: []string{"\"key\":", "\"v\":"},
			TestAppFactory:  testAppFactory,
			// Create the matching user so the trusted header can resolve.
			BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
				beszelTests.CreateUser(app, "user@test.com", "password123")
			},
		},
	}

	for _, scenario := range scenarios {
		scenario.Test(t)
	}
}
|
||||||
|
|
||||||
|
func TestUpdateEndpoint(t *testing.T) {
|
||||||
|
t.Setenv("CHECK_UPDATES", "true")
|
||||||
|
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
// Create test user and get auth token
|
||||||
|
// user, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
||||||
|
// require.NoError(t, err, "Failed to create test user")
|
||||||
|
// userToken, err := user.NewAuthToken()
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "update endpoint shouldn't work without auth",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/update",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
// leave this out for now since it actually makes a request to github
|
||||||
|
// {
|
||||||
|
// Name: "GET /update - with valid auth should succeed",
|
||||||
|
// Method: http.MethodGet,
|
||||||
|
// URL: "/api/beszel/update",
|
||||||
|
// Headers: map[string]string{
|
||||||
|
// "Authorization": userToken,
|
||||||
|
// },
|
||||||
|
// ExpectedStatus: 200,
|
||||||
|
// ExpectedContent: []string{`"v":`},
|
||||||
|
// TestAppFactory: testAppFactory,
|
||||||
|
// },
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
128
internal/hub/collections.go
Normal file
128
internal/hub/collections.go
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
package hub
|
||||||
|
|
||||||
|
import "github.com/pocketbase/pocketbase/core"
|
||||||
|
|
||||||
|
// collectionRules bundles the five PocketBase API rules that can be applied
// to a collection by applyCollectionRules. A nil pointer clears the
// corresponding rule (NOTE(review): in PocketBase a nil rule restricts the
// action to superusers only — confirm against the PocketBase docs in use).
type collectionRules struct {
	list   *string // ListRule
	view   *string // ViewRule
	create *string // CreateRule
	update *string // UpdateRule
	delete *string // DeleteRule
}
|
||||||
|
|
||||||
|
// setCollectionAuthSettings applies Beszel's collection auth settings.
//
// It configures password auth, OAuth user creation, and MFA/OTP on the users
// and superusers collections based on environment variables, then applies the
// API access rules to the system-scoped collections. Returns the first error
// from any lookup or save.
func setCollectionAuthSettings(app core.App) error {
	usersCollection, err := app.FindCollectionByNameOrId("users")
	if err != nil {
		return err
	}
	superusersCollection, err := app.FindCollectionByNameOrId(core.CollectionNameSuperusers)
	if err != nil {
		return err
	}

	// Disable password auth when DISABLE_PASSWORD_AUTH is exactly "true";
	// any other value (or unset) leaves it enabled.
	disablePasswordAuth, _ := GetEnv("DISABLE_PASSWORD_AUTH")
	usersCollection.PasswordAuth.Enabled = disablePasswordAuth != "true"
	usersCollection.PasswordAuth.IdentityFields = []string{"email"}
	// Allow OAuth user creation only when USER_CREATION is "true";
	// otherwise a nil create rule blocks self-registration.
	if userCreation, _ := GetEnv("USER_CREATION"); userCreation == "true" {
		cr := "@request.context = 'oauth2'"
		usersCollection.CreateRule = &cr
	} else {
		usersCollection.CreateRule = nil
	}

	// MFA_OTP: "true" enables OTP+MFA for both users and superusers;
	// "superusers" enables it for superusers only.
	mfaOtp, _ := GetEnv("MFA_OTP")
	usersCollection.OTP.Length = 6
	superusersCollection.OTP.Length = 6
	usersCollection.OTP.Enabled = mfaOtp == "true"
	usersCollection.MFA.Enabled = mfaOtp == "true"
	superusersCollection.OTP.Enabled = mfaOtp == "true" || mfaOtp == "superusers"
	superusersCollection.MFA.Enabled = mfaOtp == "true" || mfaOtp == "superusers"
	if err := app.Save(superusersCollection); err != nil {
		return err
	}
	if err := app.Save(usersCollection); err != nil {
		return err
	}

	// When SHARE_ALL_SYSTEMS is enabled, any authenticated user can read
	// system-scoped data. Write rules continue to block readonly users.
	shareAllSystems, _ := GetEnv("SHARE_ALL_SYSTEMS")

	authenticatedRule := "@request.auth.id != \"\""
	// Membership rules: the requester must be listed in the record's users
	// relation (systems) or in its parent system's users relation.
	systemsMemberRule := authenticatedRule + " && users.id ?= @request.auth.id"
	systemMemberRule := authenticatedRule + " && system.users.id ?= @request.auth.id"

	systemsReadRule := systemsMemberRule
	systemScopedReadRule := systemMemberRule
	if shareAllSystems == "true" {
		systemsReadRule = authenticatedRule
		systemScopedReadRule = authenticatedRule
	}
	systemsWriteRule := systemsReadRule + " && @request.auth.role != \"readonly\""
	systemScopedWriteRule := systemScopedReadRule + " && @request.auth.role != \"readonly\""

	// systems: members can read; non-readonly members can write.
	if err := applyCollectionRules(app, []string{"systems"}, collectionRules{
		list:   &systemsReadRule,
		view:   &systemsReadRule,
		create: &systemsWriteRule,
		update: &systemsWriteRule,
		delete: &systemsWriteRule,
	}); err != nil {
		return err
	}

	// Stats-style collections: list only; all other actions are nil.
	if err := applyCollectionRules(app, []string{"containers", "container_stats", "system_stats", "systemd_services"}, collectionRules{
		list: &systemScopedReadRule,
	}); err != nil {
		return err
	}

	// smart_devices: readable, and deletable by non-readonly members.
	if err := applyCollectionRules(app, []string{"smart_devices"}, collectionRules{
		list:   &systemScopedReadRule,
		view:   &systemScopedReadRule,
		delete: &systemScopedWriteRule,
	}); err != nil {
		return err
	}

	// fingerprints: full CRUD for members, writes blocked for readonly users.
	if err := applyCollectionRules(app, []string{"fingerprints"}, collectionRules{
		list:   &systemScopedReadRule,
		view:   &systemScopedReadRule,
		create: &systemScopedWriteRule,
		update: &systemScopedWriteRule,
		delete: &systemScopedWriteRule,
	}); err != nil {
		return err
	}

	// system_details: read-only for members.
	if err := applyCollectionRules(app, []string{"system_details"}, collectionRules{
		list: &systemScopedReadRule,
		view: &systemScopedReadRule,
	}); err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
func applyCollectionRules(app core.App, collectionNames []string, rules collectionRules) error {
|
||||||
|
for _, collectionName := range collectionNames {
|
||||||
|
collection, err := app.FindCollectionByNameOrId(collectionName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
collection.ListRule = rules.list
|
||||||
|
collection.ViewRule = rules.view
|
||||||
|
collection.CreateRule = rules.create
|
||||||
|
collection.UpdateRule = rules.update
|
||||||
|
collection.DeleteRule = rules.delete
|
||||||
|
if err := app.Save(collection); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
527
internal/hub/collections_test.go
Normal file
527
internal/hub/collections_test.go
Normal file
@@ -0,0 +1,527 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package hub_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
pbTests "github.com/pocketbase/pocketbase/tests"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCollectionRulesDefault(t *testing.T) {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
const isUserMatchesUser = `@request.auth.id != "" && user = @request.auth.id`
|
||||||
|
|
||||||
|
const isUserInUsers = `@request.auth.id != "" && users.id ?= @request.auth.id`
|
||||||
|
const isUserInUsersNotReadonly = `@request.auth.id != "" && users.id ?= @request.auth.id && @request.auth.role != "readonly"`
|
||||||
|
|
||||||
|
const isUserInSystemUsers = `@request.auth.id != "" && system.users.id ?= @request.auth.id`
|
||||||
|
const isUserInSystemUsersNotReadonly = `@request.auth.id != "" && system.users.id ?= @request.auth.id && @request.auth.role != "readonly"`
|
||||||
|
|
||||||
|
// users collection
|
||||||
|
usersCollection, err := hub.FindCollectionByNameOrId("users")
|
||||||
|
assert.NoError(t, err, "Failed to find users collection")
|
||||||
|
assert.True(t, usersCollection.PasswordAuth.Enabled)
|
||||||
|
assert.Equal(t, usersCollection.PasswordAuth.IdentityFields, []string{"email"})
|
||||||
|
assert.Nil(t, usersCollection.CreateRule)
|
||||||
|
assert.False(t, usersCollection.MFA.Enabled)
|
||||||
|
|
||||||
|
// superusers collection
|
||||||
|
superusersCollection, err := hub.FindCollectionByNameOrId(core.CollectionNameSuperusers)
|
||||||
|
assert.NoError(t, err, "Failed to find superusers collection")
|
||||||
|
assert.True(t, superusersCollection.PasswordAuth.Enabled)
|
||||||
|
assert.Equal(t, superusersCollection.PasswordAuth.IdentityFields, []string{"email"})
|
||||||
|
assert.Nil(t, superusersCollection.CreateRule)
|
||||||
|
assert.False(t, superusersCollection.MFA.Enabled)
|
||||||
|
|
||||||
|
// alerts collection
|
||||||
|
alertsCollection, err := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
require.NoError(t, err, "Failed to find alerts collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.ListRule)
|
||||||
|
assert.Nil(t, alertsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// alerts_history collection
|
||||||
|
alertsHistoryCollection, err := hub.FindCollectionByNameOrId("alerts_history")
|
||||||
|
require.NoError(t, err, "Failed to find alerts_history collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsHistoryCollection.ListRule)
|
||||||
|
assert.Nil(t, alertsHistoryCollection.ViewRule)
|
||||||
|
assert.Nil(t, alertsHistoryCollection.CreateRule)
|
||||||
|
assert.Nil(t, alertsHistoryCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsHistoryCollection.DeleteRule)
|
||||||
|
|
||||||
|
// containers collection
|
||||||
|
containersCollection, err := hub.FindCollectionByNameOrId("containers")
|
||||||
|
require.NoError(t, err, "Failed to find containers collection")
|
||||||
|
assert.Equal(t, isUserInSystemUsers, *containersCollection.ListRule)
|
||||||
|
assert.Nil(t, containersCollection.ViewRule)
|
||||||
|
assert.Nil(t, containersCollection.CreateRule)
|
||||||
|
assert.Nil(t, containersCollection.UpdateRule)
|
||||||
|
assert.Nil(t, containersCollection.DeleteRule)
|
||||||
|
|
||||||
|
// container_stats collection
|
||||||
|
containerStatsCollection, err := hub.FindCollectionByNameOrId("container_stats")
|
||||||
|
require.NoError(t, err, "Failed to find container_stats collection")
|
||||||
|
assert.Equal(t, isUserInSystemUsers, *containerStatsCollection.ListRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.ViewRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.CreateRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// fingerprints collection
|
||||||
|
fingerprintsCollection, err := hub.FindCollectionByNameOrId("fingerprints")
|
||||||
|
require.NoError(t, err, "Failed to find fingerprints collection")
|
||||||
|
assert.Equal(t, isUserInSystemUsers, *fingerprintsCollection.ListRule)
|
||||||
|
assert.Equal(t, isUserInSystemUsers, *fingerprintsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserInSystemUsersNotReadonly, *fingerprintsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserInSystemUsersNotReadonly, *fingerprintsCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserInSystemUsersNotReadonly, *fingerprintsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// quiet_hours collection
|
||||||
|
quietHoursCollection, err := hub.FindCollectionByNameOrId("quiet_hours")
|
||||||
|
require.NoError(t, err, "Failed to find quiet_hours collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.ListRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.DeleteRule)
|
||||||
|
|
||||||
|
// smart_devices collection
|
||||||
|
smartDevicesCollection, err := hub.FindCollectionByNameOrId("smart_devices")
|
||||||
|
require.NoError(t, err, "Failed to find smart_devices collection")
|
||||||
|
assert.Equal(t, isUserInSystemUsers, *smartDevicesCollection.ListRule)
|
||||||
|
assert.Equal(t, isUserInSystemUsers, *smartDevicesCollection.ViewRule)
|
||||||
|
assert.Nil(t, smartDevicesCollection.CreateRule)
|
||||||
|
assert.Nil(t, smartDevicesCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserInSystemUsersNotReadonly, *smartDevicesCollection.DeleteRule)
|
||||||
|
|
||||||
|
// system_details collection
|
||||||
|
systemDetailsCollection, err := hub.FindCollectionByNameOrId("system_details")
|
||||||
|
require.NoError(t, err, "Failed to find system_details collection")
|
||||||
|
assert.Equal(t, isUserInSystemUsers, *systemDetailsCollection.ListRule)
|
||||||
|
assert.Equal(t, isUserInSystemUsers, *systemDetailsCollection.ViewRule)
|
||||||
|
assert.Nil(t, systemDetailsCollection.CreateRule)
|
||||||
|
assert.Nil(t, systemDetailsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, systemDetailsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// system_stats collection
|
||||||
|
systemStatsCollection, err := hub.FindCollectionByNameOrId("system_stats")
|
||||||
|
require.NoError(t, err, "Failed to find system_stats collection")
|
||||||
|
assert.Equal(t, isUserInSystemUsers, *systemStatsCollection.ListRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.ViewRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.CreateRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// systemd_services collection
|
||||||
|
systemdServicesCollection, err := hub.FindCollectionByNameOrId("systemd_services")
|
||||||
|
require.NoError(t, err, "Failed to find systemd_services collection")
|
||||||
|
assert.Equal(t, isUserInSystemUsers, *systemdServicesCollection.ListRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.ViewRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.CreateRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.UpdateRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.DeleteRule)
|
||||||
|
|
||||||
|
// systems collection
|
||||||
|
systemsCollection, err := hub.FindCollectionByNameOrId("systems")
|
||||||
|
require.NoError(t, err, "Failed to find systems collection")
|
||||||
|
assert.Equal(t, isUserInUsers, *systemsCollection.ListRule)
|
||||||
|
assert.Equal(t, isUserInUsers, *systemsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserInUsersNotReadonly, *systemsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserInUsersNotReadonly, *systemsCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserInUsersNotReadonly, *systemsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// universal_tokens collection
|
||||||
|
universalTokensCollection, err := hub.FindCollectionByNameOrId("universal_tokens")
|
||||||
|
require.NoError(t, err, "Failed to find universal_tokens collection")
|
||||||
|
assert.Nil(t, universalTokensCollection.ListRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.ViewRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.CreateRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.UpdateRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.DeleteRule)
|
||||||
|
|
||||||
|
// user_settings collection
|
||||||
|
userSettingsCollection, err := hub.FindCollectionByNameOrId("user_settings")
|
||||||
|
require.NoError(t, err, "Failed to find user_settings collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *userSettingsCollection.ListRule)
|
||||||
|
assert.Nil(t, userSettingsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *userSettingsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *userSettingsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, userSettingsCollection.DeleteRule)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCollectionRulesShareAllSystems(t *testing.T) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
const isUser = `@request.auth.id != ""`
|
||||||
|
const isUserNotReadonly = `@request.auth.id != "" && @request.auth.role != "readonly"`
|
||||||
|
|
||||||
|
const isUserMatchesUser = `@request.auth.id != "" && user = @request.auth.id`
|
||||||
|
|
||||||
|
// alerts collection
|
||||||
|
alertsCollection, err := hub.FindCollectionByNameOrId("alerts")
|
||||||
|
require.NoError(t, err, "Failed to find alerts collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.ListRule)
|
||||||
|
assert.Nil(t, alertsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// alerts_history collection
|
||||||
|
alertsHistoryCollection, err := hub.FindCollectionByNameOrId("alerts_history")
|
||||||
|
require.NoError(t, err, "Failed to find alerts_history collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsHistoryCollection.ListRule)
|
||||||
|
assert.Nil(t, alertsHistoryCollection.ViewRule)
|
||||||
|
assert.Nil(t, alertsHistoryCollection.CreateRule)
|
||||||
|
assert.Nil(t, alertsHistoryCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *alertsHistoryCollection.DeleteRule)
|
||||||
|
|
||||||
|
// containers collection
|
||||||
|
containersCollection, err := hub.FindCollectionByNameOrId("containers")
|
||||||
|
require.NoError(t, err, "Failed to find containers collection")
|
||||||
|
assert.Equal(t, isUser, *containersCollection.ListRule)
|
||||||
|
assert.Nil(t, containersCollection.ViewRule)
|
||||||
|
assert.Nil(t, containersCollection.CreateRule)
|
||||||
|
assert.Nil(t, containersCollection.UpdateRule)
|
||||||
|
assert.Nil(t, containersCollection.DeleteRule)
|
||||||
|
|
||||||
|
// container_stats collection
|
||||||
|
containerStatsCollection, err := hub.FindCollectionByNameOrId("container_stats")
|
||||||
|
require.NoError(t, err, "Failed to find container_stats collection")
|
||||||
|
assert.Equal(t, isUser, *containerStatsCollection.ListRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.ViewRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.CreateRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, containerStatsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// fingerprints collection
|
||||||
|
fingerprintsCollection, err := hub.FindCollectionByNameOrId("fingerprints")
|
||||||
|
require.NoError(t, err, "Failed to find fingerprints collection")
|
||||||
|
assert.Equal(t, isUser, *fingerprintsCollection.ListRule)
|
||||||
|
assert.Equal(t, isUser, *fingerprintsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *fingerprintsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *fingerprintsCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *fingerprintsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// quiet_hours collection
|
||||||
|
quietHoursCollection, err := hub.FindCollectionByNameOrId("quiet_hours")
|
||||||
|
require.NoError(t, err, "Failed to find quiet_hours collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.ListRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *quietHoursCollection.DeleteRule)
|
||||||
|
|
||||||
|
// smart_devices collection
|
||||||
|
smartDevicesCollection, err := hub.FindCollectionByNameOrId("smart_devices")
|
||||||
|
require.NoError(t, err, "Failed to find smart_devices collection")
|
||||||
|
assert.Equal(t, isUser, *smartDevicesCollection.ListRule)
|
||||||
|
assert.Equal(t, isUser, *smartDevicesCollection.ViewRule)
|
||||||
|
assert.Nil(t, smartDevicesCollection.CreateRule)
|
||||||
|
assert.Nil(t, smartDevicesCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *smartDevicesCollection.DeleteRule)
|
||||||
|
|
||||||
|
// system_details collection
|
||||||
|
systemDetailsCollection, err := hub.FindCollectionByNameOrId("system_details")
|
||||||
|
require.NoError(t, err, "Failed to find system_details collection")
|
||||||
|
assert.Equal(t, isUser, *systemDetailsCollection.ListRule)
|
||||||
|
assert.Equal(t, isUser, *systemDetailsCollection.ViewRule)
|
||||||
|
assert.Nil(t, systemDetailsCollection.CreateRule)
|
||||||
|
assert.Nil(t, systemDetailsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, systemDetailsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// system_stats collection
|
||||||
|
systemStatsCollection, err := hub.FindCollectionByNameOrId("system_stats")
|
||||||
|
require.NoError(t, err, "Failed to find system_stats collection")
|
||||||
|
assert.Equal(t, isUser, *systemStatsCollection.ListRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.ViewRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.CreateRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, systemStatsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// systemd_services collection
|
||||||
|
systemdServicesCollection, err := hub.FindCollectionByNameOrId("systemd_services")
|
||||||
|
require.NoError(t, err, "Failed to find systemd_services collection")
|
||||||
|
assert.Equal(t, isUser, *systemdServicesCollection.ListRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.ViewRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.CreateRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.UpdateRule)
|
||||||
|
assert.Nil(t, systemdServicesCollection.DeleteRule)
|
||||||
|
|
||||||
|
// systems collection
|
||||||
|
systemsCollection, err := hub.FindCollectionByNameOrId("systems")
|
||||||
|
require.NoError(t, err, "Failed to find systems collection")
|
||||||
|
assert.Equal(t, isUser, *systemsCollection.ListRule)
|
||||||
|
assert.Equal(t, isUser, *systemsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *systemsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *systemsCollection.UpdateRule)
|
||||||
|
assert.Equal(t, isUserNotReadonly, *systemsCollection.DeleteRule)
|
||||||
|
|
||||||
|
// universal_tokens collection
|
||||||
|
universalTokensCollection, err := hub.FindCollectionByNameOrId("universal_tokens")
|
||||||
|
require.NoError(t, err, "Failed to find universal_tokens collection")
|
||||||
|
assert.Nil(t, universalTokensCollection.ListRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.ViewRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.CreateRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.UpdateRule)
|
||||||
|
assert.Nil(t, universalTokensCollection.DeleteRule)
|
||||||
|
|
||||||
|
// user_settings collection
|
||||||
|
userSettingsCollection, err := hub.FindCollectionByNameOrId("user_settings")
|
||||||
|
require.NoError(t, err, "Failed to find user_settings collection")
|
||||||
|
assert.Equal(t, isUserMatchesUser, *userSettingsCollection.ListRule)
|
||||||
|
assert.Nil(t, userSettingsCollection.ViewRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *userSettingsCollection.CreateRule)
|
||||||
|
assert.Equal(t, isUserMatchesUser, *userSettingsCollection.UpdateRule)
|
||||||
|
assert.Nil(t, userSettingsCollection.DeleteRule)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDisablePasswordAuth(t *testing.T) {
|
||||||
|
t.Setenv("DISABLE_PASSWORD_AUTH", "true")
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
usersCollection, err := hub.FindCollectionByNameOrId("users")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.False(t, usersCollection.PasswordAuth.Enabled)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUserCreation(t *testing.T) {
|
||||||
|
t.Setenv("USER_CREATION", "true")
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
usersCollection, err := hub.FindCollectionByNameOrId("users")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, "@request.context = 'oauth2'", *usersCollection.CreateRule)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMFAOtp(t *testing.T) {
|
||||||
|
t.Setenv("MFA_OTP", "true")
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
usersCollection, err := hub.FindCollectionByNameOrId("users")
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, usersCollection.OTP.Enabled)
|
||||||
|
assert.True(t, usersCollection.MFA.Enabled)
|
||||||
|
|
||||||
|
superusersCollection, err := hub.FindCollectionByNameOrId(core.CollectionNameSuperusers)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.True(t, superusersCollection.OTP.Enabled)
|
||||||
|
assert.True(t, superusersCollection.MFA.Enabled)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestApiCollectionsAuthRules(t *testing.T) {
|
||||||
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
hub.StartHub()
|
||||||
|
|
||||||
|
user1, _ := beszelTests.CreateUser(hub, "user1@example.com", "password")
|
||||||
|
user1Token, _ := user1.NewAuthToken()
|
||||||
|
|
||||||
|
user2, _ := beszelTests.CreateUser(hub, "user2@example.com", "password")
|
||||||
|
// user2Token, _ := user2.NewAuthToken()
|
||||||
|
|
||||||
|
userReadonly, _ := beszelTests.CreateUserWithRole(hub, "userreadonly@example.com", "password", "readonly")
|
||||||
|
userReadonlyToken, _ := userReadonly.NewAuthToken()
|
||||||
|
|
||||||
|
userOneSystem, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "system1",
|
||||||
|
"users": []string{user1.Id},
|
||||||
|
"host": "127.0.0.1",
|
||||||
|
})
|
||||||
|
|
||||||
|
sharedSystem, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "system2",
|
||||||
|
"users": []string{user1.Id, user2.Id},
|
||||||
|
"host": "127.0.0.2",
|
||||||
|
})
|
||||||
|
|
||||||
|
userTwoSystem, _ := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "system3",
|
||||||
|
"users": []string{user2.Id},
|
||||||
|
"host": "127.0.0.2",
|
||||||
|
})
|
||||||
|
|
||||||
|
userRecords, _ := hub.CountRecords("users")
|
||||||
|
assert.EqualValues(t, 3, userRecords, "all users should be created")
|
||||||
|
|
||||||
|
systemRecords, _ := hub.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 3, systemRecords, "all systems should be created")
|
||||||
|
|
||||||
|
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
||||||
|
return hub.TestApp
|
||||||
|
}
|
||||||
|
|
||||||
|
scenarios := []beszelTests.ApiScenario{
|
||||||
|
{
|
||||||
|
Name: "Unauthorized user cannot list systems",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/collections/systems/records",
|
||||||
|
ExpectedStatus: 200, // https://github.com/pocketbase/pocketbase/discussions/1570
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
ExpectedContent: []string{`"items":[]`, `"totalItems":0`},
|
||||||
|
NotExpectedContent: []string{userOneSystem.Id, sharedSystem.Id, userTwoSystem.Id},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "Unauthorized user cannot delete a system",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: fmt.Sprintf("/api/collections/systems/records/%s", userOneSystem.Id),
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
ExpectedContent: []string{"resource wasn't found"},
|
||||||
|
NotExpectedContent: []string{userOneSystem.Id},
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 3, systemsCount, "should have 3 systems before deletion")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 3, systemsCount, "should still have 3 systems after failed deletion")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 can list their own systems",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/collections/systems/records",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{userOneSystem.Id, sharedSystem.Id},
|
||||||
|
NotExpectedContent: []string{userTwoSystem.Id},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 cannot list user 2's system",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/collections/systems/records",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{userOneSystem.Id, sharedSystem.Id},
|
||||||
|
NotExpectedContent: []string{userTwoSystem.Id},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 can see user 2's system if SHARE_ALL_SYSTEMS is enabled",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/collections/systems/records",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 200,
|
||||||
|
ExpectedContent: []string{userOneSystem.Id, sharedSystem.Id, userTwoSystem.Id},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 can delete their own system",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: fmt.Sprintf("/api/collections/systems/records/%s", userOneSystem.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 204,
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 3, systemsCount, "should have 3 systems before deletion")
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount, "should have 2 systems after deletion")
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 cannot delete user 2's system",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: fmt.Sprintf("/api/collections/systems/records/%s", userTwoSystem.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
ExpectedContent: []string{"resource wasn't found"},
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount)
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "Readonly cannot delete a system even if SHARE_ALL_SYSTEMS is enabled",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: fmt.Sprintf("/api/collections/systems/records/%s", sharedSystem.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userReadonlyToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"resource wasn't found"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount)
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "User 1 can delete user 2's system if SHARE_ALL_SYSTEMS is enabled",
|
||||||
|
Method: http.MethodDelete,
|
||||||
|
URL: fmt.Sprintf("/api/collections/systems/records/%s", userTwoSystem.Id),
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": user1Token,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 204,
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "true")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 2, systemsCount)
|
||||||
|
},
|
||||||
|
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
||||||
|
t.Setenv("SHARE_ALL_SYSTEMS", "")
|
||||||
|
hub.SetCollectionAuthSettings()
|
||||||
|
systemsCount, _ := app.CountRecords("systems")
|
||||||
|
assert.EqualValues(t, 1, systemsCount)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, scenario := range scenarios {
|
||||||
|
scenario.Test(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -16,7 +16,7 @@ type val[T comparable] struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type ExpiryMap[T comparable] struct {
|
type ExpiryMap[T comparable] struct {
|
||||||
store *store.Store[string, *val[T]]
|
store *store.Store[string, val[T]]
|
||||||
stopChan chan struct{}
|
stopChan chan struct{}
|
||||||
stopOnce sync.Once
|
stopOnce sync.Once
|
||||||
}
|
}
|
||||||
@@ -24,7 +24,7 @@ type ExpiryMap[T comparable] struct {
|
|||||||
// New creates a new expiry map with custom cleanup interval
|
// New creates a new expiry map with custom cleanup interval
|
||||||
func New[T comparable](cleanupInterval time.Duration) *ExpiryMap[T] {
|
func New[T comparable](cleanupInterval time.Duration) *ExpiryMap[T] {
|
||||||
m := &ExpiryMap[T]{
|
m := &ExpiryMap[T]{
|
||||||
store: store.New(map[string]*val[T]{}),
|
store: store.New(map[string]val[T]{}),
|
||||||
stopChan: make(chan struct{}),
|
stopChan: make(chan struct{}),
|
||||||
}
|
}
|
||||||
go m.startCleaner(cleanupInterval)
|
go m.startCleaner(cleanupInterval)
|
||||||
@@ -33,7 +33,7 @@ func New[T comparable](cleanupInterval time.Duration) *ExpiryMap[T] {
|
|||||||
|
|
||||||
// Set stores a value with the given TTL
|
// Set stores a value with the given TTL
|
||||||
func (m *ExpiryMap[T]) Set(key string, value T, ttl time.Duration) {
|
func (m *ExpiryMap[T]) Set(key string, value T, ttl time.Duration) {
|
||||||
m.store.Set(key, &val[T]{
|
m.store.Set(key, val[T]{
|
||||||
value: value,
|
value: value,
|
||||||
expires: time.Now().Add(ttl),
|
expires: time.Now().Add(ttl),
|
||||||
})
|
})
|
||||||
@@ -116,3 +116,12 @@ func (m *ExpiryMap[T]) cleanup() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// UpdateExpiration updates the expiration time of a key
|
||||||
|
func (m *ExpiryMap[T]) UpdateExpiration(key string, ttl time.Duration) {
|
||||||
|
value, ok := m.store.GetOk(key)
|
||||||
|
if ok {
|
||||||
|
value.expires = time.Now().Add(ttl)
|
||||||
|
m.store.Set(key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -178,6 +178,33 @@ func TestExpiryMap_GenericTypes(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestExpiryMap_UpdateExpiration(t *testing.T) {
|
||||||
|
em := New[string](time.Hour)
|
||||||
|
|
||||||
|
// Set a value with short TTL
|
||||||
|
em.Set("key1", "value1", time.Millisecond*50)
|
||||||
|
|
||||||
|
// Verify it exists
|
||||||
|
assert.True(t, em.Has("key1"))
|
||||||
|
|
||||||
|
// Update expiration to a longer TTL
|
||||||
|
em.UpdateExpiration("key1", time.Hour)
|
||||||
|
|
||||||
|
// Wait for the original TTL to pass
|
||||||
|
time.Sleep(time.Millisecond * 100)
|
||||||
|
|
||||||
|
// Should still exist because expiration was updated
|
||||||
|
assert.True(t, em.Has("key1"))
|
||||||
|
value, ok := em.GetOk("key1")
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "value1", value)
|
||||||
|
|
||||||
|
// Try updating non-existent key (should not panic)
|
||||||
|
assert.NotPanics(t, func() {
|
||||||
|
em.UpdateExpiration("nonexistent", time.Hour)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestExpiryMap_ZeroValues(t *testing.T) {
|
func TestExpiryMap_ZeroValues(t *testing.T) {
|
||||||
em := New[string](time.Hour)
|
em := New[string](time.Hour)
|
||||||
|
|
||||||
|
|||||||
@@ -4,16 +4,14 @@ package hub
|
|||||||
import (
|
import (
|
||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
|
||||||
"github.com/henrygd/beszel/internal/alerts"
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
"github.com/henrygd/beszel/internal/hub/config"
|
"github.com/henrygd/beszel/internal/hub/config"
|
||||||
"github.com/henrygd/beszel/internal/hub/heartbeat"
|
"github.com/henrygd/beszel/internal/hub/heartbeat"
|
||||||
@@ -21,14 +19,12 @@ import (
|
|||||||
"github.com/henrygd/beszel/internal/records"
|
"github.com/henrygd/beszel/internal/records"
|
||||||
"github.com/henrygd/beszel/internal/users"
|
"github.com/henrygd/beszel/internal/users"
|
||||||
|
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/pocketbase/dbx"
|
|
||||||
"github.com/pocketbase/pocketbase"
|
"github.com/pocketbase/pocketbase"
|
||||||
"github.com/pocketbase/pocketbase/apis"
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Hub is the application. It embeds the PocketBase app and keeps references to subcomponents.
|
||||||
type Hub struct {
|
type Hub struct {
|
||||||
core.App
|
core.App
|
||||||
*alerts.AlertManager
|
*alerts.AlertManager
|
||||||
@@ -46,18 +42,16 @@ var containerIDPattern = regexp.MustCompile(`^[a-fA-F0-9]{12,64}$`)
|
|||||||
|
|
||||||
// NewHub creates a new Hub instance with default configuration
|
// NewHub creates a new Hub instance with default configuration
|
||||||
func NewHub(app core.App) *Hub {
|
func NewHub(app core.App) *Hub {
|
||||||
hub := &Hub{}
|
hub := &Hub{App: app}
|
||||||
hub.App = app
|
|
||||||
|
|
||||||
hub.AlertManager = alerts.NewAlertManager(hub)
|
hub.AlertManager = alerts.NewAlertManager(hub)
|
||||||
hub.um = users.NewUserManager(hub)
|
hub.um = users.NewUserManager(hub)
|
||||||
hub.rm = records.NewRecordManager(hub)
|
hub.rm = records.NewRecordManager(hub)
|
||||||
hub.sm = systems.NewSystemManager(hub)
|
hub.sm = systems.NewSystemManager(hub)
|
||||||
hub.appURL, _ = GetEnv("APP_URL")
|
|
||||||
hub.hb = heartbeat.New(app, GetEnv)
|
hub.hb = heartbeat.New(app, GetEnv)
|
||||||
if hub.hb != nil {
|
if hub.hb != nil {
|
||||||
hub.hbStop = make(chan struct{})
|
hub.hbStop = make(chan struct{})
|
||||||
}
|
}
|
||||||
|
_ = onAfterBootstrapAndMigrations(app, hub.initialize)
|
||||||
return hub
|
return hub
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -70,12 +64,28 @@ func GetEnv(key string) (value string, exists bool) {
|
|||||||
return os.LookupEnv(key)
|
return os.LookupEnv(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *Hub) StartHub() error {
|
// onAfterBootstrapAndMigrations ensures the provided function runs after the database is set up and migrations are applied.
|
||||||
h.App.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
// This is a workaround for behavior in PocketBase where onBootstrap runs before migrations, forcing use of onServe for this purpose.
|
||||||
// initialize settings / collections
|
// However, PB's tests.TestApp is already bootstrapped, generally doesn't serve, but does handle migrations.
|
||||||
if err := h.initialize(e); err != nil {
|
// So this ensures that the provided function runs at the right time either way, after DB is ready and migrations are done.
|
||||||
|
func onAfterBootstrapAndMigrations(app core.App, fn func(app core.App) error) error {
|
||||||
|
// pb tests.TestApp is already bootstrapped and doesn't serve
|
||||||
|
if app.IsBootstrapped() {
|
||||||
|
return fn(app)
|
||||||
|
}
|
||||||
|
// Must use OnServe because OnBootstrap appears to run before migrations, even if calling e.Next() before anything else
|
||||||
|
app.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
||||||
|
if err := fn(e.App); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartHub sets up event handlers and starts the PocketBase server
|
||||||
|
func (h *Hub) StartHub() error {
|
||||||
|
h.App.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
||||||
// sync systems with config
|
// sync systems with config
|
||||||
if err := config.SyncSystems(e); err != nil {
|
if err := config.SyncSystems(e); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -110,132 +120,29 @@ func (h *Hub) StartHub() error {
|
|||||||
h.App.OnRecordCreate("users").BindFunc(h.um.InitializeUserRole)
|
h.App.OnRecordCreate("users").BindFunc(h.um.InitializeUserRole)
|
||||||
h.App.OnRecordCreate("user_settings").BindFunc(h.um.InitializeUserSettings)
|
h.App.OnRecordCreate("user_settings").BindFunc(h.um.InitializeUserSettings)
|
||||||
|
|
||||||
if pb, ok := h.App.(*pocketbase.PocketBase); ok {
|
pb, ok := h.App.(*pocketbase.PocketBase)
|
||||||
// log.Println("Starting pocketbase")
|
if !ok {
|
||||||
err := pb.Start()
|
return errors.New("not a pocketbase app")
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
return pb.Start()
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// initialize sets up initial configuration (collections, settings, etc.)
|
// initialize sets up initial configuration (collections, settings, etc.)
|
||||||
func (h *Hub) initialize(e *core.ServeEvent) error {
|
func (h *Hub) initialize(app core.App) error {
|
||||||
// set general settings
|
// set general settings
|
||||||
settings := e.App.Settings()
|
settings := app.Settings()
|
||||||
// batch requests (for global alerts)
|
// batch requests (for alerts)
|
||||||
settings.Batch.Enabled = true
|
settings.Batch.Enabled = true
|
||||||
// set URL if BASE_URL env is set
|
// set URL if APP_URL env is set
|
||||||
if h.appURL != "" {
|
if appURL, isSet := GetEnv("APP_URL"); isSet {
|
||||||
settings.Meta.AppURL = h.appURL
|
h.appURL = appURL
|
||||||
|
settings.Meta.AppURL = appURL
|
||||||
}
|
}
|
||||||
if err := e.App.Save(settings); err != nil {
|
if err := app.Save(settings); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// set auth settings
|
// set auth settings
|
||||||
if err := setCollectionAuthSettings(e.App); err != nil {
|
return setCollectionAuthSettings(app)
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// setCollectionAuthSettings sets up default authentication settings for the app
|
|
||||||
func setCollectionAuthSettings(app core.App) error {
|
|
||||||
usersCollection, err := app.FindCollectionByNameOrId("users")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
superusersCollection, err := app.FindCollectionByNameOrId(core.CollectionNameSuperusers)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// disable email auth if DISABLE_PASSWORD_AUTH env var is set
|
|
||||||
disablePasswordAuth, _ := GetEnv("DISABLE_PASSWORD_AUTH")
|
|
||||||
usersCollection.PasswordAuth.Enabled = disablePasswordAuth != "true"
|
|
||||||
usersCollection.PasswordAuth.IdentityFields = []string{"email"}
|
|
||||||
// allow oauth user creation if USER_CREATION is set
|
|
||||||
if userCreation, _ := GetEnv("USER_CREATION"); userCreation == "true" {
|
|
||||||
cr := "@request.context = 'oauth2'"
|
|
||||||
usersCollection.CreateRule = &cr
|
|
||||||
} else {
|
|
||||||
usersCollection.CreateRule = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// enable mfaOtp mfa if MFA_OTP env var is set
|
|
||||||
mfaOtp, _ := GetEnv("MFA_OTP")
|
|
||||||
usersCollection.OTP.Length = 6
|
|
||||||
superusersCollection.OTP.Length = 6
|
|
||||||
usersCollection.OTP.Enabled = mfaOtp == "true"
|
|
||||||
usersCollection.MFA.Enabled = mfaOtp == "true"
|
|
||||||
superusersCollection.OTP.Enabled = mfaOtp == "true" || mfaOtp == "superusers"
|
|
||||||
superusersCollection.MFA.Enabled = mfaOtp == "true" || mfaOtp == "superusers"
|
|
||||||
if err := app.Save(superusersCollection); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := app.Save(usersCollection); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
shareAllSystems, _ := GetEnv("SHARE_ALL_SYSTEMS")
|
|
||||||
|
|
||||||
// allow all users to access systems if SHARE_ALL_SYSTEMS is set
|
|
||||||
systemsCollection, err := app.FindCollectionByNameOrId("systems")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var systemsReadRule string
|
|
||||||
if shareAllSystems == "true" {
|
|
||||||
systemsReadRule = "@request.auth.id != \"\""
|
|
||||||
} else {
|
|
||||||
systemsReadRule = "@request.auth.id != \"\" && users.id ?= @request.auth.id"
|
|
||||||
}
|
|
||||||
updateDeleteRule := systemsReadRule + " && @request.auth.role != \"readonly\""
|
|
||||||
systemsCollection.ListRule = &systemsReadRule
|
|
||||||
systemsCollection.ViewRule = &systemsReadRule
|
|
||||||
systemsCollection.UpdateRule = &updateDeleteRule
|
|
||||||
systemsCollection.DeleteRule = &updateDeleteRule
|
|
||||||
if err := app.Save(systemsCollection); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// allow all users to access all containers if SHARE_ALL_SYSTEMS is set
|
|
||||||
containersCollection, err := app.FindCollectionByNameOrId("containers")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
containersListRule := strings.Replace(systemsReadRule, "users.id", "system.users.id", 1)
|
|
||||||
containersCollection.ListRule = &containersListRule
|
|
||||||
if err := app.Save(containersCollection); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// allow all users to access system-related collections if SHARE_ALL_SYSTEMS is set
|
|
||||||
// these collections all have a "system" relation field
|
|
||||||
systemRelatedCollections := []string{"system_details", "smart_devices", "systemd_services"}
|
|
||||||
for _, collectionName := range systemRelatedCollections {
|
|
||||||
collection, err := app.FindCollectionByNameOrId(collectionName)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
collection.ListRule = &containersListRule
|
|
||||||
// set viewRule for collections that need it (system_details, smart_devices)
|
|
||||||
if collection.ViewRule != nil {
|
|
||||||
collection.ViewRule = &containersListRule
|
|
||||||
}
|
|
||||||
// set deleteRule for smart_devices (allows user to dismiss disk warnings)
|
|
||||||
if collectionName == "smart_devices" {
|
|
||||||
deleteRule := containersListRule + " && @request.auth.role != \"readonly\""
|
|
||||||
collection.DeleteRule = &deleteRule
|
|
||||||
}
|
|
||||||
if err := app.Save(collection); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// registerCronJobs sets up scheduled tasks
|
// registerCronJobs sets up scheduled tasks
|
||||||
@@ -247,296 +154,7 @@ func (h *Hub) registerCronJobs(_ *core.ServeEvent) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// custom middlewares
|
// GetSSHKey generates key pair if it doesn't exist and returns signer
|
||||||
func (h *Hub) registerMiddlewares(se *core.ServeEvent) {
|
|
||||||
// authorizes request with user matching the provided email
|
|
||||||
authorizeRequestWithEmail := func(e *core.RequestEvent, email string) (err error) {
|
|
||||||
if e.Auth != nil || email == "" {
|
|
||||||
return e.Next()
|
|
||||||
}
|
|
||||||
isAuthRefresh := e.Request.URL.Path == "/api/collections/users/auth-refresh" && e.Request.Method == http.MethodPost
|
|
||||||
e.Auth, err = e.App.FindFirstRecordByData("users", "email", email)
|
|
||||||
if err != nil || !isAuthRefresh {
|
|
||||||
return e.Next()
|
|
||||||
}
|
|
||||||
// auth refresh endpoint, make sure token is set in header
|
|
||||||
token, _ := e.Auth.NewAuthToken()
|
|
||||||
e.Request.Header.Set("Authorization", token)
|
|
||||||
return e.Next()
|
|
||||||
}
|
|
||||||
// authenticate with trusted header
|
|
||||||
if autoLogin, _ := GetEnv("AUTO_LOGIN"); autoLogin != "" {
|
|
||||||
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
|
||||||
return authorizeRequestWithEmail(e, autoLogin)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
// authenticate with trusted header
|
|
||||||
if trustedHeader, _ := GetEnv("TRUSTED_AUTH_HEADER"); trustedHeader != "" {
|
|
||||||
se.Router.BindFunc(func(e *core.RequestEvent) error {
|
|
||||||
return authorizeRequestWithEmail(e, e.Request.Header.Get(trustedHeader))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// custom api routes
|
|
||||||
func (h *Hub) registerApiRoutes(se *core.ServeEvent) error {
|
|
||||||
// auth protected routes
|
|
||||||
apiAuth := se.Router.Group("/api/beszel")
|
|
||||||
apiAuth.Bind(apis.RequireAuth())
|
|
||||||
// auth optional routes
|
|
||||||
apiNoAuth := se.Router.Group("/api/beszel")
|
|
||||||
|
|
||||||
// create first user endpoint only needed if no users exist
|
|
||||||
if totalUsers, _ := se.App.CountRecords("users"); totalUsers == 0 {
|
|
||||||
apiNoAuth.POST("/create-user", h.um.CreateFirstUser)
|
|
||||||
}
|
|
||||||
// check if first time setup on login page
|
|
||||||
apiNoAuth.GET("/first-run", func(e *core.RequestEvent) error {
|
|
||||||
total, err := e.App.CountRecords("users")
|
|
||||||
return e.JSON(http.StatusOK, map[string]bool{"firstRun": err == nil && total == 0})
|
|
||||||
})
|
|
||||||
// get public key and version
|
|
||||||
apiAuth.GET("/getkey", func(e *core.RequestEvent) error {
|
|
||||||
return e.JSON(http.StatusOK, map[string]string{"key": h.pubKey, "v": beszel.Version})
|
|
||||||
})
|
|
||||||
// send test notification
|
|
||||||
apiAuth.POST("/test-notification", h.SendTestNotification)
|
|
||||||
// heartbeat status and test
|
|
||||||
apiAuth.GET("/heartbeat-status", h.getHeartbeatStatus)
|
|
||||||
apiAuth.POST("/test-heartbeat", h.testHeartbeat)
|
|
||||||
// get config.yml content
|
|
||||||
apiAuth.GET("/config-yaml", config.GetYamlConfig)
|
|
||||||
// handle agent websocket connection
|
|
||||||
apiNoAuth.GET("/agent-connect", h.handleAgentConnect)
|
|
||||||
// get or create universal tokens
|
|
||||||
apiAuth.GET("/universal-token", h.getUniversalToken)
|
|
||||||
// update / delete user alerts
|
|
||||||
apiAuth.POST("/user-alerts", alerts.UpsertUserAlerts)
|
|
||||||
apiAuth.DELETE("/user-alerts", alerts.DeleteUserAlerts)
|
|
||||||
// refresh SMART devices for a system
|
|
||||||
apiAuth.POST("/smart/refresh", h.refreshSmartData)
|
|
||||||
// get systemd service details
|
|
||||||
apiAuth.GET("/systemd/info", h.getSystemdInfo)
|
|
||||||
// /containers routes
|
|
||||||
if enabled, _ := GetEnv("CONTAINER_DETAILS"); enabled != "false" {
|
|
||||||
// get container logs
|
|
||||||
apiAuth.GET("/containers/logs", h.getContainerLogs)
|
|
||||||
// get container info
|
|
||||||
apiAuth.GET("/containers/info", h.getContainerInfo)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handler for universal token API endpoint (create, read, delete)
|
|
||||||
func (h *Hub) getUniversalToken(e *core.RequestEvent) error {
|
|
||||||
tokenMap := universalTokenMap.GetMap()
|
|
||||||
userID := e.Auth.Id
|
|
||||||
query := e.Request.URL.Query()
|
|
||||||
token := query.Get("token")
|
|
||||||
enable := query.Get("enable")
|
|
||||||
permanent := query.Get("permanent")
|
|
||||||
|
|
||||||
// helper for deleting any existing permanent token record for this user
|
|
||||||
deletePermanent := func() error {
|
|
||||||
rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
|
|
||||||
if err != nil {
|
|
||||||
return nil // no record
|
|
||||||
}
|
|
||||||
return h.Delete(rec)
|
|
||||||
}
|
|
||||||
|
|
||||||
// helper for upserting a permanent token record for this user
|
|
||||||
upsertPermanent := func(token string) error {
|
|
||||||
rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID})
|
|
||||||
if err == nil {
|
|
||||||
rec.Set("token", token)
|
|
||||||
return h.Save(rec)
|
|
||||||
}
|
|
||||||
|
|
||||||
col, err := h.FindCachedCollectionByNameOrId("universal_tokens")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
newRec := core.NewRecord(col)
|
|
||||||
newRec.Set("user", userID)
|
|
||||||
newRec.Set("token", token)
|
|
||||||
return h.Save(newRec)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disable universal tokens (both ephemeral and permanent)
|
|
||||||
if enable == "0" {
|
|
||||||
tokenMap.RemovebyValue(userID)
|
|
||||||
_ = deletePermanent()
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Enable universal token (ephemeral or permanent)
|
|
||||||
if enable == "1" {
|
|
||||||
if token == "" {
|
|
||||||
token = uuid.New().String()
|
|
||||||
}
|
|
||||||
|
|
||||||
if permanent == "1" {
|
|
||||||
// make token permanent (persist across restarts)
|
|
||||||
tokenMap.RemovebyValue(userID)
|
|
||||||
if err := upsertPermanent(token); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": true})
|
|
||||||
}
|
|
||||||
|
|
||||||
// default: ephemeral mode (1 hour)
|
|
||||||
_ = deletePermanent()
|
|
||||||
tokenMap.Set(token, userID, time.Hour)
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read current state
|
|
||||||
// Prefer permanent token if it exists.
|
|
||||||
if rec, err := h.FindFirstRecordByFilter("universal_tokens", "user = {:user}", dbx.Params{"user": userID}); err == nil {
|
|
||||||
dbToken := rec.GetString("token")
|
|
||||||
// If no token was provided, or the caller is asking about their permanent token, return it.
|
|
||||||
if token == "" || token == dbToken {
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": dbToken, "active": true, "permanent": true})
|
|
||||||
}
|
|
||||||
// Token doesn't match their permanent token (avoid leaking other info)
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": false, "permanent": false})
|
|
||||||
}
|
|
||||||
|
|
||||||
// No permanent token; fall back to ephemeral token map.
|
|
||||||
if token == "" {
|
|
||||||
// return existing token if it exists
|
|
||||||
if token, _, ok := tokenMap.GetByValue(userID); ok {
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"token": token, "active": true, "permanent": false})
|
|
||||||
}
|
|
||||||
// if no token is provided, generate a new one
|
|
||||||
token = uuid.New().String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Token is considered active only if it belongs to the current user.
|
|
||||||
activeUser, ok := tokenMap.GetOk(token)
|
|
||||||
active := ok && activeUser == userID
|
|
||||||
response := map[string]any{"token": token, "active": active, "permanent": false}
|
|
||||||
return e.JSON(http.StatusOK, response)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getHeartbeatStatus returns current heartbeat configuration and whether it's enabled
|
|
||||||
func (h *Hub) getHeartbeatStatus(e *core.RequestEvent) error {
|
|
||||||
if e.Auth.GetString("role") != "admin" {
|
|
||||||
return e.ForbiddenError("Requires admin role", nil)
|
|
||||||
}
|
|
||||||
if h.hb == nil {
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{
|
|
||||||
"enabled": false,
|
|
||||||
"msg": "Set HEARTBEAT_URL to enable outbound heartbeat monitoring",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
cfg := h.hb.GetConfig()
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{
|
|
||||||
"enabled": true,
|
|
||||||
"url": cfg.URL,
|
|
||||||
"interval": cfg.Interval,
|
|
||||||
"method": cfg.Method,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// testHeartbeat triggers a single heartbeat ping and returns the result
|
|
||||||
func (h *Hub) testHeartbeat(e *core.RequestEvent) error {
|
|
||||||
if e.Auth.GetString("role") != "admin" {
|
|
||||||
return e.ForbiddenError("Requires admin role", nil)
|
|
||||||
}
|
|
||||||
if h.hb == nil {
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{
|
|
||||||
"err": "Heartbeat not configured. Set HEARTBEAT_URL environment variable.",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if err := h.hb.Send(); err != nil {
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"err": err.Error()})
|
|
||||||
}
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"err": false})
|
|
||||||
}
|
|
||||||
|
|
||||||
// containerRequestHandler handles both container logs and info requests
|
|
||||||
func (h *Hub) containerRequestHandler(e *core.RequestEvent, fetchFunc func(*systems.System, string) (string, error), responseKey string) error {
|
|
||||||
systemID := e.Request.URL.Query().Get("system")
|
|
||||||
containerID := e.Request.URL.Query().Get("container")
|
|
||||||
|
|
||||||
if systemID == "" || containerID == "" {
|
|
||||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and container parameters are required"})
|
|
||||||
}
|
|
||||||
if !containerIDPattern.MatchString(containerID) {
|
|
||||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "invalid container parameter"})
|
|
||||||
}
|
|
||||||
|
|
||||||
system, err := h.sm.GetSystem(systemID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := fetchFunc(system, containerID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
|
||||||
}
|
|
||||||
|
|
||||||
return e.JSON(http.StatusOK, map[string]string{responseKey: data})
|
|
||||||
}
|
|
||||||
|
|
||||||
// getContainerLogs handles GET /api/beszel/containers/logs requests
|
|
||||||
func (h *Hub) getContainerLogs(e *core.RequestEvent) error {
|
|
||||||
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
|
||||||
return system.FetchContainerLogsFromAgent(containerID)
|
|
||||||
}, "logs")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h *Hub) getContainerInfo(e *core.RequestEvent) error {
|
|
||||||
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
|
||||||
return system.FetchContainerInfoFromAgent(containerID)
|
|
||||||
}, "info")
|
|
||||||
}
|
|
||||||
|
|
||||||
// getSystemdInfo handles GET /api/beszel/systemd/info requests
|
|
||||||
func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
|
||||||
query := e.Request.URL.Query()
|
|
||||||
systemID := query.Get("system")
|
|
||||||
serviceName := query.Get("service")
|
|
||||||
|
|
||||||
if systemID == "" || serviceName == "" {
|
|
||||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and service parameters are required"})
|
|
||||||
}
|
|
||||||
system, err := h.sm.GetSystem(systemID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
|
||||||
}
|
|
||||||
details, err := system.FetchSystemdInfoFromAgent(serviceName)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
|
||||||
}
|
|
||||||
e.Response.Header().Set("Cache-Control", "public, max-age=60")
|
|
||||||
return e.JSON(http.StatusOK, map[string]any{"details": details})
|
|
||||||
}
|
|
||||||
|
|
||||||
// refreshSmartData handles POST /api/beszel/smart/refresh requests
|
|
||||||
// Fetches fresh SMART data from the agent and updates the collection
|
|
||||||
func (h *Hub) refreshSmartData(e *core.RequestEvent) error {
|
|
||||||
systemID := e.Request.URL.Query().Get("system")
|
|
||||||
if systemID == "" {
|
|
||||||
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system parameter is required"})
|
|
||||||
}
|
|
||||||
|
|
||||||
system, err := h.sm.GetSystem(systemID)
|
|
||||||
if err != nil {
|
|
||||||
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch and save SMART devices
|
|
||||||
if err := system.FetchAndSaveSmartDevices(); err != nil {
|
|
||||||
return e.JSON(http.StatusInternalServerError, map[string]string{"error": err.Error()})
|
|
||||||
}
|
|
||||||
|
|
||||||
return e.JSON(http.StatusOK, map[string]string{"status": "ok"})
|
|
||||||
}
|
|
||||||
|
|
||||||
// generates key pair if it doesn't exist and returns signer
|
|
||||||
func (h *Hub) GetSSHKey(dataDir string) (ssh.Signer, error) {
|
func (h *Hub) GetSSHKey(dataDir string) (ssh.Signer, error) {
|
||||||
if h.signer != nil {
|
if h.signer != nil {
|
||||||
return h.signer, nil
|
return h.signer, nil
|
||||||
|
|||||||
@@ -3,36 +3,20 @@
|
|||||||
package hub_test
|
package hub_test
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
"encoding/json"
|
|
||||||
"encoding/pem"
|
"encoding/pem"
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/migrations"
|
|
||||||
beszelTests "github.com/henrygd/beszel/internal/tests"
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
|
||||||
pbTests "github.com/pocketbase/pocketbase/tests"
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
// marshal to json and return an io.Reader (for use in ApiScenario.Body)
|
|
||||||
func jsonReader(v any) io.Reader {
|
|
||||||
data, err := json.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return bytes.NewReader(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMakeLink(t *testing.T) {
|
func TestMakeLink(t *testing.T) {
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
|
|
||||||
@@ -265,699 +249,20 @@ func TestGetSSHKey(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestApiRoutesAuthentication(t *testing.T) {
|
func TestAppUrl(t *testing.T) {
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
t.Run("no APP_URL does't change app url", func(t *testing.T) {
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
// Create test user and get auth token
|
|
||||||
user, err := beszelTests.CreateUser(hub, "testuser@example.com", "password123")
|
|
||||||
require.NoError(t, err, "Failed to create test user")
|
|
||||||
|
|
||||||
adminUser, err := beszelTests.CreateRecord(hub, "users", map[string]any{
|
|
||||||
"email": "admin@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
"role": "admin",
|
|
||||||
})
|
|
||||||
require.NoError(t, err, "Failed to create admin user")
|
|
||||||
adminUserToken, err := adminUser.NewAuthToken()
|
|
||||||
|
|
||||||
// superUser, err := beszelTests.CreateRecord(hub, core.CollectionNameSuperusers, map[string]any{
|
|
||||||
// "email": "superuser@example.com",
|
|
||||||
// "password": "password123",
|
|
||||||
// })
|
|
||||||
// require.NoError(t, err, "Failed to create superuser")
|
|
||||||
|
|
||||||
userToken, err := user.NewAuthToken()
|
|
||||||
require.NoError(t, err, "Failed to create auth token")
|
|
||||||
|
|
||||||
// Create test system for user-alerts endpoints
|
|
||||||
system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
|
|
||||||
"name": "test-system",
|
|
||||||
"users": []string{user.Id},
|
|
||||||
"host": "127.0.0.1",
|
|
||||||
})
|
|
||||||
require.NoError(t, err, "Failed to create test system")
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
// Auth Protected Routes - Should require authentication
|
|
||||||
{
|
|
||||||
Name: "POST /test-notification - no auth should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-notification",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"url": "generic://127.0.0.1",
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /test-notification - with auth should succeed",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-notification",
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"url": "generic://127.0.0.1",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"sending message"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /config-yaml - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/config-yaml",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /config-yaml - with user auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/config-yaml",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 403,
|
|
||||||
ExpectedContent: []string{"Requires admin"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /config-yaml - with admin auth should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/config-yaml",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": adminUserToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"test-system"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /heartbeat-status - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/heartbeat-status",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /heartbeat-status - with user auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/heartbeat-status",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 403,
|
|
||||||
ExpectedContent: []string{"Requires admin role"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /heartbeat-status - with admin auth should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/heartbeat-status",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": adminUserToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{`"enabled":false`},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /test-heartbeat - with user auth should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-heartbeat",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 403,
|
|
||||||
ExpectedContent: []string{"Requires admin role"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /test-heartbeat - with admin auth should report disabled state",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-heartbeat",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": adminUserToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"Heartbeat not configured"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /universal-token - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/universal-token",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /universal-token - with auth should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/universal-token",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"active", "token", "permanent"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /universal-token - enable permanent should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/universal-token?enable=1&permanent=1&token=permanent-token-123",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"permanent\":true", "permanent-token-123"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /user-alerts - no auth should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /user-alerts - with auth should succeed",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE /user-alerts - no auth should fail",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "DELETE /user-alerts - with auth should succeed",
|
|
||||||
Method: http.MethodDelete,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"success\":true"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
// Create an alert to delete
|
|
||||||
beszelTests.CreateRecord(app, "alerts", map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"system": system.Id,
|
|
||||||
"user": user.Id,
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
})
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?system=test-system&container=test-container",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - with auth but missing system param should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?container=test-container",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"system and container parameters are required"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - with auth but missing container param should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?system=test-system",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"system and container parameters are required"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - with auth but invalid system should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?system=invalid-system&container=0123456789ab",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"system not found"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/logs - traversal container should fail validation",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/logs?system=" + system.Id + "&container=..%2F..%2Fversion",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"invalid container parameter"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/info - traversal container should fail validation",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=../../version?x=",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"invalid container parameter"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /containers/info - non-hex container should fail validation",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/containers/info?system=" + system.Id + "&container=container_name",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{"invalid container parameter"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
|
|
||||||
// Auth Optional Routes - Should work without authentication
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - no auth should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with auth should also succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /first-run - no auth should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/first-run",
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"firstRun\":false"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /first-run - with auth should also succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/first-run",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": userToken,
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"firstRun\":false"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /agent-connect - no auth should succeed (websocket upgrade fails but route is accessible)",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/agent-connect",
|
|
||||||
ExpectedStatus: 400,
|
|
||||||
ExpectedContent: []string{},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /test-notification - invalid auth token should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/test-notification",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"url": "generic://127.0.0.1",
|
|
||||||
}),
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": "invalid-token",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /user-alerts - invalid auth token should fail",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/user-alerts",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"Authorization": "invalid-token",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"name": "CPU",
|
|
||||||
"value": 80,
|
|
||||||
"min": 10,
|
|
||||||
"systems": []string{system.Id},
|
|
||||||
}),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFirstUserCreation(t *testing.T) {
|
|
||||||
t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
defer hub.Cleanup()
|
defer hub.Cleanup()
|
||||||
|
|
||||||
hub.StartHub()
|
settings := hub.Settings()
|
||||||
|
assert.Equal(t, "http://localhost:8090", settings.Meta.AppURL)
|
||||||
testAppFactoryExisting := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
{
|
|
||||||
Name: "POST /create-user - should be available when no users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "firstuser@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"User created"},
|
|
||||||
TestAppFactory: testAppFactoryExisting,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
userCount, err := hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Zero(t, userCount, "Should start with no users")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should start with one temporary superuser")
|
|
||||||
require.EqualValues(t, migrations.TempAdminEmail, superusers[0].GetString("email"), "Should have created one temporary superuser")
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
userCount, err := hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, userCount, "Should have created one user")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should have created one superuser")
|
|
||||||
require.EqualValues(t, "firstuser@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "POST /create-user - should not be available when users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "firstuser@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"wasn't found"},
|
|
||||||
TestAppFactory: testAppFactoryExisting,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
|
t.Run("APP_URL changes app url", func(t *testing.T) {
|
||||||
t.Run("CreateUserEndpoint not available when USER_EMAIL, USER_PASSWORD are set", func(t *testing.T) {
|
t.Setenv("APP_URL", "http://example.com/app")
|
||||||
os.Setenv("BESZEL_HUB_USER_EMAIL", "me@example.com")
|
|
||||||
os.Setenv("BESZEL_HUB_USER_PASSWORD", "password123")
|
|
||||||
defer os.Unsetenv("BESZEL_HUB_USER_EMAIL")
|
|
||||||
defer os.Unsetenv("BESZEL_HUB_USER_PASSWORD")
|
|
||||||
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
||||||
defer hub.Cleanup()
|
defer hub.Cleanup()
|
||||||
|
|
||||||
hub.StartHub()
|
settings := hub.Settings()
|
||||||
|
assert.Equal(t, "http://example.com/app", settings.Meta.AppURL)
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario := beszelTests.ApiScenario{
|
|
||||||
Name: "POST /create-user - should not be available when USER_EMAIL, USER_PASSWORD are set",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"wasn't found"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
users, err := hub.FindAllRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(users), "Should start with one user")
|
|
||||||
require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should start with one superuser")
|
|
||||||
require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
|
||||||
},
|
|
||||||
AfterTestFunc: func(t testing.TB, app *pbTests.TestApp, res *http.Response) {
|
|
||||||
users, err := hub.FindAllRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(users), "Should still have one user")
|
|
||||||
require.EqualValues(t, "me@example.com", users[0].GetString("email"), "Should have created one user")
|
|
||||||
superusers, err := hub.FindAllRecords(core.CollectionNameSuperusers)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, len(superusers), "Should still have one superuser")
|
|
||||||
require.EqualValues(t, "me@example.com", superusers[0].GetString("email"), "Should have created one superuser")
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario.Test(t)
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCreateUserEndpointAvailability(t *testing.T) {
|
|
||||||
t.Run("CreateUserEndpoint available when no users exist", func(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
// Ensure no users exist
|
|
||||||
userCount, err := hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Zero(t, userCount, "Should start with no users")
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario := beszelTests.ApiScenario{
|
|
||||||
Name: "POST /create-user - should be available when no users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "firstuser@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"User created"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario.Test(t)
|
|
||||||
|
|
||||||
// Verify user was created
|
|
||||||
userCount, err = hub.CountRecords("users")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.EqualValues(t, 1, userCount, "Should have created one user")
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("CreateUserEndpoint not available when users exist", func(t *testing.T) {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
defer hub.Cleanup()
|
|
||||||
|
|
||||||
// Create a user first
|
|
||||||
_, err := beszelTests.CreateUser(hub, "existing@example.com", "password")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
hub.StartHub()
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario := beszelTests.ApiScenario{
|
|
||||||
Name: "POST /create-user - should not be available when users exist",
|
|
||||||
Method: http.MethodPost,
|
|
||||||
URL: "/api/beszel/create-user",
|
|
||||||
Body: jsonReader(map[string]any{
|
|
||||||
"email": "another@example.com",
|
|
||||||
"password": "password123",
|
|
||||||
}),
|
|
||||||
ExpectedStatus: 404,
|
|
||||||
ExpectedContent: []string{"wasn't found"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
}
|
|
||||||
|
|
||||||
scenario.Test(t)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAutoLoginMiddleware(t *testing.T) {
|
|
||||||
var hubs []*beszelTests.TestHub
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
defer os.Unsetenv("AUTO_LOGIN")
|
|
||||||
for _, hub := range hubs {
|
|
||||||
hub.Cleanup()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
os.Setenv("AUTO_LOGIN", "user@test.com")
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
hubs = append(hubs, hub)
|
|
||||||
hub.StartHub()
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - without auto login should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with auto login should fail if no matching user",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with auto login should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.CreateUser(app, "user@test.com", "password123")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTrustedHeaderMiddleware(t *testing.T) {
|
|
||||||
var hubs []*beszelTests.TestHub
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
defer os.Unsetenv("TRUSTED_AUTH_HEADER")
|
|
||||||
for _, hub := range hubs {
|
|
||||||
hub.Cleanup()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
os.Setenv("TRUSTED_AUTH_HEADER", "X-Beszel-Trusted")
|
|
||||||
|
|
||||||
testAppFactory := func(t testing.TB) *pbTests.TestApp {
|
|
||||||
hub, _ := beszelTests.NewTestHub(t.TempDir())
|
|
||||||
hubs = append(hubs, hub)
|
|
||||||
hub.StartHub()
|
|
||||||
return hub.TestApp
|
|
||||||
}
|
|
||||||
|
|
||||||
scenarios := []beszelTests.ApiScenario{
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - without trusted header should fail",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with trusted header should fail if no matching user",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"X-Beszel-Trusted": "user@test.com",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 401,
|
|
||||||
ExpectedContent: []string{"requires valid"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "GET /getkey - with trusted header should succeed",
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: "/api/beszel/getkey",
|
|
||||||
Headers: map[string]string{
|
|
||||||
"X-Beszel-Trusted": "user@test.com",
|
|
||||||
},
|
|
||||||
ExpectedStatus: 200,
|
|
||||||
ExpectedContent: []string{"\"key\":", "\"v\":"},
|
|
||||||
TestAppFactory: testAppFactory,
|
|
||||||
BeforeTestFunc: func(t testing.TB, app *pbTests.TestApp, e *core.ServeEvent) {
|
|
||||||
beszelTests.CreateUser(app, "user@test.com", "password123")
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, scenario := range scenarios {
|
|
||||||
scenario.Test(t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -2,7 +2,9 @@
|
|||||||
|
|
||||||
package hub
|
package hub
|
||||||
|
|
||||||
import "github.com/henrygd/beszel/internal/hub/systems"
|
import (
|
||||||
|
"github.com/henrygd/beszel/internal/hub/systems"
|
||||||
|
)
|
||||||
|
|
||||||
// TESTING ONLY: GetSystemManager returns the system manager
|
// TESTING ONLY: GetSystemManager returns the system manager
|
||||||
func (h *Hub) GetSystemManager() *systems.SystemManager {
|
func (h *Hub) GetSystemManager() *systems.SystemManager {
|
||||||
@@ -18,3 +20,7 @@ func (h *Hub) GetPubkey() string {
|
|||||||
func (h *Hub) SetPubkey(pubkey string) {
|
func (h *Hub) SetPubkey(pubkey string) {
|
||||||
h.pubKey = pubkey
|
h.pubKey = pubkey
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (h *Hub) SetCollectionAuthSettings() error {
|
||||||
|
return setCollectionAuthSettings(h)
|
||||||
|
}
|
||||||
|
|||||||
@@ -133,16 +133,31 @@ func (sys *System) update() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ensure deprecated fields from older agents are migrated to current fields
|
||||||
|
migrateDeprecatedFields(data, !sys.detailsFetched.Load())
|
||||||
|
|
||||||
// create system records
|
// create system records
|
||||||
_, err = sys.createRecords(data)
|
_, err = sys.createRecords(data)
|
||||||
|
|
||||||
|
// if details were included and fetched successfully, mark details as fetched and update smart interval if set by agent
|
||||||
|
if err == nil && data.Details != nil {
|
||||||
|
sys.detailsFetched.Store(true)
|
||||||
|
// update smart interval if it's set on the agent side
|
||||||
|
if data.Details.SmartInterval > 0 {
|
||||||
|
sys.smartInterval = data.Details.SmartInterval
|
||||||
|
// make sure we reset expiration of lastFetch to remain as long as the new smart interval
|
||||||
|
// to prevent premature expiration leading to new fetch if interval is different.
|
||||||
|
sys.manager.smartFetchMap.UpdateExpiration(sys.Id, sys.smartInterval+time.Minute)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Fetch and save SMART devices when system first comes online or at intervals
|
// Fetch and save SMART devices when system first comes online or at intervals
|
||||||
if backgroundSmartFetchEnabled() {
|
if backgroundSmartFetchEnabled() && sys.detailsFetched.Load() {
|
||||||
if sys.smartInterval <= 0 {
|
if sys.smartInterval <= 0 {
|
||||||
sys.smartInterval = time.Hour
|
sys.smartInterval = time.Hour
|
||||||
}
|
}
|
||||||
lastFetch, _ := sys.manager.smartFetchMap.GetOk(sys.Id)
|
lastFetch, _ := sys.manager.smartFetchMap.GetOk(sys.Id)
|
||||||
if time.Since(time.UnixMilli(lastFetch)) >= sys.smartInterval && sys.smartFetching.CompareAndSwap(false, true) {
|
if time.Since(time.UnixMilli(lastFetch-1e4)) >= sys.smartInterval && sys.smartFetching.CompareAndSwap(false, true) {
|
||||||
go func() {
|
go func() {
|
||||||
defer sys.smartFetching.Store(false)
|
defer sys.smartFetching.Store(false)
|
||||||
sys.manager.smartFetchMap.Set(sys.Id, time.Now().UnixMilli(), sys.smartInterval+time.Minute)
|
sys.manager.smartFetchMap.Set(sys.Id, time.Now().UnixMilli(), sys.smartInterval+time.Minute)
|
||||||
@@ -220,11 +235,6 @@ func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error
|
|||||||
if err := createSystemDetailsRecord(txApp, data.Details, sys.Id); err != nil {
|
if err := createSystemDetailsRecord(txApp, data.Details, sys.Id); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
sys.detailsFetched.Store(true)
|
|
||||||
// update smart interval if it's set on the agent side
|
|
||||||
if data.Details.SmartInterval > 0 {
|
|
||||||
sys.smartInterval = data.Details.SmartInterval
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// update system record (do this last because it triggers alerts and we need above records to be inserted first)
|
// update system record (do this last because it triggers alerts and we need above records to be inserted first)
|
||||||
@@ -308,10 +318,11 @@ func createContainerRecords(app core.App, data []*container.Stats, systemId stri
|
|||||||
valueStrings := make([]string, 0, len(data))
|
valueStrings := make([]string, 0, len(data))
|
||||||
for i, container := range data {
|
for i, container := range data {
|
||||||
suffix := fmt.Sprintf("%d", i)
|
suffix := fmt.Sprintf("%d", i)
|
||||||
valueStrings = append(valueStrings, fmt.Sprintf("({:id%[1]s}, {:system}, {:name%[1]s}, {:image%[1]s}, {:status%[1]s}, {:health%[1]s}, {:cpu%[1]s}, {:memory%[1]s}, {:net%[1]s}, {:updated})", suffix))
|
valueStrings = append(valueStrings, fmt.Sprintf("({:id%[1]s}, {:system}, {:name%[1]s}, {:image%[1]s}, {:ports%[1]s}, {:status%[1]s}, {:health%[1]s}, {:cpu%[1]s}, {:memory%[1]s}, {:net%[1]s}, {:updated})", suffix))
|
||||||
params["id"+suffix] = container.Id
|
params["id"+suffix] = container.Id
|
||||||
params["name"+suffix] = container.Name
|
params["name"+suffix] = container.Name
|
||||||
params["image"+suffix] = container.Image
|
params["image"+suffix] = container.Image
|
||||||
|
params["ports"+suffix] = container.Ports
|
||||||
params["status"+suffix] = container.Status
|
params["status"+suffix] = container.Status
|
||||||
params["health"+suffix] = container.Health
|
params["health"+suffix] = container.Health
|
||||||
params["cpu"+suffix] = container.Cpu
|
params["cpu"+suffix] = container.Cpu
|
||||||
@@ -323,7 +334,7 @@ func createContainerRecords(app core.App, data []*container.Stats, systemId stri
|
|||||||
params["net"+suffix] = netBytes
|
params["net"+suffix] = netBytes
|
||||||
}
|
}
|
||||||
queryString := fmt.Sprintf(
|
queryString := fmt.Sprintf(
|
||||||
"INSERT INTO containers (id, system, name, image, status, health, cpu, memory, net, updated) VALUES %s ON CONFLICT(id) DO UPDATE SET system = excluded.system, name = excluded.name, image = excluded.image, status = excluded.status, health = excluded.health, cpu = excluded.cpu, memory = excluded.memory, net = excluded.net, updated = excluded.updated",
|
"INSERT INTO containers (id, system, name, image, ports, status, health, cpu, memory, net, updated) VALUES %s ON CONFLICT(id) DO UPDATE SET system = excluded.system, name = excluded.name, image = excluded.image, ports = excluded.ports, status = excluded.status, health = excluded.health, cpu = excluded.cpu, memory = excluded.memory, net = excluded.net, updated = excluded.updated",
|
||||||
strings.Join(valueStrings, ","),
|
strings.Join(valueStrings, ","),
|
||||||
)
|
)
|
||||||
_, err := app.DB().NewQuery(queryString).Bind(params).Execute()
|
_, err := app.DB().NewQuery(queryString).Bind(params).Execute()
|
||||||
@@ -702,3 +713,50 @@ func getJitter() <-chan time.Time {
|
|||||||
msDelay := (interval * minPercent / 100) + rand.Intn(interval*jitterRange/100)
|
msDelay := (interval * minPercent / 100) + rand.Intn(interval*jitterRange/100)
|
||||||
return time.After(time.Duration(msDelay) * time.Millisecond)
|
return time.After(time.Duration(msDelay) * time.Millisecond)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// migrateDeprecatedFields moves values from deprecated fields to their new locations if the new
|
||||||
|
// fields are not already populated. Deprecated fields and refs may be removed at least 30 days
|
||||||
|
// and one minor version release after the release that includes the migration.
|
||||||
|
//
|
||||||
|
// This is run when processing incoming system data from agents, which may be on older versions.
|
||||||
|
func migrateDeprecatedFields(cd *system.CombinedData, createDetails bool) {
|
||||||
|
// migration added 0.19.0
|
||||||
|
if cd.Stats.Bandwidth[0] == 0 && cd.Stats.Bandwidth[1] == 0 {
|
||||||
|
cd.Stats.Bandwidth[0] = uint64(cd.Stats.NetworkSent * 1024 * 1024)
|
||||||
|
cd.Stats.Bandwidth[1] = uint64(cd.Stats.NetworkRecv * 1024 * 1024)
|
||||||
|
cd.Stats.NetworkSent, cd.Stats.NetworkRecv = 0, 0
|
||||||
|
}
|
||||||
|
// migration added 0.19.0
|
||||||
|
if cd.Info.BandwidthBytes == 0 {
|
||||||
|
cd.Info.BandwidthBytes = uint64(cd.Info.Bandwidth * 1024 * 1024)
|
||||||
|
cd.Info.Bandwidth = 0
|
||||||
|
}
|
||||||
|
// migration added 0.19.0
|
||||||
|
if cd.Stats.DiskIO[0] == 0 && cd.Stats.DiskIO[1] == 0 {
|
||||||
|
cd.Stats.DiskIO[0] = uint64(cd.Stats.DiskReadPs * 1024 * 1024)
|
||||||
|
cd.Stats.DiskIO[1] = uint64(cd.Stats.DiskWritePs * 1024 * 1024)
|
||||||
|
cd.Stats.DiskReadPs, cd.Stats.DiskWritePs = 0, 0
|
||||||
|
}
|
||||||
|
// migration added 0.19.0 - Move deprecated Info fields to Details struct
|
||||||
|
if cd.Details == nil && cd.Info.Hostname != "" {
|
||||||
|
if createDetails {
|
||||||
|
cd.Details = &system.Details{
|
||||||
|
Hostname: cd.Info.Hostname,
|
||||||
|
Kernel: cd.Info.KernelVersion,
|
||||||
|
Cores: cd.Info.Cores,
|
||||||
|
Threads: cd.Info.Threads,
|
||||||
|
CpuModel: cd.Info.CpuModel,
|
||||||
|
Podman: cd.Info.Podman,
|
||||||
|
Os: cd.Info.Os,
|
||||||
|
MemoryTotal: uint64(cd.Stats.Mem * 1024 * 1024 * 1024),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// zero the deprecated fields to prevent saving them in systems.info DB json payload
|
||||||
|
cd.Info.Hostname = ""
|
||||||
|
cd.Info.KernelVersion = ""
|
||||||
|
cd.Info.Cores = 0
|
||||||
|
cd.Info.CpuModel = ""
|
||||||
|
cd.Info.Podman = false
|
||||||
|
cd.Info.Os = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -19,7 +19,6 @@ type subscriptionInfo struct {
|
|||||||
var (
|
var (
|
||||||
activeSubscriptions = make(map[string]*subscriptionInfo)
|
activeSubscriptions = make(map[string]*subscriptionInfo)
|
||||||
workerRunning bool
|
workerRunning bool
|
||||||
realtimeTicker *time.Ticker
|
|
||||||
tickerStopChan chan struct{}
|
tickerStopChan chan struct{}
|
||||||
realtimeMutex sync.Mutex
|
realtimeMutex sync.Mutex
|
||||||
)
|
)
|
||||||
@@ -70,7 +69,7 @@ func (sm *SystemManager) onRealtimeSubscribeRequest(e *core.RealtimeSubscribeReq
|
|||||||
}
|
}
|
||||||
|
|
||||||
// onRealtimeSubscriptionAdded initializes or starts the realtime worker when the first subscription is added.
|
// onRealtimeSubscriptionAdded initializes or starts the realtime worker when the first subscription is added.
|
||||||
// It ensures only one worker runs at a time and creates the ticker for periodic data fetching.
|
// It ensures only one worker runs at a time.
|
||||||
func (sm *SystemManager) onRealtimeSubscriptionAdded() {
|
func (sm *SystemManager) onRealtimeSubscriptionAdded() {
|
||||||
realtimeMutex.Lock()
|
realtimeMutex.Lock()
|
||||||
defer realtimeMutex.Unlock()
|
defer realtimeMutex.Unlock()
|
||||||
@@ -82,11 +81,6 @@ func (sm *SystemManager) onRealtimeSubscriptionAdded() {
|
|||||||
tickerStopChan = make(chan struct{})
|
tickerStopChan = make(chan struct{})
|
||||||
go sm.startRealtimeWorker()
|
go sm.startRealtimeWorker()
|
||||||
}
|
}
|
||||||
|
|
||||||
// If no ticker exists, create one
|
|
||||||
if realtimeTicker == nil {
|
|
||||||
realtimeTicker = time.NewTicker(1 * time.Second)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkSubscriptions stops the realtime worker when there are no active subscriptions.
|
// checkSubscriptions stops the realtime worker when there are no active subscriptions.
|
||||||
@@ -107,11 +101,6 @@ func (sm *SystemManager) checkSubscriptions() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if realtimeTicker != nil {
|
|
||||||
realtimeTicker.Stop()
|
|
||||||
realtimeTicker = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Mark worker as stopped (will be reset when next subscription comes in)
|
// Mark worker as stopped (will be reset when next subscription comes in)
|
||||||
workerRunning = false
|
workerRunning = false
|
||||||
}
|
}
|
||||||
@@ -135,17 +124,16 @@ func (sm *SystemManager) removeRealtimeSubscription(subscription string, options
|
|||||||
// It continuously fetches system data and broadcasts it to subscribed clients via WebSocket.
|
// It continuously fetches system data and broadcasts it to subscribed clients via WebSocket.
|
||||||
func (sm *SystemManager) startRealtimeWorker() {
|
func (sm *SystemManager) startRealtimeWorker() {
|
||||||
sm.fetchRealtimeDataAndNotify()
|
sm.fetchRealtimeDataAndNotify()
|
||||||
|
tick := time.Tick(1 * time.Second)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-tickerStopChan:
|
case <-tickerStopChan:
|
||||||
return
|
return
|
||||||
case <-realtimeTicker.C:
|
case <-tick:
|
||||||
// Check if ticker is still valid (might have been stopped)
|
if len(activeSubscriptions) == 0 {
|
||||||
if realtimeTicker == nil || len(activeSubscriptions) == 0 {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// slog.Debug("activeSubscriptions", "count", len(activeSubscriptions))
|
|
||||||
sm.fetchRealtimeDataAndNotify()
|
sm.fetchRealtimeDataAndNotify()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
159
internal/hub/systems/system_test.go
Normal file
159
internal/hub/systems/system_test.go
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package systems
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCombinedData_MigrateDeprecatedFields(t *testing.T) {
|
||||||
|
t.Run("Migrate NetworkSent and NetworkRecv to Bandwidth", func(t *testing.T) {
|
||||||
|
cd := &system.CombinedData{
|
||||||
|
Stats: system.Stats{
|
||||||
|
NetworkSent: 1.5, // 1.5 MB
|
||||||
|
NetworkRecv: 2.5, // 2.5 MB
|
||||||
|
},
|
||||||
|
}
|
||||||
|
migrateDeprecatedFields(cd, true)
|
||||||
|
|
||||||
|
expectedSent := uint64(1.5 * 1024 * 1024)
|
||||||
|
expectedRecv := uint64(2.5 * 1024 * 1024)
|
||||||
|
|
||||||
|
if cd.Stats.Bandwidth[0] != expectedSent {
|
||||||
|
t.Errorf("expected Bandwidth[0] %d, got %d", expectedSent, cd.Stats.Bandwidth[0])
|
||||||
|
}
|
||||||
|
if cd.Stats.Bandwidth[1] != expectedRecv {
|
||||||
|
t.Errorf("expected Bandwidth[1] %d, got %d", expectedRecv, cd.Stats.Bandwidth[1])
|
||||||
|
}
|
||||||
|
if cd.Stats.NetworkSent != 0 || cd.Stats.NetworkRecv != 0 {
|
||||||
|
t.Errorf("expected NetworkSent and NetworkRecv to be reset, got %f, %f", cd.Stats.NetworkSent, cd.Stats.NetworkRecv)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Migrate Info.Bandwidth to Info.BandwidthBytes", func(t *testing.T) {
|
||||||
|
cd := &system.CombinedData{
|
||||||
|
Info: system.Info{
|
||||||
|
Bandwidth: 10.0, // 10 MB
|
||||||
|
},
|
||||||
|
}
|
||||||
|
migrateDeprecatedFields(cd, true)
|
||||||
|
|
||||||
|
expected := uint64(10 * 1024 * 1024)
|
||||||
|
if cd.Info.BandwidthBytes != expected {
|
||||||
|
t.Errorf("expected BandwidthBytes %d, got %d", expected, cd.Info.BandwidthBytes)
|
||||||
|
}
|
||||||
|
if cd.Info.Bandwidth != 0 {
|
||||||
|
t.Errorf("expected Info.Bandwidth to be reset, got %f", cd.Info.Bandwidth)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Migrate DiskReadPs and DiskWritePs to DiskIO", func(t *testing.T) {
|
||||||
|
cd := &system.CombinedData{
|
||||||
|
Stats: system.Stats{
|
||||||
|
DiskReadPs: 3.0, // 3 MB
|
||||||
|
DiskWritePs: 4.0, // 4 MB
|
||||||
|
},
|
||||||
|
}
|
||||||
|
migrateDeprecatedFields(cd, true)
|
||||||
|
|
||||||
|
expectedRead := uint64(3 * 1024 * 1024)
|
||||||
|
expectedWrite := uint64(4 * 1024 * 1024)
|
||||||
|
|
||||||
|
if cd.Stats.DiskIO[0] != expectedRead {
|
||||||
|
t.Errorf("expected DiskIO[0] %d, got %d", expectedRead, cd.Stats.DiskIO[0])
|
||||||
|
}
|
||||||
|
if cd.Stats.DiskIO[1] != expectedWrite {
|
||||||
|
t.Errorf("expected DiskIO[1] %d, got %d", expectedWrite, cd.Stats.DiskIO[1])
|
||||||
|
}
|
||||||
|
if cd.Stats.DiskReadPs != 0 || cd.Stats.DiskWritePs != 0 {
|
||||||
|
t.Errorf("expected DiskReadPs and DiskWritePs to be reset, got %f, %f", cd.Stats.DiskReadPs, cd.Stats.DiskWritePs)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Migrate Info fields to Details struct", func(t *testing.T) {
|
||||||
|
cd := &system.CombinedData{
|
||||||
|
Stats: system.Stats{
|
||||||
|
Mem: 16.0, // 16 GB
|
||||||
|
},
|
||||||
|
Info: system.Info{
|
||||||
|
Hostname: "test-host",
|
||||||
|
KernelVersion: "6.8.0",
|
||||||
|
Cores: 8,
|
||||||
|
Threads: 16,
|
||||||
|
CpuModel: "Intel i7",
|
||||||
|
Podman: true,
|
||||||
|
Os: system.Linux,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
migrateDeprecatedFields(cd, true)
|
||||||
|
|
||||||
|
if cd.Details == nil {
|
||||||
|
t.Fatal("expected Details struct to be created")
|
||||||
|
}
|
||||||
|
if cd.Details.Hostname != "test-host" {
|
||||||
|
t.Errorf("expected Hostname 'test-host', got '%s'", cd.Details.Hostname)
|
||||||
|
}
|
||||||
|
if cd.Details.Kernel != "6.8.0" {
|
||||||
|
t.Errorf("expected Kernel '6.8.0', got '%s'", cd.Details.Kernel)
|
||||||
|
}
|
||||||
|
if cd.Details.Cores != 8 {
|
||||||
|
t.Errorf("expected Cores 8, got %d", cd.Details.Cores)
|
||||||
|
}
|
||||||
|
if cd.Details.Threads != 16 {
|
||||||
|
t.Errorf("expected Threads 16, got %d", cd.Details.Threads)
|
||||||
|
}
|
||||||
|
if cd.Details.CpuModel != "Intel i7" {
|
||||||
|
t.Errorf("expected CpuModel 'Intel i7', got '%s'", cd.Details.CpuModel)
|
||||||
|
}
|
||||||
|
if cd.Details.Podman != true {
|
||||||
|
t.Errorf("expected Podman true, got %v", cd.Details.Podman)
|
||||||
|
}
|
||||||
|
if cd.Details.Os != system.Linux {
|
||||||
|
t.Errorf("expected Os Linux, got %d", cd.Details.Os)
|
||||||
|
}
|
||||||
|
expectedMem := uint64(16 * 1024 * 1024 * 1024)
|
||||||
|
if cd.Details.MemoryTotal != expectedMem {
|
||||||
|
t.Errorf("expected MemoryTotal %d, got %d", expectedMem, cd.Details.MemoryTotal)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cd.Info.Hostname != "" || cd.Info.KernelVersion != "" || cd.Info.Cores != 0 || cd.Info.CpuModel != "" || cd.Info.Podman != false || cd.Info.Os != 0 {
|
||||||
|
t.Errorf("expected Info fields to be reset, got %+v", cd.Info)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Do not migrate if Details already exists", func(t *testing.T) {
|
||||||
|
cd := &system.CombinedData{
|
||||||
|
Details: &system.Details{Hostname: "existing-host"},
|
||||||
|
Info: system.Info{
|
||||||
|
Hostname: "deprecated-host",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
migrateDeprecatedFields(cd, true)
|
||||||
|
|
||||||
|
if cd.Details.Hostname != "existing-host" {
|
||||||
|
t.Errorf("expected Hostname 'existing-host', got '%s'", cd.Details.Hostname)
|
||||||
|
}
|
||||||
|
if cd.Info.Hostname != "deprecated-host" {
|
||||||
|
t.Errorf("expected Info.Hostname to remain 'deprecated-host', got '%s'", cd.Info.Hostname)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("Do not create details if migrateDetails is false", func(t *testing.T) {
|
||||||
|
cd := &system.CombinedData{
|
||||||
|
Info: system.Info{
|
||||||
|
Hostname: "deprecated-host",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
migrateDeprecatedFields(cd, false)
|
||||||
|
|
||||||
|
if cd.Details != nil {
|
||||||
|
t.Fatal("expected Details struct to not be created")
|
||||||
|
}
|
||||||
|
|
||||||
|
if cd.Info.Hostname != "" {
|
||||||
|
t.Errorf("expected Info.Hostname to be reset, got '%s'", cd.Info.Hostname)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
entities "github.com/henrygd/beszel/internal/entities/system"
|
entities "github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
// The hub integration tests create/replace systems and cleanup the test apps quickly.
|
// The hub integration tests create/replace systems and cleanup the test apps quickly.
|
||||||
@@ -115,3 +116,12 @@ func (sm *SystemManager) RemoveAllSystems() {
|
|||||||
}
|
}
|
||||||
sm.smartFetchMap.StopCleaner()
|
sm.smartFetchMap.StopCleaner()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *System) StopUpdater() {
|
||||||
|
s.cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *System) CreateRecords(data *entities.CombinedData) (*core.Record, error) {
|
||||||
|
s.data = data
|
||||||
|
return s.createRecords(data)
|
||||||
|
}
|
||||||
|
|||||||
@@ -111,6 +111,9 @@ func (ws *WsConn) Close(msg []byte) {
|
|||||||
|
|
||||||
// Ping sends a ping frame to keep the connection alive.
|
// Ping sends a ping frame to keep the connection alive.
|
||||||
func (ws *WsConn) Ping() error {
|
func (ws *WsConn) Ping() error {
|
||||||
|
if ws.conn == nil {
|
||||||
|
return gws.ErrConnClosed
|
||||||
|
}
|
||||||
ws.conn.SetDeadline(time.Now().Add(deadline))
|
ws.conn.SetDeadline(time.Now().Add(deadline))
|
||||||
return ws.conn.WritePing(nil)
|
return ws.conn.WritePing(nil)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -11,11 +11,11 @@ func init() {
|
|||||||
jsonData := `[
|
jsonData := `[
|
||||||
{
|
{
|
||||||
"id": "elngm8x1l60zi2v",
|
"id": "elngm8x1l60zi2v",
|
||||||
"listRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"listRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"viewRule": "",
|
"viewRule": null,
|
||||||
"createRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"createRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"updateRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"updateRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"deleteRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"deleteRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"name": "alerts",
|
"name": "alerts",
|
||||||
"type": "base",
|
"type": "base",
|
||||||
"fields": [
|
"fields": [
|
||||||
@@ -143,11 +143,11 @@ func init() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "pbc_1697146157",
|
"id": "pbc_1697146157",
|
||||||
"listRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"listRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"viewRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"viewRule": null,
|
||||||
"createRule": null,
|
"createRule": null,
|
||||||
"updateRule": null,
|
"updateRule": null,
|
||||||
"deleteRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"deleteRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"name": "alerts_history",
|
"name": "alerts_history",
|
||||||
"type": "base",
|
"type": "base",
|
||||||
"fields": [
|
"fields": [
|
||||||
@@ -261,7 +261,7 @@ func init() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "juohu4jipgc13v7",
|
"id": "juohu4jipgc13v7",
|
||||||
"listRule": "@request.auth.id != \"\"",
|
"listRule": null,
|
||||||
"viewRule": null,
|
"viewRule": null,
|
||||||
"createRule": null,
|
"createRule": null,
|
||||||
"updateRule": null,
|
"updateRule": null,
|
||||||
@@ -351,10 +351,10 @@ func init() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "pbc_3663931638",
|
"id": "pbc_3663931638",
|
||||||
"listRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
"listRule": null,
|
||||||
"viewRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
"viewRule": null,
|
||||||
"createRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id && @request.auth.role != \"readonly\"",
|
"createRule": null,
|
||||||
"updateRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id && @request.auth.role != \"readonly\"",
|
"updateRule": null,
|
||||||
"deleteRule": null,
|
"deleteRule": null,
|
||||||
"name": "fingerprints",
|
"name": "fingerprints",
|
||||||
"type": "base",
|
"type": "base",
|
||||||
@@ -433,7 +433,7 @@ func init() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "ej9oowivz8b2mht",
|
"id": "ej9oowivz8b2mht",
|
||||||
"listRule": "@request.auth.id != \"\"",
|
"listRule": null,
|
||||||
"viewRule": null,
|
"viewRule": null,
|
||||||
"createRule": null,
|
"createRule": null,
|
||||||
"updateRule": null,
|
"updateRule": null,
|
||||||
@@ -523,10 +523,10 @@ func init() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "4afacsdnlu8q8r2",
|
"id": "4afacsdnlu8q8r2",
|
||||||
"listRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"listRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"viewRule": null,
|
"viewRule": null,
|
||||||
"createRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"createRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"updateRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"updateRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"deleteRule": null,
|
"deleteRule": null,
|
||||||
"name": "user_settings",
|
"name": "user_settings",
|
||||||
"type": "base",
|
"type": "base",
|
||||||
@@ -596,11 +596,11 @@ func init() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "2hz5ncl8tizk5nx",
|
"id": "2hz5ncl8tizk5nx",
|
||||||
"listRule": "@request.auth.id != \"\" && users.id ?= @request.auth.id",
|
"listRule": null,
|
||||||
"viewRule": "@request.auth.id != \"\" && users.id ?= @request.auth.id",
|
"viewRule": null,
|
||||||
"createRule": "@request.auth.id != \"\" && users.id ?= @request.auth.id && @request.auth.role != \"readonly\"",
|
"createRule": null,
|
||||||
"updateRule": "@request.auth.id != \"\" && users.id ?= @request.auth.id && @request.auth.role != \"readonly\"",
|
"updateRule": null,
|
||||||
"deleteRule": "@request.auth.id != \"\" && users.id ?= @request.auth.id && @request.auth.role != \"readonly\"",
|
"deleteRule": null,
|
||||||
"name": "systems",
|
"name": "systems",
|
||||||
"type": "base",
|
"type": "base",
|
||||||
"fields": [
|
"fields": [
|
||||||
@@ -866,7 +866,7 @@ func init() {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"id": "pbc_1864144027",
|
"id": "pbc_1864144027",
|
||||||
"listRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
"listRule": null,
|
||||||
"viewRule": null,
|
"viewRule": null,
|
||||||
"createRule": null,
|
"createRule": null,
|
||||||
"updateRule": null,
|
"updateRule": null,
|
||||||
@@ -977,18 +977,6 @@ func init() {
|
|||||||
"system": false,
|
"system": false,
|
||||||
"type": "number"
|
"type": "number"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"hidden": false,
|
|
||||||
"id": "number3332085495",
|
|
||||||
"max": null,
|
|
||||||
"min": null,
|
|
||||||
"name": "updated",
|
|
||||||
"onlyInt": true,
|
|
||||||
"presentable": false,
|
|
||||||
"required": true,
|
|
||||||
"system": false,
|
|
||||||
"type": "number"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"autogeneratePattern": "",
|
"autogeneratePattern": "",
|
||||||
"hidden": false,
|
"hidden": false,
|
||||||
@@ -1002,6 +990,32 @@ func init() {
|
|||||||
"required": false,
|
"required": false,
|
||||||
"system": false,
|
"system": false,
|
||||||
"type": "text"
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"autogeneratePattern": "",
|
||||||
|
"hidden": false,
|
||||||
|
"id": "text2308952269",
|
||||||
|
"max": 0,
|
||||||
|
"min": 0,
|
||||||
|
"name": "ports",
|
||||||
|
"pattern": "",
|
||||||
|
"presentable": false,
|
||||||
|
"primaryKey": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number3332085495",
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"name": "updated",
|
||||||
|
"onlyInt": true,
|
||||||
|
"presentable": false,
|
||||||
|
"required": true,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"indexes": [
|
"indexes": [
|
||||||
@@ -1145,7 +1159,7 @@ func init() {
|
|||||||
"CREATE INDEX ` + "`" + `idx_4Z7LuLNdQb` + "`" + ` ON ` + "`" + `systemd_services` + "`" + ` (` + "`" + `system` + "`" + `)",
|
"CREATE INDEX ` + "`" + `idx_4Z7LuLNdQb` + "`" + ` ON ` + "`" + `systemd_services` + "`" + ` (` + "`" + `system` + "`" + `)",
|
||||||
"CREATE INDEX ` + "`" + `idx_pBp1fF837e` + "`" + ` ON ` + "`" + `systemd_services` + "`" + ` (` + "`" + `updated` + "`" + `)"
|
"CREATE INDEX ` + "`" + `idx_pBp1fF837e` + "`" + ` ON ` + "`" + `systemd_services` + "`" + ` (` + "`" + `updated` + "`" + `)"
|
||||||
],
|
],
|
||||||
"listRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
"listRule": null,
|
||||||
"name": "systemd_services",
|
"name": "systemd_services",
|
||||||
"system": false,
|
"system": false,
|
||||||
"type": "base",
|
"type": "base",
|
||||||
@@ -1153,8 +1167,8 @@ func init() {
|
|||||||
"viewRule": null
|
"viewRule": null
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"createRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"createRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"deleteRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"deleteRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"fields": [
|
"fields": [
|
||||||
{
|
{
|
||||||
"autogeneratePattern": "[a-z0-9]{10}",
|
"autogeneratePattern": "[a-z0-9]{10}",
|
||||||
@@ -1238,16 +1252,16 @@ func init() {
|
|||||||
"CREATE INDEX ` + "`" + `idx_q0iKnRP9v8` + "`" + ` ON ` + "`" + `quiet_hours` + "`" + ` (\n ` + "`" + `user` + "`" + `,\n ` + "`" + `system` + "`" + `\n)",
|
"CREATE INDEX ` + "`" + `idx_q0iKnRP9v8` + "`" + ` ON ` + "`" + `quiet_hours` + "`" + ` (\n ` + "`" + `user` + "`" + `,\n ` + "`" + `system` + "`" + `\n)",
|
||||||
"CREATE INDEX ` + "`" + `idx_6T7ljT7FJd` + "`" + ` ON ` + "`" + `quiet_hours` + "`" + ` (\n ` + "`" + `type` + "`" + `,\n ` + "`" + `end` + "`" + `\n)"
|
"CREATE INDEX ` + "`" + `idx_6T7ljT7FJd` + "`" + ` ON ` + "`" + `quiet_hours` + "`" + ` (\n ` + "`" + `type` + "`" + `,\n ` + "`" + `end` + "`" + `\n)"
|
||||||
],
|
],
|
||||||
"listRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"listRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"name": "quiet_hours",
|
"name": "quiet_hours",
|
||||||
"system": false,
|
"system": false,
|
||||||
"type": "base",
|
"type": "base",
|
||||||
"updateRule": "@request.auth.id != \"\" && user.id = @request.auth.id",
|
"updateRule": "@request.auth.id != \"\" && user = @request.auth.id",
|
||||||
"viewRule": "@request.auth.id != \"\" && user.id = @request.auth.id"
|
"viewRule": "@request.auth.id != \"\" && user = @request.auth.id"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"createRule": null,
|
"createRule": null,
|
||||||
"deleteRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
"deleteRule": null,
|
||||||
"fields": [
|
"fields": [
|
||||||
{
|
{
|
||||||
"autogeneratePattern": "[a-z0-9]{10}",
|
"autogeneratePattern": "[a-z0-9]{10}",
|
||||||
@@ -1433,16 +1447,16 @@ func init() {
|
|||||||
"indexes": [
|
"indexes": [
|
||||||
"CREATE INDEX ` + "`" + `idx_DZ9yhvgl44` + "`" + ` ON ` + "`" + `smart_devices` + "`" + ` (` + "`" + `system` + "`" + `)"
|
"CREATE INDEX ` + "`" + `idx_DZ9yhvgl44` + "`" + ` ON ` + "`" + `smart_devices` + "`" + ` (` + "`" + `system` + "`" + `)"
|
||||||
],
|
],
|
||||||
"listRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
"listRule": null,
|
||||||
"name": "smart_devices",
|
"name": "smart_devices",
|
||||||
"system": false,
|
"system": false,
|
||||||
"type": "base",
|
"type": "base",
|
||||||
"updateRule": null,
|
"updateRule": null,
|
||||||
"viewRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id"
|
"viewRule": null
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"createRule": "",
|
"createRule": null,
|
||||||
"deleteRule": "",
|
"deleteRule": null,
|
||||||
"fields": [
|
"fields": [
|
||||||
{
|
{
|
||||||
"autogeneratePattern": "[a-z0-9]{15}",
|
"autogeneratePattern": "[a-z0-9]{15}",
|
||||||
@@ -1611,12 +1625,12 @@ func init() {
|
|||||||
],
|
],
|
||||||
"id": "pbc_3116237454",
|
"id": "pbc_3116237454",
|
||||||
"indexes": [],
|
"indexes": [],
|
||||||
"listRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
|
||||||
"name": "system_details",
|
"name": "system_details",
|
||||||
"system": false,
|
"system": false,
|
||||||
"type": "base",
|
"type": "base",
|
||||||
"updateRule": "",
|
"updateRule": null,
|
||||||
"viewRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id"
|
"listRule": null,
|
||||||
|
"viewRule": null
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"createRule": null,
|
"createRule": null,
|
||||||
@@ -7,6 +7,19 @@
|
|||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0,maximum-scale=1.0, user-scalable=no, viewport-fit=cover" />
|
<meta name="viewport" content="width=device-width, initial-scale=1.0,maximum-scale=1.0, user-scalable=no, viewport-fit=cover" />
|
||||||
<meta name="robots" content="noindex, nofollow" />
|
<meta name="robots" content="noindex, nofollow" />
|
||||||
<title>Beszel</title>
|
<title>Beszel</title>
|
||||||
|
<style>
|
||||||
|
.dark { background: hsl(220 5.5% 9%); color-scheme: dark; }
|
||||||
|
</style>
|
||||||
|
<script>
|
||||||
|
(function() {
|
||||||
|
try {
|
||||||
|
var theme = localStorage.getItem('ui-theme');
|
||||||
|
var isDark = theme === 'dark' ||
|
||||||
|
(theme !== 'light' && window.matchMedia('(prefers-color-scheme: dark)').matches);
|
||||||
|
document.documentElement.classList.add(isDark ? 'dark' : 'light');
|
||||||
|
} catch (e) {}
|
||||||
|
})();
|
||||||
|
</script>
|
||||||
<script>
|
<script>
|
||||||
globalThis.BESZEL = {
|
globalThis.BESZEL = {
|
||||||
BASE_PATH: "%BASE_URL%",
|
BASE_PATH: "%BASE_URL%",
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
{
|
{
|
||||||
"name": "beszel",
|
"name": "beszel",
|
||||||
"private": true,
|
"private": true,
|
||||||
"version": "0.18.4",
|
"version": "0.18.5",
|
||||||
"type": "module",
|
"type": "module",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"dev": "vite --host",
|
"dev": "vite --host",
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
import { msg, t } from "@lingui/core/macro"
|
import { t } from "@lingui/core/macro"
|
||||||
import { Trans } from "@lingui/react/macro"
|
import { Trans } from "@lingui/react/macro"
|
||||||
import { useStore } from "@nanostores/react"
|
import { useStore } from "@nanostores/react"
|
||||||
import { getPagePath } from "@nanostores/router"
|
import { getPagePath } from "@nanostores/router"
|
||||||
import { ChevronDownIcon, ExternalLinkIcon, PlusIcon } from "lucide-react"
|
import { ChevronDownIcon, ExternalLinkIcon } from "lucide-react"
|
||||||
import { memo, useEffect, useRef, useState } from "react"
|
import { memo, useEffect, useRef, useState } from "react"
|
||||||
import { Button } from "@/components/ui/button"
|
import { Button } from "@/components/ui/button"
|
||||||
import {
|
import {
|
||||||
@@ -12,7 +12,6 @@ import {
|
|||||||
DialogFooter,
|
DialogFooter,
|
||||||
DialogHeader,
|
DialogHeader,
|
||||||
DialogTitle,
|
DialogTitle,
|
||||||
DialogTrigger,
|
|
||||||
} from "@/components/ui/dialog"
|
} from "@/components/ui/dialog"
|
||||||
import { Input } from "@/components/ui/input"
|
import { Input } from "@/components/ui/input"
|
||||||
import { Label } from "@/components/ui/label"
|
import { Label } from "@/components/ui/label"
|
||||||
@@ -35,28 +34,19 @@ import { DropdownMenu, DropdownMenuTrigger } from "./ui/dropdown-menu"
|
|||||||
import { AppleIcon, DockerIcon, FreeBsdIcon, TuxIcon, WindowsIcon } from "./ui/icons"
|
import { AppleIcon, DockerIcon, FreeBsdIcon, TuxIcon, WindowsIcon } from "./ui/icons"
|
||||||
import { InputCopy } from "./ui/input-copy"
|
import { InputCopy } from "./ui/input-copy"
|
||||||
|
|
||||||
export function AddSystemButton({ className }: { className?: string }) {
|
// To avoid a refactor of the dialog, we will just keep this function as a "skeleton" for the actual dialog
|
||||||
if (isReadOnlyUser()) {
|
export function AddSystemDialog({ open, setOpen }: { open: boolean; setOpen: (open: boolean) => void }) {
|
||||||
return null
|
|
||||||
}
|
|
||||||
const [open, setOpen] = useState(false)
|
|
||||||
const opened = useRef(false)
|
const opened = useRef(false)
|
||||||
if (open) {
|
if (open) {
|
||||||
opened.current = true
|
opened.current = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (isReadOnlyUser()) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Dialog open={open} onOpenChange={setOpen}>
|
<Dialog open={open} onOpenChange={setOpen}>
|
||||||
<DialogTrigger asChild>
|
|
||||||
<Button variant="outline" className={cn("flex gap-1 max-xs:h-[2.4rem]", className)}>
|
|
||||||
<PlusIcon className="h-4 w-4 450:-ms-1" />
|
|
||||||
<span className="hidden 450:inline">
|
|
||||||
<Trans>
|
|
||||||
Add <span className="hidden sm:inline">System</span>
|
|
||||||
</Trans>
|
|
||||||
</span>
|
|
||||||
</Button>
|
|
||||||
</DialogTrigger>
|
|
||||||
{opened.current && <SystemDialog setOpen={setOpen} />}
|
{opened.current && <SystemDialog setOpen={setOpen} />}
|
||||||
</Dialog>
|
</Dialog>
|
||||||
)
|
)
|
||||||
@@ -276,7 +266,13 @@ export const SystemDialog = ({ setOpen, system }: { setOpen: (open: boolean) =>
|
|||||||
/>
|
/>
|
||||||
</TabsContent>
|
</TabsContent>
|
||||||
{/* Save */}
|
{/* Save */}
|
||||||
<Button>{system ? <Trans>Save system</Trans> : <Trans>Add system</Trans>}</Button>
|
<Button>
|
||||||
|
{system ? (
|
||||||
|
<Trans>Save {{ foo: systemTranslation }}</Trans>
|
||||||
|
) : (
|
||||||
|
<Trans>Add {{ foo: systemTranslation }}</Trans>
|
||||||
|
)}
|
||||||
|
</Button>
|
||||||
</DialogFooter>
|
</DialogFooter>
|
||||||
</form>
|
</form>
|
||||||
</Tabs>
|
</Tabs>
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import { useMemo } from "react"
|
import { type ReactNode, useEffect, useMemo, useState } from "react"
|
||||||
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
||||||
import {
|
import {
|
||||||
ChartContainer,
|
ChartContainer,
|
||||||
@@ -11,18 +11,23 @@ import {
|
|||||||
import { chartMargin, cn, formatShortDate } from "@/lib/utils"
|
import { chartMargin, cn, formatShortDate } from "@/lib/utils"
|
||||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
import { useYAxisWidth } from "./hooks"
|
import { useYAxisWidth } from "./hooks"
|
||||||
import { AxisDomain } from "recharts/types/util/types"
|
import type { AxisDomain } from "recharts/types/util/types"
|
||||||
|
import { useIntersectionObserver } from "@/lib/use-intersection-observer"
|
||||||
|
|
||||||
export type DataPoint = {
|
export type DataPoint<T = SystemStatsRecord> = {
|
||||||
label: string
|
label: string
|
||||||
dataKey: (data: SystemStatsRecord) => number | undefined
|
dataKey: (data: T) => number | null | undefined
|
||||||
color: number | string
|
color: number | string
|
||||||
opacity: number
|
opacity: number
|
||||||
stackId?: string | number
|
stackId?: string | number
|
||||||
|
order?: number
|
||||||
|
strokeOpacity?: number
|
||||||
|
activeDot?: boolean
|
||||||
}
|
}
|
||||||
|
|
||||||
export default function AreaChartDefault({
|
export default function AreaChartDefault({
|
||||||
chartData,
|
chartData,
|
||||||
|
customData,
|
||||||
max,
|
max,
|
||||||
maxToggled,
|
maxToggled,
|
||||||
tickFormatter,
|
tickFormatter,
|
||||||
@@ -34,96 +39,129 @@ export default function AreaChartDefault({
|
|||||||
showTotal = false,
|
showTotal = false,
|
||||||
reverseStackOrder = false,
|
reverseStackOrder = false,
|
||||||
hideYAxis = false,
|
hideYAxis = false,
|
||||||
|
filter,
|
||||||
|
truncate = false,
|
||||||
}: // logRender = false,
|
}: // logRender = false,
|
||||||
{
|
{
|
||||||
chartData: ChartData
|
chartData: ChartData
|
||||||
max?: number
|
// biome-ignore lint/suspicious/noExplicitAny: accepts different data source types (systemStats or containerData)
|
||||||
maxToggled?: boolean
|
customData?: any[]
|
||||||
tickFormatter: (value: number, index: number) => string
|
max?: number
|
||||||
contentFormatter: ({ value, payload }: { value: number; payload: SystemStatsRecord }) => string
|
maxToggled?: boolean
|
||||||
dataPoints?: DataPoint[]
|
tickFormatter: (value: number, index: number) => string
|
||||||
domain?: AxisDomain
|
// biome-ignore lint/suspicious/noExplicitAny: recharts tooltip item interop
|
||||||
legend?: boolean
|
contentFormatter: (item: any, key: string) => ReactNode
|
||||||
showTotal?: boolean
|
// biome-ignore lint/suspicious/noExplicitAny: accepts DataPoint with different generic types
|
||||||
itemSorter?: (a: any, b: any) => number
|
dataPoints?: DataPoint<any>[]
|
||||||
reverseStackOrder?: boolean
|
domain?: AxisDomain
|
||||||
hideYAxis?: boolean
|
legend?: boolean
|
||||||
// logRender?: boolean
|
showTotal?: boolean
|
||||||
}) {
|
// biome-ignore lint/suspicious/noExplicitAny: recharts tooltip item interop
|
||||||
|
itemSorter?: (a: any, b: any) => number
|
||||||
|
reverseStackOrder?: boolean
|
||||||
|
hideYAxis?: boolean
|
||||||
|
filter?: string
|
||||||
|
truncate?: boolean
|
||||||
|
// logRender?: boolean
|
||||||
|
}) {
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||||
|
const { isIntersecting, ref } = useIntersectionObserver({ freeze: false })
|
||||||
|
const sourceData = customData ?? chartData.systemStats
|
||||||
|
// Only update the rendered data while the chart is visible
|
||||||
|
const [displayData, setDisplayData] = useState(sourceData)
|
||||||
|
|
||||||
|
// Reduce chart redraws by only updating while visible or when chart time changes
|
||||||
|
useEffect(() => {
|
||||||
|
const shouldPrimeData = sourceData.length && !displayData.length
|
||||||
|
const sourceChanged = sourceData !== displayData
|
||||||
|
const shouldUpdate = shouldPrimeData || (sourceChanged && isIntersecting)
|
||||||
|
if (shouldUpdate) {
|
||||||
|
setDisplayData(sourceData)
|
||||||
|
}
|
||||||
|
}, [displayData, isIntersecting, sourceData])
|
||||||
|
|
||||||
|
// Use a stable key derived from data point identities and visual properties
|
||||||
|
const areasKey = dataPoints?.map((d) => `${d.label}:${d.opacity}`).join("\0")
|
||||||
|
|
||||||
|
const Areas = useMemo(() => {
|
||||||
|
return dataPoints?.map((dataPoint, i) => {
|
||||||
|
let { color } = dataPoint
|
||||||
|
if (typeof color === "number") {
|
||||||
|
color = `var(--chart-${color})`
|
||||||
|
}
|
||||||
|
return (
|
||||||
|
<Area
|
||||||
|
key={dataPoint.label}
|
||||||
|
dataKey={dataPoint.dataKey}
|
||||||
|
name={dataPoint.label}
|
||||||
|
type="monotoneX"
|
||||||
|
fill={color}
|
||||||
|
fillOpacity={dataPoint.opacity}
|
||||||
|
stroke={color}
|
||||||
|
strokeOpacity={dataPoint.strokeOpacity}
|
||||||
|
isAnimationActive={false}
|
||||||
|
stackId={dataPoint.stackId}
|
||||||
|
order={dataPoint.order || i}
|
||||||
|
activeDot={dataPoint.activeDot ?? true}
|
||||||
|
/>
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}, [areasKey, maxToggled])
|
||||||
|
|
||||||
// biome-ignore lint/correctness/useExhaustiveDependencies: ignore
|
|
||||||
return useMemo(() => {
|
return useMemo(() => {
|
||||||
if (chartData.systemStats.length === 0) {
|
if (displayData.length === 0) {
|
||||||
return null
|
return null
|
||||||
}
|
}
|
||||||
// if (logRender) {
|
// if (logRender) {
|
||||||
// console.log("Rendered at", new Date())
|
// console.log("Rendered at", new Date(), "for", dataPoints?.at(0)?.label)
|
||||||
// }
|
// }
|
||||||
return (
|
return (
|
||||||
<div>
|
<ChartContainer
|
||||||
<ChartContainer
|
ref={ref}
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
||||||
"opacity-100": yAxisWidth || hideYAxis,
|
"opacity-100": yAxisWidth || hideYAxis,
|
||||||
"ps-4": hideYAxis,
|
"ps-4": hideYAxis,
|
||||||
})}
|
})}
|
||||||
|
>
|
||||||
|
<AreaChart
|
||||||
|
reverseStackOrder={reverseStackOrder}
|
||||||
|
accessibilityLayer
|
||||||
|
data={displayData}
|
||||||
|
margin={hideYAxis ? { ...chartMargin, left: 5 } : chartMargin}
|
||||||
>
|
>
|
||||||
<AreaChart
|
<CartesianGrid vertical={false} />
|
||||||
reverseStackOrder={reverseStackOrder}
|
{!hideYAxis && (
|
||||||
accessibilityLayer
|
<YAxis
|
||||||
data={chartData.systemStats}
|
direction="ltr"
|
||||||
margin={hideYAxis ? { ...chartMargin, left: 5 } : chartMargin}
|
orientation={chartData.orientation}
|
||||||
>
|
className="tracking-tighter"
|
||||||
<CartesianGrid vertical={false} />
|
width={yAxisWidth}
|
||||||
{!hideYAxis && (
|
domain={domain ?? [0, max ?? "auto"]}
|
||||||
<YAxis
|
tickFormatter={(value, index) => updateYAxisWidth(tickFormatter(value, index))}
|
||||||
direction="ltr"
|
tickLine={false}
|
||||||
orientation={chartData.orientation}
|
axisLine={false}
|
||||||
className="tracking-tighter"
|
|
||||||
width={yAxisWidth}
|
|
||||||
domain={domain ?? [0, max ?? "auto"]}
|
|
||||||
tickFormatter={(value, index) => updateYAxisWidth(tickFormatter(value, index))}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
/>
|
|
||||||
)}
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
// @ts-expect-error
|
|
||||||
itemSorter={itemSorter}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={contentFormatter}
|
|
||||||
showTotal={showTotal}
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
/>
|
||||||
{dataPoints?.map((dataPoint) => {
|
)}
|
||||||
let { color } = dataPoint
|
{xAxis(chartData)}
|
||||||
if (typeof color === "number") {
|
<ChartTooltip
|
||||||
color = `var(--chart-${color})`
|
animationEasing="ease-out"
|
||||||
}
|
animationDuration={150}
|
||||||
return (
|
// @ts-expect-error
|
||||||
<Area
|
itemSorter={itemSorter}
|
||||||
key={dataPoint.label}
|
content={
|
||||||
dataKey={dataPoint.dataKey}
|
<ChartTooltipContent
|
||||||
name={dataPoint.label}
|
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
||||||
type="monotoneX"
|
contentFormatter={contentFormatter}
|
||||||
fill={color}
|
showTotal={showTotal}
|
||||||
fillOpacity={dataPoint.opacity}
|
filter={filter}
|
||||||
stroke={color}
|
truncate={truncate}
|
||||||
isAnimationActive={false}
|
/>
|
||||||
stackId={dataPoint.stackId}
|
}
|
||||||
/>
|
/>
|
||||||
)
|
{Areas}
|
||||||
})}
|
{legend && <ChartLegend content={<ChartLegendContent />} />}
|
||||||
{legend && <ChartLegend content={<ChartLegendContent reverse={reverseStackOrder} />} />}
|
</AreaChart>
|
||||||
</AreaChart>
|
</ChartContainer>
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
)
|
||||||
}, [chartData.systemStats.at(-1), yAxisWidth, maxToggled, showTotal])
|
}, [displayData, yAxisWidth, showTotal, filter])
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,215 +0,0 @@
|
|||||||
// import Spinner from '../spinner'
|
|
||||||
import { useStore } from "@nanostores/react"
|
|
||||||
import { memo, useMemo } from "react"
|
|
||||||
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
|
||||||
import {
|
|
||||||
type ChartConfig,
|
|
||||||
ChartContainer,
|
|
||||||
ChartTooltip,
|
|
||||||
ChartTooltipContent,
|
|
||||||
pinnedAxisDomain,
|
|
||||||
xAxis,
|
|
||||||
} from "@/components/ui/chart"
|
|
||||||
import { ChartType, Unit } from "@/lib/enums"
|
|
||||||
import { $containerFilter, $userSettings } from "@/lib/stores"
|
|
||||||
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData } from "@/types"
|
|
||||||
import { Separator } from "../ui/separator"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function ContainerChart({
|
|
||||||
dataKey,
|
|
||||||
chartData,
|
|
||||||
chartType,
|
|
||||||
chartConfig,
|
|
||||||
unit = "%",
|
|
||||||
}: {
|
|
||||||
dataKey: string
|
|
||||||
chartData: ChartData
|
|
||||||
chartType: ChartType
|
|
||||||
chartConfig: ChartConfig
|
|
||||||
unit?: string
|
|
||||||
}) {
|
|
||||||
const filter = useStore($containerFilter)
|
|
||||||
const userSettings = useStore($userSettings)
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
|
|
||||||
const { containerData } = chartData
|
|
||||||
|
|
||||||
const isNetChart = chartType === ChartType.Network
|
|
||||||
|
|
||||||
// Filter with set lookup
|
|
||||||
const filteredKeys = useMemo(() => {
|
|
||||||
if (!filter) {
|
|
||||||
return new Set<string>()
|
|
||||||
}
|
|
||||||
const filterTerms = filter
|
|
||||||
.toLowerCase()
|
|
||||||
.split(" ")
|
|
||||||
.filter((term) => term.length > 0)
|
|
||||||
return new Set(
|
|
||||||
Object.keys(chartConfig).filter((key) => {
|
|
||||||
const keyLower = key.toLowerCase()
|
|
||||||
return !filterTerms.some((term) => keyLower.includes(term))
|
|
||||||
})
|
|
||||||
)
|
|
||||||
}, [chartConfig, filter])
|
|
||||||
|
|
||||||
// biome-ignore lint/correctness/useExhaustiveDependencies: not necessary
|
|
||||||
const { toolTipFormatter, dataFunction, tickFormatter } = useMemo(() => {
|
|
||||||
const obj = {} as {
|
|
||||||
toolTipFormatter: (item: any, key: string) => React.ReactNode | string
|
|
||||||
dataFunction: (key: string, data: any) => number | null
|
|
||||||
tickFormatter: (value: any) => string
|
|
||||||
}
|
|
||||||
// tick formatter
|
|
||||||
if (chartType === ChartType.CPU) {
|
|
||||||
obj.tickFormatter = (value) => {
|
|
||||||
const val = `${toFixedFloat(value, 2)}%`
|
|
||||||
return updateYAxisWidth(val)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
const chartUnit = isNetChart ? userSettings.unitNet : Unit.Bytes
|
|
||||||
obj.tickFormatter = (val) => {
|
|
||||||
const { value, unit } = formatBytes(val, isNetChart, chartUnit, !isNetChart)
|
|
||||||
return updateYAxisWidth(`${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// tooltip formatter
|
|
||||||
if (isNetChart) {
|
|
||||||
const getRxTxBytes = (record?: { b?: [number, number]; ns?: number; nr?: number }) => {
|
|
||||||
if (record?.b?.length && record.b.length >= 2) {
|
|
||||||
return [Number(record.b[0]) || 0, Number(record.b[1]) || 0]
|
|
||||||
}
|
|
||||||
return [(record?.ns ?? 0) * 1024 * 1024, (record?.nr ?? 0) * 1024 * 1024]
|
|
||||||
}
|
|
||||||
const formatRxTx = (recv: number, sent: number) => {
|
|
||||||
const { value: receivedValue, unit: receivedUnit } = formatBytes(recv, true, userSettings.unitNet, false)
|
|
||||||
const { value: sentValue, unit: sentUnit } = formatBytes(sent, true, userSettings.unitNet, false)
|
|
||||||
return (
|
|
||||||
<span className="flex">
|
|
||||||
{decimalString(receivedValue)} {receivedUnit}
|
|
||||||
<span className="opacity-70 ms-0.5"> rx </span>
|
|
||||||
<Separator orientation="vertical" className="h-3 mx-1.5 bg-primary/40" />
|
|
||||||
{decimalString(sentValue)} {sentUnit}
|
|
||||||
<span className="opacity-70 ms-0.5"> tx</span>
|
|
||||||
</span>
|
|
||||||
)
|
|
||||||
}
|
|
||||||
obj.toolTipFormatter = (item: any, key: string) => {
|
|
||||||
try {
|
|
||||||
if (key === "__total__") {
|
|
||||||
let totalSent = 0
|
|
||||||
let totalRecv = 0
|
|
||||||
const payloadData = item?.payload && typeof item.payload === "object" ? item.payload : {}
|
|
||||||
for (const [containerKey, value] of Object.entries(payloadData)) {
|
|
||||||
if (!value || typeof value !== "object") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Skip filtered out containers
|
|
||||||
if (filteredKeys.has(containerKey)) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
const [sent, recv] = getRxTxBytes(value as { b?: [number, number]; ns?: number; nr?: number })
|
|
||||||
totalSent += sent
|
|
||||||
totalRecv += recv
|
|
||||||
}
|
|
||||||
return formatRxTx(totalRecv, totalSent)
|
|
||||||
}
|
|
||||||
const [sent, recv] = getRxTxBytes(item?.payload?.[key])
|
|
||||||
return formatRxTx(recv, sent)
|
|
||||||
} catch (e) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if (chartType === ChartType.Memory) {
|
|
||||||
obj.toolTipFormatter = (item: any) => {
|
|
||||||
const { value, unit } = formatBytes(item.value, false, Unit.Bytes, true)
|
|
||||||
return `${decimalString(value)} ${unit}`
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
obj.toolTipFormatter = (item: any) => `${decimalString(item.value)}${unit}`
|
|
||||||
}
|
|
||||||
// data function
|
|
||||||
if (isNetChart) {
|
|
||||||
obj.dataFunction = (key: string, data: any) => {
|
|
||||||
const payload = data[key]
|
|
||||||
if (!payload) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
const sent = payload?.b?.[0] ?? (payload?.ns ?? 0) * 1024 * 1024
|
|
||||||
const recv = payload?.b?.[1] ?? (payload?.nr ?? 0) * 1024 * 1024
|
|
||||||
return sent + recv
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
obj.dataFunction = (key: string, data: any) => data[key]?.[dataKey] ?? null
|
|
||||||
}
|
|
||||||
return obj
|
|
||||||
}, [filteredKeys])
|
|
||||||
|
|
||||||
// console.log('rendered at', new Date())
|
|
||||||
|
|
||||||
if (containerData.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<AreaChart
|
|
||||||
accessibilityLayer
|
|
||||||
// syncId={'cpu'}
|
|
||||||
data={containerData}
|
|
||||||
margin={chartMargin}
|
|
||||||
reverseStackOrder={true}
|
|
||||||
>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
domain={pinnedAxisDomain()}
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickFormatter={tickFormatter}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
truncate={true}
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
// @ts-expect-error
|
|
||||||
itemSorter={(a, b) => b.value - a.value}
|
|
||||||
content={<ChartTooltipContent filter={filter} contentFormatter={toolTipFormatter} showTotal={true} />}
|
|
||||||
/>
|
|
||||||
{Object.keys(chartConfig).map((key) => {
|
|
||||||
const filtered = filteredKeys.has(key)
|
|
||||||
const fillOpacity = filtered ? 0.05 : 0.4
|
|
||||||
const strokeOpacity = filtered ? 0.1 : 1
|
|
||||||
return (
|
|
||||||
<Area
|
|
||||||
key={key}
|
|
||||||
isAnimationActive={false}
|
|
||||||
dataKey={dataFunction.bind(null, key)}
|
|
||||||
name={key}
|
|
||||||
type="monotoneX"
|
|
||||||
fill={chartConfig[key].color}
|
|
||||||
fillOpacity={fillOpacity}
|
|
||||||
stroke={chartConfig[key].color}
|
|
||||||
strokeOpacity={strokeOpacity}
|
|
||||||
activeDot={{ opacity: filtered ? 0 : 1 }}
|
|
||||||
stackId="a"
|
|
||||||
/>
|
|
||||||
)
|
|
||||||
})}
|
|
||||||
</AreaChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,83 +0,0 @@
|
|||||||
import { useLingui } from "@lingui/react/macro"
|
|
||||||
import { memo } from "react"
|
|
||||||
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
|
||||||
import { ChartContainer, ChartTooltip, ChartTooltipContent, xAxis } from "@/components/ui/chart"
|
|
||||||
import { Unit } from "@/lib/enums"
|
|
||||||
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function DiskChart({
|
|
||||||
dataKey,
|
|
||||||
diskSize,
|
|
||||||
chartData,
|
|
||||||
}: {
|
|
||||||
dataKey: string | ((data: SystemStatsRecord) => number | undefined)
|
|
||||||
diskSize: number
|
|
||||||
chartData: ChartData
|
|
||||||
}) {
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
const { t } = useLingui()
|
|
||||||
|
|
||||||
// round to nearest GB
|
|
||||||
if (diskSize >= 100) {
|
|
||||||
diskSize = Math.round(diskSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
if (chartData.systemStats.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<AreaChart accessibilityLayer data={chartData.systemStats} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
width={yAxisWidth}
|
|
||||||
domain={[0, diskSize]}
|
|
||||||
tickCount={9}
|
|
||||||
minTickGap={6}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
tickFormatter={(val) => {
|
|
||||||
const { value, unit } = formatBytes(val * 1024, false, Unit.Bytes, true)
|
|
||||||
return updateYAxisWidth(toFixedFloat(value, value >= 10 ? 0 : 1) + " " + unit)
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={({ value }) => {
|
|
||||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
|
||||||
return decimalString(convertedValue) + " " + unit
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
<Area
|
|
||||||
dataKey={dataKey}
|
|
||||||
name={t`Disk Usage`}
|
|
||||||
type="monotoneX"
|
|
||||||
fill="var(--chart-4)"
|
|
||||||
fillOpacity={0.4}
|
|
||||||
stroke="var(--chart-4)"
|
|
||||||
// animationDuration={1200}
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
</AreaChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,117 +0,0 @@
|
|||||||
import { memo, useMemo } from "react"
|
|
||||||
import { CartesianGrid, Line, LineChart, YAxis } from "recharts"
|
|
||||||
import {
|
|
||||||
ChartContainer,
|
|
||||||
ChartLegend,
|
|
||||||
ChartLegendContent,
|
|
||||||
ChartTooltip,
|
|
||||||
ChartTooltipContent,
|
|
||||||
xAxis,
|
|
||||||
} from "@/components/ui/chart"
|
|
||||||
import { chartMargin, cn, decimalString, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData, GPUData } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
import type { DataPoint } from "./line-chart"
|
|
||||||
|
|
||||||
export default memo(function GpuPowerChart({ chartData }: { chartData: ChartData }) {
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
const packageKey = " package"
|
|
||||||
|
|
||||||
const { gpuData, dataPoints } = useMemo(() => {
|
|
||||||
const dataPoints = [] as DataPoint[]
|
|
||||||
const gpuData = [] as Record<string, GPUData | string>[]
|
|
||||||
const addedKeys = new Map<string, number>()
|
|
||||||
|
|
||||||
const addKey = (key: string, value: number) => {
|
|
||||||
addedKeys.set(key, (addedKeys.get(key) ?? 0) + value)
|
|
||||||
}
|
|
||||||
|
|
||||||
for (const stats of chartData.systemStats) {
|
|
||||||
const gpus = stats.stats?.g ?? {}
|
|
||||||
const data = { created: stats.created } as Record<string, GPUData | string>
|
|
||||||
for (const id in gpus) {
|
|
||||||
const gpu = gpus[id] as GPUData
|
|
||||||
data[gpu.n] = gpu
|
|
||||||
addKey(gpu.n, gpu.p ?? 0)
|
|
||||||
if (gpu.pp) {
|
|
||||||
data[`${gpu.n}${packageKey}`] = gpu
|
|
||||||
addKey(`${gpu.n}${packageKey}`, gpu.pp ?? 0)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
gpuData.push(data)
|
|
||||||
}
|
|
||||||
const sortedKeys = Array.from(addedKeys.entries())
|
|
||||||
.sort(([, a], [, b]) => b - a)
|
|
||||||
.map(([key]) => key)
|
|
||||||
|
|
||||||
for (let i = 0; i < sortedKeys.length; i++) {
|
|
||||||
const id = sortedKeys[i]
|
|
||||||
dataPoints.push({
|
|
||||||
label: id,
|
|
||||||
dataKey: (gpuData: Record<string, GPUData>) => {
|
|
||||||
return id.endsWith(packageKey) ? (gpuData[id]?.pp ?? 0) : (gpuData[id]?.p ?? 0)
|
|
||||||
},
|
|
||||||
color: `hsl(${226 + (((i * 360) / addedKeys.size) % 360)}, 65%, 52%)`,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return { gpuData, dataPoints }
|
|
||||||
}, [chartData])
|
|
||||||
|
|
||||||
if (chartData.systemStats.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<LineChart accessibilityLayer data={gpuData} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
domain={[0, "auto"]}
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickFormatter={(value) => {
|
|
||||||
const val = toFixedFloat(value, 2)
|
|
||||||
return updateYAxisWidth(`${val}W`)
|
|
||||||
}}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
// @ts-expect-error
|
|
||||||
itemSorter={(a, b) => b.value - a.value}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={(item) => `${decimalString(item.value)}W`}
|
|
||||||
// indicator="line"
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
{dataPoints.map((dataPoint) => (
|
|
||||||
<Line
|
|
||||||
key={dataPoint.label}
|
|
||||||
dataKey={dataPoint.dataKey}
|
|
||||||
name={dataPoint.label}
|
|
||||||
type="monotoneX"
|
|
||||||
dot={false}
|
|
||||||
strokeWidth={1.5}
|
|
||||||
stroke={dataPoint.color as string}
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
))}
|
|
||||||
{dataPoints.length > 1 && <ChartLegend content={<ChartLegendContent />} />}
|
|
||||||
</LineChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,6 +1,9 @@
|
|||||||
import { useMemo, useState } from "react"
|
import { useMemo, useState } from "react"
|
||||||
|
import { useStore } from "@nanostores/react"
|
||||||
import type { ChartConfig } from "@/components/ui/chart"
|
import type { ChartConfig } from "@/components/ui/chart"
|
||||||
import type { ChartData, SystemStats, SystemStatsRecord } from "@/types"
|
import type { ChartData, SystemStats, SystemStatsRecord } from "@/types"
|
||||||
|
import type { DataPoint } from "./area-chart"
|
||||||
|
import { $containerFilter } from "@/lib/stores"
|
||||||
|
|
||||||
/** Chart configurations for CPU, memory, and network usage charts */
|
/** Chart configurations for CPU, memory, and network usage charts */
|
||||||
export interface ContainerChartConfigs {
|
export interface ContainerChartConfigs {
|
||||||
@@ -96,9 +99,9 @@ export function useYAxisWidth() {
|
|||||||
clearTimeout(timeout)
|
clearTimeout(timeout)
|
||||||
timeout = setTimeout(() => {
|
timeout = setTimeout(() => {
|
||||||
document.body.appendChild(div)
|
document.body.appendChild(div)
|
||||||
const width = div.offsetWidth + 24
|
const width = div.offsetWidth + 20
|
||||||
if (width > yAxisWidth) {
|
if (width > yAxisWidth) {
|
||||||
setYAxisWidth(div.offsetWidth + 24)
|
setYAxisWidth(width)
|
||||||
}
|
}
|
||||||
document.body.removeChild(div)
|
document.body.removeChild(div)
|
||||||
})
|
})
|
||||||
@@ -108,6 +111,44 @@ export function useYAxisWidth() {
|
|||||||
return { yAxisWidth, updateYAxisWidth }
|
return { yAxisWidth, updateYAxisWidth }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Subscribes to the container filter store and returns filtered DataPoints for container charts */
|
||||||
|
export function useContainerDataPoints(
|
||||||
|
chartConfig: ChartConfig,
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: container data records have dynamic keys
|
||||||
|
dataFn: (key: string, data: Record<string, any>) => number | null
|
||||||
|
) {
|
||||||
|
const filter = useStore($containerFilter)
|
||||||
|
const { dataPoints, filteredKeys } = useMemo(() => {
|
||||||
|
const filterTerms = filter
|
||||||
|
? filter
|
||||||
|
.toLowerCase()
|
||||||
|
.split(" ")
|
||||||
|
.filter((term) => term.length > 0)
|
||||||
|
: []
|
||||||
|
const filtered = new Set<string>()
|
||||||
|
const points = Object.keys(chartConfig).map((key) => {
|
||||||
|
const isFiltered = filterTerms.length > 0 && !filterTerms.some((term) => key.toLowerCase().includes(term))
|
||||||
|
if (isFiltered) filtered.add(key)
|
||||||
|
return {
|
||||||
|
label: key,
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: container data records have dynamic keys
|
||||||
|
dataKey: (data: Record<string, any>) => dataFn(key, data),
|
||||||
|
color: chartConfig[key].color ?? "",
|
||||||
|
opacity: isFiltered ? 0.05 : 0.4,
|
||||||
|
strokeOpacity: isFiltered ? 0.1 : 1,
|
||||||
|
activeDot: !isFiltered,
|
||||||
|
stackId: "a",
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return {
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: container data records have dynamic keys
|
||||||
|
dataPoints: points as DataPoint<Record<string, any>>[],
|
||||||
|
filteredKeys: filtered,
|
||||||
|
}
|
||||||
|
}, [chartConfig, filter])
|
||||||
|
return { filter, dataPoints, filteredKeys }
|
||||||
|
}
|
||||||
|
|
||||||
// Assures consistent colors for network interfaces
|
// Assures consistent colors for network interfaces
|
||||||
export function useNetworkInterfaces(interfaces: SystemStats["ni"]) {
|
export function useNetworkInterfaces(interfaces: SystemStats["ni"]) {
|
||||||
const keys = Object.keys(interfaces ?? {})
|
const keys = Object.keys(interfaces ?? {})
|
||||||
@@ -124,4 +165,4 @@ export function useNetworkInterfaces(interfaces: SystemStats["ni"]) {
|
|||||||
}))
|
}))
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
import { useMemo } from "react"
|
import { type ReactNode, useEffect, useMemo, useState } from "react"
|
||||||
import { CartesianGrid, Line, LineChart, YAxis } from "recharts"
|
import { CartesianGrid, Line, LineChart, YAxis } from "recharts"
|
||||||
import {
|
import {
|
||||||
ChartContainer,
|
ChartContainer,
|
||||||
@@ -11,15 +11,22 @@ import {
|
|||||||
import { chartMargin, cn, formatShortDate } from "@/lib/utils"
|
import { chartMargin, cn, formatShortDate } from "@/lib/utils"
|
||||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
import { useYAxisWidth } from "./hooks"
|
import { useYAxisWidth } from "./hooks"
|
||||||
|
import type { AxisDomain } from "recharts/types/util/types"
|
||||||
|
import { useIntersectionObserver } from "@/lib/use-intersection-observer"
|
||||||
|
|
||||||
export type DataPoint = {
|
export type DataPoint<T = SystemStatsRecord> = {
|
||||||
label: string
|
label: string
|
||||||
dataKey: (data: SystemStatsRecord) => number | undefined
|
dataKey: (data: T) => number | null | undefined
|
||||||
color: number | string
|
color: number | string
|
||||||
|
stackId?: string | number
|
||||||
|
order?: number
|
||||||
|
strokeOpacity?: number
|
||||||
|
activeDot?: boolean
|
||||||
}
|
}
|
||||||
|
|
||||||
export default function LineChartDefault({
|
export default function LineChartDefault({
|
||||||
chartData,
|
chartData,
|
||||||
|
customData,
|
||||||
max,
|
max,
|
||||||
maxToggled,
|
maxToggled,
|
||||||
tickFormatter,
|
tickFormatter,
|
||||||
@@ -28,38 +35,101 @@ export default function LineChartDefault({
|
|||||||
domain,
|
domain,
|
||||||
legend,
|
legend,
|
||||||
itemSorter,
|
itemSorter,
|
||||||
|
showTotal = false,
|
||||||
|
reverseStackOrder = false,
|
||||||
|
hideYAxis = false,
|
||||||
|
filter,
|
||||||
|
truncate = false,
|
||||||
}: // logRender = false,
|
}: // logRender = false,
|
||||||
{
|
{
|
||||||
chartData: ChartData
|
chartData: ChartData
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: accepts different data source types (systemStats or containerData)
|
||||||
|
customData?: any[]
|
||||||
max?: number
|
max?: number
|
||||||
maxToggled?: boolean
|
maxToggled?: boolean
|
||||||
tickFormatter: (value: number, index: number) => string
|
tickFormatter: (value: number, index: number) => string
|
||||||
contentFormatter: ({ value, payload }: { value: number; payload: SystemStatsRecord }) => string
|
// biome-ignore lint/suspicious/noExplicitAny: recharts tooltip item interop
|
||||||
dataPoints?: DataPoint[]
|
contentFormatter: (item: any, key: string) => ReactNode
|
||||||
domain?: [number, number]
|
// biome-ignore lint/suspicious/noExplicitAny: accepts DataPoint with different generic types
|
||||||
|
dataPoints?: DataPoint<any>[]
|
||||||
|
domain?: AxisDomain
|
||||||
legend?: boolean
|
legend?: boolean
|
||||||
|
showTotal?: boolean
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: recharts tooltip item interop
|
||||||
itemSorter?: (a: any, b: any) => number
|
itemSorter?: (a: any, b: any) => number
|
||||||
|
reverseStackOrder?: boolean
|
||||||
|
hideYAxis?: boolean
|
||||||
|
filter?: string
|
||||||
|
truncate?: boolean
|
||||||
// logRender?: boolean
|
// logRender?: boolean
|
||||||
}) {
|
}) {
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||||
|
const { isIntersecting, ref } = useIntersectionObserver({ freeze: false })
|
||||||
|
const sourceData = customData ?? chartData.systemStats
|
||||||
|
// Only update the rendered data while the chart is visible
|
||||||
|
const [displayData, setDisplayData] = useState(sourceData)
|
||||||
|
|
||||||
|
// Reduce chart redraws by only updating while visible or when chart time changes
|
||||||
|
useEffect(() => {
|
||||||
|
const shouldPrimeData = sourceData.length && !displayData.length
|
||||||
|
const sourceChanged = sourceData !== displayData
|
||||||
|
const shouldUpdate = shouldPrimeData || (sourceChanged && isIntersecting)
|
||||||
|
if (shouldUpdate) {
|
||||||
|
setDisplayData(sourceData)
|
||||||
|
}
|
||||||
|
}, [displayData, isIntersecting, sourceData])
|
||||||
|
|
||||||
|
// Use a stable key derived from data point identities and visual properties
|
||||||
|
const linesKey = dataPoints?.map((d) => `${d.label}:${d.strokeOpacity ?? ""}`).join("\0")
|
||||||
|
|
||||||
|
const Lines = useMemo(() => {
|
||||||
|
return dataPoints?.map((dataPoint, i) => {
|
||||||
|
let { color } = dataPoint
|
||||||
|
if (typeof color === "number") {
|
||||||
|
color = `var(--chart-${color})`
|
||||||
|
}
|
||||||
|
return (
|
||||||
|
<Line
|
||||||
|
key={dataPoint.label}
|
||||||
|
dataKey={dataPoint.dataKey}
|
||||||
|
name={dataPoint.label}
|
||||||
|
type="monotoneX"
|
||||||
|
dot={false}
|
||||||
|
strokeWidth={1.5}
|
||||||
|
stroke={color}
|
||||||
|
strokeOpacity={dataPoint.strokeOpacity}
|
||||||
|
isAnimationActive={false}
|
||||||
|
// stackId={dataPoint.stackId}
|
||||||
|
order={dataPoint.order || i}
|
||||||
|
// activeDot={dataPoint.activeDot ?? true}
|
||||||
|
/>
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}, [linesKey, maxToggled])
|
||||||
|
|
||||||
// biome-ignore lint/correctness/useExhaustiveDependencies: ignore
|
|
||||||
return useMemo(() => {
|
return useMemo(() => {
|
||||||
if (chartData.systemStats.length === 0) {
|
if (displayData.length === 0) {
|
||||||
return null
|
return null
|
||||||
}
|
}
|
||||||
// if (logRender) {
|
// if (logRender) {
|
||||||
// console.log("Rendered at", new Date())
|
// console.log("Rendered at", new Date(), "for", dataPoints?.at(0)?.label)
|
||||||
// }
|
// }
|
||||||
return (
|
return (
|
||||||
<div>
|
<ChartContainer
|
||||||
<ChartContainer
|
ref={ref}
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
||||||
"opacity-100": yAxisWidth,
|
"opacity-100": yAxisWidth || hideYAxis,
|
||||||
})}
|
"ps-4": hideYAxis,
|
||||||
|
})}
|
||||||
|
>
|
||||||
|
<LineChart
|
||||||
|
reverseStackOrder={reverseStackOrder}
|
||||||
|
accessibilityLayer
|
||||||
|
data={displayData}
|
||||||
|
margin={hideYAxis ? { ...chartMargin, left: 5 } : chartMargin}
|
||||||
>
|
>
|
||||||
<LineChart accessibilityLayer data={chartData.systemStats} margin={chartMargin}>
|
<CartesianGrid vertical={false} />
|
||||||
<CartesianGrid vertical={false} />
|
{!hideYAxis && (
|
||||||
<YAxis
|
<YAxis
|
||||||
direction="ltr"
|
direction="ltr"
|
||||||
orientation={chartData.orientation}
|
orientation={chartData.orientation}
|
||||||
@@ -70,41 +140,27 @@ export default function LineChartDefault({
|
|||||||
tickLine={false}
|
tickLine={false}
|
||||||
axisLine={false}
|
axisLine={false}
|
||||||
/>
|
/>
|
||||||
{xAxis(chartData)}
|
)}
|
||||||
<ChartTooltip
|
{xAxis(chartData)}
|
||||||
animationEasing="ease-out"
|
<ChartTooltip
|
||||||
animationDuration={150}
|
animationEasing="ease-out"
|
||||||
// @ts-expect-error
|
animationDuration={150}
|
||||||
itemSorter={itemSorter}
|
// @ts-expect-error
|
||||||
content={
|
itemSorter={itemSorter}
|
||||||
<ChartTooltipContent
|
content={
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
<ChartTooltipContent
|
||||||
contentFormatter={contentFormatter}
|
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
||||||
/>
|
contentFormatter={contentFormatter}
|
||||||
}
|
showTotal={showTotal}
|
||||||
/>
|
filter={filter}
|
||||||
{dataPoints?.map((dataPoint) => {
|
truncate={truncate}
|
||||||
let { color } = dataPoint
|
/>
|
||||||
if (typeof color === "number") {
|
}
|
||||||
color = `var(--chart-${color})`
|
/>
|
||||||
}
|
{Lines}
|
||||||
return (
|
{legend && <ChartLegend content={<ChartLegendContent />} />}
|
||||||
<Line
|
</LineChart>
|
||||||
key={dataPoint.label}
|
</ChartContainer>
|
||||||
dataKey={dataPoint.dataKey}
|
|
||||||
name={dataPoint.label}
|
|
||||||
type="monotoneX"
|
|
||||||
dot={false}
|
|
||||||
strokeWidth={1.5}
|
|
||||||
stroke={color}
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
)
|
|
||||||
})}
|
|
||||||
{legend && <ChartLegend content={<ChartLegendContent />} />}
|
|
||||||
</LineChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
)
|
||||||
}, [chartData.systemStats.at(-1), yAxisWidth, maxToggled])
|
}, [displayData, yAxisWidth, showTotal, filter, chartData.chartTime])
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,95 +0,0 @@
|
|||||||
import { t } from "@lingui/core/macro"
|
|
||||||
import { memo } from "react"
|
|
||||||
import { CartesianGrid, Line, LineChart, YAxis } from "recharts"
|
|
||||||
import {
|
|
||||||
ChartContainer,
|
|
||||||
ChartLegend,
|
|
||||||
ChartLegendContent,
|
|
||||||
ChartTooltip,
|
|
||||||
ChartTooltipContent,
|
|
||||||
xAxis,
|
|
||||||
} from "@/components/ui/chart"
|
|
||||||
import { chartMargin, cn, decimalString, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData, SystemStats } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function LoadAverageChart({ chartData }: { chartData: ChartData }) {
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
|
|
||||||
const keys: { legacy: keyof SystemStats; color: string; label: string }[] = [
|
|
||||||
{
|
|
||||||
legacy: "l1",
|
|
||||||
color: "hsl(271, 81%, 60%)", // Purple
|
|
||||||
label: t({ message: `1 min`, comment: "Load average" }),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
legacy: "l5",
|
|
||||||
color: "hsl(217, 91%, 60%)", // Blue
|
|
||||||
label: t({ message: `5 min`, comment: "Load average" }),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
legacy: "l15",
|
|
||||||
color: "hsl(25, 95%, 53%)", // Orange
|
|
||||||
label: t({ message: `15 min`, comment: "Load average" }),
|
|
||||||
},
|
|
||||||
]
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<LineChart accessibilityLayer data={chartData.systemStats} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
domain={[0, "auto"]}
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickFormatter={(value) => {
|
|
||||||
return updateYAxisWidth(String(toFixedFloat(value, 2)))
|
|
||||||
}}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={(item) => decimalString(item.value)}
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
{keys.map(({ legacy, color, label }, i) => {
|
|
||||||
const dataKey = (value: { stats: SystemStats }) => {
|
|
||||||
const { minor, patch } = chartData.agentVersion
|
|
||||||
if (minor <= 12 && patch < 1) {
|
|
||||||
return value.stats?.[legacy]
|
|
||||||
}
|
|
||||||
return value.stats?.la?.[i] ?? value.stats?.[legacy]
|
|
||||||
}
|
|
||||||
return (
|
|
||||||
<Line
|
|
||||||
key={label}
|
|
||||||
dataKey={dataKey}
|
|
||||||
name={label}
|
|
||||||
type="monotoneX"
|
|
||||||
dot={false}
|
|
||||||
strokeWidth={1.5}
|
|
||||||
stroke={color}
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
)
|
|
||||||
})}
|
|
||||||
<ChartLegend content={<ChartLegendContent />} />
|
|
||||||
</LineChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,108 +0,0 @@
|
|||||||
import { useLingui } from "@lingui/react/macro"
|
|
||||||
import { memo } from "react"
|
|
||||||
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
|
||||||
import { ChartContainer, ChartTooltip, ChartTooltipContent, xAxis } from "@/components/ui/chart"
|
|
||||||
import { Unit } from "@/lib/enums"
|
|
||||||
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function MemChart({ chartData, showMax }: { chartData: ChartData; showMax: boolean }) {
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
const { t } = useLingui()
|
|
||||||
|
|
||||||
const totalMem = toFixedFloat(chartData.systemStats.at(-1)?.stats.m ?? 0, 1)
|
|
||||||
|
|
||||||
// console.log('rendered at', new Date())
|
|
||||||
|
|
||||||
if (chartData.systemStats.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
{/* {!yAxisSet && <Spinner />} */}
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<AreaChart accessibilityLayer data={chartData.systemStats} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
{totalMem && (
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
// use "ticks" instead of domain / tickcount if need more control
|
|
||||||
domain={[0, totalMem]}
|
|
||||||
tickCount={9}
|
|
||||||
className="tracking-tighter"
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
tickFormatter={(value) => {
|
|
||||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
|
||||||
return updateYAxisWidth(toFixedFloat(convertedValue, value >= 10 ? 0 : 1) + " " + unit)
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
)}
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
// cursor={false}
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
// @ts-expect-error
|
|
||||||
itemSorter={(a, b) => a.order - b.order}
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={({ value }) => {
|
|
||||||
// mem values are supplied as GB
|
|
||||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
|
||||||
return decimalString(convertedValue, convertedValue >= 100 ? 1 : 2) + " " + unit
|
|
||||||
}}
|
|
||||||
showTotal={true}
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
<Area
|
|
||||||
name={t`Used`}
|
|
||||||
order={3}
|
|
||||||
dataKey={({ stats }) => (showMax ? stats?.mm : stats?.mu)}
|
|
||||||
type="monotoneX"
|
|
||||||
fill="var(--chart-2)"
|
|
||||||
fillOpacity={0.4}
|
|
||||||
stroke="var(--chart-2)"
|
|
||||||
stackId="1"
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
{/* {chartData.systemStats.at(-1)?.stats.mz && ( */}
|
|
||||||
<Area
|
|
||||||
name="ZFS ARC"
|
|
||||||
order={2}
|
|
||||||
dataKey={({ stats }) => (showMax ? null : stats?.mz)}
|
|
||||||
type="monotoneX"
|
|
||||||
fill="hsla(175 60% 45% / 0.8)"
|
|
||||||
fillOpacity={0.5}
|
|
||||||
stroke="hsla(175 60% 45% / 0.8)"
|
|
||||||
stackId="1"
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
{/* )} */}
|
|
||||||
<Area
|
|
||||||
name={t`Cache / Buffers`}
|
|
||||||
order={1}
|
|
||||||
dataKey={({ stats }) => (showMax ? null : stats?.mb)}
|
|
||||||
type="monotoneX"
|
|
||||||
fill="hsla(160 60% 45% / 0.5)"
|
|
||||||
fillOpacity={0.4}
|
|
||||||
stroke="hsla(160 60% 45% / 0.5)"
|
|
||||||
stackId="1"
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
{/* <ChartLegend content={<ChartLegendContent />} /> */}
|
|
||||||
</AreaChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,70 +0,0 @@
|
|||||||
import { t } from "@lingui/core/macro"
|
|
||||||
import { useStore } from "@nanostores/react"
|
|
||||||
import { memo } from "react"
|
|
||||||
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
|
||||||
import { ChartContainer, ChartTooltip, ChartTooltipContent, xAxis } from "@/components/ui/chart"
|
|
||||||
import { $userSettings } from "@/lib/stores"
|
|
||||||
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function SwapChart({ chartData }: { chartData: ChartData }) {
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
const userSettings = useStore($userSettings)
|
|
||||||
|
|
||||||
if (chartData.systemStats.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<AreaChart accessibilityLayer data={chartData.systemStats} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
domain={[0, () => toFixedFloat(chartData.systemStats.at(-1)?.stats.s ?? 0.04, 2)]}
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
tickFormatter={(value) => {
|
|
||||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, userSettings.unitDisk, true)
|
|
||||||
return updateYAxisWidth(toFixedFloat(convertedValue, value >= 10 ? 0 : 1) + " " + unit)
|
|
||||||
}}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={({ value }) => {
|
|
||||||
// mem values are supplied as GB
|
|
||||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, userSettings.unitDisk, true)
|
|
||||||
return decimalString(convertedValue, convertedValue >= 100 ? 1 : 2) + " " + unit
|
|
||||||
}}
|
|
||||||
// indicator="line"
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
<Area
|
|
||||||
dataKey="stats.su"
|
|
||||||
name={t`Used`}
|
|
||||||
type="monotoneX"
|
|
||||||
fill="var(--chart-2)"
|
|
||||||
fillOpacity={0.4}
|
|
||||||
stroke="var(--chart-2)"
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
</AreaChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -1,117 +0,0 @@
|
|||||||
import { useStore } from "@nanostores/react"
|
|
||||||
import { memo, useMemo } from "react"
|
|
||||||
import { CartesianGrid, Line, LineChart, YAxis } from "recharts"
|
|
||||||
import {
|
|
||||||
ChartContainer,
|
|
||||||
ChartLegend,
|
|
||||||
ChartLegendContent,
|
|
||||||
ChartTooltip,
|
|
||||||
ChartTooltipContent,
|
|
||||||
xAxis,
|
|
||||||
} from "@/components/ui/chart"
|
|
||||||
import { $temperatureFilter, $userSettings } from "@/lib/stores"
|
|
||||||
import { chartMargin, cn, decimalString, formatShortDate, formatTemperature, toFixedFloat } from "@/lib/utils"
|
|
||||||
import type { ChartData } from "@/types"
|
|
||||||
import { useYAxisWidth } from "./hooks"
|
|
||||||
|
|
||||||
export default memo(function TemperatureChart({ chartData }: { chartData: ChartData }) {
|
|
||||||
const filter = useStore($temperatureFilter)
|
|
||||||
const userSettings = useStore($userSettings)
|
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
|
||||||
|
|
||||||
if (chartData.systemStats.length === 0) {
|
|
||||||
return null
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Format temperature data for chart and assign colors */
|
|
||||||
const newChartData = useMemo(() => {
|
|
||||||
const newChartData = { data: [], colors: {} } as {
|
|
||||||
data: Record<string, number | string>[]
|
|
||||||
colors: Record<string, string>
|
|
||||||
}
|
|
||||||
const tempSums = {} as Record<string, number>
|
|
||||||
for (const data of chartData.systemStats) {
|
|
||||||
const newData = { created: data.created } as Record<string, number | string>
|
|
||||||
const keys = Object.keys(data.stats?.t ?? {})
|
|
||||||
for (let i = 0; i < keys.length; i++) {
|
|
||||||
const key = keys[i]
|
|
||||||
newData[key] = data.stats.t![key]
|
|
||||||
tempSums[key] = (tempSums[key] ?? 0) + newData[key]
|
|
||||||
}
|
|
||||||
newChartData.data.push(newData)
|
|
||||||
}
|
|
||||||
const keys = Object.keys(tempSums).sort((a, b) => tempSums[b] - tempSums[a])
|
|
||||||
for (const key of keys) {
|
|
||||||
newChartData.colors[key] = `hsl(${((keys.indexOf(key) * 360) / keys.length) % 360}, 60%, 55%)`
|
|
||||||
}
|
|
||||||
return newChartData
|
|
||||||
}, [chartData])
|
|
||||||
|
|
||||||
const colors = Object.keys(newChartData.colors)
|
|
||||||
|
|
||||||
// console.log('rendered at', new Date())
|
|
||||||
|
|
||||||
return (
|
|
||||||
<div>
|
|
||||||
<ChartContainer
|
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
|
||||||
"opacity-100": yAxisWidth,
|
|
||||||
})}
|
|
||||||
>
|
|
||||||
<LineChart accessibilityLayer data={newChartData.data} margin={chartMargin}>
|
|
||||||
<CartesianGrid vertical={false} />
|
|
||||||
<YAxis
|
|
||||||
direction="ltr"
|
|
||||||
orientation={chartData.orientation}
|
|
||||||
className="tracking-tighter"
|
|
||||||
domain={["auto", "auto"]}
|
|
||||||
width={yAxisWidth}
|
|
||||||
tickFormatter={(val) => {
|
|
||||||
const { value, unit } = formatTemperature(val, userSettings.unitTemp)
|
|
||||||
return updateYAxisWidth(toFixedFloat(value, 2) + " " + unit)
|
|
||||||
}}
|
|
||||||
tickLine={false}
|
|
||||||
axisLine={false}
|
|
||||||
/>
|
|
||||||
{xAxis(chartData)}
|
|
||||||
<ChartTooltip
|
|
||||||
animationEasing="ease-out"
|
|
||||||
animationDuration={150}
|
|
||||||
// @ts-expect-error
|
|
||||||
itemSorter={(a, b) => b.value - a.value}
|
|
||||||
content={
|
|
||||||
<ChartTooltipContent
|
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
|
||||||
contentFormatter={(item) => {
|
|
||||||
const { value, unit } = formatTemperature(item.value, userSettings.unitTemp)
|
|
||||||
return decimalString(value) + " " + unit
|
|
||||||
}}
|
|
||||||
filter={filter}
|
|
||||||
/>
|
|
||||||
}
|
|
||||||
/>
|
|
||||||
{colors.map((key) => {
|
|
||||||
const filterTerms = filter ? filter.toLowerCase().split(" ").filter(term => term.length > 0) : []
|
|
||||||
const filtered = filterTerms.length > 0 && !filterTerms.some(term => key.toLowerCase().includes(term))
|
|
||||||
const strokeOpacity = filtered ? 0.1 : 1
|
|
||||||
return (
|
|
||||||
<Line
|
|
||||||
key={key}
|
|
||||||
dataKey={key}
|
|
||||||
name={key}
|
|
||||||
type="monotoneX"
|
|
||||||
dot={false}
|
|
||||||
strokeWidth={1.5}
|
|
||||||
stroke={newChartData.colors[key]}
|
|
||||||
strokeOpacity={strokeOpacity}
|
|
||||||
activeDot={{ opacity: filtered ? 0 : 1 }}
|
|
||||||
isAnimationActive={false}
|
|
||||||
/>
|
|
||||||
)
|
|
||||||
})}
|
|
||||||
{colors.length < 12 && <ChartLegend content={<ChartLegendContent />} />}
|
|
||||||
</LineChart>
|
|
||||||
</ChartContainer>
|
|
||||||
</div>
|
|
||||||
)
|
|
||||||
})
|
|
||||||
@@ -4,7 +4,6 @@ import { cn, decimalString, formatBytes, hourWithSeconds } from "@/lib/utils"
|
|||||||
import type { ContainerRecord } from "@/types"
|
import type { ContainerRecord } from "@/types"
|
||||||
import { ContainerHealth, ContainerHealthLabels } from "@/lib/enums"
|
import { ContainerHealth, ContainerHealthLabels } from "@/lib/enums"
|
||||||
import {
|
import {
|
||||||
ArrowUpDownIcon,
|
|
||||||
ClockIcon,
|
ClockIcon,
|
||||||
ContainerIcon,
|
ContainerIcon,
|
||||||
CpuIcon,
|
CpuIcon,
|
||||||
@@ -13,11 +12,12 @@ import {
|
|||||||
ServerIcon,
|
ServerIcon,
|
||||||
ShieldCheckIcon,
|
ShieldCheckIcon,
|
||||||
} from "lucide-react"
|
} from "lucide-react"
|
||||||
import { EthernetIcon, HourglassIcon } from "../ui/icons"
|
import { EthernetIcon, HourglassIcon, SquareArrowRightEnterIcon } from "../ui/icons"
|
||||||
import { Badge } from "../ui/badge"
|
import { Badge } from "../ui/badge"
|
||||||
import { t } from "@lingui/core/macro"
|
import { t } from "@lingui/core/macro"
|
||||||
import { $allSystemsById } from "@/lib/stores"
|
import { $allSystemsById, $longestSystemNameLen } from "@/lib/stores"
|
||||||
import { useStore } from "@nanostores/react"
|
import { useStore } from "@nanostores/react"
|
||||||
|
import { Tooltip, TooltipContent, TooltipTrigger } from "../ui/tooltip"
|
||||||
|
|
||||||
// Unit names and their corresponding number of seconds for converting docker status strings
|
// Unit names and their corresponding number of seconds for converting docker status strings
|
||||||
const unitSeconds = [
|
const unitSeconds = [
|
||||||
@@ -63,7 +63,12 @@ export const containerChartCols: ColumnDef<ContainerRecord>[] = [
|
|||||||
header: ({ column }) => <HeaderButton column={column} name={t`System`} Icon={ServerIcon} />,
|
header: ({ column }) => <HeaderButton column={column} name={t`System`} Icon={ServerIcon} />,
|
||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
const allSystems = useStore($allSystemsById)
|
const allSystems = useStore($allSystemsById)
|
||||||
return <span className="ms-1.5 xl:w-34 block truncate">{allSystems[getValue() as string]?.name ?? ""}</span>
|
const longestName = useStore($longestSystemNameLen)
|
||||||
|
return (
|
||||||
|
<div className="ms-1 max-w-40 truncate" style={{ width: `${longestName / 1.05}ch` }}>
|
||||||
|
{allSystems[getValue() as string]?.name ?? ""}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
// {
|
// {
|
||||||
@@ -82,7 +87,7 @@ export const containerChartCols: ColumnDef<ContainerRecord>[] = [
|
|||||||
header: ({ column }) => <HeaderButton column={column} name={t`CPU`} Icon={CpuIcon} />,
|
header: ({ column }) => <HeaderButton column={column} name={t`CPU`} Icon={CpuIcon} />,
|
||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
const val = getValue() as number
|
const val = getValue() as number
|
||||||
return <span className="ms-1.5 tabular-nums">{`${decimalString(val, val >= 10 ? 1 : 2)}%`}</span>
|
return <span className="ms-1 tabular-nums">{`${decimalString(val, val >= 10 ? 1 : 2)}%`}</span>
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -94,7 +99,7 @@ export const containerChartCols: ColumnDef<ContainerRecord>[] = [
|
|||||||
const val = getValue() as number
|
const val = getValue() as number
|
||||||
const formatted = formatBytes(val, false, undefined, true)
|
const formatted = formatBytes(val, false, undefined, true)
|
||||||
return (
|
return (
|
||||||
<span className="ms-1.5 tabular-nums">{`${decimalString(formatted.value, formatted.value >= 10 ? 1 : 2)} ${formatted.unit}`}</span>
|
<span className="ms-1 tabular-nums">{`${decimalString(formatted.value, formatted.value >= 10 ? 1 : 2)} ${formatted.unit}`}</span>
|
||||||
)
|
)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -103,11 +108,12 @@ export const containerChartCols: ColumnDef<ContainerRecord>[] = [
|
|||||||
accessorFn: (record) => record.net,
|
accessorFn: (record) => record.net,
|
||||||
invertSorting: true,
|
invertSorting: true,
|
||||||
header: ({ column }) => <HeaderButton column={column} name={t`Net`} Icon={EthernetIcon} />,
|
header: ({ column }) => <HeaderButton column={column} name={t`Net`} Icon={EthernetIcon} />,
|
||||||
|
minSize: 112,
|
||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
const val = getValue() as number
|
const val = getValue() as number
|
||||||
const formatted = formatBytes(val, true, undefined, false)
|
const formatted = formatBytes(val, true, undefined, false)
|
||||||
return (
|
return (
|
||||||
<span className="ms-1.5 tabular-nums">{`${decimalString(formatted.value, formatted.value >= 10 ? 1 : 2)} ${formatted.unit}`}</span>
|
<div className="ms-1 tabular-nums">{`${decimalString(formatted.value, formatted.value >= 10 ? 1 : 2)} ${formatted.unit}`}</div>
|
||||||
)
|
)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -116,6 +122,7 @@ export const containerChartCols: ColumnDef<ContainerRecord>[] = [
|
|||||||
invertSorting: true,
|
invertSorting: true,
|
||||||
accessorFn: (record) => record.health,
|
accessorFn: (record) => record.health,
|
||||||
header: ({ column }) => <HeaderButton column={column} name={t`Health`} Icon={ShieldCheckIcon} />,
|
header: ({ column }) => <HeaderButton column={column} name={t`Health`} Icon={ShieldCheckIcon} />,
|
||||||
|
minSize: 121,
|
||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
const healthValue = getValue() as number
|
const healthValue = getValue() as number
|
||||||
const healthStatus = ContainerHealthLabels[healthValue] || "Unknown"
|
const healthStatus = ContainerHealthLabels[healthValue] || "Unknown"
|
||||||
@@ -134,6 +141,35 @@ export const containerChartCols: ColumnDef<ContainerRecord>[] = [
|
|||||||
)
|
)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
id: "ports",
|
||||||
|
accessorFn: (record) => record.ports || undefined,
|
||||||
|
header: ({ column }) => (
|
||||||
|
<HeaderButton
|
||||||
|
column={column}
|
||||||
|
name={t({ message: "Ports", context: "Container ports" })}
|
||||||
|
Icon={SquareArrowRightEnterIcon}
|
||||||
|
/>
|
||||||
|
),
|
||||||
|
sortingFn: (a, b) => getPortValue(a.original.ports) - getPortValue(b.original.ports),
|
||||||
|
minSize: 147,
|
||||||
|
cell: ({ getValue }) => {
|
||||||
|
const val = getValue() as string | undefined
|
||||||
|
if (!val) {
|
||||||
|
return <div className="ms-1.5 text-muted-foreground">-</div>
|
||||||
|
}
|
||||||
|
const className = "ms-1 w-27 block truncate tabular-nums"
|
||||||
|
if (val.length > 14) {
|
||||||
|
return (
|
||||||
|
<Tooltip>
|
||||||
|
<TooltipTrigger className={className}>{val}</TooltipTrigger>
|
||||||
|
<TooltipContent>{val}</TooltipContent>
|
||||||
|
</Tooltip>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
return <span className={className}>{val}</span>
|
||||||
|
},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
id: "image",
|
id: "image",
|
||||||
sortingFn: (a, b) => a.original.image.localeCompare(b.original.image),
|
sortingFn: (a, b) => a.original.image.localeCompare(b.original.image),
|
||||||
@@ -142,7 +178,12 @@ export const containerChartCols: ColumnDef<ContainerRecord>[] = [
|
|||||||
<HeaderButton column={column} name={t({ message: "Image", context: "Docker image" })} Icon={LayersIcon} />
|
<HeaderButton column={column} name={t({ message: "Image", context: "Docker image" })} Icon={LayersIcon} />
|
||||||
),
|
),
|
||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
return <span className="ms-1.5 xl:w-40 block truncate">{getValue() as string}</span>
|
const val = getValue() as string
|
||||||
|
return (
|
||||||
|
<div className="ms-1 xl:w-40 truncate" title={val}>
|
||||||
|
{val}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -152,7 +193,7 @@ export const containerChartCols: ColumnDef<ContainerRecord>[] = [
|
|||||||
sortingFn: (a, b) => getStatusValue(a.original.status) - getStatusValue(b.original.status),
|
sortingFn: (a, b) => getStatusValue(a.original.status) - getStatusValue(b.original.status),
|
||||||
header: ({ column }) => <HeaderButton column={column} name={t`Status`} Icon={HourglassIcon} />,
|
header: ({ column }) => <HeaderButton column={column} name={t`Status`} Icon={HourglassIcon} />,
|
||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
return <span className="ms-1.5 w-25 block truncate">{getValue() as string}</span>
|
return <span className="ms-1 w-25 block truncate">{getValue() as string}</span>
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -162,7 +203,7 @@ export const containerChartCols: ColumnDef<ContainerRecord>[] = [
|
|||||||
header: ({ column }) => <HeaderButton column={column} name={t`Updated`} Icon={ClockIcon} />,
|
header: ({ column }) => <HeaderButton column={column} name={t`Updated`} Icon={ClockIcon} />,
|
||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
const timestamp = getValue() as number
|
const timestamp = getValue() as number
|
||||||
return <span className="ms-1.5 tabular-nums">{hourWithSeconds(new Date(timestamp).toISOString())}</span>
|
return <span className="ms-1 tabular-nums">{hourWithSeconds(new Date(timestamp).toISOString())}</span>
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
@@ -188,7 +229,21 @@ function HeaderButton({
|
|||||||
>
|
>
|
||||||
{Icon && <Icon className="size-4" />}
|
{Icon && <Icon className="size-4" />}
|
||||||
{name}
|
{name}
|
||||||
<ArrowUpDownIcon className="size-4" />
|
{/* <ArrowUpDownIcon className="size-4" /> */}
|
||||||
</Button>
|
</Button>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert port string to a number for sorting.
|
||||||
|
* Handles formats like "80", "127.0.0.1:80", and "80, 443" (takes the first mapping).
|
||||||
|
*/
|
||||||
|
function getPortValue(ports: string | undefined): number {
|
||||||
|
if (!ports) {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
const first = ports.includes(",") ? ports.substring(0, ports.indexOf(",")) : ports
|
||||||
|
const colonIndex = first.lastIndexOf(":")
|
||||||
|
const portStr = colonIndex === -1 ? first : first.substring(colonIndex + 1)
|
||||||
|
return Number(portStr) || 0
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
/** biome-ignore-all lint/security/noDangerouslySetInnerHtml: html comes directly from docker via agent */
|
||||||
import { t } from "@lingui/core/macro"
|
import { t } from "@lingui/core/macro"
|
||||||
import { Trans } from "@lingui/react/macro"
|
import { Trans } from "@lingui/react/macro"
|
||||||
import {
|
import {
|
||||||
@@ -13,7 +14,7 @@ import {
|
|||||||
type VisibilityState,
|
type VisibilityState,
|
||||||
} from "@tanstack/react-table"
|
} from "@tanstack/react-table"
|
||||||
import { useVirtualizer, type VirtualItem } from "@tanstack/react-virtual"
|
import { useVirtualizer, type VirtualItem } from "@tanstack/react-virtual"
|
||||||
import { memo, RefObject, useEffect, useRef, useState } from "react"
|
import { memo, type RefObject, useEffect, useRef, useState } from "react"
|
||||||
import { Input } from "@/components/ui/input"
|
import { Input } from "@/components/ui/input"
|
||||||
import { TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table"
|
import { TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table"
|
||||||
import { pb } from "@/lib/api"
|
import { pb } from "@/lib/api"
|
||||||
@@ -44,6 +45,20 @@ export default function ContainersTable({ systemId }: { systemId?: string }) {
|
|||||||
)
|
)
|
||||||
const [columnFilters, setColumnFilters] = useState<ColumnFiltersState>([])
|
const [columnFilters, setColumnFilters] = useState<ColumnFiltersState>([])
|
||||||
const [columnVisibility, setColumnVisibility] = useState<VisibilityState>({})
|
const [columnVisibility, setColumnVisibility] = useState<VisibilityState>({})
|
||||||
|
|
||||||
|
// Hide ports column if no ports are present
|
||||||
|
useEffect(() => {
|
||||||
|
if (data) {
|
||||||
|
const hasPorts = data.some((container) => container.ports)
|
||||||
|
setColumnVisibility((prev) => {
|
||||||
|
if (prev.ports === hasPorts) {
|
||||||
|
return prev
|
||||||
|
}
|
||||||
|
return { ...prev, ports: hasPorts }
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}, [data])
|
||||||
|
|
||||||
const [rowSelection, setRowSelection] = useState({})
|
const [rowSelection, setRowSelection] = useState({})
|
||||||
const [globalFilter, setGlobalFilter] = useState("")
|
const [globalFilter, setGlobalFilter] = useState("")
|
||||||
|
|
||||||
@@ -51,7 +66,7 @@ export default function ContainersTable({ systemId }: { systemId?: string }) {
|
|||||||
function fetchData(systemId?: string) {
|
function fetchData(systemId?: string) {
|
||||||
pb.collection<ContainerRecord>("containers")
|
pb.collection<ContainerRecord>("containers")
|
||||||
.getList(0, 2000, {
|
.getList(0, 2000, {
|
||||||
fields: "id,name,image,cpu,memory,net,health,status,system,updated",
|
fields: "id,name,image,ports,cpu,memory,net,health,status,system,updated",
|
||||||
filter: systemId ? pb.filter("system={:system}", { system: systemId }) : undefined,
|
filter: systemId ? pb.filter("system={:system}", { system: systemId }) : undefined,
|
||||||
})
|
})
|
||||||
.then(({ items }) => {
|
.then(({ items }) => {
|
||||||
@@ -67,7 +82,7 @@ export default function ContainersTable({ systemId }: { systemId?: string }) {
|
|||||||
setData((curItems) => {
|
setData((curItems) => {
|
||||||
const lastUpdated = Math.max(items[0].updated, items.at(-1)?.updated ?? 0)
|
const lastUpdated = Math.max(items[0].updated, items.at(-1)?.updated ?? 0)
|
||||||
const containerIds = new Set()
|
const containerIds = new Set()
|
||||||
const newItems = []
|
const newItems: ContainerRecord[] = []
|
||||||
for (const item of items) {
|
for (const item of items) {
|
||||||
if (Math.abs(lastUpdated - item.updated) < 70_000) {
|
if (Math.abs(lastUpdated - item.updated) < 70_000) {
|
||||||
containerIds.add(item.id)
|
containerIds.add(item.id)
|
||||||
@@ -134,7 +149,8 @@ export default function ContainersTable({ systemId }: { systemId?: string }) {
|
|||||||
const status = container.status ?? ""
|
const status = container.status ?? ""
|
||||||
const healthLabel = ContainerHealthLabels[container.health as ContainerHealth] ?? ""
|
const healthLabel = ContainerHealthLabels[container.health as ContainerHealth] ?? ""
|
||||||
const image = container.image ?? ""
|
const image = container.image ?? ""
|
||||||
const searchString = `${systemName} ${id} ${name} ${healthLabel} ${status} ${image}`.toLowerCase()
|
const ports = container.ports ?? ""
|
||||||
|
const searchString = `${systemName} ${id} ${name} ${healthLabel} ${status} ${image} ${ports}`.toLowerCase()
|
||||||
|
|
||||||
return (filterValue as string)
|
return (filterValue as string)
|
||||||
.toLowerCase()
|
.toLowerCase()
|
||||||
@@ -147,9 +163,9 @@ export default function ContainersTable({ systemId }: { systemId?: string }) {
|
|||||||
const visibleColumns = table.getVisibleLeafColumns()
|
const visibleColumns = table.getVisibleLeafColumns()
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Card className="p-6 @container w-full">
|
<Card className="@container w-full px-3 py-5 sm:py-6 sm:px-6">
|
||||||
<CardHeader className="p-0 mb-4">
|
<CardHeader className="p-0 mb-3 sm:mb-4">
|
||||||
<div className="grid md:flex gap-5 w-full items-end">
|
<div className="grid md:flex gap-x-5 gap-y-3 w-full items-end">
|
||||||
<div className="px-2 sm:px-1">
|
<div className="px-2 sm:px-1">
|
||||||
<CardTitle className="mb-2">
|
<CardTitle className="mb-2">
|
||||||
<Trans>All Containers</Trans>
|
<Trans>All Containers</Trans>
|
||||||
@@ -300,9 +316,6 @@ function ContainerSheet({
|
|||||||
setSheetOpen: (open: boolean) => void
|
setSheetOpen: (open: boolean) => void
|
||||||
activeContainer: RefObject<ContainerRecord | null>
|
activeContainer: RefObject<ContainerRecord | null>
|
||||||
}) {
|
}) {
|
||||||
const container = activeContainer.current
|
|
||||||
if (!container) return null
|
|
||||||
|
|
||||||
const [logsDisplay, setLogsDisplay] = useState<string>("")
|
const [logsDisplay, setLogsDisplay] = useState<string>("")
|
||||||
const [infoDisplay, setInfoDisplay] = useState<string>("")
|
const [infoDisplay, setInfoDisplay] = useState<string>("")
|
||||||
const [logsFullscreenOpen, setLogsFullscreenOpen] = useState<boolean>(false)
|
const [logsFullscreenOpen, setLogsFullscreenOpen] = useState<boolean>(false)
|
||||||
@@ -310,6 +323,8 @@ function ContainerSheet({
|
|||||||
const [isRefreshingLogs, setIsRefreshingLogs] = useState<boolean>(false)
|
const [isRefreshingLogs, setIsRefreshingLogs] = useState<boolean>(false)
|
||||||
const logsContainerRef = useRef<HTMLDivElement>(null)
|
const logsContainerRef = useRef<HTMLDivElement>(null)
|
||||||
|
|
||||||
|
const container = activeContainer.current
|
||||||
|
|
||||||
function scrollLogsToBottom() {
|
function scrollLogsToBottom() {
|
||||||
if (logsContainerRef.current) {
|
if (logsContainerRef.current) {
|
||||||
logsContainerRef.current.scrollTo({ top: logsContainerRef.current.scrollHeight })
|
logsContainerRef.current.scrollTo({ top: logsContainerRef.current.scrollHeight })
|
||||||
@@ -317,6 +332,7 @@ function ContainerSheet({
|
|||||||
}
|
}
|
||||||
|
|
||||||
const refreshLogs = async () => {
|
const refreshLogs = async () => {
|
||||||
|
if (!container) return
|
||||||
setIsRefreshingLogs(true)
|
setIsRefreshingLogs(true)
|
||||||
const startTime = Date.now()
|
const startTime = Date.now()
|
||||||
|
|
||||||
@@ -348,6 +364,8 @@ function ContainerSheet({
|
|||||||
})()
|
})()
|
||||||
}, [container])
|
}, [container])
|
||||||
|
|
||||||
|
if (!container) return null
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<>
|
<>
|
||||||
<LogsFullscreenDialog
|
<LogsFullscreenDialog
|
||||||
@@ -378,8 +396,14 @@ function ContainerSheet({
|
|||||||
{container.image}
|
{container.image}
|
||||||
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
|
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
|
||||||
{container.id}
|
{container.id}
|
||||||
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
|
{/* {container.ports && (
|
||||||
{ContainerHealthLabels[container.health as ContainerHealth]}
|
<>
|
||||||
|
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
|
||||||
|
{container.ports}
|
||||||
|
</>
|
||||||
|
)} */}
|
||||||
|
{/* <Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
|
||||||
|
{ContainerHealthLabels[container.health as ContainerHealth]} */}
|
||||||
</SheetDescription>
|
</SheetDescription>
|
||||||
</SheetHeader>
|
</SheetHeader>
|
||||||
<div className="px-3 pb-3 -mt-4 flex flex-col gap-3 h-full items-start">
|
<div className="px-3 pb-3 -mt-4 flex flex-col gap-3 h-full items-start">
|
||||||
@@ -442,7 +466,7 @@ function ContainersTableHead({ table }: { table: TableType<ContainerRecord> }) {
|
|||||||
<tr key={headerGroup.id}>
|
<tr key={headerGroup.id}>
|
||||||
{headerGroup.headers.map((header) => {
|
{headerGroup.headers.map((header) => {
|
||||||
return (
|
return (
|
||||||
<TableHead className="px-2" key={header.id}>
|
<TableHead className="px-2" key={header.id} style={{ width: header.getSize() }}>
|
||||||
{header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())}
|
{header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())}
|
||||||
</TableHead>
|
</TableHead>
|
||||||
)
|
)
|
||||||
@@ -474,6 +498,7 @@ const ContainerTableRow = memo(function ContainerTableRow({
|
|||||||
className="py-0 ps-4.5"
|
className="py-0 ps-4.5"
|
||||||
style={{
|
style={{
|
||||||
height: virtualRow.size,
|
height: virtualRow.size,
|
||||||
|
width: cell.column.getSize(),
|
||||||
}}
|
}}
|
||||||
>
|
>
|
||||||
{flexRender(cell.column.columnDef.cell, cell.getContext())}
|
{flexRender(cell.column.columnDef.cell, cell.getContext())}
|
||||||
|
|||||||
@@ -1,7 +1,11 @@
|
|||||||
|
import { useStore } from "@nanostores/react"
|
||||||
import { GithubIcon } from "lucide-react"
|
import { GithubIcon } from "lucide-react"
|
||||||
|
import { $newVersion } from "@/lib/stores"
|
||||||
import { Separator } from "./ui/separator"
|
import { Separator } from "./ui/separator"
|
||||||
|
import { Trans } from "@lingui/react/macro"
|
||||||
|
|
||||||
export function FooterRepoLink() {
|
export function FooterRepoLink() {
|
||||||
|
const newVersion = useStore($newVersion)
|
||||||
return (
|
return (
|
||||||
<div className="flex gap-1.5 justify-end items-center pe-3 sm:pe-6 mt-3.5 mb-4 text-xs opacity-80">
|
<div className="flex gap-1.5 justify-end items-center pe-3 sm:pe-6 mt-3.5 mb-4 text-xs opacity-80">
|
||||||
<a
|
<a
|
||||||
@@ -21,6 +25,19 @@ export function FooterRepoLink() {
|
|||||||
>
|
>
|
||||||
Beszel {globalThis.BESZEL.HUB_VERSION}
|
Beszel {globalThis.BESZEL.HUB_VERSION}
|
||||||
</a>
|
</a>
|
||||||
|
{newVersion?.v && (
|
||||||
|
<>
|
||||||
|
<Separator orientation="vertical" className="h-2.5 bg-muted-foreground opacity-70" />
|
||||||
|
<a
|
||||||
|
href={newVersion.url}
|
||||||
|
target="_blank"
|
||||||
|
className="text-yellow-500 hover:text-yellow-400 duration-75"
|
||||||
|
rel="noopener"
|
||||||
|
>
|
||||||
|
<Trans context="New version available">{newVersion.v} available</Trans>
|
||||||
|
</a>
|
||||||
|
</>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
import { Trans, useLingui } from "@lingui/react/macro"
|
import { Trans, useLingui } from "@lingui/react/macro"
|
||||||
import { LanguagesIcon } from "lucide-react"
|
import { LanguagesIcon } from "lucide-react"
|
||||||
import { Button } from "@/components/ui/button"
|
import { buttonVariants } from "@/components/ui/button"
|
||||||
import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu"
|
import { DropdownMenu, DropdownMenuContent, DropdownMenuItem, DropdownMenuTrigger } from "@/components/ui/dropdown-menu"
|
||||||
import { dynamicActivate } from "@/lib/i18n"
|
import { dynamicActivate } from "@/lib/i18n"
|
||||||
import languages from "@/lib/languages"
|
import languages from "@/lib/languages"
|
||||||
@@ -14,31 +14,29 @@ export function LangToggle() {
|
|||||||
|
|
||||||
return (
|
return (
|
||||||
<DropdownMenu>
|
<DropdownMenu>
|
||||||
<DropdownMenuTrigger>
|
<Tooltip>
|
||||||
<Tooltip>
|
<TooltipTrigger asChild>
|
||||||
<TooltipTrigger asChild>
|
<DropdownMenuTrigger className={cn(buttonVariants({ variant: "ghost", size: "icon" }))}>
|
||||||
<Button variant={"ghost"} size="icon" className="hidden sm:flex">
|
<LanguagesIcon className="absolute h-[1.2rem] w-[1.2rem] light:opacity-85" />
|
||||||
<LanguagesIcon className="absolute h-[1.2rem] w-[1.2rem] light:opacity-85" />
|
<span className="sr-only">{LangTrans}</span>
|
||||||
<span className="sr-only">{LangTrans}</span>
|
<TooltipContent>{LangTrans}</TooltipContent>
|
||||||
</Button>
|
</DropdownMenuTrigger>
|
||||||
</TooltipTrigger>
|
</TooltipTrigger>
|
||||||
<TooltipContent>{LangTrans}</TooltipContent>
|
<DropdownMenuContent className="grid grid-cols-3">
|
||||||
</Tooltip>
|
{languages.map(([lang, label, e]) => (
|
||||||
</DropdownMenuTrigger>
|
<DropdownMenuItem
|
||||||
<DropdownMenuContent className="grid grid-cols-3">
|
key={lang}
|
||||||
{languages.map(([lang, label, e]) => (
|
className={cn("px-2.5 flex gap-2.5 cursor-pointer", lang === i18n.locale && "bg-accent/70 font-medium")}
|
||||||
<DropdownMenuItem
|
onClick={() => dynamicActivate(lang)}
|
||||||
key={lang}
|
>
|
||||||
className={cn("px-2.5 flex gap-2.5 cursor-pointer", lang === i18n.locale && "bg-accent/70 font-medium")}
|
<span>
|
||||||
onClick={() => dynamicActivate(lang)}
|
{e || <code className="font-mono bg-muted text-[.65em] w-5 h-4 grid place-items-center">{lang}</code>}
|
||||||
>
|
</span>{" "}
|
||||||
<span>
|
{label}
|
||||||
{e || <code className="font-mono bg-muted text-[.65em] w-5 h-4 grid place-items-center">{lang}</code>}
|
</DropdownMenuItem>
|
||||||
</span>{" "}
|
))}
|
||||||
{label}
|
</DropdownMenuContent>
|
||||||
</DropdownMenuItem>
|
</Tooltip>
|
||||||
))}
|
|
||||||
</DropdownMenuContent>
|
|
||||||
</DropdownMenu>
|
</DropdownMenu>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ export function ModeToggle() {
|
|||||||
|
|
||||||
return (
|
return (
|
||||||
<Tooltip>
|
<Tooltip>
|
||||||
<TooltipTrigger>
|
<TooltipTrigger asChild>
|
||||||
<Button
|
<Button
|
||||||
variant={"ghost"}
|
variant={"ghost"}
|
||||||
size="icon"
|
size="icon"
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
import { Trans } from "@lingui/react/macro"
|
import { Trans } from "@lingui/react/macro"
|
||||||
import { getPagePath } from "@nanostores/router"
|
import { getPagePath } from "@nanostores/router"
|
||||||
import {
|
import {
|
||||||
@@ -6,6 +7,8 @@ import {
|
|||||||
HardDriveIcon,
|
HardDriveIcon,
|
||||||
LogOutIcon,
|
LogOutIcon,
|
||||||
LogsIcon,
|
LogsIcon,
|
||||||
|
MenuIcon,
|
||||||
|
PlusIcon,
|
||||||
SearchIcon,
|
SearchIcon,
|
||||||
ServerIcon,
|
ServerIcon,
|
||||||
SettingsIcon,
|
SettingsIcon,
|
||||||
@@ -21,15 +24,18 @@ import {
|
|||||||
DropdownMenuItem,
|
DropdownMenuItem,
|
||||||
DropdownMenuLabel,
|
DropdownMenuLabel,
|
||||||
DropdownMenuSeparator,
|
DropdownMenuSeparator,
|
||||||
|
DropdownMenuSub,
|
||||||
|
DropdownMenuSubContent,
|
||||||
|
DropdownMenuSubTrigger,
|
||||||
DropdownMenuTrigger,
|
DropdownMenuTrigger,
|
||||||
} from "@/components/ui/dropdown-menu"
|
} from "@/components/ui/dropdown-menu"
|
||||||
import { isAdmin, isReadOnlyUser, logOut, pb } from "@/lib/api"
|
import { isAdmin, isReadOnlyUser, logOut, pb } from "@/lib/api"
|
||||||
import { cn, runOnce } from "@/lib/utils"
|
import { cn, runOnce } from "@/lib/utils"
|
||||||
import { AddSystemButton } from "./add-system"
|
import { AddSystemDialog } from "./add-system"
|
||||||
import { LangToggle } from "./lang-toggle"
|
import { LangToggle } from "./lang-toggle"
|
||||||
import { Logo } from "./logo"
|
import { Logo } from "./logo"
|
||||||
import { ModeToggle } from "./mode-toggle"
|
import { ModeToggle } from "./mode-toggle"
|
||||||
import { $router, basePath, Link, prependBasePath } from "./router"
|
import { $router, basePath, Link, navigate, prependBasePath } from "./router"
|
||||||
import { Tooltip, TooltipContent, TooltipTrigger } from "./ui/tooltip"
|
import { Tooltip, TooltipContent, TooltipTrigger } from "./ui/tooltip"
|
||||||
|
|
||||||
const CommandPalette = lazy(() => import("./command-palette"))
|
const CommandPalette = lazy(() => import("./command-palette"))
|
||||||
@@ -37,8 +43,20 @@ const CommandPalette = lazy(() => import("./command-palette"))
|
|||||||
const isMac = navigator.platform.toUpperCase().indexOf("MAC") >= 0
|
const isMac = navigator.platform.toUpperCase().indexOf("MAC") >= 0
|
||||||
|
|
||||||
export default function Navbar() {
|
export default function Navbar() {
|
||||||
|
const [addSystemDialogOpen, setAddSystemDialogOpen] = useState(false)
|
||||||
|
const [commandPaletteOpen, setCommandPaletteOpen] = useState(false)
|
||||||
|
|
||||||
|
const AdminLinks = AdminDropdownGroup()
|
||||||
|
|
||||||
|
const systemTranslation = t`System`
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="flex items-center h-14 md:h-16 bg-card px-4 pe-3 sm:px-6 border border-border/60 bt-0 rounded-md my-4">
|
<div className="flex items-center h-14 md:h-16 bg-card px-4 pe-3 sm:px-6 border border-border/60 bt-0 rounded-md my-4">
|
||||||
|
<Suspense>
|
||||||
|
<CommandPalette open={commandPaletteOpen} setOpen={setCommandPaletteOpen} />
|
||||||
|
</Suspense>
|
||||||
|
<AddSystemDialog open={addSystemDialogOpen} setOpen={setAddSystemDialogOpen} />
|
||||||
|
|
||||||
<Link
|
<Link
|
||||||
href={basePath}
|
href={basePath}
|
||||||
aria-label="Home"
|
aria-label="Home"
|
||||||
@@ -47,10 +65,93 @@ export default function Navbar() {
|
|||||||
>
|
>
|
||||||
<Logo className="h-[1.1rem] md:h-5 fill-foreground" />
|
<Logo className="h-[1.1rem] md:h-5 fill-foreground" />
|
||||||
</Link>
|
</Link>
|
||||||
<SearchButton />
|
<Button
|
||||||
|
variant="outline"
|
||||||
|
className="hidden md:block text-sm text-muted-foreground px-4"
|
||||||
|
onClick={() => setCommandPaletteOpen(true)}
|
||||||
|
>
|
||||||
|
<span className="flex items-center">
|
||||||
|
<SearchIcon className="me-1.5 h-4 w-4" />
|
||||||
|
<Trans>Search</Trans>
|
||||||
|
<span className="flex items-center ms-3.5">
|
||||||
|
<Kbd>{isMac ? "⌘" : "Ctrl"}</Kbd>
|
||||||
|
<Kbd>K</Kbd>
|
||||||
|
</span>
|
||||||
|
</span>
|
||||||
|
</Button>
|
||||||
|
|
||||||
|
{/* mobile menu */}
|
||||||
|
<div className="ms-auto flex items-center text-xl md:hidden">
|
||||||
|
<ModeToggle />
|
||||||
|
<Button variant="ghost" size="icon" onClick={() => setCommandPaletteOpen(true)}>
|
||||||
|
<SearchIcon className="h-[1.2rem] w-[1.2rem]" />
|
||||||
|
</Button>
|
||||||
|
<DropdownMenu>
|
||||||
|
<DropdownMenuTrigger
|
||||||
|
onMouseEnter={() => import("@/components/routes/settings/general")}
|
||||||
|
className="ms-3"
|
||||||
|
aria-label="Open Menu"
|
||||||
|
>
|
||||||
|
<MenuIcon />
|
||||||
|
</DropdownMenuTrigger>
|
||||||
|
<DropdownMenuContent align="end">
|
||||||
|
<DropdownMenuLabel className="max-w-40 truncate">{pb.authStore.record?.email}</DropdownMenuLabel>
|
||||||
|
<DropdownMenuSeparator />
|
||||||
|
<DropdownMenuGroup>
|
||||||
|
<DropdownMenuItem
|
||||||
|
onClick={() => navigate(getPagePath($router, "containers"))}
|
||||||
|
className="flex items-center"
|
||||||
|
>
|
||||||
|
<ContainerIcon className="h-4 w-4 me-2.5" strokeWidth={1.5} />
|
||||||
|
<Trans>All Containers</Trans>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
<DropdownMenuItem onClick={() => navigate(getPagePath($router, "smart"))} className="flex items-center">
|
||||||
|
<HardDriveIcon className="h-4 w-4 me-2.5" strokeWidth={1.5} />
|
||||||
|
<span>S.M.A.R.T.</span>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
<DropdownMenuItem
|
||||||
|
onClick={() => navigate(getPagePath($router, "settings", { name: "general" }))}
|
||||||
|
className="flex items-center"
|
||||||
|
>
|
||||||
|
<SettingsIcon className="h-4 w-4 me-2.5" />
|
||||||
|
<Trans>Settings</Trans>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
{isAdmin() && (
|
||||||
|
<DropdownMenuSub>
|
||||||
|
<DropdownMenuSubTrigger>
|
||||||
|
<UserIcon className="h-4 w-4 me-2.5" />
|
||||||
|
<Trans>Admin</Trans>
|
||||||
|
</DropdownMenuSubTrigger>
|
||||||
|
<DropdownMenuSubContent>{AdminLinks}</DropdownMenuSubContent>
|
||||||
|
</DropdownMenuSub>
|
||||||
|
)}
|
||||||
|
<DropdownMenuItem
|
||||||
|
className="flex items-center"
|
||||||
|
onSelect={() => {
|
||||||
|
setAddSystemDialogOpen(true)
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<PlusIcon className="h-4 w-4 me-2.5" />
|
||||||
|
<Trans>Add {{ foo: systemTranslation }}</Trans>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
</DropdownMenuGroup>
|
||||||
|
<DropdownMenuSeparator />
|
||||||
|
<DropdownMenuGroup>
|
||||||
|
<DropdownMenuItem onSelect={logOut} className="flex items-center">
|
||||||
|
<LogOutIcon className="h-4 w-4 me-2.5" />
|
||||||
|
<Trans>Log Out</Trans>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
</DropdownMenuGroup>
|
||||||
|
</DropdownMenuContent>
|
||||||
|
</DropdownMenu>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* desktop nav */}
|
||||||
{/** biome-ignore lint/a11y/noStaticElementInteractions: ignore */}
|
{/** biome-ignore lint/a11y/noStaticElementInteractions: ignore */}
|
||||||
<div className="flex items-center ms-auto" onMouseEnter={() => import("@/components/routes/settings/general")}>
|
<div
|
||||||
|
className="hidden md:flex items-center ms-auto"
|
||||||
|
onMouseEnter={() => import("@/components/routes/settings/general")}
|
||||||
|
>
|
||||||
<Tooltip>
|
<Tooltip>
|
||||||
<TooltipTrigger asChild>
|
<TooltipTrigger asChild>
|
||||||
<Link
|
<Link
|
||||||
@@ -102,45 +203,12 @@ export default function Navbar() {
|
|||||||
<DropdownMenuContent align={isReadOnlyUser() ? "end" : "center"} className="min-w-44">
|
<DropdownMenuContent align={isReadOnlyUser() ? "end" : "center"} className="min-w-44">
|
||||||
<DropdownMenuLabel>{pb.authStore.record?.email}</DropdownMenuLabel>
|
<DropdownMenuLabel>{pb.authStore.record?.email}</DropdownMenuLabel>
|
||||||
<DropdownMenuSeparator />
|
<DropdownMenuSeparator />
|
||||||
<DropdownMenuGroup>
|
{isAdmin() && (
|
||||||
{isAdmin() && (
|
<>
|
||||||
<>
|
{AdminLinks}
|
||||||
<DropdownMenuItem asChild>
|
<DropdownMenuSeparator />
|
||||||
<a href={prependBasePath("/_/")} target="_blank">
|
</>
|
||||||
<UsersIcon className="me-2.5 h-4 w-4" />
|
)}
|
||||||
<span>
|
|
||||||
<Trans>Users</Trans>
|
|
||||||
</span>
|
|
||||||
</a>
|
|
||||||
</DropdownMenuItem>
|
|
||||||
<DropdownMenuItem asChild>
|
|
||||||
<a href={prependBasePath("/_/#/collections?collection=systems")} target="_blank">
|
|
||||||
<ServerIcon className="me-2.5 h-4 w-4" />
|
|
||||||
<span>
|
|
||||||
<Trans>Systems</Trans>
|
|
||||||
</span>
|
|
||||||
</a>
|
|
||||||
</DropdownMenuItem>
|
|
||||||
<DropdownMenuItem asChild>
|
|
||||||
<a href={prependBasePath("/_/#/logs")} target="_blank">
|
|
||||||
<LogsIcon className="me-2.5 h-4 w-4" />
|
|
||||||
<span>
|
|
||||||
<Trans>Logs</Trans>
|
|
||||||
</span>
|
|
||||||
</a>
|
|
||||||
</DropdownMenuItem>
|
|
||||||
<DropdownMenuItem asChild>
|
|
||||||
<a href={prependBasePath("/_/#/settings/backups")} target="_blank">
|
|
||||||
<DatabaseBackupIcon className="me-2.5 h-4 w-4" />
|
|
||||||
<span>
|
|
||||||
<Trans>Backups</Trans>
|
|
||||||
</span>
|
|
||||||
</a>
|
|
||||||
</DropdownMenuItem>
|
|
||||||
<DropdownMenuSeparator />
|
|
||||||
</>
|
|
||||||
)}
|
|
||||||
</DropdownMenuGroup>
|
|
||||||
<DropdownMenuItem onSelect={logOut}>
|
<DropdownMenuItem onSelect={logOut}>
|
||||||
<LogOutIcon className="me-2.5 h-4 w-4" />
|
<LogOutIcon className="me-2.5 h-4 w-4" />
|
||||||
<span>
|
<span>
|
||||||
@@ -149,7 +217,10 @@ export default function Navbar() {
|
|||||||
</DropdownMenuItem>
|
</DropdownMenuItem>
|
||||||
</DropdownMenuContent>
|
</DropdownMenuContent>
|
||||||
</DropdownMenu>
|
</DropdownMenu>
|
||||||
<AddSystemButton className="ms-2" />
|
<Button variant="outline" className="flex gap-1 ms-2" onClick={() => setAddSystemDialogOpen(true)}>
|
||||||
|
<PlusIcon className="h-4 w-4 -ms-1" />
|
||||||
|
<Trans>Add {{ foo: systemTranslation }}</Trans>
|
||||||
|
</Button>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
)
|
)
|
||||||
@@ -161,28 +232,41 @@ const Kbd = ({ children }: { children: React.ReactNode }) => (
|
|||||||
</kbd>
|
</kbd>
|
||||||
)
|
)
|
||||||
|
|
||||||
function SearchButton() {
|
function AdminDropdownGroup() {
|
||||||
const [open, setOpen] = useState(false)
|
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<>
|
<DropdownMenuGroup>
|
||||||
<Button
|
<DropdownMenuItem asChild>
|
||||||
variant="outline"
|
<a href={prependBasePath("/_/")} target="_blank">
|
||||||
className="hidden md:block text-sm text-muted-foreground px-4"
|
<UsersIcon className="me-2.5 h-4 w-4" />
|
||||||
onClick={() => setOpen(true)}
|
<span>
|
||||||
>
|
<Trans>Users</Trans>
|
||||||
<span className="flex items-center">
|
|
||||||
<SearchIcon className="me-1.5 h-4 w-4" />
|
|
||||||
<Trans>Search</Trans>
|
|
||||||
<span className="flex items-center ms-3.5">
|
|
||||||
<Kbd>{isMac ? "⌘" : "Ctrl"}</Kbd>
|
|
||||||
<Kbd>K</Kbd>
|
|
||||||
</span>
|
</span>
|
||||||
</span>
|
</a>
|
||||||
</Button>
|
</DropdownMenuItem>
|
||||||
<Suspense>
|
<DropdownMenuItem asChild>
|
||||||
<CommandPalette open={open} setOpen={setOpen} />
|
<a href={prependBasePath("/_/#/collections?collection=systems")} target="_blank">
|
||||||
</Suspense>
|
<ServerIcon className="me-2.5 h-4 w-4" />
|
||||||
</>
|
<span>
|
||||||
|
<Trans>Systems</Trans>
|
||||||
|
</span>
|
||||||
|
</a>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
<DropdownMenuItem asChild>
|
||||||
|
<a href={prependBasePath("/_/#/logs")} target="_blank">
|
||||||
|
<LogsIcon className="me-2.5 h-4 w-4" />
|
||||||
|
<span>
|
||||||
|
<Trans>Logs</Trans>
|
||||||
|
</span>
|
||||||
|
</a>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
<DropdownMenuItem asChild>
|
||||||
|
<a href={prependBasePath("/_/#/settings/backups")} target="_blank">
|
||||||
|
<DatabaseBackupIcon className="me-2.5 h-4 w-4" />
|
||||||
|
<span>
|
||||||
|
<Trans>Backups</Trans>
|
||||||
|
</span>
|
||||||
|
</a>
|
||||||
|
</DropdownMenuItem>
|
||||||
|
</DropdownMenuGroup>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ import Slider from "@/components/ui/slider"
|
|||||||
import { HourFormat, Unit } from "@/lib/enums"
|
import { HourFormat, Unit } from "@/lib/enums"
|
||||||
import { dynamicActivate } from "@/lib/i18n"
|
import { dynamicActivate } from "@/lib/i18n"
|
||||||
import languages from "@/lib/languages"
|
import languages from "@/lib/languages"
|
||||||
import { $userSettings } from "@/lib/stores"
|
import { $userSettings, defaultLayoutWidth } from "@/lib/stores"
|
||||||
import { chartTimeData, currentHour12 } from "@/lib/utils"
|
import { chartTimeData, currentHour12 } from "@/lib/utils"
|
||||||
import type { UserSettings } from "@/types"
|
import type { UserSettings } from "@/types"
|
||||||
import { saveSettings } from "./layout"
|
import { saveSettings } from "./layout"
|
||||||
@@ -21,7 +21,7 @@ export default function SettingsProfilePage({ userSettings }: { userSettings: Us
|
|||||||
const [isLoading, setIsLoading] = useState(false)
|
const [isLoading, setIsLoading] = useState(false)
|
||||||
const { i18n } = useLingui()
|
const { i18n } = useLingui()
|
||||||
const currentUserSettings = useStore($userSettings)
|
const currentUserSettings = useStore($userSettings)
|
||||||
const layoutWidth = currentUserSettings.layoutWidth ?? 1500
|
const layoutWidth = currentUserSettings.layoutWidth ?? defaultLayoutWidth
|
||||||
|
|
||||||
async function handleSubmit(e: React.FormEvent<HTMLFormElement>) {
|
async function handleSubmit(e: React.FormEvent<HTMLFormElement>) {
|
||||||
e.preventDefault()
|
e.preventDefault()
|
||||||
|
|||||||
@@ -134,10 +134,10 @@ export function QuietHours() {
|
|||||||
const startMinutes = startDate.getUTCHours() * 60 + startDate.getUTCMinutes()
|
const startMinutes = startDate.getUTCHours() * 60 + startDate.getUTCMinutes()
|
||||||
const endMinutes = endDate.getUTCHours() * 60 + endDate.getUTCMinutes()
|
const endMinutes = endDate.getUTCHours() * 60 + endDate.getUTCMinutes()
|
||||||
|
|
||||||
// Convert UTC to local time offset
|
// Convert UTC to local time using the stored date's offset, not the current date's offset
|
||||||
const offset = now.getTimezoneOffset()
|
// This avoids DST mismatch when records were saved in a different DST period
|
||||||
const localStartMinutes = (startMinutes - offset + 1440) % 1440
|
const localStartMinutes = (startMinutes - startDate.getTimezoneOffset() + 1440) % 1440
|
||||||
const localEndMinutes = (endMinutes - offset + 1440) % 1440
|
const localEndMinutes = (endMinutes - endDate.getTimezoneOffset() + 1440) % 1440
|
||||||
|
|
||||||
// Handle cases where window spans midnight
|
// Handle cases where window spans midnight
|
||||||
if (localStartMinutes <= localEndMinutes) {
|
if (localStartMinutes <= localEndMinutes) {
|
||||||
@@ -347,12 +347,13 @@ function QuietHoursDialog({
|
|||||||
|
|
||||||
if (windowType === "daily") {
|
if (windowType === "daily") {
|
||||||
// For daily windows, convert local time to UTC
|
// For daily windows, convert local time to UTC
|
||||||
// Create a date with the time in local timezone, then convert to UTC
|
// Use today's date so the current DST offset is applied (not a fixed historical date)
|
||||||
const startDate = new Date(`2000-01-01T${startTime}:00`)
|
const today = new Date().toISOString().split("T")[0]
|
||||||
|
const startDate = new Date(`${today}T${startTime}:00`)
|
||||||
startValue = startDate.toISOString()
|
startValue = startDate.toISOString()
|
||||||
|
|
||||||
if (endTime) {
|
if (endTime) {
|
||||||
const endDate = new Date(`2000-01-01T${endTime}:00`)
|
const endDate = new Date(`${today}T${endTime}:00`)
|
||||||
endValue = endDate.toISOString()
|
endValue = endDate.toISOString()
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
133
internal/site/src/components/routes/system/chart-card.tsx
Normal file
133
internal/site/src/components/routes/system/chart-card.tsx
Normal file
@@ -0,0 +1,133 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import { Trans, useLingui } from "@lingui/react/macro"
|
||||||
|
import { useStore } from "@nanostores/react"
|
||||||
|
import { XIcon } from "lucide-react"
|
||||||
|
import React, { type JSX, memo, useCallback, useEffect, useState } from "react"
|
||||||
|
import { $containerFilter, $maxValues } from "@/lib/stores"
|
||||||
|
import { useIntersectionObserver } from "@/lib/use-intersection-observer"
|
||||||
|
import { cn } from "@/lib/utils"
|
||||||
|
import Spinner from "../../spinner"
|
||||||
|
import { Button } from "../../ui/button"
|
||||||
|
import { Card, CardDescription, CardHeader, CardTitle } from "../../ui/card"
|
||||||
|
import { ChartAverage, ChartMax } from "../../ui/icons"
|
||||||
|
import { Input } from "../../ui/input"
|
||||||
|
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "../../ui/select"
|
||||||
|
|
||||||
|
export function FilterBar({ store = $containerFilter }: { store?: typeof $containerFilter }) {
|
||||||
|
const storeValue = useStore(store)
|
||||||
|
const [inputValue, setInputValue] = useState(storeValue)
|
||||||
|
const { t } = useLingui()
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
setInputValue(storeValue)
|
||||||
|
}, [storeValue])
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (inputValue === storeValue) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
const handle = window.setTimeout(() => store.set(inputValue), 80)
|
||||||
|
return () => clearTimeout(handle)
|
||||||
|
}, [inputValue, storeValue, store])
|
||||||
|
|
||||||
|
const handleChange = useCallback((e: React.ChangeEvent<HTMLInputElement>) => {
|
||||||
|
const value = e.target.value
|
||||||
|
setInputValue(value)
|
||||||
|
}, [])
|
||||||
|
|
||||||
|
const handleClear = useCallback(() => {
|
||||||
|
setInputValue("")
|
||||||
|
store.set("")
|
||||||
|
}, [store])
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<Input
|
||||||
|
placeholder={t`Filter...`}
|
||||||
|
className="ps-4 pe-8 w-full sm:w-44"
|
||||||
|
onChange={handleChange}
|
||||||
|
value={inputValue}
|
||||||
|
/>
|
||||||
|
{inputValue && (
|
||||||
|
<Button
|
||||||
|
type="button"
|
||||||
|
variant="ghost"
|
||||||
|
size="icon"
|
||||||
|
aria-label="Clear"
|
||||||
|
className="absolute right-1 top-1/2 -translate-y-1/2 h-7 w-7 text-gray-500 hover:text-gray-900 dark:text-gray-400 dark:hover:text-gray-100"
|
||||||
|
onClick={handleClear}
|
||||||
|
>
|
||||||
|
<XIcon className="h-4 w-4" />
|
||||||
|
</Button>
|
||||||
|
)}
|
||||||
|
</>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
export const SelectAvgMax = memo(({ max }: { max: boolean }) => {
|
||||||
|
const Icon = max ? ChartMax : ChartAverage
|
||||||
|
return (
|
||||||
|
<Select value={max ? "max" : "avg"} onValueChange={(e) => $maxValues.set(e === "max")}>
|
||||||
|
<SelectTrigger className="relative ps-10 pe-5 w-full sm:w-44">
|
||||||
|
<Icon className="h-4 w-4 absolute start-4 top-1/2 -translate-y-1/2 opacity-85" />
|
||||||
|
<SelectValue />
|
||||||
|
</SelectTrigger>
|
||||||
|
<SelectContent>
|
||||||
|
<SelectItem key="avg" value="avg">
|
||||||
|
<Trans>Average</Trans>
|
||||||
|
</SelectItem>
|
||||||
|
<SelectItem key="max" value="max">
|
||||||
|
<Trans comment="Chart select field. Please try to keep this short.">Max 1 min</Trans>
|
||||||
|
</SelectItem>
|
||||||
|
</SelectContent>
|
||||||
|
</Select>
|
||||||
|
)
|
||||||
|
})
|
||||||
|
|
||||||
|
export function ChartCard({
|
||||||
|
title,
|
||||||
|
description,
|
||||||
|
children,
|
||||||
|
grid,
|
||||||
|
empty,
|
||||||
|
cornerEl,
|
||||||
|
legend,
|
||||||
|
className,
|
||||||
|
}: {
|
||||||
|
title: string
|
||||||
|
description: string
|
||||||
|
children: React.ReactNode
|
||||||
|
grid?: boolean
|
||||||
|
empty?: boolean
|
||||||
|
cornerEl?: JSX.Element | null
|
||||||
|
legend?: boolean
|
||||||
|
className?: string
|
||||||
|
}) {
|
||||||
|
const { isIntersecting, ref } = useIntersectionObserver()
|
||||||
|
|
||||||
|
return (
|
||||||
|
<Card
|
||||||
|
className={cn(
|
||||||
|
"px-3 py-5 sm:py-6 sm:px-6 odd:last-of-type:col-span-full min-h-full",
|
||||||
|
{ "col-span-full": !grid },
|
||||||
|
className
|
||||||
|
)}
|
||||||
|
ref={ref}
|
||||||
|
>
|
||||||
|
<CardHeader className="gap-1.5 relative p-0 mb-3 sm:mb-4">
|
||||||
|
<CardTitle>{title}</CardTitle>
|
||||||
|
<CardDescription>{description}</CardDescription>
|
||||||
|
{cornerEl && <div className="grid sm:justify-end sm:absolute sm:top-0 sm:end-0 my-1 sm:my-0">{cornerEl}</div>}
|
||||||
|
</CardHeader>
|
||||||
|
<div className={cn("ps-0 -me-1 -ms-3.5 relative group", legend ? "h-54 md:h-56" : "h-48 md:h-52")}>
|
||||||
|
{
|
||||||
|
<Spinner
|
||||||
|
msg={empty ? t`Waiting for enough records to display` : undefined}
|
||||||
|
className="group-has-[.opacity-100]:invisible duration-100"
|
||||||
|
/>
|
||||||
|
}
|
||||||
|
{isIntersecting && children}
|
||||||
|
</div>
|
||||||
|
</Card>
|
||||||
|
)
|
||||||
|
}
|
||||||
116
internal/site/src/components/routes/system/chart-data.ts
Normal file
116
internal/site/src/components/routes/system/chart-data.ts
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
import { timeTicks } from "d3-time"
|
||||||
|
import { getPbTimestamp, pb } from "@/lib/api"
|
||||||
|
import { chartTimeData } from "@/lib/utils"
|
||||||
|
import type { ChartData, ChartTimes, ContainerStatsRecord, SystemStatsRecord } from "@/types"
|
||||||
|
|
||||||
|
type ChartTimeData = {
|
||||||
|
time: number
|
||||||
|
data: {
|
||||||
|
ticks: number[]
|
||||||
|
domain: number[]
|
||||||
|
}
|
||||||
|
chartTime: ChartTimes
|
||||||
|
}
|
||||||
|
|
||||||
|
export const cache = new Map<
|
||||||
|
string,
|
||||||
|
ChartTimeData | SystemStatsRecord[] | ContainerStatsRecord[] | ChartData["containerData"]
|
||||||
|
>()
|
||||||
|
|
||||||
|
// create ticks and domain for charts
|
||||||
|
export function getTimeData(chartTime: ChartTimes, lastCreated: number) {
|
||||||
|
const cached = cache.get("td") as ChartTimeData | undefined
|
||||||
|
if (cached && cached.chartTime === chartTime) {
|
||||||
|
if (!lastCreated || cached.time >= lastCreated) {
|
||||||
|
return cached.data
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// const buffer = chartTime === "1m" ? 400 : 20_000
|
||||||
|
const now = new Date(Date.now())
|
||||||
|
const startTime = chartTimeData[chartTime].getOffset(now)
|
||||||
|
const ticks = timeTicks(startTime, now, chartTimeData[chartTime].ticks ?? 12).map((date) => date.getTime())
|
||||||
|
const data = {
|
||||||
|
ticks,
|
||||||
|
domain: [chartTimeData[chartTime].getOffset(now).getTime(), now.getTime()],
|
||||||
|
}
|
||||||
|
cache.set("td", { time: now.getTime(), data, chartTime })
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Append new records onto prev with gap detection. Converts string `created` values to ms timestamps in place.
|
||||||
|
* Pass `maxLen` to cap the result length in one copy instead of slicing again after the call. */
|
||||||
|
export function appendData<T extends { created: string | number | null }>(
|
||||||
|
prev: T[],
|
||||||
|
newRecords: T[],
|
||||||
|
expectedInterval: number,
|
||||||
|
maxLen?: number
|
||||||
|
): T[] {
|
||||||
|
if (!newRecords.length) return prev
|
||||||
|
// Pre-trim prev so the single slice() below is the only copy we make
|
||||||
|
const trimmed = maxLen && prev.length >= maxLen ? prev.slice(-(maxLen - newRecords.length)) : prev
|
||||||
|
const result = trimmed.slice()
|
||||||
|
let prevTime = (trimmed.at(-1)?.created as number) ?? 0
|
||||||
|
for (const record of newRecords) {
|
||||||
|
if (record.created !== null) {
|
||||||
|
if (typeof record.created === "string") {
|
||||||
|
record.created = new Date(record.created).getTime()
|
||||||
|
}
|
||||||
|
if (prevTime && (record.created as number) - prevTime > expectedInterval * 1.5) {
|
||||||
|
result.push({ created: null, ...("stats" in record ? { stats: null } : {}) } as T)
|
||||||
|
}
|
||||||
|
prevTime = record.created as number
|
||||||
|
}
|
||||||
|
result.push(record)
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function getStats<T extends SystemStatsRecord | ContainerStatsRecord>(
|
||||||
|
collection: string,
|
||||||
|
systemId: string,
|
||||||
|
chartTime: ChartTimes
|
||||||
|
): Promise<T[]> {
|
||||||
|
const cachedStats = cache.get(`${systemId}_${chartTime}_${collection}`) as T[] | undefined
|
||||||
|
const lastCached = cachedStats?.at(-1)?.created as number
|
||||||
|
return await pb.collection<T>(collection).getFullList({
|
||||||
|
filter: pb.filter("system={:id} && created > {:created} && type={:type}", {
|
||||||
|
id: systemId,
|
||||||
|
created: getPbTimestamp(chartTime, lastCached ? new Date(lastCached + 1000) : undefined),
|
||||||
|
type: chartTimeData[chartTime].type,
|
||||||
|
}),
|
||||||
|
fields: "created,stats",
|
||||||
|
sort: "created",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
export function makeContainerData(containers: ContainerStatsRecord[]): ChartData["containerData"] {
|
||||||
|
const result = [] as ChartData["containerData"]
|
||||||
|
for (const { created, stats } of containers) {
|
||||||
|
if (!created) {
|
||||||
|
result.push({ created: null } as ChartData["containerData"][0])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
result.push(makeContainerPoint(new Date(created).getTime(), stats))
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Transform a single realtime container stats message into a ChartDataContainer point. */
|
||||||
|
export function makeContainerPoint(
|
||||||
|
created: number,
|
||||||
|
stats: ContainerStatsRecord["stats"]
|
||||||
|
): ChartData["containerData"][0] {
|
||||||
|
const point: ChartData["containerData"][0] = { created } as ChartData["containerData"][0]
|
||||||
|
for (const container of stats) {
|
||||||
|
;(point as Record<string, unknown>)[container.n] = container
|
||||||
|
}
|
||||||
|
return point
|
||||||
|
}
|
||||||
|
|
||||||
|
export function dockerOrPodman(str: string, isPodman: boolean): string {
|
||||||
|
if (isPodman) {
|
||||||
|
return str.replace("docker", "podman").replace("Docker", "Podman")
|
||||||
|
}
|
||||||
|
return str
|
||||||
|
}
|
||||||
@@ -0,0 +1,99 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import AreaChartDefault from "@/components/charts/area-chart"
|
||||||
|
import { useContainerDataPoints } from "@/components/charts/hooks"
|
||||||
|
import { decimalString, toFixedFloat } from "@/lib/utils"
|
||||||
|
import type { ChartConfig } from "@/components/ui/chart"
|
||||||
|
import type { ChartData } from "@/types"
|
||||||
|
import { pinnedAxisDomain } from "@/components/ui/chart"
|
||||||
|
import CpuCoresSheet from "../cpu-sheet"
|
||||||
|
import { ChartCard, FilterBar, SelectAvgMax } from "../chart-card"
|
||||||
|
import { dockerOrPodman } from "../chart-data"
|
||||||
|
|
||||||
|
export function CpuChart({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
showMax,
|
||||||
|
isLongerChart,
|
||||||
|
maxValues,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
showMax: boolean
|
||||||
|
isLongerChart: boolean
|
||||||
|
maxValues: boolean
|
||||||
|
}) {
|
||||||
|
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||||
|
|
||||||
|
return (
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={t`CPU Usage`}
|
||||||
|
description={t`Average system-wide CPU utilization`}
|
||||||
|
cornerEl={
|
||||||
|
<div className="flex gap-2">
|
||||||
|
{maxValSelect}
|
||||||
|
<CpuCoresSheet chartData={chartData} dataEmpty={dataEmpty} grid={grid} maxValues={maxValues} />
|
||||||
|
</div>
|
||||||
|
}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
maxToggled={showMax}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t`CPU Usage`,
|
||||||
|
dataKey: ({ stats }) => (showMax ? stats?.cpum : stats?.cpu),
|
||||||
|
color: 1,
|
||||||
|
opacity: 0.4,
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
|
||||||
|
contentFormatter={({ value }) => `${decimalString(value)}%`}
|
||||||
|
domain={pinnedAxisDomain()}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
export function ContainerCpuChart({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
isPodman,
|
||||||
|
cpuConfig,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
isPodman: boolean
|
||||||
|
cpuConfig: ChartConfig
|
||||||
|
}) {
|
||||||
|
const { filter, dataPoints } = useContainerDataPoints(cpuConfig, (key, data) => data[key]?.c ?? null)
|
||||||
|
|
||||||
|
return (
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={dockerOrPodman(t`Docker CPU Usage`, isPodman)}
|
||||||
|
description={t`Average CPU utilization of containers`}
|
||||||
|
cornerEl={<FilterBar />}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
customData={chartData.containerData}
|
||||||
|
dataPoints={dataPoints}
|
||||||
|
tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
|
||||||
|
contentFormatter={({ value }) => `${decimalString(value)}%`}
|
||||||
|
domain={pinnedAxisDomain()}
|
||||||
|
showTotal={true}
|
||||||
|
reverseStackOrder={true}
|
||||||
|
filter={filter}
|
||||||
|
truncate={true}
|
||||||
|
itemSorter={(a, b) => b.value - a.value}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -0,0 +1,106 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import AreaChartDefault from "@/components/charts/area-chart"
|
||||||
|
import { $userSettings } from "@/lib/stores"
|
||||||
|
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||||
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
|
import { ChartCard, SelectAvgMax } from "../chart-card"
|
||||||
|
import { Unit } from "@/lib/enums"
|
||||||
|
|
||||||
|
export function DiskCharts({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
showMax,
|
||||||
|
isLongerChart,
|
||||||
|
maxValues,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
showMax: boolean
|
||||||
|
isLongerChart: boolean
|
||||||
|
maxValues: boolean
|
||||||
|
systemStats: SystemStatsRecord[]
|
||||||
|
}) {
|
||||||
|
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||||
|
const userSettings = $userSettings.get()
|
||||||
|
|
||||||
|
let diskSize = chartData.systemStats?.at(-1)?.stats.d ?? NaN
|
||||||
|
// round to nearest GB
|
||||||
|
if (diskSize >= 100) {
|
||||||
|
diskSize = Math.round(diskSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
<ChartCard empty={dataEmpty} grid={grid} title={t`Disk Usage`} description={t`Usage of root partition`}>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
domain={[0, diskSize]}
|
||||||
|
tickFormatter={(val) => {
|
||||||
|
const { value, unit } = formatBytes(val * 1024, false, Unit.Bytes, true)
|
||||||
|
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||||
|
}}
|
||||||
|
contentFormatter={({ value }) => {
|
||||||
|
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
||||||
|
return `${decimalString(convertedValue)} ${unit}`
|
||||||
|
}}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t`Disk Usage`,
|
||||||
|
color: 4,
|
||||||
|
opacity: 0.4,
|
||||||
|
dataKey: ({ stats }) => stats?.du,
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
></AreaChartDefault>
|
||||||
|
</ChartCard>
|
||||||
|
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={t`Disk I/O`}
|
||||||
|
description={t`Throughput of root filesystem`}
|
||||||
|
cornerEl={maxValSelect}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
maxToggled={showMax}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t({ message: "Write", comment: "Disk write" }),
|
||||||
|
dataKey: ({ stats }: SystemStatsRecord) => {
|
||||||
|
if (showMax) {
|
||||||
|
return stats?.dio?.[1] ?? (stats?.dwm ?? 0) * 1024 * 1024
|
||||||
|
}
|
||||||
|
return stats?.dio?.[1] ?? (stats?.dw ?? 0) * 1024 * 1024
|
||||||
|
},
|
||||||
|
color: 3,
|
||||||
|
opacity: 0.3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: t({ message: "Read", comment: "Disk read" }),
|
||||||
|
dataKey: ({ stats }: SystemStatsRecord) => {
|
||||||
|
if (showMax) {
|
||||||
|
return stats?.diom?.[0] ?? (stats?.drm ?? 0) * 1024 * 1024
|
||||||
|
}
|
||||||
|
return stats?.dio?.[0] ?? (stats?.dr ?? 0) * 1024 * 1024
|
||||||
|
},
|
||||||
|
color: 1,
|
||||||
|
opacity: 0.3,
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
tickFormatter={(val) => {
|
||||||
|
const { value, unit } = formatBytes(val, true, userSettings.unitDisk, false)
|
||||||
|
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||||
|
}}
|
||||||
|
contentFormatter={({ value }) => {
|
||||||
|
const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
|
||||||
|
return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
|
||||||
|
}}
|
||||||
|
showTotal={true}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
</>
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -0,0 +1,120 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import AreaChartDefault from "@/components/charts/area-chart"
|
||||||
|
import { $userSettings } from "@/lib/stores"
|
||||||
|
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||||
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
|
import { ChartCard, SelectAvgMax } from "../chart-card"
|
||||||
|
import { Unit } from "@/lib/enums"
|
||||||
|
|
||||||
|
export function ExtraFsCharts({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
showMax,
|
||||||
|
isLongerChart,
|
||||||
|
maxValues,
|
||||||
|
systemStats,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
showMax: boolean
|
||||||
|
isLongerChart: boolean
|
||||||
|
maxValues: boolean
|
||||||
|
systemStats: SystemStatsRecord[]
|
||||||
|
}) {
|
||||||
|
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||||
|
const userSettings = $userSettings.get()
|
||||||
|
const extraFs = systemStats.at(-1)?.stats.efs
|
||||||
|
if (!extraFs || Object.keys(extraFs).length === 0) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="grid xl:grid-cols-2 gap-4">
|
||||||
|
{Object.keys(extraFs).map((extraFsName) => {
|
||||||
|
let diskSize = systemStats.at(-1)?.stats.efs?.[extraFsName].d ?? NaN
|
||||||
|
// round to nearest GB
|
||||||
|
if (diskSize >= 100) {
|
||||||
|
diskSize = Math.round(diskSize)
|
||||||
|
}
|
||||||
|
return (
|
||||||
|
<div key={extraFsName} className="contents">
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={`${extraFsName} ${t`Usage`}`}
|
||||||
|
description={t`Disk usage of ${extraFsName}`}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
domain={[0, diskSize]}
|
||||||
|
tickFormatter={(val) => {
|
||||||
|
const { value, unit } = formatBytes(val * 1024, false, Unit.Bytes, true)
|
||||||
|
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||||
|
}}
|
||||||
|
contentFormatter={({ value }) => {
|
||||||
|
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
||||||
|
return `${decimalString(convertedValue)} ${unit}`
|
||||||
|
}}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t`Disk Usage`,
|
||||||
|
color: 4,
|
||||||
|
opacity: 0.4,
|
||||||
|
dataKey: ({ stats }) => stats?.efs?.[extraFsName]?.du,
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
></AreaChartDefault>
|
||||||
|
</ChartCard>
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={`${extraFsName} I/O`}
|
||||||
|
description={t`Throughput of ${extraFsName}`}
|
||||||
|
cornerEl={maxValSelect}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
showTotal={true}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t`Write`,
|
||||||
|
dataKey: ({ stats }) => {
|
||||||
|
if (showMax) {
|
||||||
|
return stats?.efs?.[extraFsName]?.wbm || (stats?.efs?.[extraFsName]?.wm ?? 0) * 1024 * 1024
|
||||||
|
}
|
||||||
|
return stats?.efs?.[extraFsName]?.wb || (stats?.efs?.[extraFsName]?.w ?? 0) * 1024 * 1024
|
||||||
|
},
|
||||||
|
color: 3,
|
||||||
|
opacity: 0.3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: t`Read`,
|
||||||
|
dataKey: ({ stats }) => {
|
||||||
|
if (showMax) {
|
||||||
|
return stats?.efs?.[extraFsName]?.rbm ?? (stats?.efs?.[extraFsName]?.rm ?? 0) * 1024 * 1024
|
||||||
|
}
|
||||||
|
return stats?.efs?.[extraFsName]?.rb ?? (stats?.efs?.[extraFsName]?.r ?? 0) * 1024 * 1024
|
||||||
|
},
|
||||||
|
color: 1,
|
||||||
|
opacity: 0.3,
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
maxToggled={showMax}
|
||||||
|
tickFormatter={(val) => {
|
||||||
|
const { value, unit } = formatBytes(val, true, userSettings.unitDisk, false)
|
||||||
|
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||||
|
}}
|
||||||
|
contentFormatter={({ value }) => {
|
||||||
|
const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
|
||||||
|
return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
|
||||||
|
}}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
232
internal/site/src/components/routes/system/charts/gpu-charts.tsx
Normal file
232
internal/site/src/components/routes/system/charts/gpu-charts.tsx
Normal file
@@ -0,0 +1,232 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import { useRef, useMemo } from "react"
|
||||||
|
import AreaChartDefault, { type DataPoint } from "@/components/charts/area-chart"
|
||||||
|
import LineChartDefault from "@/components/charts/line-chart"
|
||||||
|
import { Unit } from "@/lib/enums"
|
||||||
|
import { cn, decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||||
|
import type { ChartData, GPUData, SystemStatsRecord } from "@/types"
|
||||||
|
import { ChartCard } from "../chart-card"
|
||||||
|
|
||||||
|
/** GPU power draw chart for the main grid */
|
||||||
|
export function GpuPowerChart({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
}) {
|
||||||
|
const packageKey = " package"
|
||||||
|
const statsRef = useRef(chartData.systemStats)
|
||||||
|
statsRef.current = chartData.systemStats
|
||||||
|
|
||||||
|
// Derive GPU power config key (cheap per render)
|
||||||
|
let gpuPowerKey = ""
|
||||||
|
for (let i = chartData.systemStats.length - 1; i >= 0; i--) {
|
||||||
|
const gpus = chartData.systemStats[i].stats?.g
|
||||||
|
if (gpus) {
|
||||||
|
const parts: string[] = []
|
||||||
|
for (const id in gpus) {
|
||||||
|
const gpu = gpus[id] as GPUData
|
||||||
|
if (gpu.p !== undefined) parts.push(`${id}:${gpu.n}`)
|
||||||
|
if (gpu.pp !== undefined) parts.push(`${id}:${gpu.n}${packageKey}`)
|
||||||
|
}
|
||||||
|
gpuPowerKey = parts.sort().join("\0")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const dataPoints = useMemo((): DataPoint[] => {
|
||||||
|
if (!gpuPowerKey) return []
|
||||||
|
const totals = new Map<string, { label: string; gpuId: string; isPackage: boolean; total: number }>()
|
||||||
|
for (const record of statsRef.current) {
|
||||||
|
const gpus = record.stats?.g
|
||||||
|
if (!gpus) continue
|
||||||
|
for (const id in gpus) {
|
||||||
|
const gpu = gpus[id] as GPUData
|
||||||
|
const key = gpu.n
|
||||||
|
const existing = totals.get(key)
|
||||||
|
if (existing) {
|
||||||
|
existing.total += gpu.p ?? 0
|
||||||
|
} else {
|
||||||
|
totals.set(key, { label: gpu.n, gpuId: id, isPackage: false, total: gpu.p ?? 0 })
|
||||||
|
}
|
||||||
|
if (gpu.pp !== undefined) {
|
||||||
|
const pkgKey = `${gpu.n}${packageKey}`
|
||||||
|
const existingPkg = totals.get(pkgKey)
|
||||||
|
if (existingPkg) {
|
||||||
|
existingPkg.total += gpu.pp
|
||||||
|
} else {
|
||||||
|
totals.set(pkgKey, { label: pkgKey, gpuId: id, isPackage: true, total: gpu.pp })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
const sorted = Array.from(totals.values()).sort((a, b) => b.total - a.total)
|
||||||
|
return sorted.map(
|
||||||
|
(entry, i): DataPoint => ({
|
||||||
|
label: entry.label,
|
||||||
|
dataKey: (data: SystemStatsRecord) => {
|
||||||
|
const gpu = data.stats?.g?.[entry.gpuId]
|
||||||
|
return entry.isPackage ? (gpu?.pp ?? 0) : (gpu?.p ?? 0)
|
||||||
|
},
|
||||||
|
color: `hsl(${226 + (((i * 360) / sorted.length) % 360)}, 65%, 52%)`,
|
||||||
|
opacity: 1,
|
||||||
|
})
|
||||||
|
)
|
||||||
|
}, [gpuPowerKey])
|
||||||
|
|
||||||
|
return (
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={t`GPU Power Draw`}
|
||||||
|
description={t`Average power consumption of GPUs`}
|
||||||
|
>
|
||||||
|
<LineChartDefault
|
||||||
|
legend={dataPoints.length > 1}
|
||||||
|
chartData={chartData}
|
||||||
|
dataPoints={dataPoints}
|
||||||
|
itemSorter={(a: { value: number }, b: { value: number }) => b.value - a.value}
|
||||||
|
tickFormatter={(val) => `${toFixedFloat(val, 2)}W`}
|
||||||
|
contentFormatter={({ value }) => `${decimalString(value)}W`}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/** GPU detail grid (engines + per-GPU usage/VRAM) — rendered outside the main 2-col grid */
|
||||||
|
export function GpuDetailCharts({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
lastGpus,
|
||||||
|
hasGpuEnginesData,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
lastGpus: Record<string, GPUData>
|
||||||
|
hasGpuEnginesData: boolean
|
||||||
|
}) {
|
||||||
|
return (
|
||||||
|
<div className="grid xl:grid-cols-2 gap-4">
|
||||||
|
{hasGpuEnginesData && (
|
||||||
|
<ChartCard
|
||||||
|
legend={true}
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={t`GPU Engines`}
|
||||||
|
description={t`Average utilization of GPU engines`}
|
||||||
|
>
|
||||||
|
<GpuEnginesChart chartData={chartData} />
|
||||||
|
</ChartCard>
|
||||||
|
)}
|
||||||
|
{Object.keys(lastGpus).map((id) => {
|
||||||
|
const gpu = lastGpus[id] as GPUData
|
||||||
|
return (
|
||||||
|
<div key={id} className="contents">
|
||||||
|
<ChartCard
|
||||||
|
className={cn(grid && "!col-span-1")}
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={`${gpu.n} ${t`Usage`}`}
|
||||||
|
description={t`Average utilization of ${gpu.n}`}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t`Usage`,
|
||||||
|
dataKey: ({ stats }) => stats?.g?.[id]?.u ?? 0,
|
||||||
|
color: 1,
|
||||||
|
opacity: 0.35,
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
|
||||||
|
contentFormatter={({ value }) => `${decimalString(value)}%`}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
|
||||||
|
{(gpu.mt ?? 0) > 0 && (
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={`${gpu.n} VRAM`}
|
||||||
|
description={t`Precise utilization at the recorded time`}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t`Usage`,
|
||||||
|
dataKey: ({ stats }) => stats?.g?.[id]?.mu ?? 0,
|
||||||
|
color: 2,
|
||||||
|
opacity: 0.25,
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
max={gpu.mt}
|
||||||
|
tickFormatter={(val) => {
|
||||||
|
const { value, unit } = formatBytes(val, false, Unit.Bytes, true)
|
||||||
|
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||||
|
}}
|
||||||
|
contentFormatter={({ value }) => {
|
||||||
|
const { value: convertedValue, unit } = formatBytes(value, false, Unit.Bytes, true)
|
||||||
|
return `${decimalString(convertedValue)} ${unit}`
|
||||||
|
}}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
function GpuEnginesChart({ chartData }: { chartData: ChartData }) {
|
||||||
|
// Derive stable engine config key (cheap per render)
|
||||||
|
let enginesKey = ""
|
||||||
|
for (let i = chartData.systemStats.length - 1; i >= 0; i--) {
|
||||||
|
const gpus = chartData.systemStats[i].stats?.g
|
||||||
|
if (!gpus) continue
|
||||||
|
for (const id in gpus) {
|
||||||
|
if (gpus[id].e) {
|
||||||
|
enginesKey = id + "\0" + Object.keys(gpus[id].e).sort().join("\0")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (enginesKey) break
|
||||||
|
}
|
||||||
|
|
||||||
|
const { gpuId, dataPoints } = useMemo((): { gpuId: string | null; dataPoints: DataPoint[] } => {
|
||||||
|
if (!enginesKey) return { gpuId: null, dataPoints: [] }
|
||||||
|
const parts = enginesKey.split("\0")
|
||||||
|
const gId = parts[0]
|
||||||
|
const engineNames = parts.slice(1)
|
||||||
|
return {
|
||||||
|
gpuId: gId,
|
||||||
|
dataPoints: engineNames.map((engine, i) => ({
|
||||||
|
label: engine,
|
||||||
|
dataKey: ({ stats }: SystemStatsRecord) => stats?.g?.[gId]?.e?.[engine] ?? 0,
|
||||||
|
color: `hsl(${140 + (((i * 360) / engineNames.length) % 360)}, 65%, 52%)`,
|
||||||
|
opacity: 0.35,
|
||||||
|
})),
|
||||||
|
}
|
||||||
|
}, [enginesKey])
|
||||||
|
|
||||||
|
if (!gpuId) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<LineChartDefault
|
||||||
|
legend={true}
|
||||||
|
chartData={chartData}
|
||||||
|
dataPoints={dataPoints}
|
||||||
|
tickFormatter={(val) => `${toFixedFloat(val, 2)}%`}
|
||||||
|
contentFormatter={({ value }) => `${decimalString(value)}%`}
|
||||||
|
/>
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -0,0 +1,55 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import type { ChartData } from "@/types"
|
||||||
|
import { ChartCard } from "../chart-card"
|
||||||
|
import LineChartDefault from "@/components/charts/line-chart"
|
||||||
|
import { decimalString, toFixedFloat } from "@/lib/utils"
|
||||||
|
|
||||||
|
export function LoadAverageChart({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
}) {
|
||||||
|
const { major, minor } = chartData.agentVersion
|
||||||
|
if (major === 0 && minor <= 12) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
return (
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={t`Load Average`}
|
||||||
|
description={t`System load averages over time`}
|
||||||
|
legend={true}
|
||||||
|
>
|
||||||
|
<LineChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
contentFormatter={(item) => decimalString(item.value)}
|
||||||
|
tickFormatter={(value) => {
|
||||||
|
return String(toFixedFloat(value, 2))
|
||||||
|
}}
|
||||||
|
legend={true}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t({ message: `1 min`, comment: "Load average" }),
|
||||||
|
color: "hsl(271, 81%, 60%)", // Purple
|
||||||
|
dataKey: ({ stats }) => stats?.la?.[0],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: t({ message: `5 min`, comment: "Load average" }),
|
||||||
|
color: "hsl(217, 91%, 60%)", // Blue
|
||||||
|
dataKey: ({ stats }) => stats?.la?.[1],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: t({ message: `15 min`, comment: "Load average" }),
|
||||||
|
color: "hsl(25, 95%, 53%)", // Orange
|
||||||
|
dataKey: ({ stats }) => stats?.la?.[2],
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
></LineChartDefault>
|
||||||
|
</ChartCard>
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -0,0 +1,170 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import AreaChartDefault from "@/components/charts/area-chart"
|
||||||
|
import { useContainerDataPoints } from "@/components/charts/hooks"
|
||||||
|
import { Unit } from "@/lib/enums"
|
||||||
|
import type { ChartConfig } from "@/components/ui/chart"
|
||||||
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
|
import { ChartCard, FilterBar, SelectAvgMax } from "../chart-card"
|
||||||
|
import { dockerOrPodman } from "../chart-data"
|
||||||
|
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||||
|
import { pinnedAxisDomain } from "@/components/ui/chart"
|
||||||
|
|
||||||
|
export function MemoryChart({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
showMax,
|
||||||
|
isLongerChart,
|
||||||
|
maxValues,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
showMax: boolean
|
||||||
|
isLongerChart: boolean
|
||||||
|
maxValues: boolean
|
||||||
|
}) {
|
||||||
|
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||||
|
const totalMem = toFixedFloat(chartData.systemStats.at(-1)?.stats.m ?? 0, 1)
|
||||||
|
|
||||||
|
return (
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={t`Memory Usage`}
|
||||||
|
description={t`Precise utilization at the recorded time`}
|
||||||
|
cornerEl={maxValSelect}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
domain={[0, totalMem]}
|
||||||
|
itemSorter={(a, b) => a.order - b.order}
|
||||||
|
maxToggled={showMax}
|
||||||
|
showTotal={true}
|
||||||
|
tickFormatter={(value) => {
|
||||||
|
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
||||||
|
return `${toFixedFloat(convertedValue, value >= 10 ? 0 : 1)} ${unit}`
|
||||||
|
}}
|
||||||
|
contentFormatter={({ value }) => {
|
||||||
|
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
||||||
|
return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
|
||||||
|
}}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t`Used`,
|
||||||
|
dataKey: ({ stats }) => (showMax ? stats?.mm : stats?.mu),
|
||||||
|
color: 2,
|
||||||
|
opacity: 0.4,
|
||||||
|
stackId: "1",
|
||||||
|
order: 3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: "ZFS ARC",
|
||||||
|
dataKey: ({ stats }) => (showMax ? null : stats?.mz),
|
||||||
|
color: "hsla(175 60% 45% / 0.8)",
|
||||||
|
opacity: 0.5,
|
||||||
|
order: 2,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: t`Cache / Buffers`,
|
||||||
|
dataKey: ({ stats }) => (showMax ? null : stats?.mb),
|
||||||
|
color: "hsla(160 60% 45% / 0.5)",
|
||||||
|
opacity: 0.4,
|
||||||
|
stackId: "1",
|
||||||
|
order: 1,
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
export function ContainerMemoryChart({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
isPodman,
|
||||||
|
memoryConfig,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
isPodman: boolean
|
||||||
|
memoryConfig: ChartConfig
|
||||||
|
}) {
|
||||||
|
const { filter, dataPoints } = useContainerDataPoints(memoryConfig, (key, data) => data[key]?.m ?? null)
|
||||||
|
|
||||||
|
return (
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={dockerOrPodman(t`Docker Memory Usage`, isPodman)}
|
||||||
|
description={dockerOrPodman(t`Memory usage of docker containers`, isPodman)}
|
||||||
|
cornerEl={<FilterBar />}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
customData={chartData.containerData}
|
||||||
|
dataPoints={dataPoints}
|
||||||
|
tickFormatter={(val) => {
|
||||||
|
const { value, unit } = formatBytes(val, false, Unit.Bytes, true)
|
||||||
|
return `${toFixedFloat(value, val >= 10 ? 0 : 1)} ${unit}`
|
||||||
|
}}
|
||||||
|
contentFormatter={(item) => {
|
||||||
|
const { value, unit } = formatBytes(item.value, false, Unit.Bytes, true)
|
||||||
|
return `${decimalString(value)} ${unit}`
|
||||||
|
}}
|
||||||
|
domain={pinnedAxisDomain()}
|
||||||
|
showTotal={true}
|
||||||
|
reverseStackOrder={true}
|
||||||
|
filter={filter}
|
||||||
|
truncate={true}
|
||||||
|
itemSorter={(a, b) => b.value - a.value}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
export function SwapChart({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
systemStats,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
systemStats: SystemStatsRecord[]
|
||||||
|
}) {
|
||||||
|
// const userSettings = useStore($userSettings)
|
||||||
|
|
||||||
|
const hasSwapData = (systemStats.at(-1)?.stats.su ?? 0) > 0
|
||||||
|
if (!hasSwapData) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
return (
|
||||||
|
<ChartCard empty={dataEmpty} grid={grid} title={t`Swap Usage`} description={t`Swap space used by the system`}>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
domain={[0, () => toFixedFloat(chartData.systemStats.at(-1)?.stats.s ?? 0.04, 2)]}
|
||||||
|
contentFormatter={({ value }) => {
|
||||||
|
// mem values are supplied as GB
|
||||||
|
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
||||||
|
return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
|
||||||
|
}}
|
||||||
|
tickFormatter={(value) => {
|
||||||
|
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
||||||
|
return `${toFixedFloat(convertedValue, value >= 10 ? 0 : 1)} ${unit}`
|
||||||
|
}}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t`Used`,
|
||||||
|
dataKey: ({ stats }) => stats?.su,
|
||||||
|
color: 2,
|
||||||
|
opacity: 0.4,
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
></AreaChartDefault>
|
||||||
|
</ChartCard>
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -0,0 +1,183 @@
|
|||||||
|
import { useMemo } from "react"
|
||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import AreaChartDefault from "@/components/charts/area-chart"
|
||||||
|
import { useContainerDataPoints } from "@/components/charts/hooks"
|
||||||
|
import { $userSettings } from "@/lib/stores"
|
||||||
|
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||||
|
import type { ChartConfig } from "@/components/ui/chart"
|
||||||
|
import { pinnedAxisDomain } from "@/components/ui/chart"
|
||||||
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
|
import { Separator } from "@/components/ui/separator"
|
||||||
|
import NetworkSheet from "../network-sheet"
|
||||||
|
import { ChartCard, FilterBar, SelectAvgMax } from "../chart-card"
|
||||||
|
import { dockerOrPodman } from "../chart-data"
|
||||||
|
|
||||||
|
export function BandwidthChart({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
showMax,
|
||||||
|
isLongerChart,
|
||||||
|
maxValues,
|
||||||
|
systemStats,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
showMax: boolean
|
||||||
|
isLongerChart: boolean
|
||||||
|
maxValues: boolean
|
||||||
|
systemStats: SystemStatsRecord[]
|
||||||
|
}) {
|
||||||
|
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||||
|
const userSettings = $userSettings.get()
|
||||||
|
|
||||||
|
return (
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={t`Bandwidth`}
|
||||||
|
cornerEl={
|
||||||
|
<div className="flex gap-2">
|
||||||
|
{maxValSelect}
|
||||||
|
<NetworkSheet chartData={chartData} dataEmpty={dataEmpty} grid={grid} maxValues={maxValues} />
|
||||||
|
</div>
|
||||||
|
}
|
||||||
|
description={t`Network traffic of public interfaces`}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
maxToggled={showMax}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t`Sent`,
|
||||||
|
dataKey(data: SystemStatsRecord) {
|
||||||
|
if (showMax) {
|
||||||
|
return data?.stats?.bm?.[0] ?? (data?.stats?.nsm ?? 0) * 1024 * 1024
|
||||||
|
}
|
||||||
|
return data?.stats?.b?.[0] ?? (data?.stats?.ns ?? 0) * 1024 * 1024
|
||||||
|
},
|
||||||
|
color: 5,
|
||||||
|
opacity: 0.2,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
label: t`Received`,
|
||||||
|
dataKey(data: SystemStatsRecord) {
|
||||||
|
if (showMax) {
|
||||||
|
return data?.stats?.bm?.[1] ?? (data?.stats?.nrm ?? 0) * 1024 * 1024
|
||||||
|
}
|
||||||
|
return data?.stats?.b?.[1] ?? (data?.stats?.nr ?? 0) * 1024 * 1024
|
||||||
|
},
|
||||||
|
color: 2,
|
||||||
|
opacity: 0.2,
|
||||||
|
},
|
||||||
|
]
|
||||||
|
// try to place the lesser number in front for better visibility
|
||||||
|
.sort(() => (systemStats.at(-1)?.stats.b?.[1] ?? 0) - (systemStats.at(-1)?.stats.b?.[0] ?? 0))}
|
||||||
|
tickFormatter={(val) => {
|
||||||
|
const { value, unit } = formatBytes(val, true, userSettings.unitNet, false)
|
||||||
|
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||||
|
}}
|
||||||
|
contentFormatter={(data) => {
|
||||||
|
const { value, unit } = formatBytes(data.value, true, userSettings.unitNet, false)
|
||||||
|
return `${decimalString(value, value >= 100 ? 1 : 2)} ${unit}`
|
||||||
|
}}
|
||||||
|
showTotal={true}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
export function ContainerNetworkChart({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
isPodman,
|
||||||
|
networkConfig,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
isPodman: boolean
|
||||||
|
networkConfig: ChartConfig
|
||||||
|
}) {
|
||||||
|
const userSettings = $userSettings.get()
|
||||||
|
const { filter, dataPoints, filteredKeys } = useContainerDataPoints(networkConfig, (key, data) => {
|
||||||
|
const payload = data[key]
|
||||||
|
if (!payload) return null
|
||||||
|
const sent = payload?.b?.[0] ?? (payload?.ns ?? 0) * 1024 * 1024
|
||||||
|
const recv = payload?.b?.[1] ?? (payload?.nr ?? 0) * 1024 * 1024
|
||||||
|
return sent + recv
|
||||||
|
})
|
||||||
|
|
||||||
|
const contentFormatter = useMemo(() => {
|
||||||
|
const getRxTxBytes = (record?: { b?: [number, number]; ns?: number; nr?: number }) => {
|
||||||
|
if (record?.b?.length && record.b.length >= 2) {
|
||||||
|
return [Number(record.b[0]) || 0, Number(record.b[1]) || 0]
|
||||||
|
}
|
||||||
|
return [(record?.ns ?? 0) * 1024 * 1024, (record?.nr ?? 0) * 1024 * 1024]
|
||||||
|
}
|
||||||
|
const formatRxTx = (recv: number, sent: number) => {
|
||||||
|
const { value: receivedValue, unit: receivedUnit } = formatBytes(recv, true, userSettings.unitNet, false)
|
||||||
|
const { value: sentValue, unit: sentUnit } = formatBytes(sent, true, userSettings.unitNet, false)
|
||||||
|
return (
|
||||||
|
<span className="flex">
|
||||||
|
{decimalString(receivedValue)} {receivedUnit}
|
||||||
|
<span className="opacity-70 ms-0.5"> rx </span>
|
||||||
|
<Separator orientation="vertical" className="h-3 mx-1.5 bg-primary/40" />
|
||||||
|
{decimalString(sentValue)} {sentUnit}
|
||||||
|
<span className="opacity-70 ms-0.5"> tx</span>
|
||||||
|
</span>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: recharts tooltip item
|
||||||
|
return (item: any, key: string) => {
|
||||||
|
try {
|
||||||
|
if (key === "__total__") {
|
||||||
|
let totalSent = 0
|
||||||
|
let totalRecv = 0
|
||||||
|
const payloadData = item?.payload && typeof item.payload === "object" ? item.payload : {}
|
||||||
|
for (const [containerKey, value] of Object.entries(payloadData)) {
|
||||||
|
if (!value || typeof value !== "object") continue
|
||||||
|
if (filteredKeys.has(containerKey)) continue
|
||||||
|
const [sent, recv] = getRxTxBytes(value as { b?: [number, number]; ns?: number; nr?: number })
|
||||||
|
totalSent += sent
|
||||||
|
totalRecv += recv
|
||||||
|
}
|
||||||
|
return formatRxTx(totalRecv, totalSent)
|
||||||
|
}
|
||||||
|
const [sent, recv] = getRxTxBytes(item?.payload?.[key])
|
||||||
|
return formatRxTx(recv, sent)
|
||||||
|
} catch {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, [filteredKeys, userSettings.unitNet])
|
||||||
|
|
||||||
|
return (
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={dockerOrPodman(t`Docker Network I/O`, isPodman)}
|
||||||
|
description={dockerOrPodman(t`Network traffic of docker containers`, isPodman)}
|
||||||
|
cornerEl={<FilterBar />}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
customData={chartData.containerData}
|
||||||
|
dataPoints={dataPoints}
|
||||||
|
tickFormatter={(val) => {
|
||||||
|
const { value, unit } = formatBytes(val, true, userSettings.unitNet, false)
|
||||||
|
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||||
|
}}
|
||||||
|
contentFormatter={contentFormatter}
|
||||||
|
domain={pinnedAxisDomain()}
|
||||||
|
showTotal={true}
|
||||||
|
reverseStackOrder={true}
|
||||||
|
filter={filter}
|
||||||
|
truncate={true}
|
||||||
|
itemSorter={(a, b) => b.value - a.value}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -0,0 +1,209 @@
|
|||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import AreaChartDefault from "@/components/charts/area-chart"
|
||||||
|
import { batteryStateTranslations } from "@/lib/i18n"
|
||||||
|
import { $temperatureFilter, $userSettings } from "@/lib/stores"
|
||||||
|
import { cn, decimalString, formatTemperature, toFixedFloat } from "@/lib/utils"
|
||||||
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
|
import { ChartCard, FilterBar } from "../chart-card"
|
||||||
|
import LineChartDefault from "@/components/charts/line-chart"
|
||||||
|
import { useStore } from "@nanostores/react"
|
||||||
|
import { useRef, useMemo, useState, useEffect } from "react"
|
||||||
|
|
||||||
|
export function BatteryChart({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
maxValues,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
maxValues: boolean
|
||||||
|
}) {
|
||||||
|
const showBatteryChart = chartData.systemStats.at(-1)?.stats.bat
|
||||||
|
|
||||||
|
if (!showBatteryChart) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={t`Battery`}
|
||||||
|
description={`${t({
|
||||||
|
message: "Current state",
|
||||||
|
comment: "Context: Battery state",
|
||||||
|
})}: ${batteryStateTranslations[chartData.systemStats.at(-1)?.stats.bat?.[1] ?? 0]()}`}
|
||||||
|
>
|
||||||
|
<AreaChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
maxToggled={maxValues}
|
||||||
|
dataPoints={[
|
||||||
|
{
|
||||||
|
label: t`Charge`,
|
||||||
|
dataKey: ({ stats }) => stats?.bat?.[0],
|
||||||
|
color: 1,
|
||||||
|
opacity: 0.35,
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
domain={[0, 100]}
|
||||||
|
tickFormatter={(val) => `${val}%`}
|
||||||
|
contentFormatter={({ value }) => `${value}%`}
|
||||||
|
/>
|
||||||
|
</ChartCard>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
export function TemperatureChart({
|
||||||
|
chartData,
|
||||||
|
grid,
|
||||||
|
dataEmpty,
|
||||||
|
setPageBottomExtraMargin,
|
||||||
|
}: {
|
||||||
|
chartData: ChartData
|
||||||
|
grid: boolean
|
||||||
|
dataEmpty: boolean
|
||||||
|
setPageBottomExtraMargin?: (margin: number) => void
|
||||||
|
}) {
|
||||||
|
const showTempChart = chartData.systemStats.at(-1)?.stats.t
|
||||||
|
|
||||||
|
const filter = useStore($temperatureFilter)
|
||||||
|
const userSettings = useStore($userSettings)
|
||||||
|
|
||||||
|
const statsRef = useRef(chartData.systemStats)
|
||||||
|
statsRef.current = chartData.systemStats
|
||||||
|
|
||||||
|
// Derive sensor names key from latest data point
|
||||||
|
let sensorNamesKey = ""
|
||||||
|
for (let i = chartData.systemStats.length - 1; i >= 0; i--) {
|
||||||
|
const t = chartData.systemStats[i].stats?.t
|
||||||
|
if (t) {
|
||||||
|
sensorNamesKey = Object.keys(t).sort().join("\0")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only recompute colors and dataKey functions when sensor names change
|
||||||
|
const { colorMap, dataKeys, sortedKeys } = useMemo(() => {
|
||||||
|
const stats = statsRef.current
|
||||||
|
const tempSums = {} as Record<string, number>
|
||||||
|
for (const data of stats) {
|
||||||
|
const t = data.stats?.t
|
||||||
|
if (!t) continue
|
||||||
|
for (const key of Object.keys(t)) {
|
||||||
|
tempSums[key] = (tempSums[key] ?? 0) + t[key]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
const sorted = Object.keys(tempSums).sort((a, b) => tempSums[b] - tempSums[a])
|
||||||
|
const colorMap = {} as Record<string, string>
|
||||||
|
const dataKeys = {} as Record<string, (d: SystemStatsRecord) => number | undefined>
|
||||||
|
for (let i = 0; i < sorted.length; i++) {
|
||||||
|
const key = sorted[i]
|
||||||
|
colorMap[key] = `hsl(${((i * 360) / sorted.length) % 360}, 60%, 55%)`
|
||||||
|
dataKeys[key] = (d: SystemStatsRecord) => d.stats?.t?.[key]
|
||||||
|
}
|
||||||
|
return { colorMap, dataKeys, sortedKeys: sorted }
|
||||||
|
}, [sensorNamesKey])
|
||||||
|
|
||||||
|
const dataPoints = useMemo(() => {
|
||||||
|
return sortedKeys.map((key) => {
|
||||||
|
const filterTerms = filter
|
||||||
|
? filter
|
||||||
|
.toLowerCase()
|
||||||
|
.split(" ")
|
||||||
|
.filter((term) => term.length > 0)
|
||||||
|
: []
|
||||||
|
const filtered = filterTerms.length > 0 && !filterTerms.some((term) => key.toLowerCase().includes(term))
|
||||||
|
const strokeOpacity = filtered ? 0.1 : 1
|
||||||
|
return {
|
||||||
|
label: key,
|
||||||
|
dataKey: dataKeys[key],
|
||||||
|
color: colorMap[key],
|
||||||
|
opacity: strokeOpacity,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}, [sortedKeys, filter, dataKeys, colorMap])
|
||||||
|
|
||||||
|
// test with lots of data points
|
||||||
|
// const totalPoints = 50
|
||||||
|
// if (dataPoints.length > 0 && dataPoints.length < totalPoints) {
|
||||||
|
// let i = 0
|
||||||
|
// while (dataPoints.length < totalPoints) {
|
||||||
|
// dataPoints.push({
|
||||||
|
// label: `Test ${++i}`,
|
||||||
|
// dataKey: () => 0,
|
||||||
|
// color: "red",
|
||||||
|
// opacity: 1,
|
||||||
|
// })
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
|
||||||
|
const chartRef = useRef<HTMLDivElement>(null)
|
||||||
|
const [addMargin, setAddMargin] = useState(false)
|
||||||
|
const marginPx = (dataPoints.length - 13) * 18
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
if (setPageBottomExtraMargin && dataPoints.length > 13 && chartRef.current) {
|
||||||
|
const checkPosition = () => {
|
||||||
|
if (!chartRef.current) return
|
||||||
|
const rect = chartRef.current.getBoundingClientRect()
|
||||||
|
const actualScrollHeight = addMargin
|
||||||
|
? document.documentElement.scrollHeight - marginPx
|
||||||
|
: document.documentElement.scrollHeight
|
||||||
|
const distanceToBottom = actualScrollHeight - (rect.bottom + window.scrollY)
|
||||||
|
|
||||||
|
if (distanceToBottom < 250) {
|
||||||
|
setAddMargin(true)
|
||||||
|
setPageBottomExtraMargin(marginPx)
|
||||||
|
} else {
|
||||||
|
setAddMargin(false)
|
||||||
|
setPageBottomExtraMargin(0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
checkPosition()
|
||||||
|
const timer = setTimeout(checkPosition, 500)
|
||||||
|
return () => {
|
||||||
|
clearTimeout(timer)
|
||||||
|
}
|
||||||
|
} else if (addMargin) {
|
||||||
|
setAddMargin(false)
|
||||||
|
if (setPageBottomExtraMargin) setPageBottomExtraMargin(0)
|
||||||
|
}
|
||||||
|
}, [dataPoints.length, addMargin, marginPx, setPageBottomExtraMargin])
|
||||||
|
|
||||||
|
if (!showTempChart) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
|
||||||
|
const legend = dataPoints.length < 12
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div ref={chartRef} className={cn("odd:last-of-type:col-span-full", { "col-span-full": !grid })}>
|
||||||
|
<ChartCard
|
||||||
|
empty={dataEmpty}
|
||||||
|
grid={grid}
|
||||||
|
title={t`Temperature`}
|
||||||
|
description={t`Temperatures of system sensors`}
|
||||||
|
cornerEl={<FilterBar store={$temperatureFilter} />}
|
||||||
|
legend={legend}
|
||||||
|
>
|
||||||
|
<LineChartDefault
|
||||||
|
chartData={chartData}
|
||||||
|
itemSorter={(a, b) => b.value - a.value}
|
||||||
|
domain={["auto", "auto"]}
|
||||||
|
legend={legend}
|
||||||
|
tickFormatter={(val) => {
|
||||||
|
const { value, unit } = formatTemperature(val, userSettings.unitTemp)
|
||||||
|
return `${toFixedFloat(value, 2)} ${unit}`
|
||||||
|
}}
|
||||||
|
contentFormatter={(item) => {
|
||||||
|
const { value, unit } = formatTemperature(item.value, userSettings.unitTemp)
|
||||||
|
return `${decimalString(value)} ${unit}`
|
||||||
|
}}
|
||||||
|
dataPoints={dataPoints}
|
||||||
|
></LineChartDefault>
|
||||||
|
</ChartCard>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -1,14 +1,14 @@
|
|||||||
import { t } from "@lingui/core/macro"
|
import { t } from "@lingui/core/macro"
|
||||||
import { MoreHorizontalIcon } from "lucide-react"
|
import { MoreHorizontalIcon } from "lucide-react"
|
||||||
import { memo, useRef, useState } from "react"
|
import { memo, useRef, useState } from "react"
|
||||||
import AreaChartDefault, { DataPoint } from "@/components/charts/area-chart"
|
import AreaChartDefault, { type DataPoint } from "@/components/charts/area-chart"
|
||||||
import ChartTimeSelect from "@/components/charts/chart-time-select"
|
import ChartTimeSelect from "@/components/charts/chart-time-select"
|
||||||
import { Button } from "@/components/ui/button"
|
import { Button } from "@/components/ui/button"
|
||||||
import { Sheet, SheetContent, SheetTrigger } from "@/components/ui/sheet"
|
import { Sheet, SheetContent, SheetTrigger } from "@/components/ui/sheet"
|
||||||
import { DialogTitle } from "@/components/ui/dialog"
|
import { DialogTitle } from "@/components/ui/dialog"
|
||||||
import { compareSemVer, decimalString, parseSemVer, toFixedFloat } from "@/lib/utils"
|
import { compareSemVer, decimalString, parseSemVer, toFixedFloat } from "@/lib/utils"
|
||||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
import { ChartCard } from "../system"
|
import { ChartCard } from "./chart-card"
|
||||||
|
|
||||||
const minAgentVersion = parseSemVer("0.15.3")
|
const minAgentVersion = parseSemVer("0.15.3")
|
||||||
|
|
||||||
@@ -42,41 +42,54 @@ export default memo(function CpuCoresSheet({
|
|||||||
const numCores = cpus.length
|
const numCores = cpus.length
|
||||||
const hasBreakdown = (latest?.cpub?.length ?? 0) > 0
|
const hasBreakdown = (latest?.cpub?.length ?? 0) > 0
|
||||||
|
|
||||||
|
// make sure all individual core data points have the same y axis domain to make relative comparison easier
|
||||||
|
let highestCpuCorePct = 1
|
||||||
|
if (hasOpened.current) {
|
||||||
|
for (let i = 0; i < numCores; i++) {
|
||||||
|
for (let j = 0; j < chartData.systemStats.length; j++) {
|
||||||
|
const pct = chartData.systemStats[j].stats?.cpus?.[i] ?? 0
|
||||||
|
if (pct > highestCpuCorePct) {
|
||||||
|
highestCpuCorePct = pct
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
const breakdownDataPoints = [
|
const breakdownDataPoints = [
|
||||||
{
|
{
|
||||||
label: "System",
|
label: "System",
|
||||||
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpub?.[1],
|
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpub?.[1],
|
||||||
color: 3,
|
color: 3,
|
||||||
opacity: 0.35,
|
opacity: 0.35,
|
||||||
stackId: "a"
|
stackId: "a",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
label: "User",
|
label: "User",
|
||||||
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpub?.[0],
|
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpub?.[0],
|
||||||
color: 1,
|
color: 1,
|
||||||
opacity: 0.35,
|
opacity: 0.35,
|
||||||
stackId: "a"
|
stackId: "a",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
label: "IOWait",
|
label: "IOWait",
|
||||||
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpub?.[2],
|
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpub?.[2],
|
||||||
color: 4,
|
color: 4,
|
||||||
opacity: 0.35,
|
opacity: 0.35,
|
||||||
stackId: "a"
|
stackId: "a",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
label: "Steal",
|
label: "Steal",
|
||||||
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpub?.[3],
|
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpub?.[3],
|
||||||
color: 5,
|
color: 5,
|
||||||
opacity: 0.35,
|
opacity: 0.35,
|
||||||
stackId: "a"
|
stackId: "a",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
label: "Idle",
|
label: "Idle",
|
||||||
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpub?.[4],
|
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpub?.[4],
|
||||||
color: 2,
|
color: 2,
|
||||||
opacity: 0.35,
|
opacity: 0.35,
|
||||||
stackId: "a"
|
stackId: "a",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
label: t`Other`,
|
label: t`Other`,
|
||||||
@@ -86,11 +99,10 @@ export default memo(function CpuCoresSheet({
|
|||||||
},
|
},
|
||||||
color: `hsl(80, 65%, 52%)`,
|
color: `hsl(80, 65%, 52%)`,
|
||||||
opacity: 0.35,
|
opacity: 0.35,
|
||||||
stackId: "a"
|
stackId: "a",
|
||||||
},
|
},
|
||||||
] as DataPoint[]
|
] as DataPoint[]
|
||||||
|
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Sheet open={cpuCoresOpen} onOpenChange={setCpuCoresOpen}>
|
<Sheet open={cpuCoresOpen} onOpenChange={setCpuCoresOpen}>
|
||||||
<DialogTitle className="sr-only">{t`CPU Usage`}</DialogTitle>
|
<DialogTitle className="sr-only">{t`CPU Usage`}</DialogTitle>
|
||||||
@@ -99,7 +111,7 @@ export default memo(function CpuCoresSheet({
|
|||||||
title={t`View more`}
|
title={t`View more`}
|
||||||
variant="outline"
|
variant="outline"
|
||||||
size="icon"
|
size="icon"
|
||||||
className="shrink-0 max-sm:absolute max-sm:top-3 max-sm:end-3"
|
className="shrink-0 max-sm:absolute max-sm:top-0 max-sm:end-0"
|
||||||
>
|
>
|
||||||
<MoreHorizontalIcon />
|
<MoreHorizontalIcon />
|
||||||
</Button>
|
</Button>
|
||||||
@@ -151,7 +163,7 @@ export default memo(function CpuCoresSheet({
|
|||||||
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpus?.[i] ?? 1 / (stats?.cpus?.length ?? 1),
|
dataKey: ({ stats }: SystemStatsRecord) => stats?.cpus?.[i] ?? 1 / (stats?.cpus?.length ?? 1),
|
||||||
color: `hsl(${226 + (((i * 360) / Math.max(1, numCores)) % 360)}, var(--chart-saturation), var(--chart-lightness))`,
|
color: `hsl(${226 + (((i * 360) / Math.max(1, numCores)) % 360)}, var(--chart-saturation), var(--chart-lightness))`,
|
||||||
opacity: 0.35,
|
opacity: 0.35,
|
||||||
stackId: "a"
|
stackId: "a",
|
||||||
}))}
|
}))}
|
||||||
tickFormatter={(val) => `${val}%`}
|
tickFormatter={(val) => `${val}%`}
|
||||||
contentFormatter={({ value }) => `${value}%`}
|
contentFormatter={({ value }) => `${value}%`}
|
||||||
@@ -174,7 +186,7 @@ export default memo(function CpuCoresSheet({
|
|||||||
<AreaChartDefault
|
<AreaChartDefault
|
||||||
chartData={chartData}
|
chartData={chartData}
|
||||||
maxToggled={maxValues}
|
maxToggled={maxValues}
|
||||||
legend={false}
|
domain={[0, highestCpuCorePct]}
|
||||||
dataPoints={[
|
dataPoints={[
|
||||||
{
|
{
|
||||||
label: t`Usage`,
|
label: t`Usage`,
|
||||||
|
|||||||
@@ -1,20 +1,28 @@
|
|||||||
import { plural } from "@lingui/core/macro"
|
import { plural } from "@lingui/core/macro"
|
||||||
import { useLingui } from "@lingui/react/macro"
|
import { Trans, useLingui } from "@lingui/react/macro"
|
||||||
import {
|
import {
|
||||||
AppleIcon,
|
AppleIcon,
|
||||||
ChevronRightSquareIcon,
|
ChevronRightSquareIcon,
|
||||||
ClockArrowUp,
|
ClockArrowUp,
|
||||||
CpuIcon,
|
CpuIcon,
|
||||||
GlobeIcon,
|
GlobeIcon,
|
||||||
LayoutGridIcon,
|
|
||||||
MemoryStickIcon,
|
MemoryStickIcon,
|
||||||
MonitorIcon,
|
MonitorIcon,
|
||||||
Rows,
|
Settings2Icon,
|
||||||
} from "lucide-react"
|
} from "lucide-react"
|
||||||
import { useMemo } from "react"
|
import { useMemo } from "react"
|
||||||
import ChartTimeSelect from "@/components/charts/chart-time-select"
|
import ChartTimeSelect from "@/components/charts/chart-time-select"
|
||||||
import { Button } from "@/components/ui/button"
|
import { Button } from "@/components/ui/button"
|
||||||
import { Card } from "@/components/ui/card"
|
import { Card } from "@/components/ui/card"
|
||||||
|
import {
|
||||||
|
DropdownMenu,
|
||||||
|
DropdownMenuContent,
|
||||||
|
DropdownMenuLabel,
|
||||||
|
DropdownMenuRadioGroup,
|
||||||
|
DropdownMenuRadioItem,
|
||||||
|
DropdownMenuSeparator,
|
||||||
|
DropdownMenuTrigger,
|
||||||
|
} from "@/components/ui/dropdown-menu"
|
||||||
import { FreeBsdIcon, TuxIcon, WebSocketIcon, WindowsIcon } from "@/components/ui/icons"
|
import { FreeBsdIcon, TuxIcon, WebSocketIcon, WindowsIcon } from "@/components/ui/icons"
|
||||||
import { Separator } from "@/components/ui/separator"
|
import { Separator } from "@/components/ui/separator"
|
||||||
import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip"
|
import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip"
|
||||||
@@ -27,12 +35,16 @@ export default function InfoBar({
|
|||||||
chartData,
|
chartData,
|
||||||
grid,
|
grid,
|
||||||
setGrid,
|
setGrid,
|
||||||
|
displayMode,
|
||||||
|
setDisplayMode,
|
||||||
details,
|
details,
|
||||||
}: {
|
}: {
|
||||||
system: SystemRecord
|
system: SystemRecord
|
||||||
chartData: ChartData
|
chartData: ChartData
|
||||||
grid: boolean
|
grid: boolean
|
||||||
setGrid: (grid: boolean) => void
|
setGrid: (grid: boolean) => void
|
||||||
|
displayMode: "default" | "tabs"
|
||||||
|
setDisplayMode: (mode: "default" | "tabs") => void
|
||||||
details: SystemDetailsRecord | null
|
details: SystemDetailsRecord | null
|
||||||
}) {
|
}) {
|
||||||
const { t } = useLingui()
|
const { t } = useLingui()
|
||||||
@@ -123,10 +135,10 @@ export default function InfoBar({
|
|||||||
|
|
||||||
return (
|
return (
|
||||||
<Card>
|
<Card>
|
||||||
<div className="grid xl:flex gap-4 px-4 sm:px-6 pt-3 sm:pt-4 pb-5">
|
<div className="grid xl:flex xl:gap-4 px-4 sm:px-6 pt-3 sm:pt-4 pb-5">
|
||||||
<div>
|
<div className="min-w-0">
|
||||||
<h1 className="text-[1.6rem] font-semibold mb-1.5">{system.name}</h1>
|
<h1 className="text-2xl sm:text-[1.6rem] font-semibold mb-1.5">{system.name}</h1>
|
||||||
<div className="flex flex-wrap items-center gap-3 gap-y-2 text-sm opacity-90">
|
<div className="flex xl:flex-wrap items-center py-4 xl:p-0 -mt-3 xl:mt-1 gap-3 text-sm text-nowrap opacity-90 overflow-x-auto scrollbar-hide -mx-4 px-4 xl:mx-0">
|
||||||
<Tooltip>
|
<Tooltip>
|
||||||
<TooltipTrigger asChild>
|
<TooltipTrigger asChild>
|
||||||
<div className="capitalize flex gap-2 items-center">
|
<div className="capitalize flex gap-2 items-center">
|
||||||
@@ -190,24 +202,53 @@ export default function InfoBar({
|
|||||||
</div>
|
</div>
|
||||||
<div className="xl:ms-auto flex items-center gap-2 max-sm:-mb-1">
|
<div className="xl:ms-auto flex items-center gap-2 max-sm:-mb-1">
|
||||||
<ChartTimeSelect className="w-full xl:w-40" agentVersion={chartData.agentVersion} />
|
<ChartTimeSelect className="w-full xl:w-40" agentVersion={chartData.agentVersion} />
|
||||||
<Tooltip>
|
<DropdownMenu>
|
||||||
<TooltipTrigger asChild>
|
<DropdownMenuTrigger asChild>
|
||||||
<Button
|
<Button
|
||||||
aria-label={t`Toggle grid`}
|
aria-label={t`Settings`}
|
||||||
variant="outline"
|
variant="outline"
|
||||||
size="icon"
|
size="icon"
|
||||||
className="hidden xl:flex p-0 text-primary"
|
className="hidden xl:flex p-0 text-primary"
|
||||||
onClick={() => setGrid(!grid)}
|
|
||||||
>
|
>
|
||||||
{grid ? (
|
<Settings2Icon className="size-4 opacity-90" />
|
||||||
<LayoutGridIcon className="h-[1.2rem] w-[1.2rem] opacity-75" />
|
|
||||||
) : (
|
|
||||||
<Rows className="h-[1.3rem] w-[1.3rem] opacity-75" />
|
|
||||||
)}
|
|
||||||
</Button>
|
</Button>
|
||||||
</TooltipTrigger>
|
</DropdownMenuTrigger>
|
||||||
<TooltipContent>{t`Toggle grid`}</TooltipContent>
|
<DropdownMenuContent align="end" className="min-w-44">
|
||||||
</Tooltip>
|
<DropdownMenuLabel className="px-3.5">
|
||||||
|
<Trans context="Layout display options">Display</Trans>
|
||||||
|
</DropdownMenuLabel>
|
||||||
|
<DropdownMenuSeparator />
|
||||||
|
<DropdownMenuRadioGroup
|
||||||
|
className="px-1 pb-1"
|
||||||
|
value={displayMode}
|
||||||
|
onValueChange={(v) => setDisplayMode(v as "default" | "tabs")}
|
||||||
|
>
|
||||||
|
<DropdownMenuRadioItem value="default" onSelect={(e) => e.preventDefault()}>
|
||||||
|
<Trans context="Default system layout option">Default</Trans>
|
||||||
|
</DropdownMenuRadioItem>
|
||||||
|
<DropdownMenuRadioItem value="tabs" onSelect={(e) => e.preventDefault()}>
|
||||||
|
<Trans context="Tabs system layout option">Tabs</Trans>
|
||||||
|
</DropdownMenuRadioItem>
|
||||||
|
</DropdownMenuRadioGroup>
|
||||||
|
<DropdownMenuSeparator />
|
||||||
|
<DropdownMenuLabel className="px-3.5">
|
||||||
|
<Trans>Chart width</Trans>
|
||||||
|
</DropdownMenuLabel>
|
||||||
|
<DropdownMenuSeparator />
|
||||||
|
<DropdownMenuRadioGroup
|
||||||
|
className="px-1 pb-1"
|
||||||
|
value={grid ? "grid" : "full"}
|
||||||
|
onValueChange={(v) => setGrid(v === "grid")}
|
||||||
|
>
|
||||||
|
<DropdownMenuRadioItem value="grid" onSelect={(e) => e.preventDefault()}>
|
||||||
|
<Trans>Grid</Trans>
|
||||||
|
</DropdownMenuRadioItem>
|
||||||
|
<DropdownMenuRadioItem value="full" onSelect={(e) => e.preventDefault()}>
|
||||||
|
<Trans>Full</Trans>
|
||||||
|
</DropdownMenuRadioItem>
|
||||||
|
</DropdownMenuRadioGroup>
|
||||||
|
</DropdownMenuContent>
|
||||||
|
</DropdownMenu>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</Card>
|
</Card>
|
||||||
|
|||||||
36
internal/site/src/components/routes/system/lazy-tables.tsx
Normal file
36
internal/site/src/components/routes/system/lazy-tables.tsx
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
import { lazy } from "react"
|
||||||
|
import { useIntersectionObserver } from "@/lib/use-intersection-observer"
|
||||||
|
import { cn } from "@/lib/utils"
|
||||||
|
|
||||||
|
const ContainersTable = lazy(() => import("../../containers-table/containers-table"))
|
||||||
|
|
||||||
|
export function LazyContainersTable({ systemId }: { systemId: string }) {
|
||||||
|
const { isIntersecting, ref } = useIntersectionObserver({ rootMargin: "90px" })
|
||||||
|
return (
|
||||||
|
<div ref={ref} className={cn(isIntersecting && "contents")}>
|
||||||
|
{isIntersecting && <ContainersTable systemId={systemId} />}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
const SmartTable = lazy(() => import("./smart-table"))
|
||||||
|
|
||||||
|
export function LazySmartTable({ systemId }: { systemId: string }) {
|
||||||
|
const { isIntersecting, ref } = useIntersectionObserver({ rootMargin: "90px" })
|
||||||
|
return (
|
||||||
|
<div ref={ref} className={cn(isIntersecting && "contents")}>
|
||||||
|
{isIntersecting && <SmartTable systemId={systemId} />}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
const SystemdTable = lazy(() => import("../../systemd-table/systemd-table"))
|
||||||
|
|
||||||
|
export function LazySystemdTable({ systemId }: { systemId: string }) {
|
||||||
|
const { isIntersecting, ref } = useIntersectionObserver()
|
||||||
|
return (
|
||||||
|
<div ref={ref} className={cn(isIntersecting && "contents")}>
|
||||||
|
{isIntersecting && <SystemdTable systemId={systemId} />}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -11,7 +11,7 @@ import { DialogTitle } from "@/components/ui/dialog"
|
|||||||
import { $userSettings } from "@/lib/stores"
|
import { $userSettings } from "@/lib/stores"
|
||||||
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
import { decimalString, formatBytes, toFixedFloat } from "@/lib/utils"
|
||||||
import type { ChartData } from "@/types"
|
import type { ChartData } from "@/types"
|
||||||
import { ChartCard } from "../system"
|
import { ChartCard } from "./chart-card"
|
||||||
|
|
||||||
export default memo(function NetworkSheet({
|
export default memo(function NetworkSheet({
|
||||||
chartData,
|
chartData,
|
||||||
@@ -46,7 +46,7 @@ export default memo(function NetworkSheet({
|
|||||||
title={t`View more`}
|
title={t`View more`}
|
||||||
variant="outline"
|
variant="outline"
|
||||||
size="icon"
|
size="icon"
|
||||||
className="shrink-0 max-sm:absolute max-sm:top-3 max-sm:end-3"
|
className="shrink-0 max-sm:absolute max-sm:top-0 max-sm:end-0"
|
||||||
>
|
>
|
||||||
<MoreHorizontalIcon />
|
<MoreHorizontalIcon />
|
||||||
</Button>
|
</Button>
|
||||||
|
|||||||
@@ -3,13 +3,16 @@ import {
|
|||||||
type ColumnDef,
|
type ColumnDef,
|
||||||
type ColumnFiltersState,
|
type ColumnFiltersState,
|
||||||
type Column,
|
type Column,
|
||||||
|
type Row,
|
||||||
type SortingState,
|
type SortingState,
|
||||||
|
type Table as TableType,
|
||||||
flexRender,
|
flexRender,
|
||||||
getCoreRowModel,
|
getCoreRowModel,
|
||||||
getFilteredRowModel,
|
getFilteredRowModel,
|
||||||
getSortedRowModel,
|
getSortedRowModel,
|
||||||
useReactTable,
|
useReactTable,
|
||||||
} from "@tanstack/react-table"
|
} from "@tanstack/react-table"
|
||||||
|
import { useVirtualizer, type VirtualItem } from "@tanstack/react-virtual"
|
||||||
import {
|
import {
|
||||||
Activity,
|
Activity,
|
||||||
Box,
|
Box,
|
||||||
@@ -40,6 +43,7 @@ import {
|
|||||||
toFixedFloat,
|
toFixedFloat,
|
||||||
formatTemperature,
|
formatTemperature,
|
||||||
cn,
|
cn,
|
||||||
|
getVisualStringWidth,
|
||||||
secondsToString,
|
secondsToString,
|
||||||
hourWithSeconds,
|
hourWithSeconds,
|
||||||
formatShortDate,
|
formatShortDate,
|
||||||
@@ -57,7 +61,7 @@ import {
|
|||||||
DropdownMenuSeparator,
|
DropdownMenuSeparator,
|
||||||
DropdownMenuTrigger,
|
DropdownMenuTrigger,
|
||||||
} from "@/components/ui/dropdown-menu"
|
} from "@/components/ui/dropdown-menu"
|
||||||
import { useCallback, useMemo, useEffect, useState } from "react"
|
import { memo, useCallback, useMemo, useEffect, useRef, useState } from "react"
|
||||||
import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip"
|
import { Tooltip, TooltipContent, TooltipTrigger } from "@/components/ui/tooltip"
|
||||||
|
|
||||||
// Column definition for S.M.A.R.T. attributes table
|
// Column definition for S.M.A.R.T. attributes table
|
||||||
@@ -101,7 +105,11 @@ function formatCapacity(bytes: number): string {
|
|||||||
|
|
||||||
const SMART_DEVICE_FIELDS = "id,system,name,model,state,capacity,temp,type,hours,cycles,updated"
|
const SMART_DEVICE_FIELDS = "id,system,name,model,state,capacity,temp,type,hours,cycles,updated"
|
||||||
|
|
||||||
export const columns: ColumnDef<SmartDeviceRecord>[] = [
|
export const createColumns = (
|
||||||
|
longestName: number,
|
||||||
|
longestModel: number,
|
||||||
|
longestDevice: number
|
||||||
|
): ColumnDef<SmartDeviceRecord>[] => [
|
||||||
{
|
{
|
||||||
id: "system",
|
id: "system",
|
||||||
accessorFn: (record) => record.system,
|
accessorFn: (record) => record.system,
|
||||||
@@ -114,7 +122,11 @@ export const columns: ColumnDef<SmartDeviceRecord>[] = [
|
|||||||
header: ({ column }) => <HeaderButton column={column} name={t`System`} Icon={ServerIcon} />,
|
header: ({ column }) => <HeaderButton column={column} name={t`System`} Icon={ServerIcon} />,
|
||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
const allSystems = useStore($allSystemsById)
|
const allSystems = useStore($allSystemsById)
|
||||||
return <span className="ms-1.5 xl:w-30 block truncate">{allSystems[getValue() as string]?.name ?? ""}</span>
|
return (
|
||||||
|
<div className="ms-1.5 max-w-40 block truncate" style={{ width: `${longestName / 1.05}ch` }}>
|
||||||
|
{allSystems[getValue() as string]?.name ?? ""}
|
||||||
|
</div>
|
||||||
|
)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -122,7 +134,11 @@ export const columns: ColumnDef<SmartDeviceRecord>[] = [
|
|||||||
sortingFn: (a, b) => a.original.name.localeCompare(b.original.name),
|
sortingFn: (a, b) => a.original.name.localeCompare(b.original.name),
|
||||||
header: ({ column }) => <HeaderButton column={column} name={t`Device`} Icon={HardDrive} />,
|
header: ({ column }) => <HeaderButton column={column} name={t`Device`} Icon={HardDrive} />,
|
||||||
cell: ({ getValue }) => (
|
cell: ({ getValue }) => (
|
||||||
<div className="font-medium max-w-40 truncate ms-1.5" title={getValue() as string}>
|
<div
|
||||||
|
className="font-medium max-w-40 truncate ms-1"
|
||||||
|
title={getValue() as string}
|
||||||
|
style={{ width: `${longestDevice / 1.05}ch` }}
|
||||||
|
>
|
||||||
{getValue() as string}
|
{getValue() as string}
|
||||||
</div>
|
</div>
|
||||||
),
|
),
|
||||||
@@ -130,9 +146,15 @@ export const columns: ColumnDef<SmartDeviceRecord>[] = [
|
|||||||
{
|
{
|
||||||
accessorKey: "model",
|
accessorKey: "model",
|
||||||
sortingFn: (a, b) => a.original.model.localeCompare(b.original.model),
|
sortingFn: (a, b) => a.original.model.localeCompare(b.original.model),
|
||||||
header: ({ column }) => <HeaderButton column={column} name={t`Model`} Icon={Box} />,
|
header: ({ column }) => (
|
||||||
|
<HeaderButton column={column} name={t({ message: "Model", comment: "Device model" })} Icon={Box} />
|
||||||
|
),
|
||||||
cell: ({ getValue }) => (
|
cell: ({ getValue }) => (
|
||||||
<div className="max-w-48 truncate ms-1.5" title={getValue() as string}>
|
<div
|
||||||
|
className="max-w-48 truncate ms-1"
|
||||||
|
title={getValue() as string}
|
||||||
|
style={{ width: `${longestModel / 1.05}ch` }}
|
||||||
|
>
|
||||||
{getValue() as string}
|
{getValue() as string}
|
||||||
</div>
|
</div>
|
||||||
),
|
),
|
||||||
@@ -141,7 +163,7 @@ export const columns: ColumnDef<SmartDeviceRecord>[] = [
|
|||||||
accessorKey: "capacity",
|
accessorKey: "capacity",
|
||||||
invertSorting: true,
|
invertSorting: true,
|
||||||
header: ({ column }) => <HeaderButton column={column} name={t`Capacity`} Icon={BinaryIcon} />,
|
header: ({ column }) => <HeaderButton column={column} name={t`Capacity`} Icon={BinaryIcon} />,
|
||||||
cell: ({ getValue }) => <span className="ms-1.5">{formatCapacity(getValue() as number)}</span>,
|
cell: ({ getValue }) => <span className="ms-1">{formatCapacity(getValue() as number)}</span>,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
accessorKey: "state",
|
accessorKey: "state",
|
||||||
@@ -149,9 +171,9 @@ export const columns: ColumnDef<SmartDeviceRecord>[] = [
|
|||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
const status = getValue() as string
|
const status = getValue() as string
|
||||||
return (
|
return (
|
||||||
<div className="ms-1.5">
|
<Badge className="ms-1" variant={status === "PASSED" ? "success" : status === "FAILED" ? "danger" : "warning"}>
|
||||||
<Badge variant={status === "PASSED" ? "success" : status === "FAILED" ? "danger" : "warning"}>{status}</Badge>
|
{status}
|
||||||
</div>
|
</Badge>
|
||||||
)
|
)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -160,11 +182,9 @@ export const columns: ColumnDef<SmartDeviceRecord>[] = [
|
|||||||
sortingFn: (a, b) => a.original.type.localeCompare(b.original.type),
|
sortingFn: (a, b) => a.original.type.localeCompare(b.original.type),
|
||||||
header: ({ column }) => <HeaderButton column={column} name={t`Type`} Icon={ArrowLeftRightIcon} />,
|
header: ({ column }) => <HeaderButton column={column} name={t`Type`} Icon={ArrowLeftRightIcon} />,
|
||||||
cell: ({ getValue }) => (
|
cell: ({ getValue }) => (
|
||||||
<div className="ms-1.5">
|
<Badge variant="outline" className="ms-1 uppercase">
|
||||||
<Badge variant="outline" className="uppercase">
|
{getValue() as string}
|
||||||
{getValue() as string}
|
</Badge>
|
||||||
</Badge>
|
|
||||||
</div>
|
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -176,11 +196,11 @@ export const columns: ColumnDef<SmartDeviceRecord>[] = [
|
|||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
const hours = getValue() as number | undefined
|
const hours = getValue() as number | undefined
|
||||||
if (hours == null) {
|
if (hours == null) {
|
||||||
return <div className="text-sm text-muted-foreground ms-1.5">N/A</div>
|
return <div className="text-sm text-muted-foreground ms-1">N/A</div>
|
||||||
}
|
}
|
||||||
const seconds = hours * 3600
|
const seconds = hours * 3600
|
||||||
return (
|
return (
|
||||||
<div className="text-sm ms-1.5">
|
<div className="text-sm ms-1">
|
||||||
<div>{secondsToString(seconds, "hour")}</div>
|
<div>{secondsToString(seconds, "hour")}</div>
|
||||||
<div className="text-muted-foreground text-xs">{secondsToString(seconds, "day")}</div>
|
<div className="text-muted-foreground text-xs">{secondsToString(seconds, "day")}</div>
|
||||||
</div>
|
</div>
|
||||||
@@ -196,9 +216,9 @@ export const columns: ColumnDef<SmartDeviceRecord>[] = [
|
|||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
const cycles = getValue() as number | undefined
|
const cycles = getValue() as number | undefined
|
||||||
if (cycles == null) {
|
if (cycles == null) {
|
||||||
return <div className="text-muted-foreground ms-1.5">N/A</div>
|
return <div className="text-muted-foreground ms-1">N/A</div>
|
||||||
}
|
}
|
||||||
return <span className="ms-1.5">{cycles.toLocaleString()}</span>
|
return <span className="ms-1">{cycles.toLocaleString()}</span>
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -208,10 +228,10 @@ export const columns: ColumnDef<SmartDeviceRecord>[] = [
|
|||||||
cell: ({ getValue }) => {
|
cell: ({ getValue }) => {
|
||||||
const temp = getValue() as number | null | undefined
|
const temp = getValue() as number | null | undefined
|
||||||
if (!temp) {
|
if (!temp) {
|
||||||
return <div className="text-muted-foreground ms-1.5">N/A</div>
|
return <div className="text-muted-foreground ms-1">N/A</div>
|
||||||
}
|
}
|
||||||
const { value, unit } = formatTemperature(temp)
|
const { value, unit } = formatTemperature(temp)
|
||||||
return <span className="ms-1.5">{`${value} ${unit}`}</span>
|
return <span className="ms-1">{`${value} ${unit}`}</span>
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
// {
|
// {
|
||||||
@@ -236,7 +256,7 @@ export const columns: ColumnDef<SmartDeviceRecord>[] = [
|
|||||||
// if today, use hourWithSeconds, otherwise use formatShortDate
|
// if today, use hourWithSeconds, otherwise use formatShortDate
|
||||||
const formatter =
|
const formatter =
|
||||||
new Date(timestamp).toDateString() === new Date().toDateString() ? hourWithSeconds : formatShortDate
|
new Date(timestamp).toDateString() === new Date().toDateString() ? hourWithSeconds : formatShortDate
|
||||||
return <span className="ms-1.5 tabular-nums">{formatter(timestamp)}</span>
|
return <span className="ms-1 tabular-nums">{formatter(timestamp)}</span>
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
]
|
]
|
||||||
@@ -275,6 +295,36 @@ export default function DisksTable({ systemId }: { systemId?: string }) {
|
|||||||
const [sheetOpen, setSheetOpen] = useState(false)
|
const [sheetOpen, setSheetOpen] = useState(false)
|
||||||
const [rowActionState, setRowActionState] = useState<{ type: "refresh" | "delete"; id: string } | null>(null)
|
const [rowActionState, setRowActionState] = useState<{ type: "refresh" | "delete"; id: string } | null>(null)
|
||||||
const [globalFilter, setGlobalFilter] = useState("")
|
const [globalFilter, setGlobalFilter] = useState("")
|
||||||
|
const allSystems = useStore($allSystemsById)
|
||||||
|
|
||||||
|
// duplicate the devices to test with more rows
|
||||||
|
// if (
|
||||||
|
// smartDevices?.length &&
|
||||||
|
// smartDevices.length < 50 &&
|
||||||
|
// typeof window !== "undefined" &&
|
||||||
|
// window.location.hostname === "localhost"
|
||||||
|
// ) {
|
||||||
|
// setSmartDevices([...smartDevices, ...smartDevices, ...smartDevices])
|
||||||
|
// }
|
||||||
|
|
||||||
|
// Calculate the right width for the columns based on the longest strings among the displayed devices
|
||||||
|
const { longestName, longestModel, longestDevice } = useMemo(() => {
|
||||||
|
const result = { longestName: 0, longestModel: 0, longestDevice: 0 }
|
||||||
|
if (!smartDevices || Object.keys(allSystems).length === 0) {
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
const seenSystems = new Set<string>()
|
||||||
|
for (const device of smartDevices) {
|
||||||
|
if (!systemId && !seenSystems.has(device.system)) {
|
||||||
|
seenSystems.add(device.system)
|
||||||
|
const name = allSystems[device.system]?.name ?? ""
|
||||||
|
result.longestName = Math.max(result.longestName, getVisualStringWidth(name))
|
||||||
|
}
|
||||||
|
result.longestModel = Math.max(result.longestModel, getVisualStringWidth(device.model ?? ""))
|
||||||
|
result.longestDevice = Math.max(result.longestDevice, getVisualStringWidth(device.name ?? ""))
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}, [smartDevices, systemId, allSystems])
|
||||||
|
|
||||||
const openSheet = (disk: SmartDeviceRecord) => {
|
const openSheet = (disk: SmartDeviceRecord) => {
|
||||||
setActiveDiskId(disk.id)
|
setActiveDiskId(disk.id)
|
||||||
@@ -440,9 +490,10 @@ export default function DisksTable({ systemId }: { systemId?: string }) {
|
|||||||
|
|
||||||
// Filter columns based on whether systemId is provided
|
// Filter columns based on whether systemId is provided
|
||||||
const tableColumns = useMemo(() => {
|
const tableColumns = useMemo(() => {
|
||||||
|
const columns = createColumns(longestName, longestModel, longestDevice)
|
||||||
const baseColumns = systemId ? columns.filter((col) => col.id !== "system") : columns
|
const baseColumns = systemId ? columns.filter((col) => col.id !== "system") : columns
|
||||||
return [...baseColumns, actionColumn]
|
return [...baseColumns, actionColumn]
|
||||||
}, [systemId, actionColumn])
|
}, [systemId, actionColumn, longestName, longestModel, longestDevice])
|
||||||
|
|
||||||
const table = useReactTable({
|
const table = useReactTable({
|
||||||
data: smartDevices || ([] as SmartDeviceRecord[]),
|
data: smartDevices || ([] as SmartDeviceRecord[]),
|
||||||
@@ -474,6 +525,7 @@ export default function DisksTable({ systemId }: { systemId?: string }) {
|
|||||||
.every((term) => searchString.includes(term))
|
.every((term) => searchString.includes(term))
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
const rows = table.getRowModel().rows
|
||||||
|
|
||||||
// Hide the table on system pages if there's no data, but always show on global page
|
// Hide the table on system pages if there's no data, but always show on global page
|
||||||
if (systemId && !smartDevices?.length && !columnFilters.length) {
|
if (systemId && !smartDevices?.length && !columnFilters.length) {
|
||||||
@@ -482,9 +534,9 @@ export default function DisksTable({ systemId }: { systemId?: string }) {
|
|||||||
|
|
||||||
return (
|
return (
|
||||||
<div>
|
<div>
|
||||||
<Card className="p-6 @container w-full">
|
<Card className="@container w-full px-3 py-5 sm:py-6 sm:px-6">
|
||||||
<CardHeader className="p-0 mb-4">
|
<CardHeader className="p-0 mb-3 sm:mb-4">
|
||||||
<div className="grid md:flex gap-5 w-full items-end">
|
<div className="grid md:flex gap-x-5 gap-y-3 w-full items-end">
|
||||||
<div className="px-2 sm:px-1">
|
<div className="px-2 sm:px-1">
|
||||||
<CardTitle className="mb-2">S.M.A.R.T.</CardTitle>
|
<CardTitle className="mb-2">S.M.A.R.T.</CardTitle>
|
||||||
<CardDescription className="flex">
|
<CardDescription className="flex">
|
||||||
@@ -513,57 +565,124 @@ export default function DisksTable({ systemId }: { systemId?: string }) {
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</CardHeader>
|
</CardHeader>
|
||||||
<div className="rounded-md border text-nowrap">
|
<SmartDevicesTable
|
||||||
<Table>
|
table={table}
|
||||||
<TableHeader>
|
rows={rows}
|
||||||
{table.getHeaderGroups().map((headerGroup) => (
|
colLength={tableColumns.length}
|
||||||
<TableRow key={headerGroup.id}>
|
data={smartDevices}
|
||||||
{headerGroup.headers.map((header) => {
|
openSheet={openSheet}
|
||||||
return (
|
/>
|
||||||
<TableHead key={header.id} className="px-2">
|
|
||||||
{header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())}
|
|
||||||
</TableHead>
|
|
||||||
)
|
|
||||||
})}
|
|
||||||
</TableRow>
|
|
||||||
))}
|
|
||||||
</TableHeader>
|
|
||||||
<TableBody>
|
|
||||||
{table.getRowModel().rows?.length ? (
|
|
||||||
table.getRowModel().rows.map((row) => (
|
|
||||||
<TableRow
|
|
||||||
key={row.id}
|
|
||||||
data-state={row.getIsSelected() && "selected"}
|
|
||||||
className="cursor-pointer"
|
|
||||||
onClick={() => openSheet(row.original)}
|
|
||||||
>
|
|
||||||
{row.getVisibleCells().map((cell) => (
|
|
||||||
<TableCell key={cell.id} className="md:ps-5">
|
|
||||||
{flexRender(cell.column.columnDef.cell, cell.getContext())}
|
|
||||||
</TableCell>
|
|
||||||
))}
|
|
||||||
</TableRow>
|
|
||||||
))
|
|
||||||
) : (
|
|
||||||
<TableRow>
|
|
||||||
<TableCell colSpan={tableColumns.length} className="h-24 text-center">
|
|
||||||
{smartDevices ? (
|
|
||||||
t`No results.`
|
|
||||||
) : (
|
|
||||||
<LoaderCircleIcon className="animate-spin size-10 opacity-60 mx-auto" />
|
|
||||||
)}
|
|
||||||
</TableCell>
|
|
||||||
</TableRow>
|
|
||||||
)}
|
|
||||||
</TableBody>
|
|
||||||
</Table>
|
|
||||||
</div>
|
|
||||||
</Card>
|
</Card>
|
||||||
<DiskSheet diskId={activeDiskId} open={sheetOpen} onOpenChange={setSheetOpen} />
|
<DiskSheet diskId={activeDiskId} open={sheetOpen} onOpenChange={setSheetOpen} />
|
||||||
</div>
|
</div>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const SmartDevicesTable = memo(function SmartDevicesTable({
|
||||||
|
table,
|
||||||
|
rows,
|
||||||
|
colLength,
|
||||||
|
data,
|
||||||
|
openSheet,
|
||||||
|
}: {
|
||||||
|
table: TableType<SmartDeviceRecord>
|
||||||
|
rows: Row<SmartDeviceRecord>[]
|
||||||
|
colLength: number
|
||||||
|
data: SmartDeviceRecord[] | undefined
|
||||||
|
openSheet: (disk: SmartDeviceRecord) => void
|
||||||
|
}) {
|
||||||
|
const scrollRef = useRef<HTMLDivElement>(null)
|
||||||
|
|
||||||
|
const virtualizer = useVirtualizer<HTMLDivElement, HTMLTableRowElement>({
|
||||||
|
count: rows.length,
|
||||||
|
estimateSize: () => 65,
|
||||||
|
getScrollElement: () => scrollRef.current,
|
||||||
|
overscan: 5,
|
||||||
|
})
|
||||||
|
const virtualRows = virtualizer.getVirtualItems()
|
||||||
|
|
||||||
|
const paddingTop = Math.max(0, virtualRows[0]?.start ?? 0 - virtualizer.options.scrollMargin)
|
||||||
|
const paddingBottom = Math.max(0, virtualizer.getTotalSize() - (virtualRows[virtualRows.length - 1]?.end ?? 0))
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
className={cn(
|
||||||
|
"h-min max-h-[calc(100dvh-17rem)] max-w-full relative overflow-auto rounded-md border",
|
||||||
|
(!rows.length || rows.length > 2) && "min-h-50"
|
||||||
|
)}
|
||||||
|
ref={scrollRef}
|
||||||
|
>
|
||||||
|
<div style={{ height: `${virtualizer.getTotalSize() + 48}px`, paddingTop, paddingBottom }}>
|
||||||
|
<table className="w-full text-sm text-nowrap">
|
||||||
|
<SmartTableHead table={table} />
|
||||||
|
<TableBody>
|
||||||
|
{rows.length ? (
|
||||||
|
virtualRows.map((virtualRow) => {
|
||||||
|
const row = rows[virtualRow.index]
|
||||||
|
return <SmartDeviceTableRow key={row.id} row={row} virtualRow={virtualRow} openSheet={openSheet} />
|
||||||
|
})
|
||||||
|
) : (
|
||||||
|
<TableCell colSpan={colLength} className="h-37 text-center pointer-events-none">
|
||||||
|
{data ? (
|
||||||
|
<Trans>No results.</Trans>
|
||||||
|
) : (
|
||||||
|
<LoaderCircleIcon className="animate-spin size-10 opacity-60 mx-auto" />
|
||||||
|
)}
|
||||||
|
</TableCell>
|
||||||
|
)}
|
||||||
|
</TableBody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)
|
||||||
|
})
|
||||||
|
|
||||||
|
function SmartTableHead({ table }: { table: TableType<SmartDeviceRecord> }) {
|
||||||
|
return (
|
||||||
|
<TableHeader className="sticky top-0 z-50 w-full border-b-2">
|
||||||
|
{table.getHeaderGroups().map((headerGroup) => (
|
||||||
|
<TableRow key={headerGroup.id}>
|
||||||
|
{headerGroup.headers.map((header) => (
|
||||||
|
<TableHead key={header.id} className="px-2">
|
||||||
|
{header.isPlaceholder ? null : flexRender(header.column.columnDef.header, header.getContext())}
|
||||||
|
</TableHead>
|
||||||
|
))}
|
||||||
|
</TableRow>
|
||||||
|
))}
|
||||||
|
</TableHeader>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
const SmartDeviceTableRow = memo(function SmartDeviceTableRow({
|
||||||
|
row,
|
||||||
|
virtualRow,
|
||||||
|
openSheet,
|
||||||
|
}: {
|
||||||
|
row: Row<SmartDeviceRecord>
|
||||||
|
virtualRow: VirtualItem
|
||||||
|
openSheet: (disk: SmartDeviceRecord) => void
|
||||||
|
}) {
|
||||||
|
return (
|
||||||
|
<TableRow
|
||||||
|
data-state={row.getIsSelected() && "selected"}
|
||||||
|
className="cursor-pointer"
|
||||||
|
onClick={() => openSheet(row.original)}
|
||||||
|
>
|
||||||
|
{row.getVisibleCells().map((cell) => (
|
||||||
|
<TableCell
|
||||||
|
key={cell.id}
|
||||||
|
className="md:ps-5 py-0"
|
||||||
|
style={{
|
||||||
|
height: virtualRow.size,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{flexRender(cell.column.columnDef.cell, cell.getContext())}
|
||||||
|
</TableCell>
|
||||||
|
))}
|
||||||
|
</TableRow>
|
||||||
|
)
|
||||||
|
})
|
||||||
|
|
||||||
function DiskSheet({
|
function DiskSheet({
|
||||||
diskId,
|
diskId,
|
||||||
open,
|
open,
|
||||||
|
|||||||
344
internal/site/src/components/routes/system/use-system-data.ts
Normal file
344
internal/site/src/components/routes/system/use-system-data.ts
Normal file
@@ -0,0 +1,344 @@
|
|||||||
|
import { useStore } from "@nanostores/react"
|
||||||
|
import { getPagePath } from "@nanostores/router"
|
||||||
|
import { subscribeKeys } from "nanostores"
|
||||||
|
import { useEffect, useMemo, useRef, useState } from "react"
|
||||||
|
import { useContainerChartConfigs } from "@/components/charts/hooks"
|
||||||
|
import { pb } from "@/lib/api"
|
||||||
|
import { SystemStatus } from "@/lib/enums"
|
||||||
|
import {
|
||||||
|
$allSystemsById,
|
||||||
|
$allSystemsByName,
|
||||||
|
$chartTime,
|
||||||
|
$containerFilter,
|
||||||
|
$direction,
|
||||||
|
$maxValues,
|
||||||
|
$systems,
|
||||||
|
$userSettings,
|
||||||
|
} from "@/lib/stores"
|
||||||
|
import { chartTimeData, listen, parseSemVer, useBrowserStorage } from "@/lib/utils"
|
||||||
|
import type {
|
||||||
|
ChartData,
|
||||||
|
ContainerStatsRecord,
|
||||||
|
SystemDetailsRecord,
|
||||||
|
SystemInfo,
|
||||||
|
SystemRecord,
|
||||||
|
SystemStats,
|
||||||
|
SystemStatsRecord,
|
||||||
|
} from "@/types"
|
||||||
|
import { $router, navigate } from "../../router"
|
||||||
|
import { appendData, cache, getStats, getTimeData, makeContainerData, makeContainerPoint } from "./chart-data"
|
||||||
|
|
||||||
|
export function useSystemData(id: string) {
|
||||||
|
const direction = useStore($direction)
|
||||||
|
const systems = useStore($systems)
|
||||||
|
const chartTime = useStore($chartTime)
|
||||||
|
const maxValues = useStore($maxValues)
|
||||||
|
const [grid, setGrid] = useBrowserStorage("grid", true)
|
||||||
|
const [displayMode, setDisplayMode] = useBrowserStorage<"default" | "tabs">("displayMode", "default")
|
||||||
|
const [activeTab, setActiveTabRaw] = useState("core")
|
||||||
|
const [mountedTabs, setMountedTabs] = useState(() => new Set<string>(["core"]))
|
||||||
|
const tabsRef = useRef<string[]>(["core", "disk"])
|
||||||
|
|
||||||
|
function setActiveTab(tab: string) {
|
||||||
|
setActiveTabRaw(tab)
|
||||||
|
setMountedTabs((prev) => (prev.has(tab) ? prev : new Set([...prev, tab])))
|
||||||
|
}
|
||||||
|
const [system, setSystem] = useState({} as SystemRecord)
|
||||||
|
const [systemStats, setSystemStats] = useState([] as SystemStatsRecord[])
|
||||||
|
const [containerData, setContainerData] = useState([] as ChartData["containerData"])
|
||||||
|
const persistChartTime = useRef(false)
|
||||||
|
const statsRequestId = useRef(0)
|
||||||
|
const [chartLoading, setChartLoading] = useState(true)
|
||||||
|
const [details, setDetails] = useState<SystemDetailsRecord>({} as SystemDetailsRecord)
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
return () => {
|
||||||
|
if (!persistChartTime.current) {
|
||||||
|
$chartTime.set($userSettings.get().chartTime)
|
||||||
|
}
|
||||||
|
persistChartTime.current = false
|
||||||
|
setSystemStats([])
|
||||||
|
setContainerData([])
|
||||||
|
setDetails({} as SystemDetailsRecord)
|
||||||
|
$containerFilter.set("")
|
||||||
|
}
|
||||||
|
}, [id])
|
||||||
|
|
||||||
|
// find matching system and update when it changes
|
||||||
|
useEffect(() => {
|
||||||
|
if (!systems.length) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// allow old system-name slug to work
|
||||||
|
const store = $allSystemsById.get()[id] ? $allSystemsById : $allSystemsByName
|
||||||
|
return subscribeKeys(store, [id], (newSystems) => {
|
||||||
|
const sys = newSystems[id]
|
||||||
|
if (sys) {
|
||||||
|
setSystem(sys)
|
||||||
|
document.title = `${sys?.name} / Beszel`
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}, [id, systems.length])
|
||||||
|
|
||||||
|
// hide 1m chart time if system agent version is less than 0.13.0
|
||||||
|
useEffect(() => {
|
||||||
|
if (parseSemVer(system?.info?.v) < parseSemVer("0.13.0")) {
|
||||||
|
$chartTime.set("1h")
|
||||||
|
}
|
||||||
|
}, [system?.info?.v])
|
||||||
|
|
||||||
|
// fetch system details
|
||||||
|
useEffect(() => {
|
||||||
|
// if system.info.m exists, agent is old version without system details
|
||||||
|
if (!system.id || system.info?.m) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pb.collection<SystemDetailsRecord>("system_details")
|
||||||
|
.getOne(system.id, {
|
||||||
|
fields: "hostname,kernel,cores,threads,cpu,os,os_name,arch,memory,podman",
|
||||||
|
headers: {
|
||||||
|
"Cache-Control": "public, max-age=60",
|
||||||
|
},
|
||||||
|
})
|
||||||
|
.then(setDetails)
|
||||||
|
}, [system.id])
|
||||||
|
|
||||||
|
// subscribe to realtime metrics if chart time is 1m
|
||||||
|
useEffect(() => {
|
||||||
|
let unsub = () => {}
|
||||||
|
if (!system.id || chartTime !== "1m") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if (system.status !== SystemStatus.Up || parseSemVer(system?.info?.v).minor < 13) {
|
||||||
|
$chartTime.set("1h")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
let isFirst = true
|
||||||
|
pb.realtime
|
||||||
|
.subscribe(
|
||||||
|
`rt_metrics`,
|
||||||
|
(data: { container: ContainerStatsRecord[]; info: SystemInfo; stats: SystemStats }) => {
|
||||||
|
const now = Date.now()
|
||||||
|
const statsPoint = { created: now, stats: data.stats } as SystemStatsRecord
|
||||||
|
const containerPoint =
|
||||||
|
data.container?.length > 0
|
||||||
|
? makeContainerPoint(now, data.container as unknown as ContainerStatsRecord["stats"])
|
||||||
|
: null
|
||||||
|
// on first message, make sure we clear out data from other time periods
|
||||||
|
if (isFirst) {
|
||||||
|
isFirst = false
|
||||||
|
setSystemStats([statsPoint])
|
||||||
|
setContainerData(containerPoint ? [containerPoint] : [])
|
||||||
|
return
|
||||||
|
}
|
||||||
|
setSystemStats((prev) => appendData(prev, [statsPoint], 1000, 60))
|
||||||
|
if (containerPoint) {
|
||||||
|
setContainerData((prev) => appendData(prev, [containerPoint], 1000, 60))
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{ query: { system: system.id } }
|
||||||
|
)
|
||||||
|
.then((us) => {
|
||||||
|
unsub = us
|
||||||
|
})
|
||||||
|
return () => {
|
||||||
|
unsub?.()
|
||||||
|
}
|
||||||
|
}, [chartTime, system.id])
|
||||||
|
|
||||||
|
const agentVersion = useMemo(() => parseSemVer(system?.info?.v), [system?.info?.v])
|
||||||
|
|
||||||
|
const chartData: ChartData = useMemo(() => {
|
||||||
|
const lastCreated = Math.max(
|
||||||
|
(systemStats.at(-1)?.created as number) ?? 0,
|
||||||
|
(containerData.at(-1)?.created as number) ?? 0
|
||||||
|
)
|
||||||
|
return {
|
||||||
|
systemStats,
|
||||||
|
containerData,
|
||||||
|
chartTime,
|
||||||
|
orientation: direction === "rtl" ? "right" : "left",
|
||||||
|
...getTimeData(chartTime, lastCreated),
|
||||||
|
agentVersion,
|
||||||
|
}
|
||||||
|
}, [systemStats, containerData, direction])
|
||||||
|
|
||||||
|
// Share chart config computation for all container charts
|
||||||
|
const containerChartConfigs = useContainerChartConfigs(containerData)
|
||||||
|
|
||||||
|
// get stats when system "changes." (Not just system to system,
|
||||||
|
// also when new info comes in via systemManager realtime connection, indicating an update)
|
||||||
|
useEffect(() => {
|
||||||
|
if (!system.id || !chartTime || chartTime === "1m") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
const systemId = system.id
|
||||||
|
const { expectedInterval } = chartTimeData[chartTime]
|
||||||
|
const ss_cache_key = `${systemId}_${chartTime}_system_stats`
|
||||||
|
const cs_cache_key = `${systemId}_${chartTime}_container_stats`
|
||||||
|
const requestId = ++statsRequestId.current
|
||||||
|
|
||||||
|
const cachedSystemStats = cache.get(ss_cache_key) as SystemStatsRecord[] | undefined
|
||||||
|
const cachedContainerData = cache.get(cs_cache_key) as ChartData["containerData"] | undefined
|
||||||
|
|
||||||
|
// Render from cache immediately if available
|
||||||
|
if (cachedSystemStats?.length) {
|
||||||
|
setSystemStats(cachedSystemStats)
|
||||||
|
setContainerData(cachedContainerData || [])
|
||||||
|
setChartLoading(false)
|
||||||
|
|
||||||
|
// Skip the fetch if the latest cached point is recent enough that no new point is expected yet
|
||||||
|
const lastCreated = cachedSystemStats.at(-1)?.created as number | undefined
|
||||||
|
if (lastCreated && Date.now() - lastCreated < expectedInterval) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
setChartLoading(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
Promise.allSettled([
|
||||||
|
getStats<SystemStatsRecord>("system_stats", systemId, chartTime),
|
||||||
|
getStats<ContainerStatsRecord>("container_stats", systemId, chartTime),
|
||||||
|
]).then(([systemStats, containerStats]) => {
|
||||||
|
// If another request has been made since this one, ignore the results
|
||||||
|
if (requestId !== statsRequestId.current) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
setChartLoading(false)
|
||||||
|
|
||||||
|
// make new system stats
|
||||||
|
let systemData = (cache.get(ss_cache_key) || []) as SystemStatsRecord[]
|
||||||
|
if (systemStats.status === "fulfilled" && systemStats.value.length) {
|
||||||
|
systemData = appendData(systemData, systemStats.value, expectedInterval, 100)
|
||||||
|
cache.set(ss_cache_key, systemData)
|
||||||
|
}
|
||||||
|
setSystemStats(systemData)
|
||||||
|
// make new container stats
|
||||||
|
let containerData = (cache.get(cs_cache_key) || []) as ChartData["containerData"]
|
||||||
|
if (containerStats.status === "fulfilled" && containerStats.value.length) {
|
||||||
|
containerData = appendData(containerData, makeContainerData(containerStats.value), expectedInterval, 100)
|
||||||
|
cache.set(cs_cache_key, containerData)
|
||||||
|
}
|
||||||
|
setContainerData(containerData)
|
||||||
|
})
|
||||||
|
}, [system, chartTime])
|
||||||
|
|
||||||
|
// keyboard navigation between systems
|
||||||
|
// in tabs mode: arrow keys switch tabs, shift+arrow switches systems
|
||||||
|
// in default mode: arrow keys switch systems
|
||||||
|
useEffect(() => {
|
||||||
|
if (!systems.length) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
const handleKeyUp = (e: KeyboardEvent) => {
|
||||||
|
if (
|
||||||
|
e.target instanceof HTMLInputElement ||
|
||||||
|
e.target instanceof HTMLTextAreaElement ||
|
||||||
|
e.ctrlKey ||
|
||||||
|
e.metaKey ||
|
||||||
|
e.altKey
|
||||||
|
) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
const isLeft = e.key === "ArrowLeft" || e.key === "h"
|
||||||
|
const isRight = e.key === "ArrowRight" || e.key === "l"
|
||||||
|
if (!isLeft && !isRight) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// in tabs mode, plain arrows switch tabs, shift+arrows switch systems
|
||||||
|
if (displayMode === "tabs") {
|
||||||
|
if (!e.shiftKey) {
|
||||||
|
// skip if focused in tablist (Radix handles it natively)
|
||||||
|
if (e.target instanceof HTMLElement && e.target.closest('[role="tablist"]')) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
const tabs = tabsRef.current
|
||||||
|
const currentIdx = tabs.indexOf(activeTab)
|
||||||
|
const nextIdx = isLeft ? (currentIdx - 1 + tabs.length) % tabs.length : (currentIdx + 1) % tabs.length
|
||||||
|
setActiveTab(tabs[nextIdx])
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else if (e.shiftKey) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
const currentIndex = systems.findIndex((s) => s.id === id)
|
||||||
|
if (currentIndex === -1 || systems.length <= 1) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if (isLeft) {
|
||||||
|
const prevIndex = (currentIndex - 1 + systems.length) % systems.length
|
||||||
|
persistChartTime.current = true
|
||||||
|
setActiveTabRaw("core")
|
||||||
|
setMountedTabs(new Set(["core"]))
|
||||||
|
return navigate(getPagePath($router, "system", { id: systems[prevIndex].id }))
|
||||||
|
}
|
||||||
|
if (isRight) {
|
||||||
|
const nextIndex = (currentIndex + 1) % systems.length
|
||||||
|
persistChartTime.current = true
|
||||||
|
setActiveTabRaw("core")
|
||||||
|
setMountedTabs(new Set(["core"]))
|
||||||
|
return navigate(getPagePath($router, "system", { id: systems[nextIndex].id }))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return listen(document, "keyup", handleKeyUp)
|
||||||
|
}, [id, systems, displayMode, activeTab])
|
||||||
|
|
||||||
|
// derived values
|
||||||
|
const isLongerChart = !["1m", "1h"].includes(chartTime)
|
||||||
|
const showMax = maxValues && isLongerChart
|
||||||
|
const dataEmpty = !chartLoading && chartData.systemStats.length === 0
|
||||||
|
const lastGpus = systemStats.at(-1)?.stats?.g
|
||||||
|
const isPodman = details?.podman ?? system.info?.p ?? false
|
||||||
|
|
||||||
|
let hasGpuData = false
|
||||||
|
let hasGpuEnginesData = false
|
||||||
|
let hasGpuPowerData = false
|
||||||
|
|
||||||
|
if (lastGpus) {
|
||||||
|
hasGpuData = Object.keys(lastGpus).length > 0
|
||||||
|
for (let i = 0; i < systemStats.length && (!hasGpuEnginesData || !hasGpuPowerData); i++) {
|
||||||
|
const gpus = systemStats[i].stats?.g
|
||||||
|
if (!gpus) continue
|
||||||
|
for (const id in gpus) {
|
||||||
|
if (!hasGpuEnginesData && gpus[id].e !== undefined) {
|
||||||
|
hasGpuEnginesData = true
|
||||||
|
}
|
||||||
|
if (!hasGpuPowerData && (gpus[id].p !== undefined || gpus[id].pp !== undefined)) {
|
||||||
|
hasGpuPowerData = true
|
||||||
|
}
|
||||||
|
if (hasGpuEnginesData && hasGpuPowerData) break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
system,
|
||||||
|
systemStats,
|
||||||
|
containerData,
|
||||||
|
chartData,
|
||||||
|
containerChartConfigs,
|
||||||
|
details,
|
||||||
|
grid,
|
||||||
|
setGrid,
|
||||||
|
displayMode,
|
||||||
|
setDisplayMode,
|
||||||
|
activeTab,
|
||||||
|
setActiveTab,
|
||||||
|
mountedTabs,
|
||||||
|
tabsRef,
|
||||||
|
maxValues,
|
||||||
|
isLongerChart,
|
||||||
|
showMax,
|
||||||
|
dataEmpty,
|
||||||
|
isPodman,
|
||||||
|
lastGpus,
|
||||||
|
hasGpuData,
|
||||||
|
hasGpuEnginesData,
|
||||||
|
hasGpuPowerData,
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -46,7 +46,6 @@ export default function SystemdTable({ systemId }: { systemId?: string }) {
|
|||||||
return setData([])
|
return setData([])
|
||||||
}, [systemId])
|
}, [systemId])
|
||||||
|
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
const lastUpdated = data[0]?.updated ?? 0
|
const lastUpdated = data[0]?.updated ?? 0
|
||||||
|
|
||||||
@@ -155,9 +154,9 @@ export default function SystemdTable({ systemId }: { systemId?: string }) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Card className="p-6 @container w-full">
|
<Card className="@container w-full px-3 py-5 sm:py-6 sm:px-6">
|
||||||
<CardHeader className="p-0 mb-4">
|
<CardHeader className="p-0 mb-3 sm:mb-4">
|
||||||
<div className="grid md:flex gap-5 w-full items-end">
|
<div className="grid md:flex gap-x-5 gap-y-3 w-full items-end">
|
||||||
<div className="px-2 sm:px-1">
|
<div className="px-2 sm:px-1">
|
||||||
<CardTitle className="mb-2">
|
<CardTitle className="mb-2">
|
||||||
<Trans>Systemd Services</Trans>
|
<Trans>Systemd Services</Trans>
|
||||||
@@ -360,15 +359,9 @@ function SystemdSheet({
|
|||||||
return (
|
return (
|
||||||
<>
|
<>
|
||||||
{hasCurrent ? current : notAvailable}
|
{hasCurrent ? current : notAvailable}
|
||||||
{hasMax && (
|
{hasMax && <span className="text-muted-foreground ms-1.5">{`(${t`limit`}: ${max})`}</span>}
|
||||||
<span className="text-muted-foreground ms-1.5">
|
|
||||||
{`(${t`limit`}: ${max})`}
|
|
||||||
</span>
|
|
||||||
)}
|
|
||||||
{max === null && (
|
{max === null && (
|
||||||
<span className="text-muted-foreground ms-1.5">
|
<span className="text-muted-foreground ms-1.5">{`(${t`limit`}: ${t`Unlimited`.toLowerCase()})`}</span>
|
||||||
{`(${t`limit`}: ${t`Unlimited`.toLowerCase()})`}
|
|
||||||
</span>
|
|
||||||
)}
|
)}
|
||||||
</>
|
</>
|
||||||
)
|
)
|
||||||
@@ -435,7 +428,7 @@ function SystemdSheet({
|
|||||||
</tr>
|
</tr>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
const capitalize = (str: string) => `${str.charAt(0).toUpperCase()}${str.slice(1).toLowerCase()}`
|
const capitalize = (str: string) => `${str.charAt(0).toUpperCase()}${str.slice(1).toLowerCase()}`
|
||||||
|
|
||||||
return (
|
return (
|
||||||
|
|||||||
@@ -184,7 +184,8 @@ export function SystemsTableColumns(viewMode: "table" | "grid"): ColumnDef<Syste
|
|||||||
accessorFn: ({ info }) => info.dp || undefined,
|
accessorFn: ({ info }) => info.dp || undefined,
|
||||||
id: "disk",
|
id: "disk",
|
||||||
name: () => t`Disk`,
|
name: () => t`Disk`,
|
||||||
cell: DiskCellWithMultiple,
|
cell: (info: CellContext<SystemRecord, unknown>) =>
|
||||||
|
info.row.original.info.efs ? DiskCellWithMultiple(info) : TableCellWithMeter(info),
|
||||||
Icon: HardDriveIcon,
|
Icon: HardDriveIcon,
|
||||||
header: sortableHeader,
|
header: sortableHeader,
|
||||||
},
|
},
|
||||||
@@ -198,32 +199,19 @@ export function SystemsTableColumns(viewMode: "table" | "grid"): ColumnDef<Syste
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
id: "loadAverage",
|
id: "loadAverage",
|
||||||
accessorFn: ({ info }) => {
|
accessorFn: ({ info }) => info.la?.reduce((acc, curr) => acc + curr, 0),
|
||||||
const sum = info.la?.reduce((acc, curr) => acc + curr, 0)
|
|
||||||
// TODO: remove this in future release in favor of la array
|
|
||||||
if (!sum) {
|
|
||||||
return (info.l1 ?? 0) + (info.l5 ?? 0) + (info.l15 ?? 0) || undefined
|
|
||||||
}
|
|
||||||
return sum || undefined
|
|
||||||
},
|
|
||||||
name: () => t({ message: "Load Avg", comment: "Short label for load average" }),
|
name: () => t({ message: "Load Avg", comment: "Short label for load average" }),
|
||||||
size: 0,
|
size: 0,
|
||||||
Icon: HourglassIcon,
|
Icon: HourglassIcon,
|
||||||
header: sortableHeader,
|
header: sortableHeader,
|
||||||
cell(info: CellContext<SystemRecord, unknown>) {
|
cell(info: CellContext<SystemRecord, unknown>) {
|
||||||
const { info: sysInfo, status } = info.row.original
|
const { info: sysInfo, status } = info.row.original
|
||||||
|
const { major, minor } = parseSemVer(sysInfo.v)
|
||||||
const { colorWarn = 65, colorCrit = 90 } = useStore($userSettings, { keys: ["colorWarn", "colorCrit"] })
|
const { colorWarn = 65, colorCrit = 90 } = useStore($userSettings, { keys: ["colorWarn", "colorCrit"] })
|
||||||
// agent version
|
const loadAverages = sysInfo.la || []
|
||||||
const { minor, patch } = parseSemVer(sysInfo.v)
|
|
||||||
let loadAverages = sysInfo.la
|
|
||||||
|
|
||||||
// use legacy load averages if agent version is less than 12.1.0
|
|
||||||
if (!loadAverages || (minor === 12 && patch < 1)) {
|
|
||||||
loadAverages = [sysInfo.l1 ?? 0, sysInfo.l5 ?? 0, sysInfo.l15 ?? 0]
|
|
||||||
}
|
|
||||||
|
|
||||||
const max = Math.max(...loadAverages)
|
const max = Math.max(...loadAverages)
|
||||||
if (max === 0 && (status === SystemStatus.Paused || minor < 12)) {
|
if (max === 0 && (status === SystemStatus.Paused || (major < 1 && minor < 13))) {
|
||||||
return null
|
return null
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -248,19 +236,20 @@ export function SystemsTableColumns(viewMode: "table" | "grid"): ColumnDef<Syste
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
accessorFn: ({ info }) => info.bb || (info.b || 0) * 1024 * 1024 || undefined,
|
accessorFn: ({ info, status }) => (status !== SystemStatus.Up ? undefined : info.bb),
|
||||||
id: "net",
|
id: "net",
|
||||||
name: () => t`Net`,
|
name: () => t`Net`,
|
||||||
size: 0,
|
size: 0,
|
||||||
Icon: EthernetIcon,
|
Icon: EthernetIcon,
|
||||||
header: sortableHeader,
|
header: sortableHeader,
|
||||||
|
sortUndefined: "last",
|
||||||
cell(info) {
|
cell(info) {
|
||||||
const sys = info.row.original
|
const val = info.getValue() as number | undefined
|
||||||
const userSettings = useStore($userSettings, { keys: ["unitNet"] })
|
if (val === undefined) {
|
||||||
if (sys.status === SystemStatus.Paused) {
|
|
||||||
return null
|
return null
|
||||||
}
|
}
|
||||||
const { value, unit } = formatBytes((info.getValue() || 0) as number, true, userSettings.unitNet, false)
|
const userSettings = useStore($userSettings, { keys: ["unitNet"] })
|
||||||
|
const { value, unit } = formatBytes(val, true, userSettings.unitNet, false)
|
||||||
return (
|
return (
|
||||||
<span className="tabular-nums whitespace-nowrap">
|
<span className="tabular-nums whitespace-nowrap">
|
||||||
{decimalString(value, value >= 100 ? 1 : 2)} {unit}
|
{decimalString(value, value >= 100 ? 1 : 2)} {unit}
|
||||||
@@ -491,11 +480,6 @@ function DiskCellWithMultiple(info: CellContext<SystemRecord, unknown>) {
|
|||||||
const { colorWarn = 65, colorCrit = 90 } = useStore($userSettings, { keys: ["colorWarn", "colorCrit"] })
|
const { colorWarn = 65, colorCrit = 90 } = useStore($userSettings, { keys: ["colorWarn", "colorCrit"] })
|
||||||
const { info: sysInfo, status, id } = info.row.original
|
const { info: sysInfo, status, id } = info.row.original
|
||||||
const extraFs = Object.entries(sysInfo.efs ?? {})
|
const extraFs = Object.entries(sysInfo.efs ?? {})
|
||||||
|
|
||||||
if (extraFs.length === 0) {
|
|
||||||
return TableCellWithMeter(info)
|
|
||||||
}
|
|
||||||
|
|
||||||
const rootDiskPct = sysInfo.dp
|
const rootDiskPct = sysInfo.dp
|
||||||
|
|
||||||
// sort extra disks by percentage descending
|
// sort extra disks by percentage descending
|
||||||
|
|||||||
@@ -134,8 +134,8 @@ export default function SystemsTable() {
|
|||||||
|
|
||||||
const CardHead = useMemo(() => {
|
const CardHead = useMemo(() => {
|
||||||
return (
|
return (
|
||||||
<CardHeader className="pb-4.5 px-2 sm:px-6 max-sm:pt-5 max-sm:pb-1">
|
<CardHeader className="p-0 mb-3 sm:mb-4">
|
||||||
<div className="grid md:flex gap-5 w-full items-end">
|
<div className="grid md:flex gap-x-5 gap-y-3 w-full items-end">
|
||||||
<div className="px-2 sm:px-1">
|
<div className="px-2 sm:px-1">
|
||||||
<CardTitle className="mb-2">
|
<CardTitle className="mb-2">
|
||||||
<Trans>All Systems</Trans>
|
<Trans>All Systems</Trans>
|
||||||
@@ -302,29 +302,27 @@ export default function SystemsTable() {
|
|||||||
])
|
])
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Card>
|
<Card className="w-full px-3 py-5 sm:py-6 sm:px-6">
|
||||||
{CardHead}
|
{CardHead}
|
||||||
<div className="p-6 pt-0 max-sm:py-3 max-sm:px-2">
|
{viewMode === "table" ? (
|
||||||
{viewMode === "table" ? (
|
// table layout
|
||||||
// table layout
|
<div className="rounded-md">
|
||||||
<div className="rounded-md">
|
<AllSystemsTable table={table} rows={rows} colLength={visibleColumns.length} />
|
||||||
<AllSystemsTable table={table} rows={rows} colLength={visibleColumns.length} />
|
</div>
|
||||||
</div>
|
) : (
|
||||||
) : (
|
// grid layout
|
||||||
// grid layout
|
<div className="grid gap-4 grid-cols-1 sm:grid-cols-2 lg:grid-cols-3">
|
||||||
<div className="grid gap-4 grid-cols-1 sm:grid-cols-2 lg:grid-cols-3">
|
{rows?.length ? (
|
||||||
{rows?.length ? (
|
rows.map((row) => {
|
||||||
rows.map((row) => {
|
return <SystemCard key={row.original.id} row={row} table={table} colLength={visibleColumns.length} />
|
||||||
return <SystemCard key={row.original.id} row={row} table={table} colLength={visibleColumns.length} />
|
})
|
||||||
})
|
) : (
|
||||||
) : (
|
<div className="col-span-full text-center py-8">
|
||||||
<div className="col-span-full text-center py-8">
|
<Trans>No systems found.</Trans>
|
||||||
<Trans>No systems found.</Trans>
|
</div>
|
||||||
</div>
|
)}
|
||||||
)}
|
</div>
|
||||||
</div>
|
)}
|
||||||
)}
|
|
||||||
</div>
|
|
||||||
</Card>
|
</Card>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -18,7 +18,11 @@ CardHeader.displayName = "CardHeader"
|
|||||||
|
|
||||||
const CardTitle = React.forwardRef<HTMLParagraphElement, React.HTMLAttributes<HTMLHeadingElement>>(
|
const CardTitle = React.forwardRef<HTMLParagraphElement, React.HTMLAttributes<HTMLHeadingElement>>(
|
||||||
({ className, ...props }, ref) => (
|
({ className, ...props }, ref) => (
|
||||||
<h3 ref={ref} className={cn("text-2xl font-semibold leading-none tracking-tight", className)} {...props} />
|
<h3
|
||||||
|
ref={ref}
|
||||||
|
className={cn("text-[1.4em] sm:text-2xl font-semibold leading-none tracking-tight", className)}
|
||||||
|
{...props}
|
||||||
|
/>
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
CardTitle.displayName = "CardTitle"
|
CardTitle.displayName = "CardTitle"
|
||||||
|
|||||||
@@ -25,7 +25,7 @@ const DropdownMenuSubTrigger = React.forwardRef<
|
|||||||
<DropdownMenuPrimitive.SubTrigger
|
<DropdownMenuPrimitive.SubTrigger
|
||||||
ref={ref}
|
ref={ref}
|
||||||
className={cn(
|
className={cn(
|
||||||
"flex select-none items-center rounded-sm px-2.5 py-1.5 text-sm outline-hidden focus:bg-accent/70 data-[state=open]:bg-accent/70",
|
"flex select-none items-center rounded-sm px-2.5 py-1.5 text-[.95em] outline-hidden focus:bg-accent/70 data-[state=open]:bg-accent/70",
|
||||||
inset && "ps-8",
|
inset && "ps-8",
|
||||||
className
|
className
|
||||||
)}
|
)}
|
||||||
@@ -79,7 +79,7 @@ const DropdownMenuItem = React.forwardRef<
|
|||||||
<DropdownMenuPrimitive.Item
|
<DropdownMenuPrimitive.Item
|
||||||
ref={ref}
|
ref={ref}
|
||||||
className={cn(
|
className={cn(
|
||||||
"cursor-pointer relative flex select-none items-center rounded-sm px-2.5 py-1.5 text-sm outline-hidden focus:bg-accent/70 focus:text-accent-foreground data-disabled:pointer-events-none data-disabled:opacity-50",
|
"cursor-pointer relative flex select-none items-center rounded-sm px-2.5 py-1.5 text-[.95em] outline-hidden focus:bg-accent/70 focus:text-accent-foreground data-disabled:pointer-events-none data-disabled:opacity-50",
|
||||||
inset && "ps-8",
|
inset && "ps-8",
|
||||||
className
|
className
|
||||||
)}
|
)}
|
||||||
@@ -95,7 +95,7 @@ const DropdownMenuCheckboxItem = React.forwardRef<
|
|||||||
<DropdownMenuPrimitive.CheckboxItem
|
<DropdownMenuPrimitive.CheckboxItem
|
||||||
ref={ref}
|
ref={ref}
|
||||||
className={cn(
|
className={cn(
|
||||||
"relative flex cursor-pointer select-none items-center rounded-sm py-1.5 ps-8 pe-2.5 text-sm outline-hidden focus:bg-accent/70 focus:text-accent-foreground data-disabled:pointer-events-none data-disabled:opacity-50",
|
"relative flex cursor-pointer select-none items-center rounded-sm py-1.5 ps-8 pe-2.5 text-[.95em] outline-hidden focus:bg-accent/70 focus:text-accent-foreground data-disabled:pointer-events-none data-disabled:opacity-50",
|
||||||
className
|
className
|
||||||
)}
|
)}
|
||||||
checked={checked}
|
checked={checked}
|
||||||
@@ -118,7 +118,7 @@ const DropdownMenuRadioItem = React.forwardRef<
|
|||||||
<DropdownMenuPrimitive.RadioItem
|
<DropdownMenuPrimitive.RadioItem
|
||||||
ref={ref}
|
ref={ref}
|
||||||
className={cn(
|
className={cn(
|
||||||
"relative flex cursor-pointer select-none items-center rounded-sm py-1.5 ps-8 pe-2.5 text-sm outline-hidden focus:bg-accent/70 focus:text-accent-foreground data-disabled:pointer-events-none data-disabled:opacity-50",
|
"relative flex cursor-pointer select-none items-center rounded-sm py-1.5 ps-8 pe-2.5 text-[.95em] outline-hidden focus:bg-accent/70 focus:text-accent-foreground data-disabled:pointer-events-none data-disabled:opacity-50",
|
||||||
className
|
className
|
||||||
)}
|
)}
|
||||||
{...props}
|
{...props}
|
||||||
@@ -141,7 +141,7 @@ const DropdownMenuLabel = React.forwardRef<
|
|||||||
>(({ className, inset, ...props }, ref) => (
|
>(({ className, inset, ...props }, ref) => (
|
||||||
<DropdownMenuPrimitive.Label
|
<DropdownMenuPrimitive.Label
|
||||||
ref={ref}
|
ref={ref}
|
||||||
className={cn("px-2.5 py-1.5 text-sm font-semibold", inset && "ps-8", className)}
|
className={cn("px-2.5 py-1.5 text-[.95em] font-semibold", inset && "ps-8", className)}
|
||||||
{...props}
|
{...props}
|
||||||
/>
|
/>
|
||||||
))
|
))
|
||||||
|
|||||||
@@ -185,3 +185,14 @@ export function PlugChargingIcon(props: SVGProps<SVGSVGElement>) {
|
|||||||
</svg>
|
</svg>
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Lucide Icons (ISC) - used for ports
|
||||||
|
export function SquareArrowRightEnterIcon(props: SVGProps<SVGSVGElement>) {
|
||||||
|
return (
|
||||||
|
<svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2" strokeLinecap="round" {...props}>
|
||||||
|
<path d="m10 16 4-4-4-4" />
|
||||||
|
<path d="M3 12h11" />
|
||||||
|
<path d="M3 8V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2v14a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2v-3" />
|
||||||
|
</svg>
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ const TabsTrigger = React.forwardRef<
|
|||||||
<TabsPrimitive.Trigger
|
<TabsPrimitive.Trigger
|
||||||
ref={ref}
|
ref={ref}
|
||||||
className={cn(
|
className={cn(
|
||||||
"inline-flex items-center justify-center whitespace-nowrap rounded-sm px-3 py-1.5 text-sm font-medium ring-offset-background transition-all focus-visible:outline-hidden focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 data-[state=active]:bg-background data-[state=active]:text-foreground data-[state=active]:shadow-xs cursor-pointer",
|
"inline-flex items-center justify-center whitespace-nowrap rounded-sm px-3 py-1.5 text-sm font-medium ring-offset-background transition-all focus-visible:outline-hidden focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 data-[state=active]:bg-background data-[state=active]:text-foreground data-[state=active]:shadow-xs cursor-pointer hover:text-foreground",
|
||||||
className
|
className
|
||||||
)}
|
)}
|
||||||
{...props}
|
{...props}
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user