mirror of
https://github.com/henrygd/beszel.git
synced 2026-03-23 14:06:18 +01:00
Compare commits
156 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4bfe9dd5ad | ||
|
|
e159a75b79 | ||
|
|
a69686125e | ||
|
|
3eb025ded2 | ||
|
|
1d0e646094 | ||
|
|
32c8e047e3 | ||
|
|
3650482b09 | ||
|
|
79adfd2c0d | ||
|
|
779dcc62aa | ||
|
|
abe39c1a0a | ||
|
|
bd41ad813c | ||
|
|
77fe63fb63 | ||
|
|
f61ba202d8 | ||
|
|
e1067fa1a3 | ||
|
|
0a3eb898ae | ||
|
|
6c33e9dc93 | ||
|
|
f8ed6ce705 | ||
|
|
f64478b75e | ||
|
|
854a3697d7 | ||
|
|
b7915b9d0e | ||
|
|
4443b606f6 | ||
|
|
6c20a98651 | ||
|
|
b722ccc5bc | ||
|
|
db0315394b | ||
|
|
a7ef1235f4 | ||
|
|
f64a361c60 | ||
|
|
aaa788bc2f | ||
|
|
3eede6bead | ||
|
|
02abfbcb54 | ||
|
|
01d20562f0 | ||
|
|
cbe6ee6499 | ||
|
|
9a61ea8356 | ||
|
|
1af7ff600f | ||
|
|
02d594cc82 | ||
|
|
7d0b5c1c67 | ||
|
|
d3dc8a7af0 | ||
|
|
d67fefe7c5 | ||
|
|
4d364c5e4d | ||
|
|
954400ea45 | ||
|
|
04b6067e64 | ||
|
|
d77ee5554f | ||
|
|
2e034bdead | ||
|
|
fc0947aa04 | ||
|
|
1d546a4091 | ||
|
|
f60b3bbbfb | ||
|
|
8e99b9f1ad | ||
|
|
fa5ed2bc11 | ||
|
|
21d961ab97 | ||
|
|
aaa93b84d2 | ||
|
|
6a562ce03b | ||
|
|
3dbc48727e | ||
|
|
85ac2e5e9a | ||
|
|
af6bd4e505 | ||
|
|
e54c4b3499 | ||
|
|
078c88f825 | ||
|
|
85169b6c5e | ||
|
|
d0ff8ee2c0 | ||
|
|
e898768997 | ||
|
|
0f5b504f23 | ||
|
|
365d291393 | ||
|
|
3dbab24c0f | ||
|
|
1f67fb7c8d | ||
|
|
219e09fc78 | ||
|
|
cd9c2bd9ab | ||
|
|
9f969d843c | ||
|
|
b22a6472fc | ||
|
|
d231ace28e | ||
|
|
473cb7f437 | ||
|
|
783ed9f456 | ||
|
|
9a9a89ee50 | ||
|
|
5122d0341d | ||
|
|
81731689da | ||
|
|
b3e9857448 | ||
|
|
2eda9eb0e3 | ||
|
|
82a5df5048 | ||
|
|
f11564a7ac | ||
|
|
9df4d29236 | ||
|
|
1452817423 | ||
|
|
c57e496f5e | ||
|
|
6287f7003c | ||
|
|
37037b1f4e | ||
|
|
7cf123a99e | ||
|
|
97394e775f | ||
|
|
d5c381188b | ||
|
|
b107d12a62 | ||
|
|
e646f2c1fc | ||
|
|
b18528d24a | ||
|
|
a6e64df399 | ||
|
|
66ba21dd41 | ||
|
|
1851e7a111 | ||
|
|
74b78e96b3 | ||
|
|
a9657f9c00 | ||
|
|
1dee63a0eb | ||
|
|
d608cf0955 | ||
|
|
b9139a1f9b | ||
|
|
7f372c46db | ||
|
|
40010ad9b9 | ||
|
|
5927f45a4a | ||
|
|
962613df7c | ||
|
|
92b1f236e3 | ||
|
|
a911670a2d | ||
|
|
b0cb0c2269 | ||
|
|
735d03577f | ||
|
|
a33f88d822 | ||
|
|
dfd1fc8fda | ||
|
|
1df08801a2 | ||
|
|
62f5f986bb | ||
|
|
a87b9af9d5 | ||
|
|
03900e54cc | ||
|
|
f4abbd1a5b | ||
|
|
77ed90cb4a | ||
|
|
2fe3b1adb1 | ||
|
|
f56093d0f0 | ||
|
|
77dba42f17 | ||
|
|
e233a0b0dc | ||
|
|
18e4c88875 | ||
|
|
904a6038cd | ||
|
|
ae55b86493 | ||
|
|
5360f762e4 | ||
|
|
0d464787f2 | ||
|
|
24f72ef596 | ||
|
|
2d8739052b | ||
|
|
1e32d13650 | ||
|
|
dbf3f94247 | ||
|
|
8a81c7bbac | ||
|
|
d24150c78b | ||
|
|
013da18789 | ||
|
|
5b663621e4 | ||
|
|
4056345216 | ||
|
|
d00c0488c3 | ||
|
|
d352ce00fa | ||
|
|
1623f5e751 | ||
|
|
612ad1238f | ||
|
|
1ad4409609 | ||
|
|
2a94e1d1ec | ||
|
|
75b372437c | ||
|
|
b661d00159 | ||
|
|
898dbf73c8 | ||
|
|
e099304948 | ||
|
|
b61b7a12dc | ||
|
|
37769050e5 | ||
|
|
d81e137291 | ||
|
|
ae820d348e | ||
|
|
ddb298ac7c | ||
|
|
cca7b36039 | ||
|
|
adda381d9d | ||
|
|
1630b1558f | ||
|
|
733c10ff31 | ||
|
|
ed3fd185d3 | ||
|
|
b1fd7e6695 | ||
|
|
7d6230de74 | ||
|
|
f9a39c6004 | ||
|
|
f21a6d15fe | ||
|
|
bf38716095 | ||
|
|
45816e7de6 | ||
|
|
2a6946906e |
125
.github/workflows/docker-images.yml
vendored
125
.github/workflows/docker-images.yml
vendored
@@ -10,67 +10,141 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
|
max-parallel: 5
|
||||||
matrix:
|
matrix:
|
||||||
include:
|
include:
|
||||||
|
# henrygd/beszel
|
||||||
- image: henrygd/beszel
|
- image: henrygd/beszel
|
||||||
context: ./
|
|
||||||
dockerfile: ./internal/dockerfile_hub
|
dockerfile: ./internal/dockerfile_hub
|
||||||
registry: docker.io
|
registry: docker.io
|
||||||
username_secret: DOCKERHUB_USERNAME
|
username_secret: DOCKERHUB_USERNAME
|
||||||
password_secret: DOCKERHUB_TOKEN
|
password_secret: DOCKERHUB_TOKEN
|
||||||
|
tags: |
|
||||||
- image: henrygd/beszel-agent
|
type=raw,value=edge
|
||||||
context: ./
|
type=semver,pattern={{version}}
|
||||||
dockerfile: ./internal/dockerfile_agent
|
type=semver,pattern={{major}}.{{minor}}
|
||||||
registry: docker.io
|
type=semver,pattern={{major}}
|
||||||
username_secret: DOCKERHUB_USERNAME
|
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
|
||||||
password_secret: DOCKERHUB_TOKEN
|
|
||||||
|
|
||||||
- image: henrygd/beszel-agent-nvidia
|
|
||||||
context: ./
|
|
||||||
dockerfile: ./internal/dockerfile_agent_nvidia
|
|
||||||
platforms: linux/amd64
|
|
||||||
registry: docker.io
|
|
||||||
username_secret: DOCKERHUB_USERNAME
|
|
||||||
password_secret: DOCKERHUB_TOKEN
|
|
||||||
|
|
||||||
|
# henrygd/beszel-agent:alpine
|
||||||
|
- image: henrygd/beszel-agent
|
||||||
|
dockerfile: ./internal/dockerfile_agent_alpine
|
||||||
|
registry: docker.io
|
||||||
|
username_secret: DOCKERHUB_USERNAME
|
||||||
|
password_secret: DOCKERHUB_TOKEN
|
||||||
|
tags: |
|
||||||
|
type=raw,value=alpine
|
||||||
|
type=semver,pattern={{version}}-alpine
|
||||||
|
type=semver,pattern={{major}}.{{minor}}-alpine
|
||||||
|
type=semver,pattern={{major}}-alpine
|
||||||
|
|
||||||
|
# henrygd/beszel-agent-nvidia
|
||||||
|
- image: henrygd/beszel-agent-nvidia
|
||||||
|
dockerfile: ./internal/dockerfile_agent_nvidia
|
||||||
|
platforms: linux/amd64
|
||||||
|
registry: docker.io
|
||||||
|
username_secret: DOCKERHUB_USERNAME
|
||||||
|
password_secret: DOCKERHUB_TOKEN
|
||||||
|
tags: |
|
||||||
|
type=raw,value=edge
|
||||||
|
type=semver,pattern={{version}}
|
||||||
|
type=semver,pattern={{major}}.{{minor}}
|
||||||
|
type=semver,pattern={{major}}
|
||||||
|
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
|
||||||
|
|
||||||
|
# henrygd/beszel-agent-intel
|
||||||
- image: henrygd/beszel-agent-intel
|
- image: henrygd/beszel-agent-intel
|
||||||
context: ./
|
|
||||||
dockerfile: ./internal/dockerfile_agent_intel
|
dockerfile: ./internal/dockerfile_agent_intel
|
||||||
platforms: linux/amd64
|
platforms: linux/amd64
|
||||||
registry: docker.io
|
registry: docker.io
|
||||||
username_secret: DOCKERHUB_USERNAME
|
username_secret: DOCKERHUB_USERNAME
|
||||||
password_secret: DOCKERHUB_TOKEN
|
password_secret: DOCKERHUB_TOKEN
|
||||||
|
tags: |
|
||||||
|
type=raw,value=edge
|
||||||
|
type=semver,pattern={{version}}
|
||||||
|
type=semver,pattern={{major}}.{{minor}}
|
||||||
|
type=semver,pattern={{major}}
|
||||||
|
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
|
||||||
|
|
||||||
|
# ghcr.io/henrygd/beszel
|
||||||
- image: ghcr.io/${{ github.repository }}/beszel
|
- image: ghcr.io/${{ github.repository }}/beszel
|
||||||
context: ./
|
|
||||||
dockerfile: ./internal/dockerfile_hub
|
dockerfile: ./internal/dockerfile_hub
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.actor }}
|
username: ${{ github.actor }}
|
||||||
password_secret: GITHUB_TOKEN
|
password_secret: GITHUB_TOKEN
|
||||||
|
tags: |
|
||||||
|
type=raw,value=edge
|
||||||
|
type=semver,pattern={{version}}
|
||||||
|
type=semver,pattern={{major}}.{{minor}}
|
||||||
|
type=semver,pattern={{major}}
|
||||||
|
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
|
||||||
|
|
||||||
|
# ghcr.io/henrygd/beszel-agent
|
||||||
- image: ghcr.io/${{ github.repository }}/beszel-agent
|
- image: ghcr.io/${{ github.repository }}/beszel-agent
|
||||||
context: ./
|
|
||||||
dockerfile: ./internal/dockerfile_agent
|
dockerfile: ./internal/dockerfile_agent
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.actor }}
|
username: ${{ github.actor }}
|
||||||
password_secret: GITHUB_TOKEN
|
password_secret: GITHUB_TOKEN
|
||||||
|
tags: |
|
||||||
|
type=raw,value=edge
|
||||||
|
type=raw,value=latest
|
||||||
|
type=semver,pattern={{version}}
|
||||||
|
type=semver,pattern={{major}}.{{minor}}
|
||||||
|
type=semver,pattern={{major}}
|
||||||
|
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
|
||||||
|
|
||||||
|
# ghcr.io/henrygd/beszel-agent-nvidia
|
||||||
- image: ghcr.io/${{ github.repository }}/beszel-agent-nvidia
|
- image: ghcr.io/${{ github.repository }}/beszel-agent-nvidia
|
||||||
context: ./
|
|
||||||
dockerfile: ./internal/dockerfile_agent_nvidia
|
dockerfile: ./internal/dockerfile_agent_nvidia
|
||||||
platforms: linux/amd64
|
platforms: linux/amd64
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.actor }}
|
username: ${{ github.actor }}
|
||||||
password_secret: GITHUB_TOKEN
|
password_secret: GITHUB_TOKEN
|
||||||
|
tags: |
|
||||||
|
type=raw,value=edge
|
||||||
|
type=semver,pattern={{version}}
|
||||||
|
type=semver,pattern={{major}}.{{minor}}
|
||||||
|
type=semver,pattern={{major}}
|
||||||
|
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
|
||||||
|
|
||||||
|
# ghcr.io/henrygd/beszel-agent-intel
|
||||||
- image: ghcr.io/${{ github.repository }}/beszel-agent-intel
|
- image: ghcr.io/${{ github.repository }}/beszel-agent-intel
|
||||||
context: ./
|
|
||||||
dockerfile: ./internal/dockerfile_agent_intel
|
dockerfile: ./internal/dockerfile_agent_intel
|
||||||
platforms: linux/amd64
|
platforms: linux/amd64
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.actor }}
|
username: ${{ github.actor }}
|
||||||
password_secret: GITHUB_TOKEN
|
password_secret: GITHUB_TOKEN
|
||||||
|
tags: |
|
||||||
|
type=raw,value=edge
|
||||||
|
type=semver,pattern={{version}}
|
||||||
|
type=semver,pattern={{major}}.{{minor}}
|
||||||
|
type=semver,pattern={{major}}
|
||||||
|
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
|
||||||
|
|
||||||
|
# ghcr.io/henrygd/beszel-agent:alpine
|
||||||
|
- image: ghcr.io/${{ github.repository }}/beszel-agent
|
||||||
|
dockerfile: ./internal/dockerfile_agent_alpine
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.actor }}
|
||||||
|
password_secret: GITHUB_TOKEN
|
||||||
|
tags: |
|
||||||
|
type=raw,value=alpine
|
||||||
|
type=semver,pattern={{version}}-alpine
|
||||||
|
type=semver,pattern={{major}}.{{minor}}-alpine
|
||||||
|
type=semver,pattern={{major}}-alpine
|
||||||
|
|
||||||
|
# henrygd/beszel-agent (keep at bottom so it gets built after :alpine and gets the latest tag)
|
||||||
|
- image: henrygd/beszel-agent
|
||||||
|
dockerfile: ./internal/dockerfile_agent
|
||||||
|
registry: docker.io
|
||||||
|
username_secret: DOCKERHUB_USERNAME
|
||||||
|
password_secret: DOCKERHUB_TOKEN
|
||||||
|
tags: |
|
||||||
|
type=raw,value=edge
|
||||||
|
type=semver,pattern={{version}}
|
||||||
|
type=semver,pattern={{major}}.{{minor}}
|
||||||
|
type=semver,pattern={{major}}
|
||||||
|
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
@@ -100,12 +174,7 @@ jobs:
|
|||||||
uses: docker/metadata-action@v5
|
uses: docker/metadata-action@v5
|
||||||
with:
|
with:
|
||||||
images: ${{ matrix.image }}
|
images: ${{ matrix.image }}
|
||||||
tags: |
|
tags: ${{ matrix.tags }}
|
||||||
type=raw,value=edge
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=semver,pattern={{major}}
|
|
||||||
type=raw,value={{sha}},enable=${{ github.ref_type != 'tag' }}
|
|
||||||
|
|
||||||
# https://github.com/docker/login-action
|
# https://github.com/docker/login-action
|
||||||
- name: Login to Docker Hub
|
- name: Login to Docker Hub
|
||||||
@@ -123,7 +192,7 @@ jobs:
|
|||||||
- name: Build and push Docker image
|
- name: Build and push Docker image
|
||||||
uses: docker/build-push-action@v5
|
uses: docker/build-push-action@v5
|
||||||
with:
|
with:
|
||||||
context: "${{ matrix.context }}"
|
context: ./
|
||||||
file: ${{ matrix.dockerfile }}
|
file: ${{ matrix.dockerfile }}
|
||||||
platforms: ${{ matrix.platforms || 'linux/amd64,linux/arm64,linux/arm/v7' }}
|
platforms: ${{ matrix.platforms || 'linux/amd64,linux/arm64,linux/arm/v7' }}
|
||||||
push: ${{ github.ref_type == 'tag' && secrets[matrix.password_secret] != '' }}
|
push: ${{ github.ref_type == 'tag' && secrets[matrix.password_secret] != '' }}
|
||||||
|
|||||||
17
.github/workflows/inactivity-actions.yml
vendored
17
.github/workflows/inactivity-actions.yml
vendored
@@ -10,12 +10,25 @@ permissions:
|
|||||||
pull-requests: write
|
pull-requests: write
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
lock-inactive:
|
||||||
|
name: Lock Inactive Issues
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
steps:
|
||||||
|
- uses: klaasnicolaas/action-inactivity-lock@v1.1.3
|
||||||
|
id: lock
|
||||||
|
with:
|
||||||
|
days-inactive-issues: 14
|
||||||
|
lock-reason-issues: ""
|
||||||
|
# Action can not skip PRs, set it to 100 years to cover it.
|
||||||
|
days-inactive-prs: 36524
|
||||||
|
lock-reason-prs: ""
|
||||||
|
|
||||||
close-stale:
|
close-stale:
|
||||||
name: Close Stale Issues
|
name: Close Stale Issues
|
||||||
runs-on: ubuntu-24.04
|
runs-on: ubuntu-24.04
|
||||||
steps:
|
steps:
|
||||||
- name: Close Stale Issues
|
- name: Close Stale Issues
|
||||||
uses: actions/stale@v9
|
uses: actions/stale@v10
|
||||||
with:
|
with:
|
||||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
@@ -32,6 +45,8 @@ jobs:
|
|||||||
# Timing
|
# Timing
|
||||||
days-before-issue-stale: 14
|
days-before-issue-stale: 14
|
||||||
days-before-issue-close: 7
|
days-before-issue-close: 7
|
||||||
|
# Action can not skip PRs, set it to 100 years to cover it.
|
||||||
|
days-before-pr-stale: 36524
|
||||||
|
|
||||||
# Labels
|
# Labels
|
||||||
stale-issue-label: 'stale'
|
stale-issue-label: 'stale'
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ project_name: beszel
|
|||||||
before:
|
before:
|
||||||
hooks:
|
hooks:
|
||||||
- go mod tidy
|
- go mod tidy
|
||||||
|
- go generate -run fetchsmartctl ./agent
|
||||||
|
|
||||||
builds:
|
builds:
|
||||||
- id: beszel
|
- id: beszel
|
||||||
|
|||||||
10
Makefile
10
Makefile
@@ -7,7 +7,7 @@ SKIP_WEB ?= false
|
|||||||
# Set executable extension based on target OS
|
# Set executable extension based on target OS
|
||||||
EXE_EXT := $(if $(filter windows,$(OS)),.exe,)
|
EXE_EXT := $(if $(filter windows,$(OS)),.exe,)
|
||||||
|
|
||||||
.PHONY: tidy build-agent build-hub build-hub-dev build clean lint dev-server dev-agent dev-hub dev generate-locales
|
.PHONY: tidy build-agent build-hub build-hub-dev build clean lint dev-server dev-agent dev-hub dev generate-locales fetch-smartctl-conditional
|
||||||
.DEFAULT_GOAL := build
|
.DEFAULT_GOAL := build
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
@@ -46,8 +46,14 @@ build-dotnet-conditional:
|
|||||||
fi; \
|
fi; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Download smartctl.exe at build time for Windows (skips if already present)
|
||||||
|
fetch-smartctl-conditional:
|
||||||
|
@if [ "$(OS)" = "windows" ]; then \
|
||||||
|
go generate -run fetchsmartctl ./agent; \
|
||||||
|
fi
|
||||||
|
|
||||||
# Update build-agent to include conditional .NET build
|
# Update build-agent to include conditional .NET build
|
||||||
build-agent: tidy build-dotnet-conditional
|
build-agent: tidy build-dotnet-conditional fetch-smartctl-conditional
|
||||||
GOOS=$(OS) GOARCH=$(ARCH) go build -o ./build/beszel-agent_$(OS)_$(ARCH)$(EXE_EXT) -ldflags "-w -s" ./internal/cmd/agent
|
GOOS=$(OS) GOARCH=$(ARCH) go build -o ./build/beszel-agent_$(OS)_$(ARCH)$(EXE_EXT) -ldflags "-w -s" ./internal/cmd/agent
|
||||||
|
|
||||||
build-hub: tidy $(if $(filter false,$(SKIP_WEB)),build-web-ui)
|
build-hub: tidy $(if $(filter false,$(SKIP_WEB)),build-web-ui)
|
||||||
|
|||||||
108
agent/agent.go
108
agent/agent.go
@@ -12,33 +12,38 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/gliderlabs/ssh"
|
"github.com/gliderlabs/ssh"
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/henrygd/beszel/agent/deltatracker"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
"github.com/shirou/gopsutil/v4/host"
|
"github.com/shirou/gopsutil/v4/host"
|
||||||
gossh "golang.org/x/crypto/ssh"
|
gossh "golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Agent struct {
|
type Agent struct {
|
||||||
sync.Mutex // Used to lock agent while collecting data
|
sync.Mutex // Used to lock agent while collecting data
|
||||||
debug bool // true if LOG_LEVEL is set to debug
|
debug bool // true if LOG_LEVEL is set to debug
|
||||||
zfs bool // true if system has arcstats
|
zfs bool // true if system has arcstats
|
||||||
memCalc string // Memory calculation formula
|
memCalc string // Memory calculation formula
|
||||||
fsNames []string // List of filesystem device names being monitored
|
fsNames []string // List of filesystem device names being monitored
|
||||||
fsStats map[string]*system.FsStats // Keeps track of disk stats for each filesystem
|
fsStats map[string]*system.FsStats // Keeps track of disk stats for each filesystem
|
||||||
netInterfaces map[string]struct{} // Stores all valid network interfaces
|
diskPrev map[uint16]map[string]prevDisk // Previous disk I/O counters per cache interval
|
||||||
netIoStats system.NetIoStats // Keeps track of bandwidth usage
|
netInterfaces map[string]struct{} // Stores all valid network interfaces
|
||||||
dockerManager *dockerManager // Manages Docker API requests
|
netIoStats map[uint16]system.NetIoStats // Keeps track of bandwidth usage per cache interval
|
||||||
sensorConfig *SensorConfig // Sensors config
|
netInterfaceDeltaTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64] // Per-cache-time NIC delta trackers
|
||||||
systemInfo system.Info // Host system info
|
dockerManager *dockerManager // Manages Docker API requests
|
||||||
gpuManager *GPUManager // Manages GPU data
|
sensorConfig *SensorConfig // Sensors config
|
||||||
cache *SessionCache // Cache for system stats based on primary session ID
|
systemInfo system.Info // Host system info
|
||||||
connectionManager *ConnectionManager // Channel to signal connection events
|
gpuManager *GPUManager // Manages GPU data
|
||||||
server *ssh.Server // SSH server
|
cache *systemDataCache // Cache for system stats based on cache time
|
||||||
dataDir string // Directory for persisting data
|
connectionManager *ConnectionManager // Channel to signal connection events
|
||||||
keys []gossh.PublicKey // SSH public keys
|
handlerRegistry *HandlerRegistry // Registry for routing incoming messages
|
||||||
|
server *ssh.Server // SSH server
|
||||||
|
dataDir string // Directory for persisting data
|
||||||
|
keys []gossh.PublicKey // SSH public keys
|
||||||
|
smartManager *SmartManager // Manages SMART data
|
||||||
|
systemdManager *systemdManager // Manages systemd services
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAgent creates a new agent with the given data directory for persisting data.
|
// NewAgent creates a new agent with the given data directory for persisting data.
|
||||||
@@ -46,9 +51,15 @@ type Agent struct {
|
|||||||
func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
||||||
agent = &Agent{
|
agent = &Agent{
|
||||||
fsStats: make(map[string]*system.FsStats),
|
fsStats: make(map[string]*system.FsStats),
|
||||||
cache: NewSessionCache(69 * time.Second),
|
cache: NewSystemDataCache(),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Initialize disk I/O previous counters storage
|
||||||
|
agent.diskPrev = make(map[uint16]map[string]prevDisk)
|
||||||
|
// Initialize per-cache-time network tracking structures
|
||||||
|
agent.netIoStats = make(map[uint16]system.NetIoStats)
|
||||||
|
agent.netInterfaceDeltaTrackers = make(map[uint16]*deltatracker.DeltaTracker[string, uint64])
|
||||||
|
|
||||||
agent.dataDir, err = getDataDir(dataDir...)
|
agent.dataDir, err = getDataDir(dataDir...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("Data directory not found")
|
slog.Warn("Data directory not found")
|
||||||
@@ -79,6 +90,9 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
// initialize connection manager
|
// initialize connection manager
|
||||||
agent.connectionManager = newConnectionManager(agent)
|
agent.connectionManager = newConnectionManager(agent)
|
||||||
|
|
||||||
|
// initialize handler registry
|
||||||
|
agent.handlerRegistry = NewHandlerRegistry()
|
||||||
|
|
||||||
// initialize disk info
|
// initialize disk info
|
||||||
agent.initializeDiskInfo()
|
agent.initializeDiskInfo()
|
||||||
|
|
||||||
@@ -88,16 +102,25 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
// initialize docker manager
|
// initialize docker manager
|
||||||
agent.dockerManager = newDockerManager(agent)
|
agent.dockerManager = newDockerManager(agent)
|
||||||
|
|
||||||
|
agent.systemdManager, err = newSystemdManager()
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("Systemd", "err", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
agent.smartManager, err = NewSmartManager()
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("SMART", "err", err)
|
||||||
|
}
|
||||||
|
|
||||||
// initialize GPU manager
|
// initialize GPU manager
|
||||||
if gm, err := NewGPUManager(); err != nil {
|
agent.gpuManager, err = NewGPUManager()
|
||||||
|
if err != nil {
|
||||||
slog.Debug("GPU", "err", err)
|
slog.Debug("GPU", "err", err)
|
||||||
} else {
|
|
||||||
agent.gpuManager = gm
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// if debugging, print stats
|
// if debugging, print stats
|
||||||
if agent.debug {
|
if agent.debug {
|
||||||
slog.Debug("Stats", "data", agent.gatherStats(""))
|
slog.Debug("Stats", "data", agent.gatherStats(0))
|
||||||
}
|
}
|
||||||
|
|
||||||
return agent, nil
|
return agent, nil
|
||||||
@@ -112,24 +135,24 @@ func GetEnv(key string) (value string, exists bool) {
|
|||||||
return os.LookupEnv(key)
|
return os.LookupEnv(key)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Agent) gatherStats(sessionID string) *system.CombinedData {
|
func (a *Agent) gatherStats(cacheTimeMs uint16) *system.CombinedData {
|
||||||
a.Lock()
|
a.Lock()
|
||||||
defer a.Unlock()
|
defer a.Unlock()
|
||||||
|
|
||||||
data, isCached := a.cache.Get(sessionID)
|
data, isCached := a.cache.Get(cacheTimeMs)
|
||||||
if isCached {
|
if isCached {
|
||||||
slog.Debug("Cached data", "session", sessionID)
|
slog.Debug("Cached data", "cacheTimeMs", cacheTimeMs)
|
||||||
return data
|
return data
|
||||||
}
|
}
|
||||||
|
|
||||||
*data = system.CombinedData{
|
*data = system.CombinedData{
|
||||||
Stats: a.getSystemStats(),
|
Stats: a.getSystemStats(cacheTimeMs),
|
||||||
Info: a.systemInfo,
|
Info: a.systemInfo,
|
||||||
}
|
}
|
||||||
slog.Debug("System data", "data", data)
|
// slog.Info("System data", "data", data, "cacheTimeMs", cacheTimeMs)
|
||||||
|
|
||||||
if a.dockerManager != nil {
|
if a.dockerManager != nil {
|
||||||
if containerStats, err := a.dockerManager.getDockerStats(); err == nil {
|
if containerStats, err := a.dockerManager.getDockerStats(cacheTimeMs); err == nil {
|
||||||
data.Containers = containerStats
|
data.Containers = containerStats
|
||||||
slog.Debug("Containers", "data", data.Containers)
|
slog.Debug("Containers", "data", data.Containers)
|
||||||
} else {
|
} else {
|
||||||
@@ -137,15 +160,38 @@ func (a *Agent) gatherStats(sessionID string) *system.CombinedData {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// skip updating systemd services if cache time is not the default 60sec interval
|
||||||
|
if a.systemdManager != nil && cacheTimeMs == 60_000 {
|
||||||
|
totalCount := uint16(a.systemdManager.getServiceStatsCount())
|
||||||
|
if totalCount > 0 {
|
||||||
|
numFailed := a.systemdManager.getFailedServiceCount()
|
||||||
|
data.Info.Services = []uint16{totalCount, numFailed}
|
||||||
|
}
|
||||||
|
if a.systemdManager.hasFreshStats {
|
||||||
|
data.SystemdServices = a.systemdManager.getServiceStats(nil, false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
data.Stats.ExtraFs = make(map[string]*system.FsStats)
|
data.Stats.ExtraFs = make(map[string]*system.FsStats)
|
||||||
|
data.Info.ExtraFsPct = make(map[string]float64)
|
||||||
for name, stats := range a.fsStats {
|
for name, stats := range a.fsStats {
|
||||||
if !stats.Root && stats.DiskTotal > 0 {
|
if !stats.Root && stats.DiskTotal > 0 {
|
||||||
data.Stats.ExtraFs[name] = stats
|
// Use custom name if available, otherwise use device name
|
||||||
|
key := name
|
||||||
|
if stats.Name != "" {
|
||||||
|
key = stats.Name
|
||||||
|
}
|
||||||
|
data.Stats.ExtraFs[key] = stats
|
||||||
|
// Add percentages to Info struct for dashboard
|
||||||
|
if stats.DiskTotal > 0 {
|
||||||
|
pct := twoDecimals((stats.DiskUsed / stats.DiskTotal) * 100)
|
||||||
|
data.Info.ExtraFsPct[key] = pct
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
slog.Debug("Extra FS", "data", data.Stats.ExtraFs)
|
slog.Debug("Extra FS", "data", data.Stats.ExtraFs)
|
||||||
|
|
||||||
a.cache.Set(sessionID, data)
|
a.cache.Set(data, cacheTimeMs)
|
||||||
return data
|
return data
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,37 +1,55 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Not thread safe since we only access from gatherStats which is already locked
|
type systemDataCache struct {
|
||||||
type SessionCache struct {
|
sync.RWMutex
|
||||||
data *system.CombinedData
|
cache map[uint16]*cacheNode
|
||||||
lastUpdate time.Time
|
|
||||||
primarySession string
|
|
||||||
leaseTime time.Duration
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewSessionCache(leaseTime time.Duration) *SessionCache {
|
type cacheNode struct {
|
||||||
return &SessionCache{
|
data *system.CombinedData
|
||||||
leaseTime: leaseTime,
|
lastUpdate time.Time
|
||||||
data: &system.CombinedData{},
|
}
|
||||||
|
|
||||||
|
// NewSystemDataCache creates a cache keyed by the polling interval in milliseconds.
|
||||||
|
func NewSystemDataCache() *systemDataCache {
|
||||||
|
return &systemDataCache{
|
||||||
|
cache: make(map[uint16]*cacheNode),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *SessionCache) Get(sessionID string) (stats *system.CombinedData, isCached bool) {
|
// Get returns cached combined data when the entry is still considered fresh.
|
||||||
if sessionID != c.primarySession && time.Since(c.lastUpdate) < c.leaseTime {
|
func (c *systemDataCache) Get(cacheTimeMs uint16) (stats *system.CombinedData, isCached bool) {
|
||||||
return c.data, true
|
c.RLock()
|
||||||
|
defer c.RUnlock()
|
||||||
|
|
||||||
|
node, ok := c.cache[cacheTimeMs]
|
||||||
|
if !ok {
|
||||||
|
return &system.CombinedData{}, false
|
||||||
}
|
}
|
||||||
return c.data, false
|
// allowedSkew := time.Second
|
||||||
|
// isFresh := time.Since(node.lastUpdate) < time.Duration(cacheTimeMs)*time.Millisecond-allowedSkew
|
||||||
|
// allow a 50% skew of the cache time
|
||||||
|
isFresh := time.Since(node.lastUpdate) < time.Duration(cacheTimeMs/2)*time.Millisecond
|
||||||
|
return node.data, isFresh
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *SessionCache) Set(sessionID string, data *system.CombinedData) {
|
// Set stores the latest combined data snapshot for the given interval.
|
||||||
if data != nil {
|
func (c *systemDataCache) Set(data *system.CombinedData, cacheTimeMs uint16) {
|
||||||
*c.data = *data
|
c.Lock()
|
||||||
|
defer c.Unlock()
|
||||||
|
|
||||||
|
node, ok := c.cache[cacheTimeMs]
|
||||||
|
if !ok {
|
||||||
|
node = &cacheNode{}
|
||||||
|
c.cache[cacheTimeMs] = node
|
||||||
}
|
}
|
||||||
c.primarySession = sessionID
|
node.data = data
|
||||||
c.lastUpdate = time.Now()
|
node.lastUpdate = time.Now()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,82 +8,239 @@ import (
|
|||||||
"testing/synctest"
|
"testing/synctest"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestSessionCache_GetSet(t *testing.T) {
|
func createTestCacheData() *system.CombinedData {
|
||||||
synctest.Test(t, func(t *testing.T) {
|
return &system.CombinedData{
|
||||||
cache := NewSessionCache(69 * time.Second)
|
Stats: system.Stats{
|
||||||
|
Cpu: 50.5,
|
||||||
|
Mem: 8192,
|
||||||
|
DiskTotal: 100000,
|
||||||
|
},
|
||||||
|
Info: system.Info{
|
||||||
|
Hostname: "test-host",
|
||||||
|
},
|
||||||
|
Containers: []*container.Stats{
|
||||||
|
{
|
||||||
|
Name: "test-container",
|
||||||
|
Cpu: 25.0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
testData := &system.CombinedData{
|
func TestNewSystemDataCache(t *testing.T) {
|
||||||
Info: system.Info{
|
cache := NewSystemDataCache()
|
||||||
Hostname: "test-host",
|
require.NotNil(t, cache)
|
||||||
Cores: 4,
|
assert.NotNil(t, cache.cache)
|
||||||
},
|
assert.Empty(t, cache.cache)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCacheGetSet(t *testing.T) {
|
||||||
|
cache := NewSystemDataCache()
|
||||||
|
data := createTestCacheData()
|
||||||
|
|
||||||
|
// Test setting data
|
||||||
|
cache.Set(data, 1000) // 1 second cache
|
||||||
|
|
||||||
|
// Test getting fresh data
|
||||||
|
retrieved, isCached := cache.Get(1000)
|
||||||
|
assert.True(t, isCached)
|
||||||
|
assert.Equal(t, data, retrieved)
|
||||||
|
|
||||||
|
// Test getting non-existent cache key
|
||||||
|
_, isCached = cache.Get(2000)
|
||||||
|
assert.False(t, isCached)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCacheFreshness(t *testing.T) {
|
||||||
|
cache := NewSystemDataCache()
|
||||||
|
data := createTestCacheData()
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
cacheTimeMs uint16
|
||||||
|
sleepMs time.Duration
|
||||||
|
expectFresh bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "fresh data - well within cache time",
|
||||||
|
cacheTimeMs: 1000, // 1 second
|
||||||
|
sleepMs: 100, // 100ms
|
||||||
|
expectFresh: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "fresh data - at 50% of cache time boundary",
|
||||||
|
cacheTimeMs: 1000, // 1 second, 50% = 500ms
|
||||||
|
sleepMs: 499, // just under 500ms
|
||||||
|
expectFresh: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "stale data - exactly at 50% cache time",
|
||||||
|
cacheTimeMs: 1000, // 1 second, 50% = 500ms
|
||||||
|
sleepMs: 500, // exactly 500ms
|
||||||
|
expectFresh: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "stale data - well beyond cache time",
|
||||||
|
cacheTimeMs: 1000, // 1 second
|
||||||
|
sleepMs: 800, // 800ms
|
||||||
|
expectFresh: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "short cache time",
|
||||||
|
cacheTimeMs: 200, // 200ms, 50% = 100ms
|
||||||
|
sleepMs: 150, // 150ms > 100ms
|
||||||
|
expectFresh: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
// Set data
|
||||||
|
cache.Set(data, tc.cacheTimeMs)
|
||||||
|
|
||||||
|
// Wait for the specified duration
|
||||||
|
if tc.sleepMs > 0 {
|
||||||
|
time.Sleep(tc.sleepMs * time.Millisecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check freshness
|
||||||
|
_, isCached := cache.Get(tc.cacheTimeMs)
|
||||||
|
assert.Equal(t, tc.expectFresh, isCached)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCacheMultipleIntervals(t *testing.T) {
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
cache := NewSystemDataCache()
|
||||||
|
data1 := createTestCacheData()
|
||||||
|
data2 := &system.CombinedData{
|
||||||
Stats: system.Stats{
|
Stats: system.Stats{
|
||||||
Cpu: 50.0,
|
Cpu: 75.0,
|
||||||
MemPct: 30.0,
|
Mem: 16384,
|
||||||
DiskPct: 40.0,
|
|
||||||
},
|
},
|
||||||
|
Info: system.Info{
|
||||||
|
Hostname: "test-host-2",
|
||||||
|
},
|
||||||
|
Containers: []*container.Stats{},
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test initial state - should not be cached
|
// Set data for different intervals
|
||||||
data, isCached := cache.Get("session1")
|
cache.Set(data1, 500) // 500ms cache
|
||||||
assert.False(t, isCached, "Expected no cached data initially")
|
cache.Set(data2, 1000) // 1000ms cache
|
||||||
assert.NotNil(t, data, "Expected data to be initialized")
|
|
||||||
// Set data for session1
|
|
||||||
cache.Set("session1", testData)
|
|
||||||
|
|
||||||
time.Sleep(15 * time.Second)
|
// Both should be fresh immediately
|
||||||
|
retrieved1, isCached1 := cache.Get(500)
|
||||||
|
assert.True(t, isCached1)
|
||||||
|
assert.Equal(t, data1, retrieved1)
|
||||||
|
|
||||||
// Get data for a different session - should be cached
|
retrieved2, isCached2 := cache.Get(1000)
|
||||||
data, isCached = cache.Get("session2")
|
assert.True(t, isCached2)
|
||||||
assert.True(t, isCached, "Expected data to be cached for non-primary session")
|
assert.Equal(t, data2, retrieved2)
|
||||||
require.NotNil(t, data, "Expected cached data to be returned")
|
|
||||||
assert.Equal(t, "test-host", data.Info.Hostname, "Hostname should match test data")
|
|
||||||
assert.Equal(t, 4, data.Info.Cores, "Cores should match test data")
|
|
||||||
assert.Equal(t, 50.0, data.Stats.Cpu, "CPU should match test data")
|
|
||||||
assert.Equal(t, 30.0, data.Stats.MemPct, "Memory percentage should match test data")
|
|
||||||
assert.Equal(t, 40.0, data.Stats.DiskPct, "Disk percentage should match test data")
|
|
||||||
|
|
||||||
time.Sleep(10 * time.Second)
|
// Wait 300ms - 500ms cache should be stale (250ms threshold), 1000ms should still be fresh (500ms threshold)
|
||||||
|
time.Sleep(300 * time.Millisecond)
|
||||||
|
|
||||||
// Get data for the primary session - should not be cached
|
_, isCached1 = cache.Get(500)
|
||||||
data, isCached = cache.Get("session1")
|
assert.False(t, isCached1)
|
||||||
assert.False(t, isCached, "Expected data not to be cached for primary session")
|
|
||||||
require.NotNil(t, data, "Expected data to be returned even if not cached")
|
|
||||||
assert.Equal(t, "test-host", data.Info.Hostname, "Hostname should match test data")
|
|
||||||
// if not cached, agent will update the data
|
|
||||||
cache.Set("session1", testData)
|
|
||||||
|
|
||||||
time.Sleep(45 * time.Second)
|
_, isCached2 = cache.Get(1000)
|
||||||
|
assert.True(t, isCached2)
|
||||||
|
|
||||||
// Get data for a different session - should still be cached
|
// Wait another 300ms (total 600ms) - now 1000ms cache should also be stale
|
||||||
_, isCached = cache.Get("session2")
|
time.Sleep(300 * time.Millisecond)
|
||||||
assert.True(t, isCached, "Expected data to be cached for non-primary session")
|
_, isCached2 = cache.Get(1000)
|
||||||
|
assert.False(t, isCached2)
|
||||||
// Wait for the lease to expire
|
|
||||||
time.Sleep(30 * time.Second)
|
|
||||||
|
|
||||||
// Get data for session2 - should not be cached
|
|
||||||
_, isCached = cache.Get("session2")
|
|
||||||
assert.False(t, isCached, "Expected data not to be cached after lease expiration")
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestSessionCache_NilData(t *testing.T) {
|
func TestCacheOverwrite(t *testing.T) {
|
||||||
// Create a new SessionCache
|
cache := NewSystemDataCache()
|
||||||
cache := NewSessionCache(30 * time.Second)
|
data1 := createTestCacheData()
|
||||||
|
data2 := &system.CombinedData{
|
||||||
|
Stats: system.Stats{
|
||||||
|
Cpu: 90.0,
|
||||||
|
Mem: 32768,
|
||||||
|
},
|
||||||
|
Info: system.Info{
|
||||||
|
Hostname: "updated-host",
|
||||||
|
},
|
||||||
|
Containers: []*container.Stats{},
|
||||||
|
}
|
||||||
|
|
||||||
// Test setting nil data (should not panic)
|
// Set initial data
|
||||||
assert.NotPanics(t, func() {
|
cache.Set(data1, 1000)
|
||||||
cache.Set("session1", nil)
|
retrieved, isCached := cache.Get(1000)
|
||||||
}, "Setting nil data should not panic")
|
assert.True(t, isCached)
|
||||||
|
assert.Equal(t, data1, retrieved)
|
||||||
|
|
||||||
// Get data - should not be nil even though we set nil
|
// Overwrite with new data
|
||||||
data, _ := cache.Get("session2")
|
cache.Set(data2, 1000)
|
||||||
assert.NotNil(t, data, "Expected data to not be nil after setting nil data")
|
retrieved, isCached = cache.Get(1000)
|
||||||
|
assert.True(t, isCached)
|
||||||
|
assert.Equal(t, data2, retrieved)
|
||||||
|
assert.NotEqual(t, data1, retrieved)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCacheMiss(t *testing.T) {
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
cache := NewSystemDataCache()
|
||||||
|
|
||||||
|
// Test getting from empty cache
|
||||||
|
_, isCached := cache.Get(1000)
|
||||||
|
assert.False(t, isCached)
|
||||||
|
|
||||||
|
// Set data for one interval
|
||||||
|
data := createTestCacheData()
|
||||||
|
cache.Set(data, 1000)
|
||||||
|
|
||||||
|
// Test getting different interval
|
||||||
|
_, isCached = cache.Get(2000)
|
||||||
|
assert.False(t, isCached)
|
||||||
|
|
||||||
|
// Test getting after data has expired
|
||||||
|
time.Sleep(600 * time.Millisecond) // 600ms > 500ms (50% of 1000ms)
|
||||||
|
_, isCached = cache.Get(1000)
|
||||||
|
assert.False(t, isCached)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCacheZeroInterval(t *testing.T) {
|
||||||
|
cache := NewSystemDataCache()
|
||||||
|
data := createTestCacheData()
|
||||||
|
|
||||||
|
// Set with zero interval - should allow immediate cache
|
||||||
|
cache.Set(data, 0)
|
||||||
|
|
||||||
|
// With 0 interval, 50% is 0, so it should never be considered fresh
|
||||||
|
// (time.Since(lastUpdate) >= 0, which is not < 0)
|
||||||
|
_, isCached := cache.Get(0)
|
||||||
|
assert.False(t, isCached)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCacheLargeInterval(t *testing.T) {
|
||||||
|
synctest.Test(t, func(t *testing.T) {
|
||||||
|
cache := NewSystemDataCache()
|
||||||
|
data := createTestCacheData()
|
||||||
|
|
||||||
|
// Test with maximum uint16 value
|
||||||
|
cache.Set(data, 65535) // ~65 seconds
|
||||||
|
|
||||||
|
// Should be fresh immediately
|
||||||
|
_, isCached := cache.Get(65535)
|
||||||
|
assert.True(t, isCached)
|
||||||
|
|
||||||
|
// Should still be fresh after a short time
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
_, isCached = cache.Get(65535)
|
||||||
|
assert.True(t, isCached)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,12 +6,15 @@ package battery
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
|
||||||
"github.com/distatus/battery"
|
"github.com/distatus/battery"
|
||||||
)
|
)
|
||||||
|
|
||||||
var systemHasBattery = false
|
var (
|
||||||
var haveCheckedBattery = false
|
systemHasBattery = false
|
||||||
|
haveCheckedBattery = false
|
||||||
|
)
|
||||||
|
|
||||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||||
func HasReadableBattery() bool {
|
func HasReadableBattery() bool {
|
||||||
@@ -19,8 +22,13 @@ func HasReadableBattery() bool {
|
|||||||
return systemHasBattery
|
return systemHasBattery
|
||||||
}
|
}
|
||||||
haveCheckedBattery = true
|
haveCheckedBattery = true
|
||||||
bat, err := battery.Get(0)
|
batteries, err := battery.GetAll()
|
||||||
systemHasBattery = err == nil && bat != nil && bat.Design != 0 && bat.Full != 0
|
for _, bat := range batteries {
|
||||||
|
if bat != nil && (bat.Full > 0 || bat.Design > 0) {
|
||||||
|
systemHasBattery = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
if !systemHasBattery {
|
if !systemHasBattery {
|
||||||
slog.Debug("No battery found", "err", err)
|
slog.Debug("No battery found", "err", err)
|
||||||
}
|
}
|
||||||
@@ -28,25 +36,49 @@ func HasReadableBattery() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetBatteryStats returns the current battery percent and charge state
|
// GetBatteryStats returns the current battery percent and charge state
|
||||||
|
// percent = (current charge of all batteries) / (sum of designed/full capacity of all batteries)
|
||||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||||
if !systemHasBattery {
|
if !HasReadableBattery() {
|
||||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||||
}
|
}
|
||||||
batteries, err := battery.GetAll()
|
batteries, err := battery.GetAll()
|
||||||
if err != nil || len(batteries) == 0 {
|
// we'll handle errors later by skipping batteries with errors, rather
|
||||||
return batteryPercent, batteryState, err
|
// than skipping everything because of the presence of some errors.
|
||||||
|
if len(batteries) == 0 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no batteries")
|
||||||
}
|
}
|
||||||
|
|
||||||
totalCapacity := float64(0)
|
totalCapacity := float64(0)
|
||||||
totalCharge := float64(0)
|
totalCharge := float64(0)
|
||||||
for _, bat := range batteries {
|
errs, partialErrs := err.(battery.Errors)
|
||||||
if bat.Design != 0 {
|
|
||||||
totalCapacity += bat.Design
|
batteryState = math.MaxUint8
|
||||||
} else {
|
|
||||||
totalCapacity += bat.Full
|
for i, bat := range batteries {
|
||||||
|
if partialErrs && errs[i] != nil {
|
||||||
|
// if there were some errors, like missing data, skip it
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
if bat == nil || bat.Full == 0 {
|
||||||
|
// skip batteries with no capacity. Charge is unlikely to ever be zero, but
|
||||||
|
// we can't guarantee that, so don't skip based on charge.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalCapacity += bat.Full
|
||||||
totalCharge += bat.Current
|
totalCharge += bat.Current
|
||||||
|
if bat.State.Raw >= 0 {
|
||||||
|
batteryState = uint8(bat.State.Raw)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
||||||
|
// for macs there's sometimes a ghost battery with 0 capacity
|
||||||
|
// https://github.com/distatus/battery/issues/34
|
||||||
|
// Instead of skipping over those batteries, we'll check for total 0 capacity
|
||||||
|
// and return an error. This also prevents a divide by zero.
|
||||||
|
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||||
|
}
|
||||||
|
|
||||||
batteryPercent = uint8(totalCharge / totalCapacity * 100)
|
batteryPercent = uint8(totalCharge / totalCapacity * 100)
|
||||||
batteryState = uint8(batteries[0].State.Raw)
|
|
||||||
return batteryPercent, batteryState, nil
|
return batteryPercent, batteryState, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -15,6 +15,9 @@ import (
|
|||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
|
|
||||||
"github.com/fxamacker/cbor/v2"
|
"github.com/fxamacker/cbor/v2"
|
||||||
"github.com/lxzan/gws"
|
"github.com/lxzan/gws"
|
||||||
@@ -142,7 +145,9 @@ func (client *WebSocketClient) OnOpen(conn *gws.Conn) {
|
|||||||
// OnClose handles WebSocket connection closure.
|
// OnClose handles WebSocket connection closure.
|
||||||
// It logs the closure reason and notifies the connection manager.
|
// It logs the closure reason and notifies the connection manager.
|
||||||
func (client *WebSocketClient) OnClose(conn *gws.Conn, err error) {
|
func (client *WebSocketClient) OnClose(conn *gws.Conn, err error) {
|
||||||
slog.Warn("Connection closed", "err", strings.TrimPrefix(err.Error(), "gws: "))
|
if err != nil {
|
||||||
|
slog.Warn("Connection closed", "err", strings.TrimPrefix(err.Error(), "gws: "))
|
||||||
|
}
|
||||||
client.agent.connectionManager.eventChan <- WebSocketDisconnect
|
client.agent.connectionManager.eventChan <- WebSocketDisconnect
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -156,11 +161,15 @@ func (client *WebSocketClient) OnMessage(conn *gws.Conn, message *gws.Message) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := cbor.NewDecoder(message.Data).Decode(client.hubRequest); err != nil {
|
var HubRequest common.HubRequest[cbor.RawMessage]
|
||||||
|
|
||||||
|
err := cbor.Unmarshal(message.Data.Bytes(), &HubRequest)
|
||||||
|
if err != nil {
|
||||||
slog.Error("Error parsing message", "err", err)
|
slog.Error("Error parsing message", "err", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := client.handleHubRequest(client.hubRequest); err != nil {
|
|
||||||
|
if err := client.handleHubRequest(&HubRequest, HubRequest.Id); err != nil {
|
||||||
slog.Error("Error handling message", "err", err)
|
slog.Error("Error handling message", "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -173,7 +182,7 @@ func (client *WebSocketClient) OnPing(conn *gws.Conn, message []byte) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// handleAuthChallenge verifies the authenticity of the hub and returns the system's fingerprint.
|
// handleAuthChallenge verifies the authenticity of the hub and returns the system's fingerprint.
|
||||||
func (client *WebSocketClient) handleAuthChallenge(msg *common.HubRequest[cbor.RawMessage]) (err error) {
|
func (client *WebSocketClient) handleAuthChallenge(msg *common.HubRequest[cbor.RawMessage], requestID *uint32) (err error) {
|
||||||
var authRequest common.FingerprintRequest
|
var authRequest common.FingerprintRequest
|
||||||
if err := cbor.Unmarshal(msg.Data, &authRequest); err != nil {
|
if err := cbor.Unmarshal(msg.Data, &authRequest); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -191,12 +200,13 @@ func (client *WebSocketClient) handleAuthChallenge(msg *common.HubRequest[cbor.R
|
|||||||
}
|
}
|
||||||
|
|
||||||
if authRequest.NeedSysInfo {
|
if authRequest.NeedSysInfo {
|
||||||
|
response.Name, _ = GetEnv("SYSTEM_NAME")
|
||||||
response.Hostname = client.agent.systemInfo.Hostname
|
response.Hostname = client.agent.systemInfo.Hostname
|
||||||
serverAddr := client.agent.connectionManager.serverOptions.Addr
|
serverAddr := client.agent.connectionManager.serverOptions.Addr
|
||||||
_, response.Port, _ = net.SplitHostPort(serverAddr)
|
_, response.Port, _ = net.SplitHostPort(serverAddr)
|
||||||
}
|
}
|
||||||
|
|
||||||
return client.sendMessage(response)
|
return client.sendResponse(response, requestID)
|
||||||
}
|
}
|
||||||
|
|
||||||
// verifySignature verifies the signature of the token using the public keys.
|
// verifySignature verifies the signature of the token using the public keys.
|
||||||
@@ -221,25 +231,17 @@ func (client *WebSocketClient) Close() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleHubRequest routes the request to the appropriate handler.
|
// handleHubRequest routes the request to the appropriate handler using the handler registry.
|
||||||
// It ensures the hub is verified before processing most requests.
|
func (client *WebSocketClient) handleHubRequest(msg *common.HubRequest[cbor.RawMessage], requestID *uint32) error {
|
||||||
func (client *WebSocketClient) handleHubRequest(msg *common.HubRequest[cbor.RawMessage]) error {
|
ctx := &HandlerContext{
|
||||||
if !client.hubVerified && msg.Action != common.CheckFingerprint {
|
Client: client,
|
||||||
return errors.New("hub not verified")
|
Agent: client.agent,
|
||||||
|
Request: msg,
|
||||||
|
RequestID: requestID,
|
||||||
|
HubVerified: client.hubVerified,
|
||||||
|
SendResponse: client.sendResponse,
|
||||||
}
|
}
|
||||||
switch msg.Action {
|
return client.agent.handlerRegistry.Handle(ctx)
|
||||||
case common.GetData:
|
|
||||||
return client.sendSystemData()
|
|
||||||
case common.CheckFingerprint:
|
|
||||||
return client.handleAuthChallenge(msg)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendSystemData gathers and sends current system statistics to the hub.
|
|
||||||
func (client *WebSocketClient) sendSystemData() error {
|
|
||||||
sysStats := client.agent.gatherStats(client.token)
|
|
||||||
return client.sendMessage(sysStats)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// sendMessage encodes the given data to CBOR and sends it as a binary message over the WebSocket connection to the hub.
|
// sendMessage encodes the given data to CBOR and sends it as a binary message over the WebSocket connection to the hub.
|
||||||
@@ -248,7 +250,49 @@ func (client *WebSocketClient) sendMessage(data any) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return client.Conn.WriteMessage(gws.OpcodeBinary, bytes)
|
err = client.Conn.WriteMessage(gws.OpcodeBinary, bytes)
|
||||||
|
if err != nil {
|
||||||
|
// If writing fails (e.g., broken pipe due to network issues),
|
||||||
|
// close the connection to trigger reconnection logic (#1263)
|
||||||
|
client.Close()
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendResponse sends a response with optional request ID for the new protocol
|
||||||
|
func (client *WebSocketClient) sendResponse(data any, requestID *uint32) error {
|
||||||
|
if requestID != nil {
|
||||||
|
// New format with ID - use typed fields
|
||||||
|
response := common.AgentResponse{
|
||||||
|
Id: requestID,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set the appropriate typed field based on data type
|
||||||
|
switch v := data.(type) {
|
||||||
|
case *system.CombinedData:
|
||||||
|
response.SystemData = v
|
||||||
|
case *common.FingerprintResponse:
|
||||||
|
response.Fingerprint = v
|
||||||
|
case string:
|
||||||
|
response.String = &v
|
||||||
|
case map[string]smart.SmartData:
|
||||||
|
response.SmartData = v
|
||||||
|
case systemd.ServiceDetails:
|
||||||
|
response.ServiceInfo = v
|
||||||
|
// case []byte:
|
||||||
|
// response.RawBytes = v
|
||||||
|
// case string:
|
||||||
|
// response.RawBytes = []byte(v)
|
||||||
|
default:
|
||||||
|
// For any other type, convert to error
|
||||||
|
response.Error = fmt.Sprintf("unsupported response type: %T", data)
|
||||||
|
}
|
||||||
|
|
||||||
|
return client.sendMessage(response)
|
||||||
|
} else {
|
||||||
|
// Legacy format - send data directly
|
||||||
|
return client.sendMessage(data)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// getUserAgent returns one of two User-Agent strings based on current time.
|
// getUserAgent returns one of two User-Agent strings based on current time.
|
||||||
|
|||||||
@@ -301,7 +301,7 @@ func TestWebSocketClient_HandleHubRequest(t *testing.T) {
|
|||||||
Data: cbor.RawMessage{},
|
Data: cbor.RawMessage{},
|
||||||
}
|
}
|
||||||
|
|
||||||
err := client.handleHubRequest(hubRequest)
|
err := client.handleHubRequest(hubRequest, nil)
|
||||||
|
|
||||||
if tc.expectError {
|
if tc.expectError {
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
|
|||||||
134
agent/cpu.go
Normal file
134
agent/cpu.go
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/shirou/gopsutil/v4/cpu"
|
||||||
|
)
|
||||||
|
|
||||||
|
var lastCpuTimes = make(map[uint16]cpu.TimesStat)
|
||||||
|
var lastPerCoreCpuTimes = make(map[uint16][]cpu.TimesStat)
|
||||||
|
|
||||||
|
// init initializes the CPU monitoring by storing the initial CPU times
|
||||||
|
// for the default 60-second cache interval.
|
||||||
|
func init() {
|
||||||
|
if times, err := cpu.Times(false); err == nil {
|
||||||
|
lastCpuTimes[60000] = times[0]
|
||||||
|
}
|
||||||
|
if perCoreTimes, err := cpu.Times(true); err == nil {
|
||||||
|
lastPerCoreCpuTimes[60000] = perCoreTimes
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CpuMetrics contains detailed CPU usage breakdown
|
||||||
|
type CpuMetrics struct {
|
||||||
|
Total float64
|
||||||
|
User float64
|
||||||
|
System float64
|
||||||
|
Iowait float64
|
||||||
|
Steal float64
|
||||||
|
Idle float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// getCpuMetrics calculates detailed CPU usage metrics using cached previous measurements.
|
||||||
|
// It returns percentages for total, user, system, iowait, and steal time.
|
||||||
|
func getCpuMetrics(cacheTimeMs uint16) (CpuMetrics, error) {
|
||||||
|
times, err := cpu.Times(false)
|
||||||
|
if err != nil || len(times) == 0 {
|
||||||
|
return CpuMetrics{}, err
|
||||||
|
}
|
||||||
|
// if cacheTimeMs is not in lastCpuTimes, use 60000 as fallback lastCpuTime
|
||||||
|
if _, ok := lastCpuTimes[cacheTimeMs]; !ok {
|
||||||
|
lastCpuTimes[cacheTimeMs] = lastCpuTimes[60000]
|
||||||
|
}
|
||||||
|
|
||||||
|
t1 := lastCpuTimes[cacheTimeMs]
|
||||||
|
t2 := times[0]
|
||||||
|
|
||||||
|
t1All, _ := getAllBusy(t1)
|
||||||
|
t2All, _ := getAllBusy(t2)
|
||||||
|
|
||||||
|
totalDelta := t2All - t1All
|
||||||
|
if totalDelta <= 0 {
|
||||||
|
return CpuMetrics{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
metrics := CpuMetrics{
|
||||||
|
Total: calculateBusy(t1, t2),
|
||||||
|
User: clampPercent((t2.User - t1.User) / totalDelta * 100),
|
||||||
|
System: clampPercent((t2.System - t1.System) / totalDelta * 100),
|
||||||
|
Iowait: clampPercent((t2.Iowait - t1.Iowait) / totalDelta * 100),
|
||||||
|
Steal: clampPercent((t2.Steal - t1.Steal) / totalDelta * 100),
|
||||||
|
Idle: clampPercent((t2.Idle - t1.Idle) / totalDelta * 100),
|
||||||
|
}
|
||||||
|
|
||||||
|
lastCpuTimes[cacheTimeMs] = times[0]
|
||||||
|
return metrics, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// clampPercent ensures the percentage is between 0 and 100
|
||||||
|
func clampPercent(value float64) float64 {
|
||||||
|
return math.Min(100, math.Max(0, value))
|
||||||
|
}
|
||||||
|
|
||||||
|
// getPerCoreCpuUsage calculates per-core CPU busy usage as integer percentages (0-100).
|
||||||
|
// It uses cached previous measurements for the provided cache interval.
|
||||||
|
func getPerCoreCpuUsage(cacheTimeMs uint16) (system.Uint8Slice, error) {
|
||||||
|
perCoreTimes, err := cpu.Times(true)
|
||||||
|
if err != nil || len(perCoreTimes) == 0 {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize cache if needed
|
||||||
|
if _, ok := lastPerCoreCpuTimes[cacheTimeMs]; !ok {
|
||||||
|
lastPerCoreCpuTimes[cacheTimeMs] = lastPerCoreCpuTimes[60000]
|
||||||
|
}
|
||||||
|
|
||||||
|
lastTimes := lastPerCoreCpuTimes[cacheTimeMs]
|
||||||
|
|
||||||
|
// Limit to the number of cores available in both samples
|
||||||
|
length := len(perCoreTimes)
|
||||||
|
if len(lastTimes) < length {
|
||||||
|
length = len(lastTimes)
|
||||||
|
}
|
||||||
|
|
||||||
|
usage := make([]uint8, length)
|
||||||
|
for i := 0; i < length; i++ {
|
||||||
|
t1 := lastTimes[i]
|
||||||
|
t2 := perCoreTimes[i]
|
||||||
|
usage[i] = uint8(math.Round(calculateBusy(t1, t2)))
|
||||||
|
}
|
||||||
|
|
||||||
|
lastPerCoreCpuTimes[cacheTimeMs] = perCoreTimes
|
||||||
|
return usage, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculateBusy calculates the CPU busy percentage between two time points.
|
||||||
|
// It computes the ratio of busy time to total time elapsed between t1 and t2,
|
||||||
|
// returning a percentage clamped between 0 and 100.
|
||||||
|
func calculateBusy(t1, t2 cpu.TimesStat) float64 {
|
||||||
|
t1All, t1Busy := getAllBusy(t1)
|
||||||
|
t2All, t2Busy := getAllBusy(t2)
|
||||||
|
|
||||||
|
if t2All <= t1All || t2Busy <= t1Busy {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return clampPercent((t2Busy - t1Busy) / (t2All - t1All) * 100)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAllBusy calculates the total CPU time and busy CPU time from CPU times statistics.
|
||||||
|
// On Linux, it excludes guest and guest_nice time from the total to match kernel behavior.
|
||||||
|
// Returns total CPU time and busy CPU time (total minus idle and I/O wait time).
|
||||||
|
func getAllBusy(t cpu.TimesStat) (float64, float64) {
|
||||||
|
tot := t.Total()
|
||||||
|
if runtime.GOOS == "linux" {
|
||||||
|
tot -= t.Guest // Linux 2.6.24+
|
||||||
|
tot -= t.GuestNice // Linux 3.2.0+
|
||||||
|
}
|
||||||
|
|
||||||
|
busy := tot - t.Idle - t.Iowait
|
||||||
|
|
||||||
|
return tot, busy
|
||||||
|
}
|
||||||
@@ -37,6 +37,16 @@ func (t *DeltaTracker[K, V]) Set(id K, value V) {
|
|||||||
t.current[id] = value
|
t.current[id] = value
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Snapshot returns a copy of the current map.
|
||||||
|
// func (t *DeltaTracker[K, V]) Snapshot() map[K]V {
|
||||||
|
// t.RLock()
|
||||||
|
// defer t.RUnlock()
|
||||||
|
|
||||||
|
// copyMap := make(map[K]V, len(t.current))
|
||||||
|
// maps.Copy(copyMap, t.current)
|
||||||
|
// return copyMap
|
||||||
|
// }
|
||||||
|
|
||||||
// Deltas returns a map of all calculated deltas for the current interval.
|
// Deltas returns a map of all calculated deltas for the current interval.
|
||||||
func (t *DeltaTracker[K, V]) Deltas() map[K]V {
|
func (t *DeltaTracker[K, V]) Deltas() map[K]V {
|
||||||
t.RLock()
|
t.RLock()
|
||||||
@@ -53,6 +63,15 @@ func (t *DeltaTracker[K, V]) Deltas() map[K]V {
|
|||||||
return deltas
|
return deltas
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Previous returns the previously recorded value for the given key, if it exists.
|
||||||
|
func (t *DeltaTracker[K, V]) Previous(id K) (V, bool) {
|
||||||
|
t.RLock()
|
||||||
|
defer t.RUnlock()
|
||||||
|
|
||||||
|
value, ok := t.previous[id]
|
||||||
|
return value, ok
|
||||||
|
}
|
||||||
|
|
||||||
// Delta returns the delta for a single key.
|
// Delta returns the delta for a single key.
|
||||||
// Returns 0 if the key doesn't exist or has no previous value.
|
// Returns 0 if the key doesn't exist or has no previous value.
|
||||||
func (t *DeltaTracker[K, V]) Delta(id K) V {
|
func (t *DeltaTracker[K, V]) Delta(id K) V {
|
||||||
|
|||||||
177
agent/disk.go
177
agent/disk.go
@@ -13,11 +13,25 @@ import (
|
|||||||
"github.com/shirou/gopsutil/v4/disk"
|
"github.com/shirou/gopsutil/v4/disk"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
||||||
|
// Returns the device/filesystem part and the custom name part
|
||||||
|
func parseFilesystemEntry(entry string) (device, customName string) {
|
||||||
|
entry = strings.TrimSpace(entry)
|
||||||
|
if parts := strings.SplitN(entry, "__", 2); len(parts) == 2 {
|
||||||
|
device = strings.TrimSpace(parts[0])
|
||||||
|
customName = strings.TrimSpace(parts[1])
|
||||||
|
} else {
|
||||||
|
device = entry
|
||||||
|
}
|
||||||
|
return device, customName
|
||||||
|
}
|
||||||
|
|
||||||
// Sets up the filesystems to monitor for disk usage and I/O.
|
// Sets up the filesystems to monitor for disk usage and I/O.
|
||||||
func (a *Agent) initializeDiskInfo() {
|
func (a *Agent) initializeDiskInfo() {
|
||||||
filesystem, _ := GetEnv("FILESYSTEM")
|
filesystem, _ := GetEnv("FILESYSTEM")
|
||||||
efPath := "/extra-filesystems"
|
efPath := "/extra-filesystems"
|
||||||
hasRoot := false
|
hasRoot := false
|
||||||
|
isWindows := runtime.GOOS == "windows"
|
||||||
|
|
||||||
partitions, err := disk.Partitions(false)
|
partitions, err := disk.Partitions(false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -25,6 +39,13 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
}
|
}
|
||||||
slog.Debug("Disk", "partitions", partitions)
|
slog.Debug("Disk", "partitions", partitions)
|
||||||
|
|
||||||
|
// trim trailing backslash for Windows devices (#1361)
|
||||||
|
if isWindows {
|
||||||
|
for i, p := range partitions {
|
||||||
|
partitions[i].Device = strings.TrimSuffix(p.Device, "\\")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// ioContext := context.WithValue(a.sensorsContext,
|
// ioContext := context.WithValue(a.sensorsContext,
|
||||||
// common.EnvKey, common.EnvMap{common.HostProcEnvKey: "/tmp/testproc"},
|
// common.EnvKey, common.EnvMap{common.HostProcEnvKey: "/tmp/testproc"},
|
||||||
// )
|
// )
|
||||||
@@ -37,9 +58,9 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
slog.Debug("Disk I/O", "diskstats", diskIoCounters)
|
slog.Debug("Disk I/O", "diskstats", diskIoCounters)
|
||||||
|
|
||||||
// Helper function to add a filesystem to fsStats if it doesn't exist
|
// Helper function to add a filesystem to fsStats if it doesn't exist
|
||||||
addFsStat := func(device, mountpoint string, root bool) {
|
addFsStat := func(device, mountpoint string, root bool, customName ...string) {
|
||||||
var key string
|
var key string
|
||||||
if runtime.GOOS == "windows" {
|
if isWindows {
|
||||||
key = device
|
key = device
|
||||||
} else {
|
} else {
|
||||||
key = filepath.Base(device)
|
key = filepath.Base(device)
|
||||||
@@ -66,10 +87,17 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
a.fsStats[key] = &system.FsStats{Root: root, Mountpoint: mountpoint}
|
fsStats := &system.FsStats{Root: root, Mountpoint: mountpoint}
|
||||||
|
if len(customName) > 0 && customName[0] != "" {
|
||||||
|
fsStats.Name = customName[0]
|
||||||
|
}
|
||||||
|
a.fsStats[key] = fsStats
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Get the appropriate root mount point for this system
|
||||||
|
rootMountPoint := a.getRootMountPoint()
|
||||||
|
|
||||||
// Use FILESYSTEM env var to find root filesystem
|
// Use FILESYSTEM env var to find root filesystem
|
||||||
if filesystem != "" {
|
if filesystem != "" {
|
||||||
for _, p := range partitions {
|
for _, p := range partitions {
|
||||||
@@ -86,11 +114,14 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
|
|
||||||
// Add EXTRA_FILESYSTEMS env var values to fsStats
|
// Add EXTRA_FILESYSTEMS env var values to fsStats
|
||||||
if extraFilesystems, exists := GetEnv("EXTRA_FILESYSTEMS"); exists {
|
if extraFilesystems, exists := GetEnv("EXTRA_FILESYSTEMS"); exists {
|
||||||
for _, fs := range strings.Split(extraFilesystems, ",") {
|
for _, fsEntry := range strings.Split(extraFilesystems, ",") {
|
||||||
|
// Parse custom name from format: device__customname
|
||||||
|
fs, customName := parseFilesystemEntry(fsEntry)
|
||||||
|
|
||||||
found := false
|
found := false
|
||||||
for _, p := range partitions {
|
for _, p := range partitions {
|
||||||
if strings.HasSuffix(p.Device, fs) || p.Mountpoint == fs {
|
if strings.HasSuffix(p.Device, fs) || p.Mountpoint == fs {
|
||||||
addFsStat(p.Device, p.Mountpoint, false)
|
addFsStat(p.Device, p.Mountpoint, false, customName)
|
||||||
found = true
|
found = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -98,7 +129,7 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
// if not in partitions, test if we can get disk usage
|
// if not in partitions, test if we can get disk usage
|
||||||
if !found {
|
if !found {
|
||||||
if _, err := disk.Usage(fs); err == nil {
|
if _, err := disk.Usage(fs); err == nil {
|
||||||
addFsStat(filepath.Base(fs), fs, false)
|
addFsStat(filepath.Base(fs), fs, false, customName)
|
||||||
} else {
|
} else {
|
||||||
slog.Error("Invalid filesystem", "name", fs, "err", err)
|
slog.Error("Invalid filesystem", "name", fs, "err", err)
|
||||||
}
|
}
|
||||||
@@ -110,7 +141,7 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
for _, p := range partitions {
|
for _, p := range partitions {
|
||||||
// fmt.Println(p.Device, p.Mountpoint)
|
// fmt.Println(p.Device, p.Mountpoint)
|
||||||
// Binary root fallback or docker root fallback
|
// Binary root fallback or docker root fallback
|
||||||
if !hasRoot && (p.Mountpoint == "/" || (p.Mountpoint == "/etc/hosts" && strings.HasPrefix(p.Device, "/dev"))) {
|
if !hasRoot && (p.Mountpoint == rootMountPoint || (p.Mountpoint == "/etc/hosts" && strings.HasPrefix(p.Device, "/dev"))) {
|
||||||
fs, match := findIoDevice(filepath.Base(p.Device), diskIoCounters, a.fsStats)
|
fs, match := findIoDevice(filepath.Base(p.Device), diskIoCounters, a.fsStats)
|
||||||
if match {
|
if match {
|
||||||
addFsStat(fs, p.Mountpoint, true)
|
addFsStat(fs, p.Mountpoint, true)
|
||||||
@@ -120,7 +151,8 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
|
|
||||||
// Check if device is in /extra-filesystems
|
// Check if device is in /extra-filesystems
|
||||||
if strings.HasPrefix(p.Mountpoint, efPath) {
|
if strings.HasPrefix(p.Mountpoint, efPath) {
|
||||||
addFsStat(p.Device, p.Mountpoint, false)
|
device, customName := parseFilesystemEntry(p.Mountpoint)
|
||||||
|
addFsStat(device, p.Mountpoint, false, customName)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -135,7 +167,8 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
mountpoint := filepath.Join(efPath, folder.Name())
|
mountpoint := filepath.Join(efPath, folder.Name())
|
||||||
slog.Debug("/extra-filesystems", "mountpoint", mountpoint)
|
slog.Debug("/extra-filesystems", "mountpoint", mountpoint)
|
||||||
if !existingMountpoints[mountpoint] {
|
if !existingMountpoints[mountpoint] {
|
||||||
addFsStat(folder.Name(), mountpoint, false)
|
device, customName := parseFilesystemEntry(folder.Name())
|
||||||
|
addFsStat(device, mountpoint, false, customName)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -144,8 +177,8 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
// If no root filesystem set, use fallback
|
// If no root filesystem set, use fallback
|
||||||
if !hasRoot {
|
if !hasRoot {
|
||||||
rootDevice, _ := findIoDevice(filepath.Base(filesystem), diskIoCounters, a.fsStats)
|
rootDevice, _ := findIoDevice(filepath.Base(filesystem), diskIoCounters, a.fsStats)
|
||||||
slog.Info("Root disk", "mountpoint", "/", "io", rootDevice)
|
slog.Info("Root disk", "mountpoint", rootMountPoint, "io", rootDevice)
|
||||||
a.fsStats[rootDevice] = &system.FsStats{Root: true, Mountpoint: "/"}
|
a.fsStats[rootDevice] = &system.FsStats{Root: true, Mountpoint: rootMountPoint}
|
||||||
}
|
}
|
||||||
|
|
||||||
a.initializeDiskIoStats(diskIoCounters)
|
a.initializeDiskIoStats(diskIoCounters)
|
||||||
@@ -189,3 +222,125 @@ func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersS
|
|||||||
a.fsNames = append(a.fsNames, device)
|
a.fsNames = append(a.fsNames, device)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Updates disk usage statistics for all monitored filesystems
|
||||||
|
func (a *Agent) updateDiskUsage(systemStats *system.Stats) {
|
||||||
|
// disk usage
|
||||||
|
for _, stats := range a.fsStats {
|
||||||
|
if d, err := disk.Usage(stats.Mountpoint); err == nil {
|
||||||
|
stats.DiskTotal = bytesToGigabytes(d.Total)
|
||||||
|
stats.DiskUsed = bytesToGigabytes(d.Used)
|
||||||
|
if stats.Root {
|
||||||
|
systemStats.DiskTotal = bytesToGigabytes(d.Total)
|
||||||
|
systemStats.DiskUsed = bytesToGigabytes(d.Used)
|
||||||
|
systemStats.DiskPct = twoDecimals(d.UsedPercent)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// reset stats if error (likely unmounted)
|
||||||
|
slog.Error("Error getting disk stats", "name", stats.Mountpoint, "err", err)
|
||||||
|
stats.DiskTotal = 0
|
||||||
|
stats.DiskUsed = 0
|
||||||
|
stats.TotalRead = 0
|
||||||
|
stats.TotalWrite = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Updates disk I/O statistics for all monitored filesystems
|
||||||
|
func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
||||||
|
// disk i/o (cache-aware per interval)
|
||||||
|
if ioCounters, err := disk.IOCounters(a.fsNames...); err == nil {
|
||||||
|
// Ensure map for this interval exists
|
||||||
|
if _, ok := a.diskPrev[cacheTimeMs]; !ok {
|
||||||
|
a.diskPrev[cacheTimeMs] = make(map[string]prevDisk)
|
||||||
|
}
|
||||||
|
now := time.Now()
|
||||||
|
for name, d := range ioCounters {
|
||||||
|
stats := a.fsStats[d.Name]
|
||||||
|
if stats == nil {
|
||||||
|
// skip devices not tracked
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Previous snapshot for this interval and device
|
||||||
|
prev, hasPrev := a.diskPrev[cacheTimeMs][name]
|
||||||
|
if !hasPrev {
|
||||||
|
// Seed from agent-level fsStats if present, else seed from current
|
||||||
|
prev = prevDisk{readBytes: stats.TotalRead, writeBytes: stats.TotalWrite, at: stats.Time}
|
||||||
|
if prev.at.IsZero() {
|
||||||
|
prev = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
msElapsed := uint64(now.Sub(prev.at).Milliseconds())
|
||||||
|
if msElapsed < 100 {
|
||||||
|
// Avoid division by zero or clock issues; update snapshot and continue
|
||||||
|
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
diskIORead := (d.ReadBytes - prev.readBytes) * 1000 / msElapsed
|
||||||
|
diskIOWrite := (d.WriteBytes - prev.writeBytes) * 1000 / msElapsed
|
||||||
|
readMbPerSecond := bytesToMegabytes(float64(diskIORead))
|
||||||
|
writeMbPerSecond := bytesToMegabytes(float64(diskIOWrite))
|
||||||
|
|
||||||
|
// validate values
|
||||||
|
if readMbPerSecond > 50_000 || writeMbPerSecond > 50_000 {
|
||||||
|
slog.Warn("Invalid disk I/O. Resetting.", "name", d.Name, "read", readMbPerSecond, "write", writeMbPerSecond)
|
||||||
|
// Reset interval snapshot and seed from current
|
||||||
|
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||||
|
// also refresh agent baseline to avoid future negatives
|
||||||
|
a.initializeDiskIoStats(ioCounters)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update per-interval snapshot
|
||||||
|
a.diskPrev[cacheTimeMs][name] = prevDisk{readBytes: d.ReadBytes, writeBytes: d.WriteBytes, at: now}
|
||||||
|
|
||||||
|
// Update global fsStats baseline for cross-interval correctness
|
||||||
|
stats.Time = now
|
||||||
|
stats.TotalRead = d.ReadBytes
|
||||||
|
stats.TotalWrite = d.WriteBytes
|
||||||
|
stats.DiskReadPs = readMbPerSecond
|
||||||
|
stats.DiskWritePs = writeMbPerSecond
|
||||||
|
stats.DiskReadBytes = diskIORead
|
||||||
|
stats.DiskWriteBytes = diskIOWrite
|
||||||
|
|
||||||
|
if stats.Root {
|
||||||
|
systemStats.DiskReadPs = stats.DiskReadPs
|
||||||
|
systemStats.DiskWritePs = stats.DiskWritePs
|
||||||
|
systemStats.DiskIO[0] = diskIORead
|
||||||
|
systemStats.DiskIO[1] = diskIOWrite
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getRootMountPoint returns the appropriate root mount point for the system
|
||||||
|
// For immutable systems like Fedora Silverblue, it returns /sysroot instead of /
|
||||||
|
func (a *Agent) getRootMountPoint() string {
|
||||||
|
// 1. Check if /etc/os-release contains indicators of an immutable system
|
||||||
|
if osReleaseContent, err := os.ReadFile("/etc/os-release"); err == nil {
|
||||||
|
content := string(osReleaseContent)
|
||||||
|
if strings.Contains(content, "fedora") && strings.Contains(content, "silverblue") ||
|
||||||
|
strings.Contains(content, "coreos") ||
|
||||||
|
strings.Contains(content, "flatcar") ||
|
||||||
|
strings.Contains(content, "rhel-atomic") ||
|
||||||
|
strings.Contains(content, "centos-atomic") {
|
||||||
|
// Verify that /sysroot exists before returning it
|
||||||
|
if _, err := os.Stat("/sysroot"); err == nil {
|
||||||
|
return "/sysroot"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Check if /run/ostree is present (ostree-based systems like Silverblue)
|
||||||
|
if _, err := os.Stat("/run/ostree"); err == nil {
|
||||||
|
// Verify that /sysroot exists before returning it
|
||||||
|
if _, err := os.Stat("/sysroot"); err == nil {
|
||||||
|
return "/sysroot"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return "/"
|
||||||
|
}
|
||||||
|
|||||||
235
agent/disk_test.go
Normal file
235
agent/disk_test.go
Normal file
@@ -0,0 +1,235 @@
|
|||||||
|
//go:build testing
|
||||||
|
// +build testing
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/shirou/gopsutil/v4/disk"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestParseFilesystemEntry(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input string
|
||||||
|
expectedFs string
|
||||||
|
expectedName string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "simple device name",
|
||||||
|
input: "sda1",
|
||||||
|
expectedFs: "sda1",
|
||||||
|
expectedName: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "device with custom name",
|
||||||
|
input: "sda1__my-storage",
|
||||||
|
expectedFs: "sda1",
|
||||||
|
expectedName: "my-storage",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "full device path with custom name",
|
||||||
|
input: "/dev/sdb1__backup-drive",
|
||||||
|
expectedFs: "/dev/sdb1",
|
||||||
|
expectedName: "backup-drive",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "NVMe device with custom name",
|
||||||
|
input: "nvme0n1p2__fast-ssd",
|
||||||
|
expectedFs: "nvme0n1p2",
|
||||||
|
expectedName: "fast-ssd",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "whitespace trimmed",
|
||||||
|
input: " sda2__trimmed-name ",
|
||||||
|
expectedFs: "sda2",
|
||||||
|
expectedName: "trimmed-name",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty custom name",
|
||||||
|
input: "sda3__",
|
||||||
|
expectedFs: "sda3",
|
||||||
|
expectedName: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty device name",
|
||||||
|
input: "__just-custom",
|
||||||
|
expectedFs: "",
|
||||||
|
expectedName: "just-custom",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "multiple underscores in custom name",
|
||||||
|
input: "sda1__my_custom_drive",
|
||||||
|
expectedFs: "sda1",
|
||||||
|
expectedName: "my_custom_drive",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "custom name with spaces",
|
||||||
|
input: "sda1__My Storage Drive",
|
||||||
|
expectedFs: "sda1",
|
||||||
|
expectedName: "My Storage Drive",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
fsEntry := strings.TrimSpace(tt.input)
|
||||||
|
var fs, customName string
|
||||||
|
if parts := strings.SplitN(fsEntry, "__", 2); len(parts) == 2 {
|
||||||
|
fs = strings.TrimSpace(parts[0])
|
||||||
|
customName = strings.TrimSpace(parts[1])
|
||||||
|
} else {
|
||||||
|
fs = fsEntry
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, tt.expectedFs, fs)
|
||||||
|
assert.Equal(t, tt.expectedName, customName)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
||||||
|
// Set up environment variables
|
||||||
|
oldEnv := os.Getenv("EXTRA_FILESYSTEMS")
|
||||||
|
defer func() {
|
||||||
|
if oldEnv != "" {
|
||||||
|
os.Setenv("EXTRA_FILESYSTEMS", oldEnv)
|
||||||
|
} else {
|
||||||
|
os.Unsetenv("EXTRA_FILESYSTEMS")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
// Test with custom names
|
||||||
|
os.Setenv("EXTRA_FILESYSTEMS", "sda1__my-storage,/dev/sdb1__backup-drive,nvme0n1p2")
|
||||||
|
|
||||||
|
// Mock disk partitions (we'll just test the parsing logic)
|
||||||
|
// Since the actual disk operations are system-dependent, we'll focus on the parsing
|
||||||
|
testCases := []struct {
|
||||||
|
envValue string
|
||||||
|
expectedFs []string
|
||||||
|
expectedNames map[string]string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
envValue: "sda1__my-storage,sdb1__backup-drive",
|
||||||
|
expectedFs: []string{"sda1", "sdb1"},
|
||||||
|
expectedNames: map[string]string{
|
||||||
|
"sda1": "my-storage",
|
||||||
|
"sdb1": "backup-drive",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
envValue: "sda1,nvme0n1p2__fast-ssd",
|
||||||
|
expectedFs: []string{"sda1", "nvme0n1p2"},
|
||||||
|
expectedNames: map[string]string{
|
||||||
|
"nvme0n1p2": "fast-ssd",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run("env_"+tc.envValue, func(t *testing.T) {
|
||||||
|
os.Setenv("EXTRA_FILESYSTEMS", tc.envValue)
|
||||||
|
|
||||||
|
// Create mock partitions that would match our test cases
|
||||||
|
partitions := []disk.PartitionStat{}
|
||||||
|
for _, fs := range tc.expectedFs {
|
||||||
|
if strings.HasPrefix(fs, "/dev/") {
|
||||||
|
partitions = append(partitions, disk.PartitionStat{
|
||||||
|
Device: fs,
|
||||||
|
Mountpoint: fs,
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
partitions = append(partitions, disk.PartitionStat{
|
||||||
|
Device: "/dev/" + fs,
|
||||||
|
Mountpoint: "/" + fs,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test the parsing logic by calling the relevant part
|
||||||
|
// We'll create a simplified version to test just the parsing
|
||||||
|
extraFilesystems := tc.envValue
|
||||||
|
for _, fsEntry := range strings.Split(extraFilesystems, ",") {
|
||||||
|
// Parse the entry
|
||||||
|
fsEntry = strings.TrimSpace(fsEntry)
|
||||||
|
var fs, customName string
|
||||||
|
if parts := strings.SplitN(fsEntry, "__", 2); len(parts) == 2 {
|
||||||
|
fs = strings.TrimSpace(parts[0])
|
||||||
|
customName = strings.TrimSpace(parts[1])
|
||||||
|
} else {
|
||||||
|
fs = fsEntry
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify the device is in our expected list
|
||||||
|
assert.Contains(t, tc.expectedFs, fs, "parsed device should be in expected list")
|
||||||
|
|
||||||
|
// Check if custom name should exist
|
||||||
|
if expectedName, exists := tc.expectedNames[fs]; exists {
|
||||||
|
assert.Equal(t, expectedName, customName, "custom name should match expected")
|
||||||
|
} else {
|
||||||
|
assert.Empty(t, customName, "custom name should be empty when not expected")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFsStatsWithCustomNames(t *testing.T) {
|
||||||
|
// Test that FsStats properly stores custom names
|
||||||
|
fsStats := &system.FsStats{
|
||||||
|
Mountpoint: "/mnt/storage",
|
||||||
|
Name: "my-custom-storage",
|
||||||
|
DiskTotal: 100.0,
|
||||||
|
DiskUsed: 50.0,
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, "my-custom-storage", fsStats.Name)
|
||||||
|
assert.Equal(t, "/mnt/storage", fsStats.Mountpoint)
|
||||||
|
assert.Equal(t, 100.0, fsStats.DiskTotal)
|
||||||
|
assert.Equal(t, 50.0, fsStats.DiskUsed)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestExtraFsKeyGeneration(t *testing.T) {
|
||||||
|
// Test the logic for generating ExtraFs keys with custom names
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
deviceName string
|
||||||
|
customName string
|
||||||
|
expectedKey string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "with custom name",
|
||||||
|
deviceName: "sda1",
|
||||||
|
customName: "my-storage",
|
||||||
|
expectedKey: "my-storage",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "without custom name",
|
||||||
|
deviceName: "sda1",
|
||||||
|
customName: "",
|
||||||
|
expectedKey: "sda1",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty custom name falls back to device",
|
||||||
|
deviceName: "nvme0n1p2",
|
||||||
|
customName: "",
|
||||||
|
expectedKey: "nvme0n1p2",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
// Simulate the key generation logic from agent.go
|
||||||
|
key := tc.deviceName
|
||||||
|
if tc.customName != "" {
|
||||||
|
key = tc.customName
|
||||||
|
}
|
||||||
|
assert.Equal(t, tc.expectedKey, key)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
537
agent/docker.go
537
agent/docker.go
@@ -3,28 +3,50 @@ package agent
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/binary"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/deltatracker"
|
||||||
"github.com/henrygd/beszel/internal/entities/container"
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
|
|
||||||
"github.com/blang/semver"
|
"github.com/blang/semver"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Docker API timeout in milliseconds
|
||||||
|
dockerTimeoutMs = 2100
|
||||||
|
// Maximum realistic network speed (5 GB/s) to detect bad deltas
|
||||||
|
maxNetworkSpeedBps uint64 = 5e9
|
||||||
|
// Maximum conceivable memory usage of a container (100TB) to detect bad memory stats
|
||||||
|
maxMemoryUsage uint64 = 100 * 1024 * 1024 * 1024 * 1024
|
||||||
|
// Number of log lines to request when fetching container logs
|
||||||
|
dockerLogsTail = 200
|
||||||
|
// Maximum size of a single log frame (1MB) to prevent memory exhaustion
|
||||||
|
// A single log line larger than 1MB is likely an error or misconfiguration
|
||||||
|
maxLogFrameSize = 1024 * 1024
|
||||||
|
// Maximum total log content size (5MB) to prevent memory exhaustion
|
||||||
|
// This provides a reasonable limit for network transfer and browser rendering
|
||||||
|
maxTotalLogSize = 5 * 1024 * 1024
|
||||||
|
)
|
||||||
|
|
||||||
type dockerManager struct {
|
type dockerManager struct {
|
||||||
client *http.Client // Client to query Docker API
|
client *http.Client // Client to query Docker API
|
||||||
wg sync.WaitGroup // WaitGroup to wait for all goroutines to finish
|
wg sync.WaitGroup // WaitGroup to wait for all goroutines to finish
|
||||||
sem chan struct{} // Semaphore to limit concurrent container requests
|
sem chan struct{} // Semaphore to limit concurrent container requests
|
||||||
containerStatsMutex sync.RWMutex // Mutex to prevent concurrent access to containerStatsMap
|
containerStatsMutex sync.RWMutex // Mutex to prevent concurrent access to containerStatsMap
|
||||||
apiContainerList []*container.ApiInfo // List of containers from Docker API (no pointer)
|
apiContainerList []*container.ApiInfo // List of containers from Docker API
|
||||||
containerStatsMap map[string]*container.Stats // Keeps track of container stats
|
containerStatsMap map[string]*container.Stats // Keeps track of container stats
|
||||||
validIds map[string]struct{} // Map of valid container ids, used to prune invalid containers from containerStatsMap
|
validIds map[string]struct{} // Map of valid container ids, used to prune invalid containers from containerStatsMap
|
||||||
goodDockerVersion bool // Whether docker version is at least 25.0.0 (one-shot works correctly)
|
goodDockerVersion bool // Whether docker version is at least 25.0.0 (one-shot works correctly)
|
||||||
@@ -32,6 +54,18 @@ type dockerManager struct {
|
|||||||
buf *bytes.Buffer // Buffer to store and read response bodies
|
buf *bytes.Buffer // Buffer to store and read response bodies
|
||||||
decoder *json.Decoder // Reusable JSON decoder that reads from buf
|
decoder *json.Decoder // Reusable JSON decoder that reads from buf
|
||||||
apiStats *container.ApiStats // Reusable API stats object
|
apiStats *container.ApiStats // Reusable API stats object
|
||||||
|
excludeContainers []string // Patterns to exclude containers by name
|
||||||
|
|
||||||
|
// Cache-time-aware tracking for CPU stats (similar to cpu.go)
|
||||||
|
// Maps cache time intervals to container-specific CPU usage tracking
|
||||||
|
lastCpuContainer map[uint16]map[string]uint64 // cacheTimeMs -> containerId -> last cpu container usage
|
||||||
|
lastCpuSystem map[uint16]map[string]uint64 // cacheTimeMs -> containerId -> last cpu system usage
|
||||||
|
lastCpuReadTime map[uint16]map[string]time.Time // cacheTimeMs -> containerId -> last read time (Windows)
|
||||||
|
|
||||||
|
// Network delta trackers - one per cache time to avoid interference
|
||||||
|
// cacheTimeMs -> DeltaTracker for network bytes sent/received
|
||||||
|
networkSentTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||||
|
networkRecvTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||||
}
|
}
|
||||||
|
|
||||||
// userAgentRoundTripper is a custom http.RoundTripper that adds a User-Agent header to all requests
|
// userAgentRoundTripper is a custom http.RoundTripper that adds a User-Agent header to all requests
|
||||||
@@ -62,8 +96,21 @@ func (d *dockerManager) dequeue() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns stats for all running containers
|
// shouldExcludeContainer checks if a container name matches any exclusion pattern
|
||||||
func (dm *dockerManager) getDockerStats() ([]*container.Stats, error) {
|
func (dm *dockerManager) shouldExcludeContainer(name string) bool {
|
||||||
|
if len(dm.excludeContainers) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, pattern := range dm.excludeContainers {
|
||||||
|
if match, _ := path.Match(pattern, name); match {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns stats for all running containers with cache-time-aware delta tracking
|
||||||
|
func (dm *dockerManager) getDockerStats(cacheTimeMs uint16) ([]*container.Stats, error) {
|
||||||
resp, err := dm.client.Get("http://localhost/containers/json")
|
resp, err := dm.client.Get("http://localhost/containers/json")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -87,9 +134,15 @@ func (dm *dockerManager) getDockerStats() ([]*container.Stats, error) {
|
|||||||
|
|
||||||
var failedContainers []*container.ApiInfo
|
var failedContainers []*container.ApiInfo
|
||||||
|
|
||||||
for i := range dm.apiContainerList {
|
for _, ctr := range dm.apiContainerList {
|
||||||
ctr := dm.apiContainerList[i]
|
|
||||||
ctr.IdShort = ctr.Id[:12]
|
ctr.IdShort = ctr.Id[:12]
|
||||||
|
|
||||||
|
// Skip this container if it matches the exclusion pattern
|
||||||
|
if dm.shouldExcludeContainer(ctr.Names[0][1:]) {
|
||||||
|
slog.Debug("Excluding container", "name", ctr.Names[0][1:])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
dm.validIds[ctr.IdShort] = struct{}{}
|
dm.validIds[ctr.IdShort] = struct{}{}
|
||||||
// check if container is less than 1 minute old (possible restart)
|
// check if container is less than 1 minute old (possible restart)
|
||||||
// note: can't use Created field because it's not updated on restart
|
// note: can't use Created field because it's not updated on restart
|
||||||
@@ -98,9 +151,9 @@ func (dm *dockerManager) getDockerStats() ([]*container.Stats, error) {
|
|||||||
dm.deleteContainerStatsSync(ctr.IdShort)
|
dm.deleteContainerStatsSync(ctr.IdShort)
|
||||||
}
|
}
|
||||||
dm.queue()
|
dm.queue()
|
||||||
go func() {
|
go func(ctr *container.ApiInfo) {
|
||||||
defer dm.dequeue()
|
defer dm.dequeue()
|
||||||
err := dm.updateContainerStats(ctr)
|
err := dm.updateContainerStats(ctr, cacheTimeMs)
|
||||||
// if error, delete from map and add to failed list to retry
|
// if error, delete from map and add to failed list to retry
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dm.containerStatsMutex.Lock()
|
dm.containerStatsMutex.Lock()
|
||||||
@@ -108,7 +161,7 @@ func (dm *dockerManager) getDockerStats() ([]*container.Stats, error) {
|
|||||||
failedContainers = append(failedContainers, ctr)
|
failedContainers = append(failedContainers, ctr)
|
||||||
dm.containerStatsMutex.Unlock()
|
dm.containerStatsMutex.Unlock()
|
||||||
}
|
}
|
||||||
}()
|
}(ctr)
|
||||||
}
|
}
|
||||||
|
|
||||||
dm.wg.Wait()
|
dm.wg.Wait()
|
||||||
@@ -119,13 +172,12 @@ func (dm *dockerManager) getDockerStats() ([]*container.Stats, error) {
|
|||||||
for i := range failedContainers {
|
for i := range failedContainers {
|
||||||
ctr := failedContainers[i]
|
ctr := failedContainers[i]
|
||||||
dm.queue()
|
dm.queue()
|
||||||
go func() {
|
go func(ctr *container.ApiInfo) {
|
||||||
defer dm.dequeue()
|
defer dm.dequeue()
|
||||||
err = dm.updateContainerStats(ctr)
|
if err2 := dm.updateContainerStats(ctr, cacheTimeMs); err2 != nil {
|
||||||
if err != nil {
|
slog.Error("Error getting container stats", "err", err2)
|
||||||
slog.Error("Error getting container stats", "err", err)
|
|
||||||
}
|
}
|
||||||
}()
|
}(ctr)
|
||||||
}
|
}
|
||||||
dm.wg.Wait()
|
dm.wg.Wait()
|
||||||
}
|
}
|
||||||
@@ -140,18 +192,191 @@ func (dm *dockerManager) getDockerStats() ([]*container.Stats, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// prepare network trackers for next interval for this cache time
|
||||||
|
dm.cycleNetworkDeltasForCacheTime(cacheTimeMs)
|
||||||
|
|
||||||
return stats, nil
|
return stats, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Updates stats for individual container
|
// initializeCpuTracking initializes CPU tracking maps for a specific cache time interval
|
||||||
func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo) error {
|
func (dm *dockerManager) initializeCpuTracking(cacheTimeMs uint16) {
|
||||||
|
// Initialize cache time maps if they don't exist
|
||||||
|
if dm.lastCpuContainer[cacheTimeMs] == nil {
|
||||||
|
dm.lastCpuContainer[cacheTimeMs] = make(map[string]uint64)
|
||||||
|
}
|
||||||
|
if dm.lastCpuSystem[cacheTimeMs] == nil {
|
||||||
|
dm.lastCpuSystem[cacheTimeMs] = make(map[string]uint64)
|
||||||
|
}
|
||||||
|
// Ensure the outer map exists before indexing
|
||||||
|
if dm.lastCpuReadTime == nil {
|
||||||
|
dm.lastCpuReadTime = make(map[uint16]map[string]time.Time)
|
||||||
|
}
|
||||||
|
if dm.lastCpuReadTime[cacheTimeMs] == nil {
|
||||||
|
dm.lastCpuReadTime[cacheTimeMs] = make(map[string]time.Time)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getCpuPreviousValues returns previous CPU values for a container and cache time interval
|
||||||
|
func (dm *dockerManager) getCpuPreviousValues(cacheTimeMs uint16, containerId string) (uint64, uint64) {
|
||||||
|
return dm.lastCpuContainer[cacheTimeMs][containerId], dm.lastCpuSystem[cacheTimeMs][containerId]
|
||||||
|
}
|
||||||
|
|
||||||
|
// setCpuCurrentValues stores current CPU values for a container and cache time interval
|
||||||
|
func (dm *dockerManager) setCpuCurrentValues(cacheTimeMs uint16, containerId string, cpuContainer, cpuSystem uint64) {
|
||||||
|
dm.lastCpuContainer[cacheTimeMs][containerId] = cpuContainer
|
||||||
|
dm.lastCpuSystem[cacheTimeMs][containerId] = cpuSystem
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculateMemoryUsage calculates memory usage from Docker API stats
|
||||||
|
func calculateMemoryUsage(apiStats *container.ApiStats, isWindows bool) (uint64, error) {
|
||||||
|
if isWindows {
|
||||||
|
return apiStats.MemoryStats.PrivateWorkingSet, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
memCache := apiStats.MemoryStats.Stats.InactiveFile
|
||||||
|
if memCache == 0 {
|
||||||
|
memCache = apiStats.MemoryStats.Stats.Cache
|
||||||
|
}
|
||||||
|
|
||||||
|
usedDelta := apiStats.MemoryStats.Usage - memCache
|
||||||
|
if usedDelta <= 0 || usedDelta > maxMemoryUsage {
|
||||||
|
return 0, fmt.Errorf("bad memory stats")
|
||||||
|
}
|
||||||
|
|
||||||
|
return usedDelta, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getNetworkTracker returns the DeltaTracker for a specific cache time, creating it if needed
|
||||||
|
func (dm *dockerManager) getNetworkTracker(cacheTimeMs uint16, isSent bool) *deltatracker.DeltaTracker[string, uint64] {
|
||||||
|
var trackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||||
|
if isSent {
|
||||||
|
trackers = dm.networkSentTrackers
|
||||||
|
} else {
|
||||||
|
trackers = dm.networkRecvTrackers
|
||||||
|
}
|
||||||
|
|
||||||
|
if trackers[cacheTimeMs] == nil {
|
||||||
|
trackers[cacheTimeMs] = deltatracker.NewDeltaTracker[string, uint64]()
|
||||||
|
}
|
||||||
|
|
||||||
|
return trackers[cacheTimeMs]
|
||||||
|
}
|
||||||
|
|
||||||
|
// cycleNetworkDeltasForCacheTime cycles the network delta trackers for a specific cache time
|
||||||
|
func (dm *dockerManager) cycleNetworkDeltasForCacheTime(cacheTimeMs uint16) {
|
||||||
|
if dm.networkSentTrackers[cacheTimeMs] != nil {
|
||||||
|
dm.networkSentTrackers[cacheTimeMs].Cycle()
|
||||||
|
}
|
||||||
|
if dm.networkRecvTrackers[cacheTimeMs] != nil {
|
||||||
|
dm.networkRecvTrackers[cacheTimeMs].Cycle()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculateNetworkStats calculates network sent/receive deltas using DeltaTracker
|
||||||
|
func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats *container.ApiStats, stats *container.Stats, initialized bool, name string, cacheTimeMs uint16) (uint64, uint64) {
|
||||||
|
var total_sent, total_recv uint64
|
||||||
|
for _, v := range apiStats.Networks {
|
||||||
|
total_sent += v.TxBytes
|
||||||
|
total_recv += v.RxBytes
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the DeltaTracker for this specific cache time
|
||||||
|
sentTracker := dm.getNetworkTracker(cacheTimeMs, true)
|
||||||
|
recvTracker := dm.getNetworkTracker(cacheTimeMs, false)
|
||||||
|
|
||||||
|
// Set current values in the cache-time-specific DeltaTracker
|
||||||
|
sentTracker.Set(ctr.IdShort, total_sent)
|
||||||
|
recvTracker.Set(ctr.IdShort, total_recv)
|
||||||
|
|
||||||
|
// Get deltas (bytes since last measurement)
|
||||||
|
sent_delta_raw := sentTracker.Delta(ctr.IdShort)
|
||||||
|
recv_delta_raw := recvTracker.Delta(ctr.IdShort)
|
||||||
|
|
||||||
|
// Calculate bytes per second independently for Tx and Rx if we have previous data
|
||||||
|
var sent_delta, recv_delta uint64
|
||||||
|
if initialized {
|
||||||
|
millisecondsElapsed := uint64(time.Since(stats.PrevReadTime).Milliseconds())
|
||||||
|
if millisecondsElapsed > 0 {
|
||||||
|
if sent_delta_raw > 0 {
|
||||||
|
sent_delta = sent_delta_raw * 1000 / millisecondsElapsed
|
||||||
|
if sent_delta > maxNetworkSpeedBps {
|
||||||
|
slog.Warn("Bad network delta", "container", name)
|
||||||
|
sent_delta = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if recv_delta_raw > 0 {
|
||||||
|
recv_delta = recv_delta_raw * 1000 / millisecondsElapsed
|
||||||
|
if recv_delta > maxNetworkSpeedBps {
|
||||||
|
slog.Warn("Bad network delta", "container", name)
|
||||||
|
recv_delta = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return sent_delta, recv_delta
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateCpuPercentage checks if CPU percentage is within valid range
|
||||||
|
func validateCpuPercentage(cpuPct float64, containerName string) error {
|
||||||
|
if cpuPct > 100 {
|
||||||
|
return fmt.Errorf("%s cpu pct greater than 100: %+v", containerName, cpuPct)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateContainerStatsValues updates the final stats values
|
||||||
|
func updateContainerStatsValues(stats *container.Stats, cpuPct float64, usedMemory uint64, sent_delta, recv_delta uint64, readTime time.Time) {
|
||||||
|
stats.Cpu = twoDecimals(cpuPct)
|
||||||
|
stats.Mem = bytesToMegabytes(float64(usedMemory))
|
||||||
|
stats.NetworkSent = bytesToMegabytes(float64(sent_delta))
|
||||||
|
stats.NetworkRecv = bytesToMegabytes(float64(recv_delta))
|
||||||
|
stats.PrevReadTime = readTime
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseDockerStatus(status string) (string, container.DockerHealth) {
|
||||||
|
trimmed := strings.TrimSpace(status)
|
||||||
|
if trimmed == "" {
|
||||||
|
return "", container.DockerHealthNone
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove "About " from status
|
||||||
|
trimmed = strings.Replace(trimmed, "About ", "", 1)
|
||||||
|
|
||||||
|
openIdx := strings.LastIndex(trimmed, "(")
|
||||||
|
if openIdx == -1 || !strings.HasSuffix(trimmed, ")") {
|
||||||
|
return trimmed, container.DockerHealthNone
|
||||||
|
}
|
||||||
|
|
||||||
|
statusText := strings.TrimSpace(trimmed[:openIdx])
|
||||||
|
if statusText == "" {
|
||||||
|
statusText = trimmed
|
||||||
|
}
|
||||||
|
|
||||||
|
healthText := strings.ToLower(strings.TrimSpace(strings.TrimSuffix(trimmed[openIdx+1:], ")")))
|
||||||
|
// Some Docker statuses include a "health:" prefix inside the parentheses.
|
||||||
|
// Strip it so it maps correctly to the known health states.
|
||||||
|
if colonIdx := strings.IndexRune(healthText, ':'); colonIdx != -1 {
|
||||||
|
prefix := strings.TrimSpace(healthText[:colonIdx])
|
||||||
|
if prefix == "health" || prefix == "health status" {
|
||||||
|
healthText = strings.TrimSpace(healthText[colonIdx+1:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if health, ok := container.DockerHealthStrings[healthText]; ok {
|
||||||
|
return statusText, health
|
||||||
|
}
|
||||||
|
|
||||||
|
return trimmed, container.DockerHealthNone
|
||||||
|
}
|
||||||
|
|
||||||
|
// Updates stats for individual container with cache-time-aware delta tracking
|
||||||
|
func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeMs uint16) error {
|
||||||
name := ctr.Names[0][1:]
|
name := ctr.Names[0][1:]
|
||||||
|
|
||||||
resp, err := dm.client.Get("http://localhost/containers/" + ctr.IdShort + "/stats?stream=0&one-shot=1")
|
resp, err := dm.client.Get(fmt.Sprintf("http://localhost/containers/%s/stats?stream=0&one-shot=1", ctr.IdShort))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer resp.Body.Close()
|
|
||||||
|
|
||||||
dm.containerStatsMutex.Lock()
|
dm.containerStatsMutex.Lock()
|
||||||
defer dm.containerStatsMutex.Unlock()
|
defer dm.containerStatsMutex.Unlock()
|
||||||
@@ -159,82 +384,74 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo) error {
|
|||||||
// add empty values if they doesn't exist in map
|
// add empty values if they doesn't exist in map
|
||||||
stats, initialized := dm.containerStatsMap[ctr.IdShort]
|
stats, initialized := dm.containerStatsMap[ctr.IdShort]
|
||||||
if !initialized {
|
if !initialized {
|
||||||
stats = &container.Stats{Name: name}
|
stats = &container.Stats{Name: name, Id: ctr.IdShort, Image: ctr.Image}
|
||||||
dm.containerStatsMap[ctr.IdShort] = stats
|
dm.containerStatsMap[ctr.IdShort] = stats
|
||||||
}
|
}
|
||||||
|
|
||||||
|
stats.Id = ctr.IdShort
|
||||||
|
|
||||||
|
statusText, health := parseDockerStatus(ctr.Status)
|
||||||
|
stats.Status = statusText
|
||||||
|
stats.Health = health
|
||||||
|
|
||||||
// reset current stats
|
// reset current stats
|
||||||
stats.Cpu = 0
|
stats.Cpu = 0
|
||||||
stats.Mem = 0
|
stats.Mem = 0
|
||||||
stats.NetworkSent = 0
|
stats.NetworkSent = 0
|
||||||
stats.NetworkRecv = 0
|
stats.NetworkRecv = 0
|
||||||
|
|
||||||
// docker host container stats response
|
|
||||||
// res := dm.getApiStats()
|
|
||||||
// defer dm.putApiStats(res)
|
|
||||||
//
|
|
||||||
|
|
||||||
res := dm.apiStats
|
res := dm.apiStats
|
||||||
res.Networks = nil
|
res.Networks = nil
|
||||||
if err := dm.decode(resp, res); err != nil {
|
if err := dm.decode(resp, res); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// calculate cpu and memory stats
|
// Initialize CPU tracking for this cache time interval
|
||||||
var usedMemory uint64
|
dm.initializeCpuTracking(cacheTimeMs)
|
||||||
|
|
||||||
|
// Get previous CPU values
|
||||||
|
prevCpuContainer, prevCpuSystem := dm.getCpuPreviousValues(cacheTimeMs, ctr.IdShort)
|
||||||
|
|
||||||
|
// Calculate CPU percentage based on platform
|
||||||
var cpuPct float64
|
var cpuPct float64
|
||||||
|
|
||||||
// store current cpu stats
|
|
||||||
prevCpuContainer, prevCpuSystem := stats.CpuContainer, stats.CpuSystem
|
|
||||||
stats.CpuContainer = res.CPUStats.CPUUsage.TotalUsage
|
|
||||||
stats.CpuSystem = res.CPUStats.SystemUsage
|
|
||||||
|
|
||||||
if dm.isWindows {
|
if dm.isWindows {
|
||||||
usedMemory = res.MemoryStats.PrivateWorkingSet
|
prevRead := dm.lastCpuReadTime[cacheTimeMs][ctr.IdShort]
|
||||||
cpuPct = res.CalculateCpuPercentWindows(prevCpuContainer, stats.PrevReadTime)
|
cpuPct = res.CalculateCpuPercentWindows(prevCpuContainer, prevRead)
|
||||||
} else {
|
} else {
|
||||||
// check if container has valid data, otherwise may be in restart loop (#103)
|
|
||||||
if res.MemoryStats.Usage == 0 {
|
|
||||||
return fmt.Errorf("%s - no memory stats - see https://github.com/henrygd/beszel/issues/144", name)
|
|
||||||
}
|
|
||||||
memCache := res.MemoryStats.Stats.InactiveFile
|
|
||||||
if memCache == 0 {
|
|
||||||
memCache = res.MemoryStats.Stats.Cache
|
|
||||||
}
|
|
||||||
usedMemory = res.MemoryStats.Usage - memCache
|
|
||||||
|
|
||||||
cpuPct = res.CalculateCpuPercentLinux(prevCpuContainer, prevCpuSystem)
|
cpuPct = res.CalculateCpuPercentLinux(prevCpuContainer, prevCpuSystem)
|
||||||
}
|
}
|
||||||
|
|
||||||
if cpuPct > 100 {
|
// Calculate memory usage
|
||||||
return fmt.Errorf("%s cpu pct greater than 100: %+v", name, cpuPct)
|
usedMemory, err := calculateMemoryUsage(res, dm.isWindows)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("%s - %w - see https://github.com/henrygd/beszel/issues/144", name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// network
|
// Store current CPU stats for next calculation
|
||||||
|
currentCpuContainer := res.CPUStats.CPUUsage.TotalUsage
|
||||||
|
currentCpuSystem := res.CPUStats.SystemUsage
|
||||||
|
dm.setCpuCurrentValues(cacheTimeMs, ctr.IdShort, currentCpuContainer, currentCpuSystem)
|
||||||
|
|
||||||
|
// Validate CPU percentage
|
||||||
|
if err := validateCpuPercentage(cpuPct, name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate network stats using DeltaTracker
|
||||||
|
sent_delta, recv_delta := dm.calculateNetworkStats(ctr, res, stats, initialized, name, cacheTimeMs)
|
||||||
|
|
||||||
|
// Store current network values for legacy compatibility
|
||||||
var total_sent, total_recv uint64
|
var total_sent, total_recv uint64
|
||||||
for _, v := range res.Networks {
|
for _, v := range res.Networks {
|
||||||
total_sent += v.TxBytes
|
total_sent += v.TxBytes
|
||||||
total_recv += v.RxBytes
|
total_recv += v.RxBytes
|
||||||
}
|
}
|
||||||
var sent_delta, recv_delta uint64
|
|
||||||
millisecondsElapsed := uint64(time.Since(stats.PrevReadTime).Milliseconds())
|
|
||||||
if initialized && millisecondsElapsed > 0 {
|
|
||||||
// get bytes per second
|
|
||||||
sent_delta = (total_sent - stats.PrevNet.Sent) * 1000 / millisecondsElapsed
|
|
||||||
recv_delta = (total_recv - stats.PrevNet.Recv) * 1000 / millisecondsElapsed
|
|
||||||
// check for unrealistic network values (> 5GB/s)
|
|
||||||
if sent_delta > 5e9 || recv_delta > 5e9 {
|
|
||||||
slog.Warn("Bad network delta", "container", name)
|
|
||||||
sent_delta, recv_delta = 0, 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stats.PrevNet.Sent, stats.PrevNet.Recv = total_sent, total_recv
|
stats.PrevNet.Sent, stats.PrevNet.Recv = total_sent, total_recv
|
||||||
|
|
||||||
stats.Cpu = twoDecimals(cpuPct)
|
// Update final stats values
|
||||||
stats.Mem = bytesToMegabytes(float64(usedMemory))
|
updateContainerStatsValues(stats, cpuPct, usedMemory, sent_delta, recv_delta, res.Read)
|
||||||
stats.NetworkSent = bytesToMegabytes(float64(sent_delta))
|
// store per-cache-time read time for Windows CPU percent calc
|
||||||
stats.NetworkRecv = bytesToMegabytes(float64(recv_delta))
|
dm.lastCpuReadTime[cacheTimeMs][ctr.IdShort] = res.Read
|
||||||
stats.PrevReadTime = res.Read
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -244,6 +461,15 @@ func (dm *dockerManager) deleteContainerStatsSync(id string) {
|
|||||||
dm.containerStatsMutex.Lock()
|
dm.containerStatsMutex.Lock()
|
||||||
defer dm.containerStatsMutex.Unlock()
|
defer dm.containerStatsMutex.Unlock()
|
||||||
delete(dm.containerStatsMap, id)
|
delete(dm.containerStatsMap, id)
|
||||||
|
for ct := range dm.lastCpuContainer {
|
||||||
|
delete(dm.lastCpuContainer[ct], id)
|
||||||
|
}
|
||||||
|
for ct := range dm.lastCpuSystem {
|
||||||
|
delete(dm.lastCpuSystem[ct], id)
|
||||||
|
}
|
||||||
|
for ct := range dm.lastCpuReadTime {
|
||||||
|
delete(dm.lastCpuReadTime[ct], id)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates a new http client for Docker or Podman API
|
// Creates a new http client for Docker or Podman API
|
||||||
@@ -283,7 +509,7 @@ func newDockerManager(a *Agent) *dockerManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// configurable timeout
|
// configurable timeout
|
||||||
timeout := time.Millisecond * 2100
|
timeout := time.Millisecond * time.Duration(dockerTimeoutMs)
|
||||||
if t, set := GetEnv("DOCKER_TIMEOUT"); set {
|
if t, set := GetEnv("DOCKER_TIMEOUT"); set {
|
||||||
timeout, err = time.ParseDuration(t)
|
timeout, err = time.ParseDuration(t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -299,6 +525,19 @@ func newDockerManager(a *Agent) *dockerManager {
|
|||||||
userAgent: "Docker-Client/",
|
userAgent: "Docker-Client/",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Read container exclusion patterns from environment variable
|
||||||
|
var excludeContainers []string
|
||||||
|
if excludeStr, set := GetEnv("EXCLUDE_CONTAINERS"); set && excludeStr != "" {
|
||||||
|
parts := strings.SplitSeq(excludeStr, ",")
|
||||||
|
for part := range parts {
|
||||||
|
trimmed := strings.TrimSpace(part)
|
||||||
|
if trimmed != "" {
|
||||||
|
excludeContainers = append(excludeContainers, trimmed)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
slog.Info("EXCLUDE_CONTAINERS", "patterns", excludeContainers)
|
||||||
|
}
|
||||||
|
|
||||||
manager := &dockerManager{
|
manager := &dockerManager{
|
||||||
client: &http.Client{
|
client: &http.Client{
|
||||||
Timeout: timeout,
|
Timeout: timeout,
|
||||||
@@ -308,6 +547,14 @@ func newDockerManager(a *Agent) *dockerManager {
|
|||||||
sem: make(chan struct{}, 5),
|
sem: make(chan struct{}, 5),
|
||||||
apiContainerList: []*container.ApiInfo{},
|
apiContainerList: []*container.ApiInfo{},
|
||||||
apiStats: &container.ApiStats{},
|
apiStats: &container.ApiStats{},
|
||||||
|
excludeContainers: excludeContainers,
|
||||||
|
|
||||||
|
// Initialize cache-time-aware tracking structures
|
||||||
|
lastCpuContainer: make(map[uint16]map[string]uint64),
|
||||||
|
lastCpuSystem: make(map[uint16]map[string]uint64),
|
||||||
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
}
|
}
|
||||||
|
|
||||||
// If using podman, return client
|
// If using podman, return client
|
||||||
@@ -317,28 +564,49 @@ func newDockerManager(a *Agent) *dockerManager {
|
|||||||
return manager
|
return manager
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check docker version
|
// this can take up to 5 seconds with retry, so run in goroutine
|
||||||
// (versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch)
|
go manager.checkDockerVersion()
|
||||||
|
|
||||||
|
// give version check a chance to complete before returning
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
|
||||||
|
return manager
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkDockerVersion checks Docker version and sets goodDockerVersion if at least 25.0.0.
|
||||||
|
// Versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch.
|
||||||
|
func (dm *dockerManager) checkDockerVersion() {
|
||||||
|
var err error
|
||||||
|
var resp *http.Response
|
||||||
var versionInfo struct {
|
var versionInfo struct {
|
||||||
Version string `json:"Version"`
|
Version string `json:"Version"`
|
||||||
}
|
}
|
||||||
resp, err := manager.client.Get("http://localhost/version")
|
const versionMaxTries = 2
|
||||||
|
for i := 1; i <= versionMaxTries; i++ {
|
||||||
|
resp, err = dm.client.Get("http://localhost/version")
|
||||||
|
if err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if resp != nil {
|
||||||
|
resp.Body.Close()
|
||||||
|
}
|
||||||
|
if i < versionMaxTries {
|
||||||
|
slog.Debug("Failed to get Docker version; retrying", "attempt", i, "error", err)
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
}
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return manager
|
return
|
||||||
}
|
}
|
||||||
|
if err := dm.decode(resp, &versionInfo); err != nil {
|
||||||
if err := manager.decode(resp, &versionInfo); err != nil {
|
return
|
||||||
return manager
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// if version > 24, one-shot works correctly and we can limit concurrent operations
|
// if version > 24, one-shot works correctly and we can limit concurrent operations
|
||||||
if dockerVersion, err := semver.Parse(versionInfo.Version); err == nil && dockerVersion.Major > 24 {
|
if dockerVersion, err := semver.Parse(versionInfo.Version); err == nil && dockerVersion.Major > 24 {
|
||||||
manager.goodDockerVersion = true
|
dm.goodDockerVersion = true
|
||||||
} else {
|
} else {
|
||||||
slog.Info(fmt.Sprintf("Docker %s is outdated. Upgrade if possible. See https://github.com/henrygd/beszel/issues/58", versionInfo.Version))
|
slog.Info(fmt.Sprintf("Docker %s is outdated. Upgrade if possible. See https://github.com/henrygd/beszel/issues/58", versionInfo.Version))
|
||||||
}
|
}
|
||||||
|
|
||||||
return manager
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decodes Docker API JSON response using a reusable buffer and decoder. Not thread safe.
|
// Decodes Docker API JSON response using a reusable buffer and decoder. Not thread safe.
|
||||||
@@ -368,3 +636,122 @@ func getDockerHost() string {
|
|||||||
}
|
}
|
||||||
return scheme + socks[0]
|
return scheme + socks[0]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getContainerInfo fetches the inspection data for a container
|
||||||
|
func (dm *dockerManager) getContainerInfo(ctx context.Context, containerID string) ([]byte, error) {
|
||||||
|
endpoint := fmt.Sprintf("http://localhost/containers/%s/json", containerID)
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := dm.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||||
|
return nil, fmt.Errorf("container info request failed: %s: %s", resp.Status, strings.TrimSpace(string(body)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove sensitive environment variables from Config.Env
|
||||||
|
var containerInfo map[string]any
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&containerInfo); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if config, ok := containerInfo["Config"].(map[string]any); ok {
|
||||||
|
delete(config, "Env")
|
||||||
|
}
|
||||||
|
|
||||||
|
return json.Marshal(containerInfo)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getLogs fetches the logs for a container
|
||||||
|
func (dm *dockerManager) getLogs(ctx context.Context, containerID string) (string, error) {
|
||||||
|
endpoint := fmt.Sprintf("http://localhost/containers/%s/logs?stdout=1&stderr=1&tail=%d", containerID, dockerLogsTail)
|
||||||
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := dm.client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
|
||||||
|
return "", fmt.Errorf("logs request failed: %s: %s", resp.Status, strings.TrimSpace(string(body)))
|
||||||
|
}
|
||||||
|
|
||||||
|
var builder strings.Builder
|
||||||
|
if err := decodeDockerLogStream(resp.Body, &builder); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return builder.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeDockerLogStream(reader io.Reader, builder *strings.Builder) error {
|
||||||
|
const headerSize = 8
|
||||||
|
var header [headerSize]byte
|
||||||
|
buf := make([]byte, 0, dockerLogsTail*200)
|
||||||
|
totalBytesRead := 0
|
||||||
|
|
||||||
|
for {
|
||||||
|
if _, err := io.ReadFull(reader, header[:]); err != nil {
|
||||||
|
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
frameLen := binary.BigEndian.Uint32(header[4:])
|
||||||
|
if frameLen == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prevent memory exhaustion from excessively large frames
|
||||||
|
if frameLen > maxLogFrameSize {
|
||||||
|
return fmt.Errorf("log frame size (%d) exceeds maximum (%d)", frameLen, maxLogFrameSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if reading this frame would exceed total log size limit
|
||||||
|
if totalBytesRead+int(frameLen) > maxTotalLogSize {
|
||||||
|
// Read and discard remaining data to avoid blocking
|
||||||
|
_, _ = io.Copy(io.Discard, io.LimitReader(reader, int64(frameLen)))
|
||||||
|
slog.Debug("Truncating logs: limit reached", "read", totalBytesRead, "limit", maxTotalLogSize)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
buf = allocateBuffer(buf, int(frameLen))
|
||||||
|
if _, err := io.ReadFull(reader, buf[:frameLen]); err != nil {
|
||||||
|
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
|
||||||
|
if len(buf) > 0 {
|
||||||
|
builder.Write(buf[:min(int(frameLen), len(buf))])
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
builder.Write(buf[:frameLen])
|
||||||
|
totalBytesRead += int(frameLen)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func allocateBuffer(current []byte, needed int) []byte {
|
||||||
|
if cap(current) >= needed {
|
||||||
|
return current[:needed]
|
||||||
|
}
|
||||||
|
return make([]byte, needed)
|
||||||
|
}
|
||||||
|
|
||||||
|
func min(a, b int) int {
|
||||||
|
if a < b {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|||||||
1205
agent/docker_test.go
Normal file
1205
agent/docker_test.go
Normal file
File diff suppressed because it is too large
Load Diff
168
agent/gpu.go
168
agent/gpu.go
@@ -5,6 +5,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"maps"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
@@ -44,6 +45,21 @@ type GPUManager struct {
|
|||||||
tegrastats bool
|
tegrastats bool
|
||||||
intelGpuStats bool
|
intelGpuStats bool
|
||||||
GpuDataMap map[string]*system.GPUData
|
GpuDataMap map[string]*system.GPUData
|
||||||
|
// lastAvgData stores the last calculated averages for each GPU
|
||||||
|
// Used when a collection happens before new data arrives (Count == 0)
|
||||||
|
lastAvgData map[string]system.GPUData
|
||||||
|
// Per-cache-key tracking for delta calculations
|
||||||
|
// cacheKey -> gpuId -> snapshot of last count/usage/power values
|
||||||
|
lastSnapshots map[uint16]map[string]*gpuSnapshot
|
||||||
|
}
|
||||||
|
|
||||||
|
// gpuSnapshot stores the last observed incremental values for delta tracking
|
||||||
|
type gpuSnapshot struct {
|
||||||
|
count uint32
|
||||||
|
usage float64
|
||||||
|
power float64
|
||||||
|
powerPkg float64
|
||||||
|
engines map[string]float64
|
||||||
}
|
}
|
||||||
|
|
||||||
// RocmSmiJson represents the JSON structure of rocm-smi output
|
// RocmSmiJson represents the JSON structure of rocm-smi output
|
||||||
@@ -229,48 +245,21 @@ func (gm *GPUManager) parseAmdData(output []byte) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// sums and resets the current GPU utilization data since the last update
|
// GetCurrentData returns GPU utilization data averaged since the last call with this cacheKey
|
||||||
func (gm *GPUManager) GetCurrentData() map[string]system.GPUData {
|
func (gm *GPUManager) GetCurrentData(cacheKey uint16) map[string]system.GPUData {
|
||||||
gm.Lock()
|
gm.Lock()
|
||||||
defer gm.Unlock()
|
defer gm.Unlock()
|
||||||
|
|
||||||
// check for GPUs with the same name
|
gm.initializeSnapshots(cacheKey)
|
||||||
nameCounts := make(map[string]int)
|
nameCounts := gm.countGPUNames()
|
||||||
for _, gpu := range gm.GpuDataMap {
|
|
||||||
nameCounts[gpu.Name]++
|
|
||||||
}
|
|
||||||
|
|
||||||
// copy / reset the data
|
|
||||||
gpuData := make(map[string]system.GPUData, len(gm.GpuDataMap))
|
gpuData := make(map[string]system.GPUData, len(gm.GpuDataMap))
|
||||||
for id, gpu := range gm.GpuDataMap {
|
for id, gpu := range gm.GpuDataMap {
|
||||||
// avoid division by zero
|
gpuAvg := gm.calculateGPUAverage(id, gpu, cacheKey)
|
||||||
count := max(gpu.Count, 1)
|
gm.updateInstantaneousValues(&gpuAvg, gpu)
|
||||||
|
gm.storeSnapshot(id, gpu, cacheKey)
|
||||||
|
|
||||||
// average the data
|
// Append id to name if there are multiple GPUs with the same name
|
||||||
gpuAvg := *gpu
|
|
||||||
gpuAvg.Temperature = twoDecimals(gpu.Temperature)
|
|
||||||
gpuAvg.Power = twoDecimals(gpu.Power / count)
|
|
||||||
|
|
||||||
// intel gpu stats doesn't provide usage, memory used, or memory total
|
|
||||||
if gpu.Engines != nil {
|
|
||||||
maxEngineUsage := 0.0
|
|
||||||
for name, engine := range gpu.Engines {
|
|
||||||
gpuAvg.Engines[name] = twoDecimals(engine / count)
|
|
||||||
maxEngineUsage = max(maxEngineUsage, engine/count)
|
|
||||||
}
|
|
||||||
gpuAvg.PowerPkg = twoDecimals(gpu.PowerPkg / count)
|
|
||||||
gpuAvg.Usage = twoDecimals(maxEngineUsage)
|
|
||||||
} else {
|
|
||||||
gpuAvg.Usage = twoDecimals(gpu.Usage / count)
|
|
||||||
gpuAvg.MemoryUsed = twoDecimals(gpu.MemoryUsed)
|
|
||||||
gpuAvg.MemoryTotal = twoDecimals(gpu.MemoryTotal)
|
|
||||||
}
|
|
||||||
|
|
||||||
// reset accumulators in the original gpu data for next collection
|
|
||||||
gpu.Usage, gpu.Power, gpu.PowerPkg, gpu.Count = gpuAvg.Usage, gpuAvg.Power, gpuAvg.PowerPkg, 1
|
|
||||||
gpu.Engines = gpuAvg.Engines
|
|
||||||
|
|
||||||
// append id to the name if there are multiple GPUs with the same name
|
|
||||||
if nameCounts[gpu.Name] > 1 {
|
if nameCounts[gpu.Name] > 1 {
|
||||||
gpuAvg.Name = fmt.Sprintf("%s %s", gpu.Name, id)
|
gpuAvg.Name = fmt.Sprintf("%s %s", gpu.Name, id)
|
||||||
}
|
}
|
||||||
@@ -280,6 +269,115 @@ func (gm *GPUManager) GetCurrentData() map[string]system.GPUData {
|
|||||||
return gpuData
|
return gpuData
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// initializeSnapshots ensures snapshot maps are initialized for the given cache key
|
||||||
|
func (gm *GPUManager) initializeSnapshots(cacheKey uint16) {
|
||||||
|
if gm.lastAvgData == nil {
|
||||||
|
gm.lastAvgData = make(map[string]system.GPUData)
|
||||||
|
}
|
||||||
|
if gm.lastSnapshots == nil {
|
||||||
|
gm.lastSnapshots = make(map[uint16]map[string]*gpuSnapshot)
|
||||||
|
}
|
||||||
|
if gm.lastSnapshots[cacheKey] == nil {
|
||||||
|
gm.lastSnapshots[cacheKey] = make(map[string]*gpuSnapshot)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// countGPUNames returns a map of GPU names to their occurrence count
|
||||||
|
func (gm *GPUManager) countGPUNames() map[string]int {
|
||||||
|
nameCounts := make(map[string]int)
|
||||||
|
for _, gpu := range gm.GpuDataMap {
|
||||||
|
nameCounts[gpu.Name]++
|
||||||
|
}
|
||||||
|
return nameCounts
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculateGPUAverage computes the average GPU metrics since the last snapshot for this cache key
|
||||||
|
func (gm *GPUManager) calculateGPUAverage(id string, gpu *system.GPUData, cacheKey uint16) system.GPUData {
|
||||||
|
lastSnapshot := gm.lastSnapshots[cacheKey][id]
|
||||||
|
currentCount := uint32(gpu.Count)
|
||||||
|
deltaCount := gm.calculateDeltaCount(currentCount, lastSnapshot)
|
||||||
|
|
||||||
|
// If no new data arrived, use last known average
|
||||||
|
if deltaCount == 0 {
|
||||||
|
return gm.lastAvgData[id] // zero value if not found
|
||||||
|
}
|
||||||
|
|
||||||
|
// Calculate new average
|
||||||
|
gpuAvg := *gpu
|
||||||
|
deltaUsage, deltaPower, deltaPowerPkg := gm.calculateDeltas(gpu, lastSnapshot)
|
||||||
|
|
||||||
|
gpuAvg.Power = twoDecimals(deltaPower / float64(deltaCount))
|
||||||
|
|
||||||
|
if gpu.Engines != nil {
|
||||||
|
// make fresh map for averaged engine metrics to avoid mutating
|
||||||
|
// the accumulator map stored in gm.GpuDataMap
|
||||||
|
gpuAvg.Engines = make(map[string]float64, len(gpu.Engines))
|
||||||
|
gpuAvg.Usage = gm.calculateIntelGPUUsage(&gpuAvg, gpu, lastSnapshot, deltaCount)
|
||||||
|
gpuAvg.PowerPkg = twoDecimals(deltaPowerPkg / float64(deltaCount))
|
||||||
|
} else {
|
||||||
|
gpuAvg.Usage = twoDecimals(deltaUsage / float64(deltaCount))
|
||||||
|
}
|
||||||
|
|
||||||
|
gm.lastAvgData[id] = gpuAvg
|
||||||
|
return gpuAvg
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculateDeltaCount returns the change in count since the last snapshot
|
||||||
|
func (gm *GPUManager) calculateDeltaCount(currentCount uint32, lastSnapshot *gpuSnapshot) uint32 {
|
||||||
|
if lastSnapshot != nil {
|
||||||
|
return currentCount - lastSnapshot.count
|
||||||
|
}
|
||||||
|
return currentCount
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculateDeltas computes the change in usage, power, and powerPkg since the last snapshot
|
||||||
|
func (gm *GPUManager) calculateDeltas(gpu *system.GPUData, lastSnapshot *gpuSnapshot) (deltaUsage, deltaPower, deltaPowerPkg float64) {
|
||||||
|
if lastSnapshot != nil {
|
||||||
|
return gpu.Usage - lastSnapshot.usage,
|
||||||
|
gpu.Power - lastSnapshot.power,
|
||||||
|
gpu.PowerPkg - lastSnapshot.powerPkg
|
||||||
|
}
|
||||||
|
return gpu.Usage, gpu.Power, gpu.PowerPkg
|
||||||
|
}
|
||||||
|
|
||||||
|
// calculateIntelGPUUsage computes Intel GPU usage from engine metrics and returns max engine usage
|
||||||
|
func (gm *GPUManager) calculateIntelGPUUsage(gpuAvg, gpu *system.GPUData, lastSnapshot *gpuSnapshot, deltaCount uint32) float64 {
|
||||||
|
maxEngineUsage := 0.0
|
||||||
|
for name, engine := range gpu.Engines {
|
||||||
|
var deltaEngine float64
|
||||||
|
if lastSnapshot != nil && lastSnapshot.engines != nil {
|
||||||
|
deltaEngine = engine - lastSnapshot.engines[name]
|
||||||
|
} else {
|
||||||
|
deltaEngine = engine
|
||||||
|
}
|
||||||
|
gpuAvg.Engines[name] = twoDecimals(deltaEngine / float64(deltaCount))
|
||||||
|
maxEngineUsage = max(maxEngineUsage, deltaEngine/float64(deltaCount))
|
||||||
|
}
|
||||||
|
return twoDecimals(maxEngineUsage)
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateInstantaneousValues updates values that should reflect current state, not averages
|
||||||
|
func (gm *GPUManager) updateInstantaneousValues(gpuAvg *system.GPUData, gpu *system.GPUData) {
|
||||||
|
gpuAvg.Temperature = twoDecimals(gpu.Temperature)
|
||||||
|
gpuAvg.MemoryUsed = twoDecimals(gpu.MemoryUsed)
|
||||||
|
gpuAvg.MemoryTotal = twoDecimals(gpu.MemoryTotal)
|
||||||
|
}
|
||||||
|
|
||||||
|
// storeSnapshot saves the current GPU state for this cache key
|
||||||
|
func (gm *GPUManager) storeSnapshot(id string, gpu *system.GPUData, cacheKey uint16) {
|
||||||
|
snapshot := &gpuSnapshot{
|
||||||
|
count: uint32(gpu.Count),
|
||||||
|
usage: gpu.Usage,
|
||||||
|
power: gpu.Power,
|
||||||
|
powerPkg: gpu.PowerPkg,
|
||||||
|
}
|
||||||
|
if gpu.Engines != nil {
|
||||||
|
snapshot.engines = make(map[string]float64, len(gpu.Engines))
|
||||||
|
maps.Copy(snapshot.engines, gpu.Engines)
|
||||||
|
}
|
||||||
|
gm.lastSnapshots[cacheKey][id] = snapshot
|
||||||
|
}
|
||||||
|
|
||||||
// detectGPUs checks for the presence of GPU management tools (nvidia-smi, rocm-smi, tegrastats)
|
// detectGPUs checks for the presence of GPU management tools (nvidia-smi, rocm-smi, tegrastats)
|
||||||
// in the system path. It sets the corresponding flags in the GPUManager struct if any of these
|
// in the system path. It sets the corresponding flags in the GPUManager struct if any of these
|
||||||
// tools are found. If none of the tools are found, it returns an error indicating that no GPU
|
// tools are found. If none of the tools are found, it returns an error indicating that no GPU
|
||||||
|
|||||||
@@ -49,7 +49,12 @@ func (gm *GPUManager) updateIntelFromStats(sample *intelGpuStats) bool {
|
|||||||
|
|
||||||
// collectIntelStats executes intel_gpu_top in text mode (-l) and parses the output
|
// collectIntelStats executes intel_gpu_top in text mode (-l) and parses the output
|
||||||
func (gm *GPUManager) collectIntelStats() (err error) {
|
func (gm *GPUManager) collectIntelStats() (err error) {
|
||||||
cmd := exec.Command(intelGpuStatsCmd, "-s", intelGpuStatsInterval, "-l")
|
// Build command arguments, optionally selecting a device via -d
|
||||||
|
args := []string{"-s", intelGpuStatsInterval, "-l"}
|
||||||
|
if dev, ok := GetEnv("INTEL_GPU_DEVICE"); ok && dev != "" {
|
||||||
|
args = append(args, "-d", dev)
|
||||||
|
}
|
||||||
|
cmd := exec.Command(intelGpuStatsCmd, args...)
|
||||||
// Avoid blocking if intel_gpu_top writes to stderr
|
// Avoid blocking if intel_gpu_top writes to stderr
|
||||||
cmd.Stderr = io.Discard
|
cmd.Stderr = io.Discard
|
||||||
stdout, err := cmd.StdoutPipe()
|
stdout, err := cmd.StdoutPipe()
|
||||||
@@ -129,7 +134,9 @@ func (gm *GPUManager) parseIntelHeaders(header1 string, header2 string) (engineN
|
|||||||
powerIndex = -1 // Initialize to -1, will be set to actual index if found
|
powerIndex = -1 // Initialize to -1, will be set to actual index if found
|
||||||
// Collect engine names from header1
|
// Collect engine names from header1
|
||||||
for _, col := range h1 {
|
for _, col := range h1 {
|
||||||
key := strings.TrimRightFunc(col, func(r rune) bool { return r >= '0' && r <= '9' })
|
key := strings.TrimRightFunc(col, func(r rune) bool {
|
||||||
|
return (r >= '0' && r <= '9') || r == '/'
|
||||||
|
})
|
||||||
var friendly string
|
var friendly string
|
||||||
switch key {
|
switch key {
|
||||||
case "RCS":
|
case "RCS":
|
||||||
|
|||||||
@@ -4,8 +4,10 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -332,7 +334,7 @@ func TestParseJetsonData(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestGetCurrentData(t *testing.T) {
|
func TestGetCurrentData(t *testing.T) {
|
||||||
t.Run("calculates averages and resets accumulators", func(t *testing.T) {
|
t.Run("calculates averages with per-cache-key delta tracking", func(t *testing.T) {
|
||||||
gm := &GPUManager{
|
gm := &GPUManager{
|
||||||
GpuDataMap: map[string]*system.GPUData{
|
GpuDataMap: map[string]*system.GPUData{
|
||||||
"0": {
|
"0": {
|
||||||
@@ -365,7 +367,8 @@ func TestGetCurrentData(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
result := gm.GetCurrentData()
|
cacheKey := uint16(5000)
|
||||||
|
result := gm.GetCurrentData(cacheKey)
|
||||||
|
|
||||||
// Verify name disambiguation
|
// Verify name disambiguation
|
||||||
assert.Equal(t, "GPU1 0", result["0"].Name)
|
assert.Equal(t, "GPU1 0", result["0"].Name)
|
||||||
@@ -378,13 +381,19 @@ func TestGetCurrentData(t *testing.T) {
|
|||||||
assert.InDelta(t, 30.0, result["1"].Usage, 0.01)
|
assert.InDelta(t, 30.0, result["1"].Usage, 0.01)
|
||||||
assert.InDelta(t, 60.0, result["1"].Power, 0.01)
|
assert.InDelta(t, 60.0, result["1"].Power, 0.01)
|
||||||
|
|
||||||
// Verify that accumulators in the original map are reset
|
// Verify that accumulators in the original map are NOT reset (they keep growing)
|
||||||
assert.EqualValues(t, float64(1), gm.GpuDataMap["0"].Count, "GPU 0 Count should be reset")
|
assert.EqualValues(t, 2, gm.GpuDataMap["0"].Count, "GPU 0 Count should remain at 2")
|
||||||
assert.EqualValues(t, float64(50.0), gm.GpuDataMap["0"].Usage, "GPU 0 Usage should be reset")
|
assert.EqualValues(t, 100, gm.GpuDataMap["0"].Usage, "GPU 0 Usage should remain at 100")
|
||||||
assert.Equal(t, float64(100.0), gm.GpuDataMap["0"].Power, "GPU 0 Power should be reset")
|
assert.Equal(t, 200.0, gm.GpuDataMap["0"].Power, "GPU 0 Power should remain at 200")
|
||||||
assert.Equal(t, float64(1), gm.GpuDataMap["1"].Count, "GPU 1 Count should be reset")
|
assert.Equal(t, 1.0, gm.GpuDataMap["1"].Count, "GPU 1 Count should remain at 1")
|
||||||
assert.Equal(t, float64(30), gm.GpuDataMap["1"].Usage, "GPU 1 Usage should be reset")
|
assert.Equal(t, 30.0, gm.GpuDataMap["1"].Usage, "GPU 1 Usage should remain at 30")
|
||||||
assert.Equal(t, float64(60), gm.GpuDataMap["1"].Power, "GPU 1 Power should be reset")
|
assert.Equal(t, 60.0, gm.GpuDataMap["1"].Power, "GPU 1 Power should remain at 60")
|
||||||
|
|
||||||
|
// Verify snapshots were stored for this cache key
|
||||||
|
assert.NotNil(t, gm.lastSnapshots[cacheKey]["0"])
|
||||||
|
assert.Equal(t, uint32(2), gm.lastSnapshots[cacheKey]["0"].count)
|
||||||
|
assert.Equal(t, 100.0, gm.lastSnapshots[cacheKey]["0"].usage)
|
||||||
|
assert.Equal(t, 200.0, gm.lastSnapshots[cacheKey]["0"].power)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("handles zero count without panicking", func(t *testing.T) {
|
t.Run("handles zero count without panicking", func(t *testing.T) {
|
||||||
@@ -399,17 +408,543 @@ func TestGetCurrentData(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cacheKey := uint16(5000)
|
||||||
var result map[string]system.GPUData
|
var result map[string]system.GPUData
|
||||||
assert.NotPanics(t, func() {
|
assert.NotPanics(t, func() {
|
||||||
result = gm.GetCurrentData()
|
result = gm.GetCurrentData(cacheKey)
|
||||||
})
|
})
|
||||||
|
|
||||||
// Check that usage and power are 0
|
// Check that usage and power are 0
|
||||||
assert.Equal(t, 0.0, result["0"].Usage)
|
assert.Equal(t, 0.0, result["0"].Usage)
|
||||||
assert.Equal(t, 0.0, result["0"].Power)
|
assert.Equal(t, 0.0, result["0"].Power)
|
||||||
|
|
||||||
// Verify reset count
|
// Verify count remains 0
|
||||||
assert.EqualValues(t, 1, gm.GpuDataMap["0"].Count)
|
assert.EqualValues(t, 0, gm.GpuDataMap["0"].Count)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses last average when no new data arrives", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
GpuDataMap: map[string]*system.GPUData{
|
||||||
|
"0": {
|
||||||
|
Name: "TestGPU",
|
||||||
|
Temperature: 55.0,
|
||||||
|
MemoryUsed: 1500,
|
||||||
|
MemoryTotal: 8000,
|
||||||
|
Usage: 100, // Will average to 50
|
||||||
|
Power: 200, // Will average to 100
|
||||||
|
Count: 2,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheKey := uint16(5000)
|
||||||
|
|
||||||
|
// First collection - should calculate averages and store them
|
||||||
|
result1 := gm.GetCurrentData(cacheKey)
|
||||||
|
assert.InDelta(t, 50.0, result1["0"].Usage, 0.01)
|
||||||
|
assert.InDelta(t, 100.0, result1["0"].Power, 0.01)
|
||||||
|
assert.EqualValues(t, 2, gm.GpuDataMap["0"].Count, "Count should remain at 2")
|
||||||
|
|
||||||
|
// Update temperature but no new usage/power data (count stays same)
|
||||||
|
gm.GpuDataMap["0"].Temperature = 60.0
|
||||||
|
gm.GpuDataMap["0"].MemoryUsed = 1600
|
||||||
|
|
||||||
|
// Second collection - should use last averages since count hasn't changed (delta = 0)
|
||||||
|
result2 := gm.GetCurrentData(cacheKey)
|
||||||
|
assert.InDelta(t, 50.0, result2["0"].Usage, 0.01, "Should use last average")
|
||||||
|
assert.InDelta(t, 100.0, result2["0"].Power, 0.01, "Should use last average")
|
||||||
|
assert.InDelta(t, 60.0, result2["0"].Temperature, 0.01, "Should use current temperature")
|
||||||
|
assert.InDelta(t, 1600.0, result2["0"].MemoryUsed, 0.01, "Should use current memory")
|
||||||
|
assert.EqualValues(t, 2, gm.GpuDataMap["0"].Count, "Count should still be 2")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("tracks separate averages per cache key", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
GpuDataMap: map[string]*system.GPUData{
|
||||||
|
"0": {
|
||||||
|
Name: "TestGPU",
|
||||||
|
Temperature: 55.0,
|
||||||
|
MemoryUsed: 1500,
|
||||||
|
MemoryTotal: 8000,
|
||||||
|
Usage: 100, // Initial: 100 over 2 counts = 50 avg
|
||||||
|
Power: 200, // Initial: 200 over 2 counts = 100 avg
|
||||||
|
Count: 2,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheKey1 := uint16(5000)
|
||||||
|
cacheKey2 := uint16(10000)
|
||||||
|
|
||||||
|
// First check with cacheKey1 - baseline
|
||||||
|
result1 := gm.GetCurrentData(cacheKey1)
|
||||||
|
assert.InDelta(t, 50.0, result1["0"].Usage, 0.01, "CacheKey1: Initial average should be 50")
|
||||||
|
assert.InDelta(t, 100.0, result1["0"].Power, 0.01, "CacheKey1: Initial average should be 100")
|
||||||
|
|
||||||
|
// Simulate GPU activity - accumulate more data
|
||||||
|
gm.GpuDataMap["0"].Usage += 60 // Now total: 160
|
||||||
|
gm.GpuDataMap["0"].Power += 150 // Now total: 350
|
||||||
|
gm.GpuDataMap["0"].Count += 3 // Now total: 5
|
||||||
|
|
||||||
|
// Check with cacheKey1 again - should get delta since last cacheKey1 check
|
||||||
|
result2 := gm.GetCurrentData(cacheKey1)
|
||||||
|
assert.InDelta(t, 20.0, result2["0"].Usage, 0.01, "CacheKey1: Delta average should be 60/3 = 20")
|
||||||
|
assert.InDelta(t, 50.0, result2["0"].Power, 0.01, "CacheKey1: Delta average should be 150/3 = 50")
|
||||||
|
|
||||||
|
// Check with cacheKey2 for the first time - should get average since beginning
|
||||||
|
result3 := gm.GetCurrentData(cacheKey2)
|
||||||
|
assert.InDelta(t, 32.0, result3["0"].Usage, 0.01, "CacheKey2: Total average should be 160/5 = 32")
|
||||||
|
assert.InDelta(t, 70.0, result3["0"].Power, 0.01, "CacheKey2: Total average should be 350/5 = 70")
|
||||||
|
|
||||||
|
// Simulate more GPU activity
|
||||||
|
gm.GpuDataMap["0"].Usage += 80 // Now total: 240
|
||||||
|
gm.GpuDataMap["0"].Power += 160 // Now total: 510
|
||||||
|
gm.GpuDataMap["0"].Count += 2 // Now total: 7
|
||||||
|
|
||||||
|
// Check with cacheKey1 - should get delta since last cacheKey1 check
|
||||||
|
result4 := gm.GetCurrentData(cacheKey1)
|
||||||
|
assert.InDelta(t, 40.0, result4["0"].Usage, 0.01, "CacheKey1: New delta average should be 80/2 = 40")
|
||||||
|
assert.InDelta(t, 80.0, result4["0"].Power, 0.01, "CacheKey1: New delta average should be 160/2 = 80")
|
||||||
|
|
||||||
|
// Check with cacheKey2 - should get delta since last cacheKey2 check
|
||||||
|
result5 := gm.GetCurrentData(cacheKey2)
|
||||||
|
assert.InDelta(t, 40.0, result5["0"].Usage, 0.01, "CacheKey2: Delta average should be 80/2 = 40")
|
||||||
|
assert.InDelta(t, 80.0, result5["0"].Power, 0.01, "CacheKey2: Delta average should be 160/2 = 80")
|
||||||
|
|
||||||
|
// Verify snapshots exist for both cache keys
|
||||||
|
assert.NotNil(t, gm.lastSnapshots[cacheKey1])
|
||||||
|
assert.NotNil(t, gm.lastSnapshots[cacheKey2])
|
||||||
|
assert.NotNil(t, gm.lastSnapshots[cacheKey1]["0"])
|
||||||
|
assert.NotNil(t, gm.lastSnapshots[cacheKey2]["0"])
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCalculateDeltaCount(t *testing.T) {
|
||||||
|
gm := &GPUManager{}
|
||||||
|
|
||||||
|
t.Run("with no previous snapshot", func(t *testing.T) {
|
||||||
|
delta := gm.calculateDeltaCount(10, nil)
|
||||||
|
assert.Equal(t, uint32(10), delta, "Should return current count when no snapshot exists")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with previous snapshot", func(t *testing.T) {
|
||||||
|
snapshot := &gpuSnapshot{count: 5}
|
||||||
|
delta := gm.calculateDeltaCount(15, snapshot)
|
||||||
|
assert.Equal(t, uint32(10), delta, "Should return difference between current and snapshot")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with same count", func(t *testing.T) {
|
||||||
|
snapshot := &gpuSnapshot{count: 10}
|
||||||
|
delta := gm.calculateDeltaCount(10, snapshot)
|
||||||
|
assert.Equal(t, uint32(0), delta, "Should return zero when count hasn't changed")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCalculateDeltas(t *testing.T) {
|
||||||
|
gm := &GPUManager{}
|
||||||
|
|
||||||
|
t.Run("with no previous snapshot", func(t *testing.T) {
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Usage: 100.5,
|
||||||
|
Power: 250.75,
|
||||||
|
PowerPkg: 300.25,
|
||||||
|
}
|
||||||
|
deltaUsage, deltaPower, deltaPowerPkg := gm.calculateDeltas(gpu, nil)
|
||||||
|
assert.Equal(t, 100.5, deltaUsage)
|
||||||
|
assert.Equal(t, 250.75, deltaPower)
|
||||||
|
assert.Equal(t, 300.25, deltaPowerPkg)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with previous snapshot", func(t *testing.T) {
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Usage: 150.5,
|
||||||
|
Power: 300.75,
|
||||||
|
PowerPkg: 400.25,
|
||||||
|
}
|
||||||
|
snapshot := &gpuSnapshot{
|
||||||
|
usage: 100.5,
|
||||||
|
power: 250.75,
|
||||||
|
powerPkg: 300.25,
|
||||||
|
}
|
||||||
|
deltaUsage, deltaPower, deltaPowerPkg := gm.calculateDeltas(gpu, snapshot)
|
||||||
|
assert.InDelta(t, 50.0, deltaUsage, 0.01)
|
||||||
|
assert.InDelta(t, 50.0, deltaPower, 0.01)
|
||||||
|
assert.InDelta(t, 100.0, deltaPowerPkg, 0.01)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCalculateIntelGPUUsage(t *testing.T) {
|
||||||
|
gm := &GPUManager{}
|
||||||
|
|
||||||
|
t.Run("with no previous snapshot", func(t *testing.T) {
|
||||||
|
gpuAvg := &system.GPUData{
|
||||||
|
Engines: make(map[string]float64),
|
||||||
|
}
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Engines: map[string]float64{
|
||||||
|
"Render/3D": 80.0,
|
||||||
|
"Video": 40.0,
|
||||||
|
"Compute": 60.0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
maxUsage := gm.calculateIntelGPUUsage(gpuAvg, gpu, nil, 2)
|
||||||
|
|
||||||
|
assert.Equal(t, 40.0, maxUsage, "Should return max engine usage (80/2=40)")
|
||||||
|
assert.Equal(t, 40.0, gpuAvg.Engines["Render/3D"])
|
||||||
|
assert.Equal(t, 20.0, gpuAvg.Engines["Video"])
|
||||||
|
assert.Equal(t, 30.0, gpuAvg.Engines["Compute"])
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("with previous snapshot", func(t *testing.T) {
|
||||||
|
gpuAvg := &system.GPUData{
|
||||||
|
Engines: make(map[string]float64),
|
||||||
|
}
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Engines: map[string]float64{
|
||||||
|
"Render/3D": 180.0,
|
||||||
|
"Video": 100.0,
|
||||||
|
"Compute": 140.0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
snapshot := &gpuSnapshot{
|
||||||
|
engines: map[string]float64{
|
||||||
|
"Render/3D": 80.0,
|
||||||
|
"Video": 40.0,
|
||||||
|
"Compute": 60.0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
maxUsage := gm.calculateIntelGPUUsage(gpuAvg, gpu, snapshot, 5)
|
||||||
|
|
||||||
|
// Deltas: Render/3D=100, Video=60, Compute=80 over 5 counts
|
||||||
|
assert.Equal(t, 20.0, maxUsage, "Should return max engine delta (100/5=20)")
|
||||||
|
assert.Equal(t, 20.0, gpuAvg.Engines["Render/3D"])
|
||||||
|
assert.Equal(t, 12.0, gpuAvg.Engines["Video"])
|
||||||
|
assert.Equal(t, 16.0, gpuAvg.Engines["Compute"])
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("handles missing engine in snapshot", func(t *testing.T) {
|
||||||
|
gpuAvg := &system.GPUData{
|
||||||
|
Engines: make(map[string]float64),
|
||||||
|
}
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Engines: map[string]float64{
|
||||||
|
"Render/3D": 100.0,
|
||||||
|
"NewEngine": 50.0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
snapshot := &gpuSnapshot{
|
||||||
|
engines: map[string]float64{
|
||||||
|
"Render/3D": 80.0,
|
||||||
|
// NewEngine doesn't exist in snapshot
|
||||||
|
},
|
||||||
|
}
|
||||||
|
maxUsage := gm.calculateIntelGPUUsage(gpuAvg, gpu, snapshot, 2)
|
||||||
|
|
||||||
|
assert.Equal(t, 25.0, maxUsage)
|
||||||
|
assert.Equal(t, 10.0, gpuAvg.Engines["Render/3D"], "Should use delta for existing engine")
|
||||||
|
assert.Equal(t, 25.0, gpuAvg.Engines["NewEngine"], "Should use full value for new engine")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateInstantaneousValues(t *testing.T) {
|
||||||
|
gm := &GPUManager{}
|
||||||
|
|
||||||
|
t.Run("updates temperature, memory used and total", func(t *testing.T) {
|
||||||
|
gpuAvg := &system.GPUData{
|
||||||
|
Temperature: 50.123,
|
||||||
|
MemoryUsed: 1000.456,
|
||||||
|
MemoryTotal: 8000.789,
|
||||||
|
}
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Temperature: 75.567,
|
||||||
|
MemoryUsed: 2500.891,
|
||||||
|
MemoryTotal: 8192.234,
|
||||||
|
}
|
||||||
|
|
||||||
|
gm.updateInstantaneousValues(gpuAvg, gpu)
|
||||||
|
|
||||||
|
assert.Equal(t, 75.57, gpuAvg.Temperature, "Should update and round temperature")
|
||||||
|
assert.Equal(t, 2500.89, gpuAvg.MemoryUsed, "Should update and round memory used")
|
||||||
|
assert.Equal(t, 8192.23, gpuAvg.MemoryTotal, "Should update and round memory total")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStoreSnapshot(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
lastSnapshots: make(map[uint16]map[string]*gpuSnapshot),
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("stores standard GPU snapshot", func(t *testing.T) {
|
||||||
|
cacheKey := uint16(5000)
|
||||||
|
gm.lastSnapshots[cacheKey] = make(map[string]*gpuSnapshot)
|
||||||
|
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Count: 10.0,
|
||||||
|
Usage: 150.5,
|
||||||
|
Power: 250.75,
|
||||||
|
PowerPkg: 300.25,
|
||||||
|
}
|
||||||
|
|
||||||
|
gm.storeSnapshot("0", gpu, cacheKey)
|
||||||
|
|
||||||
|
snapshot := gm.lastSnapshots[cacheKey]["0"]
|
||||||
|
assert.NotNil(t, snapshot)
|
||||||
|
assert.Equal(t, uint32(10), snapshot.count)
|
||||||
|
assert.Equal(t, 150.5, snapshot.usage)
|
||||||
|
assert.Equal(t, 250.75, snapshot.power)
|
||||||
|
assert.Equal(t, 300.25, snapshot.powerPkg)
|
||||||
|
assert.Nil(t, snapshot.engines, "Should not have engines for standard GPU")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("stores Intel GPU snapshot with engines", func(t *testing.T) {
|
||||||
|
cacheKey := uint16(10000)
|
||||||
|
gm.lastSnapshots[cacheKey] = make(map[string]*gpuSnapshot)
|
||||||
|
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Count: 5.0,
|
||||||
|
Usage: 100.0,
|
||||||
|
Power: 200.0,
|
||||||
|
PowerPkg: 250.0,
|
||||||
|
Engines: map[string]float64{
|
||||||
|
"Render/3D": 80.0,
|
||||||
|
"Video": 40.0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
gm.storeSnapshot("0", gpu, cacheKey)
|
||||||
|
|
||||||
|
snapshot := gm.lastSnapshots[cacheKey]["0"]
|
||||||
|
assert.NotNil(t, snapshot)
|
||||||
|
assert.Equal(t, uint32(5), snapshot.count)
|
||||||
|
assert.NotNil(t, snapshot.engines, "Should have engines for Intel GPU")
|
||||||
|
assert.Equal(t, 80.0, snapshot.engines["Render/3D"])
|
||||||
|
assert.Equal(t, 40.0, snapshot.engines["Video"])
|
||||||
|
assert.Len(t, snapshot.engines, 2)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("overwrites existing snapshot", func(t *testing.T) {
|
||||||
|
cacheKey := uint16(5000)
|
||||||
|
gm.lastSnapshots[cacheKey] = make(map[string]*gpuSnapshot)
|
||||||
|
|
||||||
|
// Store initial snapshot
|
||||||
|
gpu1 := &system.GPUData{Count: 5.0, Usage: 100.0, Power: 200.0}
|
||||||
|
gm.storeSnapshot("0", gpu1, cacheKey)
|
||||||
|
|
||||||
|
// Store updated snapshot
|
||||||
|
gpu2 := &system.GPUData{Count: 10.0, Usage: 250.0, Power: 400.0}
|
||||||
|
gm.storeSnapshot("0", gpu2, cacheKey)
|
||||||
|
|
||||||
|
snapshot := gm.lastSnapshots[cacheKey]["0"]
|
||||||
|
assert.Equal(t, uint32(10), snapshot.count, "Should overwrite previous count")
|
||||||
|
assert.Equal(t, 250.0, snapshot.usage, "Should overwrite previous usage")
|
||||||
|
assert.Equal(t, 400.0, snapshot.power, "Should overwrite previous power")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCountGPUNames(t *testing.T) {
|
||||||
|
t.Run("returns empty map for no GPUs", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
GpuDataMap: make(map[string]*system.GPUData),
|
||||||
|
}
|
||||||
|
counts := gm.countGPUNames()
|
||||||
|
assert.Empty(t, counts)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("counts unique GPU names", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
GpuDataMap: map[string]*system.GPUData{
|
||||||
|
"0": {Name: "GPU A"},
|
||||||
|
"1": {Name: "GPU B"},
|
||||||
|
"2": {Name: "GPU C"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
counts := gm.countGPUNames()
|
||||||
|
assert.Equal(t, 1, counts["GPU A"])
|
||||||
|
assert.Equal(t, 1, counts["GPU B"])
|
||||||
|
assert.Equal(t, 1, counts["GPU C"])
|
||||||
|
assert.Len(t, counts, 3)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("counts duplicate GPU names", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
GpuDataMap: map[string]*system.GPUData{
|
||||||
|
"0": {Name: "RTX 4090"},
|
||||||
|
"1": {Name: "RTX 4090"},
|
||||||
|
"2": {Name: "RTX 4090"},
|
||||||
|
"3": {Name: "RTX 3080"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
counts := gm.countGPUNames()
|
||||||
|
assert.Equal(t, 3, counts["RTX 4090"])
|
||||||
|
assert.Equal(t, 1, counts["RTX 3080"])
|
||||||
|
assert.Len(t, counts, 2)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInitializeSnapshots(t *testing.T) {
|
||||||
|
t.Run("initializes all maps from scratch", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{}
|
||||||
|
cacheKey := uint16(5000)
|
||||||
|
|
||||||
|
gm.initializeSnapshots(cacheKey)
|
||||||
|
|
||||||
|
assert.NotNil(t, gm.lastAvgData)
|
||||||
|
assert.NotNil(t, gm.lastSnapshots)
|
||||||
|
assert.NotNil(t, gm.lastSnapshots[cacheKey])
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("initializes only missing maps", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
lastAvgData: make(map[string]system.GPUData),
|
||||||
|
}
|
||||||
|
cacheKey := uint16(5000)
|
||||||
|
|
||||||
|
gm.initializeSnapshots(cacheKey)
|
||||||
|
|
||||||
|
assert.NotNil(t, gm.lastAvgData, "Should preserve existing lastAvgData")
|
||||||
|
assert.NotNil(t, gm.lastSnapshots)
|
||||||
|
assert.NotNil(t, gm.lastSnapshots[cacheKey])
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("adds new cache key to existing snapshots", func(t *testing.T) {
|
||||||
|
existingKey := uint16(5000)
|
||||||
|
newKey := uint16(10000)
|
||||||
|
|
||||||
|
gm := &GPUManager{
|
||||||
|
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||||
|
existingKey: {"0": {count: 10}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
gm.initializeSnapshots(newKey)
|
||||||
|
|
||||||
|
assert.NotNil(t, gm.lastSnapshots[existingKey], "Should preserve existing cache key")
|
||||||
|
assert.NotNil(t, gm.lastSnapshots[newKey], "Should add new cache key")
|
||||||
|
assert.NotNil(t, gm.lastSnapshots[existingKey]["0"], "Should preserve existing snapshot data")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCalculateGPUAverage(t *testing.T) {
|
||||||
|
t.Run("returns zero value when deltaCount is zero", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||||
|
5000: {
|
||||||
|
"0": {count: 10, usage: 100, power: 200},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
lastAvgData: map[string]system.GPUData{
|
||||||
|
"0": {Usage: 50.0, Power: 100.0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Count: 10.0, // Same as snapshot, so delta = 0
|
||||||
|
Usage: 100.0,
|
||||||
|
Power: 200.0,
|
||||||
|
}
|
||||||
|
|
||||||
|
result := gm.calculateGPUAverage("0", gpu, 5000)
|
||||||
|
|
||||||
|
assert.Equal(t, 50.0, result.Usage, "Should return cached average")
|
||||||
|
assert.Equal(t, 100.0, result.Power, "Should return cached average")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("calculates average for standard GPU", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||||
|
5000: {},
|
||||||
|
},
|
||||||
|
lastAvgData: make(map[string]system.GPUData),
|
||||||
|
}
|
||||||
|
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Name: "Test GPU",
|
||||||
|
Count: 4.0,
|
||||||
|
Usage: 200.0, // 200 / 4 = 50
|
||||||
|
Power: 400.0, // 400 / 4 = 100
|
||||||
|
}
|
||||||
|
|
||||||
|
result := gm.calculateGPUAverage("0", gpu, 5000)
|
||||||
|
|
||||||
|
assert.Equal(t, 50.0, result.Usage)
|
||||||
|
assert.Equal(t, 100.0, result.Power)
|
||||||
|
assert.Equal(t, "Test GPU", result.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("calculates average for Intel GPU with engines", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||||
|
5000: {},
|
||||||
|
},
|
||||||
|
lastAvgData: make(map[string]system.GPUData),
|
||||||
|
}
|
||||||
|
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Name: "Intel GPU",
|
||||||
|
Count: 5.0,
|
||||||
|
Power: 500.0,
|
||||||
|
PowerPkg: 600.0,
|
||||||
|
Engines: map[string]float64{
|
||||||
|
"Render/3D": 100.0, // 100 / 5 = 20
|
||||||
|
"Video": 50.0, // 50 / 5 = 10
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
result := gm.calculateGPUAverage("0", gpu, 5000)
|
||||||
|
|
||||||
|
assert.Equal(t, 100.0, result.Power)
|
||||||
|
assert.Equal(t, 120.0, result.PowerPkg)
|
||||||
|
assert.Equal(t, 20.0, result.Usage, "Should use max engine usage")
|
||||||
|
assert.Equal(t, 20.0, result.Engines["Render/3D"])
|
||||||
|
assert.Equal(t, 10.0, result.Engines["Video"])
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("calculates delta from previous snapshot", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||||
|
5000: {
|
||||||
|
"0": {
|
||||||
|
count: 2,
|
||||||
|
usage: 50.0,
|
||||||
|
power: 100.0,
|
||||||
|
powerPkg: 120.0,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
lastAvgData: make(map[string]system.GPUData),
|
||||||
|
}
|
||||||
|
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Name: "Test GPU",
|
||||||
|
Count: 7.0, // Delta = 7 - 2 = 5
|
||||||
|
Usage: 200.0, // Delta = 200 - 50 = 150, avg = 150/5 = 30
|
||||||
|
Power: 350.0, // Delta = 350 - 100 = 250, avg = 250/5 = 50
|
||||||
|
PowerPkg: 420.0, // Delta = 420 - 120 = 300, avg = 300/5 = 60
|
||||||
|
}
|
||||||
|
|
||||||
|
result := gm.calculateGPUAverage("0", gpu, 5000)
|
||||||
|
|
||||||
|
assert.Equal(t, 30.0, result.Usage)
|
||||||
|
assert.Equal(t, 50.0, result.Power)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("stores result in lastAvgData", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||||
|
5000: {},
|
||||||
|
},
|
||||||
|
lastAvgData: make(map[string]system.GPUData),
|
||||||
|
}
|
||||||
|
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Count: 2.0,
|
||||||
|
Usage: 100.0,
|
||||||
|
Power: 200.0,
|
||||||
|
}
|
||||||
|
|
||||||
|
result := gm.calculateGPUAverage("0", gpu, 5000)
|
||||||
|
|
||||||
|
assert.Equal(t, result, gm.lastAvgData["0"], "Should store calculated average")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -765,7 +1300,8 @@ func TestAccumulation(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify average calculation in GetCurrentData
|
// Verify average calculation in GetCurrentData
|
||||||
result := gm.GetCurrentData()
|
cacheKey := uint16(5000)
|
||||||
|
result := gm.GetCurrentData(cacheKey)
|
||||||
for id, expected := range tt.expectedValues {
|
for id, expected := range tt.expectedValues {
|
||||||
gpu, exists := result[id]
|
gpu, exists := result[id]
|
||||||
assert.True(t, exists, "GPU with ID %s should exist in GetCurrentData result", id)
|
assert.True(t, exists, "GPU with ID %s should exist in GetCurrentData result", id)
|
||||||
@@ -778,16 +1314,16 @@ func TestAccumulation(t *testing.T) {
|
|||||||
assert.EqualValues(t, expected.avgPower, gpu.Power, "Average power in GetCurrentData should match")
|
assert.EqualValues(t, expected.avgPower, gpu.Power, "Average power in GetCurrentData should match")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify that accumulators in the original map are reset
|
// Verify that accumulators in the original map are NOT reset (they keep growing)
|
||||||
for id, expected := range tt.expectedValues {
|
for id, expected := range tt.expectedValues {
|
||||||
gpu, exists := gm.GpuDataMap[id]
|
gpu, exists := gm.GpuDataMap[id]
|
||||||
assert.True(t, exists, "GPU with ID %s should still exist after GetCurrentData", id)
|
assert.True(t, exists, "GPU with ID %s should still exist after GetCurrentData", id)
|
||||||
if !exists {
|
if !exists {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
assert.EqualValues(t, 1, gpu.Count, "Count should be reset for GPU ID %s", id)
|
assert.EqualValues(t, expected.count, gpu.Count, "Count should remain at accumulated value for GPU ID %s", id)
|
||||||
assert.EqualValues(t, expected.avgUsage, gpu.Usage, "Usage should be reset for GPU ID %s", id)
|
assert.EqualValues(t, expected.usage, gpu.Usage, "Usage should remain at accumulated value for GPU ID %s", id)
|
||||||
assert.EqualValues(t, expected.avgPower, gpu.Power, "Power should be reset for GPU ID %s", id)
|
assert.EqualValues(t, expected.power, gpu.Power, "Power should remain at accumulated value for GPU ID %s", id)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@@ -903,6 +1439,15 @@ func TestParseIntelHeaders(t *testing.T) {
|
|||||||
wantPowerIndex: 4, // "gpu" is at index 4
|
wantPowerIndex: 4, // "gpu" is at index 4
|
||||||
wantPreEngineCols: 8, // 17 total cols - 3*3 = 8
|
wantPreEngineCols: 8, // 17 total cols - 3*3 = 8
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "basic headers with RCS BCS VCS using index in name",
|
||||||
|
header1: "Freq MHz IRQ RC6 Power W IMC MiB/s RCS/0 BCS/1 VCS/2",
|
||||||
|
header2: " req act /s % gpu pkg rd wr % se wa % se wa % se wa",
|
||||||
|
wantEngineNames: []string{"RCS", "BCS", "VCS"},
|
||||||
|
wantFriendlyNames: []string{"Render/3D", "Blitter", "Video"},
|
||||||
|
wantPowerIndex: 4, // "gpu" is at index 4
|
||||||
|
wantPreEngineCols: 8, // 17 total cols - 3*3 = 8
|
||||||
|
},
|
||||||
{
|
{
|
||||||
name: "headers with only RCS",
|
name: "headers with only RCS",
|
||||||
header1: "Freq MHz IRQ RC6 Power W IMC MiB/s RCS",
|
header1: "Freq MHz IRQ RC6 Power W IMC MiB/s RCS",
|
||||||
@@ -1090,3 +1635,42 @@ func TestParseIntelData(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestIntelCollectorDeviceEnv(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
t.Setenv("PATH", dir)
|
||||||
|
|
||||||
|
// Prepare a file to capture args
|
||||||
|
argsFile := filepath.Join(dir, "args.txt")
|
||||||
|
|
||||||
|
// Create a fake intel_gpu_top that records its arguments and prints minimal valid output
|
||||||
|
scriptPath := filepath.Join(dir, "intel_gpu_top")
|
||||||
|
script := fmt.Sprintf(`#!/bin/sh
|
||||||
|
echo "$@" > %s
|
||||||
|
echo "Freq MHz IRQ RC6 Power W IMC MiB/s RCS VCS"
|
||||||
|
echo " req act /s %% gpu pkg rd wr %% se wa %% se wa"
|
||||||
|
echo "226 223 338 58 2.00 2.69 1820 965 0.00 0 0 0.00 0 0"
|
||||||
|
echo "189 187 412 67 1.80 2.45 1950 823 8.50 2 1 15.00 1 0"
|
||||||
|
`, argsFile)
|
||||||
|
if err := os.WriteFile(scriptPath, []byte(script), 0755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set device selector via prefixed env var
|
||||||
|
t.Setenv("BESZEL_AGENT_INTEL_GPU_DEVICE", "sriov")
|
||||||
|
|
||||||
|
gm := &GPUManager{GpuDataMap: make(map[string]*system.GPUData)}
|
||||||
|
if err := gm.collectIntelStats(); err != nil {
|
||||||
|
t.Fatalf("collectIntelStats error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify that -d sriov was passed
|
||||||
|
data, err := os.ReadFile(argsFile)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed reading args file: %v", err)
|
||||||
|
}
|
||||||
|
argsStr := strings.TrimSpace(string(data))
|
||||||
|
require.Contains(t, argsStr, "-d sriov")
|
||||||
|
require.Contains(t, argsStr, "-s ")
|
||||||
|
require.Contains(t, argsStr, "-l")
|
||||||
|
}
|
||||||
|
|||||||
205
agent/handlers.go
Normal file
205
agent/handlers.go
Normal file
@@ -0,0 +1,205 @@
|
|||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
|
||||||
|
"golang.org/x/exp/slog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HandlerContext provides context for request handlers
|
||||||
|
type HandlerContext struct {
|
||||||
|
Client *WebSocketClient
|
||||||
|
Agent *Agent
|
||||||
|
Request *common.HubRequest[cbor.RawMessage]
|
||||||
|
RequestID *uint32
|
||||||
|
HubVerified bool
|
||||||
|
// SendResponse abstracts how a handler sends responses (WS or SSH)
|
||||||
|
SendResponse func(data any, requestID *uint32) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestHandler defines the interface for handling specific websocket request types
|
||||||
|
type RequestHandler interface {
|
||||||
|
// Handle processes the request and returns an error if unsuccessful
|
||||||
|
Handle(hctx *HandlerContext) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Responder sends handler responses back to the hub (over WS or SSH)
|
||||||
|
type Responder interface {
|
||||||
|
SendResponse(data any, requestID *uint32) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandlerRegistry manages the mapping between actions and their handlers
|
||||||
|
type HandlerRegistry struct {
|
||||||
|
handlers map[common.WebSocketAction]RequestHandler
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHandlerRegistry creates a new handler registry with default handlers
|
||||||
|
func NewHandlerRegistry() *HandlerRegistry {
|
||||||
|
registry := &HandlerRegistry{
|
||||||
|
handlers: make(map[common.WebSocketAction]RequestHandler),
|
||||||
|
}
|
||||||
|
|
||||||
|
registry.Register(common.GetData, &GetDataHandler{})
|
||||||
|
registry.Register(common.CheckFingerprint, &CheckFingerprintHandler{})
|
||||||
|
registry.Register(common.GetContainerLogs, &GetContainerLogsHandler{})
|
||||||
|
registry.Register(common.GetContainerInfo, &GetContainerInfoHandler{})
|
||||||
|
registry.Register(common.GetSmartData, &GetSmartDataHandler{})
|
||||||
|
registry.Register(common.GetSystemdInfo, &GetSystemdInfoHandler{})
|
||||||
|
|
||||||
|
return registry
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register registers a handler for a specific action type
|
||||||
|
func (hr *HandlerRegistry) Register(action common.WebSocketAction, handler RequestHandler) {
|
||||||
|
hr.handlers[action] = handler
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle routes the request to the appropriate handler
|
||||||
|
func (hr *HandlerRegistry) Handle(hctx *HandlerContext) error {
|
||||||
|
handler, exists := hr.handlers[hctx.Request.Action]
|
||||||
|
if !exists {
|
||||||
|
return fmt.Errorf("unknown action: %d", hctx.Request.Action)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check verification requirement - default to requiring verification
|
||||||
|
if hctx.Request.Action != common.CheckFingerprint && !hctx.HubVerified {
|
||||||
|
return errors.New("hub not verified")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Log handler execution for debugging
|
||||||
|
// slog.Debug("Executing handler", "action", hctx.Request.Action)
|
||||||
|
|
||||||
|
return handler.Handle(hctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHandler returns the handler for a specific action
|
||||||
|
func (hr *HandlerRegistry) GetHandler(action common.WebSocketAction) (RequestHandler, bool) {
|
||||||
|
handler, exists := hr.handlers[action]
|
||||||
|
return handler, exists
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// GetDataHandler handles system data requests
|
||||||
|
type GetDataHandler struct{}
|
||||||
|
|
||||||
|
func (h *GetDataHandler) Handle(hctx *HandlerContext) error {
|
||||||
|
var options common.DataRequestOptions
|
||||||
|
_ = cbor.Unmarshal(hctx.Request.Data, &options)
|
||||||
|
|
||||||
|
sysStats := hctx.Agent.gatherStats(options.CacheTimeMs)
|
||||||
|
return hctx.SendResponse(sysStats, hctx.RequestID)
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// CheckFingerprintHandler handles authentication challenges
|
||||||
|
type CheckFingerprintHandler struct{}
|
||||||
|
|
||||||
|
func (h *CheckFingerprintHandler) Handle(hctx *HandlerContext) error {
|
||||||
|
return hctx.Client.handleAuthChallenge(hctx.Request, hctx.RequestID)
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// GetContainerLogsHandler handles container log requests
|
||||||
|
type GetContainerLogsHandler struct{}
|
||||||
|
|
||||||
|
func (h *GetContainerLogsHandler) Handle(hctx *HandlerContext) error {
|
||||||
|
if hctx.Agent.dockerManager == nil {
|
||||||
|
return hctx.SendResponse("", hctx.RequestID)
|
||||||
|
}
|
||||||
|
|
||||||
|
var req common.ContainerLogsRequest
|
||||||
|
if err := cbor.Unmarshal(hctx.Request.Data, &req); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
logContent, err := hctx.Agent.dockerManager.getLogs(ctx, req.ContainerID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return hctx.SendResponse(logContent, hctx.RequestID)
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// GetContainerInfoHandler handles container info requests
|
||||||
|
type GetContainerInfoHandler struct{}
|
||||||
|
|
||||||
|
func (h *GetContainerInfoHandler) Handle(hctx *HandlerContext) error {
|
||||||
|
if hctx.Agent.dockerManager == nil {
|
||||||
|
return hctx.SendResponse("", hctx.RequestID)
|
||||||
|
}
|
||||||
|
|
||||||
|
var req common.ContainerInfoRequest
|
||||||
|
if err := cbor.Unmarshal(hctx.Request.Data, &req); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
info, err := hctx.Agent.dockerManager.getContainerInfo(ctx, req.ContainerID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return hctx.SendResponse(string(info), hctx.RequestID)
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// GetSmartDataHandler handles SMART data requests
|
||||||
|
type GetSmartDataHandler struct{}
|
||||||
|
|
||||||
|
func (h *GetSmartDataHandler) Handle(hctx *HandlerContext) error {
|
||||||
|
if hctx.Agent.smartManager == nil {
|
||||||
|
// return empty map to indicate no data
|
||||||
|
return hctx.SendResponse(map[string]smart.SmartData{}, hctx.RequestID)
|
||||||
|
}
|
||||||
|
if err := hctx.Agent.smartManager.Refresh(false); err != nil {
|
||||||
|
slog.Debug("smart refresh failed", "err", err)
|
||||||
|
}
|
||||||
|
data := hctx.Agent.smartManager.GetCurrentData()
|
||||||
|
return hctx.SendResponse(data, hctx.RequestID)
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// GetSystemdInfoHandler handles detailed systemd service info requests
|
||||||
|
type GetSystemdInfoHandler struct{}
|
||||||
|
|
||||||
|
func (h *GetSystemdInfoHandler) Handle(hctx *HandlerContext) error {
|
||||||
|
if hctx.Agent.systemdManager == nil {
|
||||||
|
return errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
|
||||||
|
var req common.SystemdInfoRequest
|
||||||
|
if err := cbor.Unmarshal(hctx.Request.Data, &req); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if req.ServiceName == "" {
|
||||||
|
return errors.New("service name is required")
|
||||||
|
}
|
||||||
|
|
||||||
|
details, err := hctx.Agent.systemdManager.getServiceDetails(req.ServiceName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return hctx.SendResponse(details, hctx.RequestID)
|
||||||
|
}
|
||||||
112
agent/handlers_test.go
Normal file
112
agent/handlers_test.go
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
//go:build testing
|
||||||
|
// +build testing
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MockHandler for testing
|
||||||
|
type MockHandler struct {
|
||||||
|
requiresVerification bool
|
||||||
|
description string
|
||||||
|
handleFunc func(ctx *HandlerContext) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockHandler) Handle(ctx *HandlerContext) error {
|
||||||
|
if m.handleFunc != nil {
|
||||||
|
return m.handleFunc(ctx)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MockHandler) RequiresVerification() bool {
|
||||||
|
return m.requiresVerification
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestHandlerRegistry tests the handler registry functionality
|
||||||
|
func TestHandlerRegistry(t *testing.T) {
|
||||||
|
t.Run("default registration", func(t *testing.T) {
|
||||||
|
registry := NewHandlerRegistry()
|
||||||
|
|
||||||
|
// Check default handlers are registered
|
||||||
|
getDataHandler, exists := registry.GetHandler(common.GetData)
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.IsType(t, &GetDataHandler{}, getDataHandler)
|
||||||
|
|
||||||
|
fingerprintHandler, exists := registry.GetHandler(common.CheckFingerprint)
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.IsType(t, &CheckFingerprintHandler{}, fingerprintHandler)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("custom handler registration", func(t *testing.T) {
|
||||||
|
registry := NewHandlerRegistry()
|
||||||
|
mockHandler := &MockHandler{
|
||||||
|
requiresVerification: true,
|
||||||
|
description: "Test handler",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Register a custom handler for a mock action
|
||||||
|
const mockAction common.WebSocketAction = 99
|
||||||
|
registry.Register(mockAction, mockHandler)
|
||||||
|
|
||||||
|
// Verify registration
|
||||||
|
handler, exists := registry.GetHandler(mockAction)
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, mockHandler, handler)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("unknown action", func(t *testing.T) {
|
||||||
|
registry := NewHandlerRegistry()
|
||||||
|
ctx := &HandlerContext{
|
||||||
|
Request: &common.HubRequest[cbor.RawMessage]{
|
||||||
|
Action: common.WebSocketAction(255), // Unknown action
|
||||||
|
},
|
||||||
|
HubVerified: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := registry.Handle(ctx)
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "unknown action: 255")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("verification required", func(t *testing.T) {
|
||||||
|
registry := NewHandlerRegistry()
|
||||||
|
ctx := &HandlerContext{
|
||||||
|
Request: &common.HubRequest[cbor.RawMessage]{
|
||||||
|
Action: common.GetData, // Requires verification
|
||||||
|
},
|
||||||
|
HubVerified: false, // Not verified
|
||||||
|
}
|
||||||
|
|
||||||
|
err := registry.Handle(ctx)
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "hub not verified")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCheckFingerprintHandler tests the CheckFingerprint handler
|
||||||
|
func TestCheckFingerprintHandler(t *testing.T) {
|
||||||
|
handler := &CheckFingerprintHandler{}
|
||||||
|
|
||||||
|
t.Run("handle with invalid data", func(t *testing.T) {
|
||||||
|
client := &WebSocketClient{}
|
||||||
|
ctx := &HandlerContext{
|
||||||
|
Client: client,
|
||||||
|
HubVerified: false,
|
||||||
|
Request: &common.HubRequest[cbor.RawMessage]{
|
||||||
|
Action: common.CheckFingerprint,
|
||||||
|
Data: cbor.RawMessage{}, // Empty/invalid data
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Should fail to decode the fingerprint request
|
||||||
|
err := handler.Handle(ctx)
|
||||||
|
assert.Error(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
209
agent/network.go
209
agent/network.go
@@ -12,8 +12,6 @@ import (
|
|||||||
psutilNet "github.com/shirou/gopsutil/v4/net"
|
psutilNet "github.com/shirou/gopsutil/v4/net"
|
||||||
)
|
)
|
||||||
|
|
||||||
var netInterfaceDeltaTracker = deltatracker.NewDeltaTracker[string, uint64]()
|
|
||||||
|
|
||||||
// NicConfig controls inclusion/exclusion of network interfaces via the NICS env var
|
// NicConfig controls inclusion/exclusion of network interfaces via the NICS env var
|
||||||
//
|
//
|
||||||
// Behavior mirrors SensorConfig's matching logic:
|
// Behavior mirrors SensorConfig's matching logic:
|
||||||
@@ -77,75 +75,17 @@ func isValidNic(nicName string, cfg *NicConfig) bool {
|
|||||||
return cfg.isBlacklist
|
return cfg.isBlacklist
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Agent) updateNetworkStats(systemStats *system.Stats) {
|
func (a *Agent) updateNetworkStats(cacheTimeMs uint16, systemStats *system.Stats) {
|
||||||
// network stats
|
// network stats
|
||||||
if len(a.netInterfaces) == 0 {
|
a.ensureNetInterfacesInitialized()
|
||||||
// if no network interfaces, initialize again
|
|
||||||
// this is a fix if agent started before network is online (#466)
|
|
||||||
// maybe refactor this in the future to not cache interface names at all so we
|
|
||||||
// don't miss an interface that's been added after agent started in any circumstance
|
|
||||||
a.initializeNetIoStats()
|
|
||||||
}
|
|
||||||
|
|
||||||
if systemStats.NetworkInterfaces == nil {
|
a.ensureNetworkInterfacesMap(systemStats)
|
||||||
systemStats.NetworkInterfaces = make(map[string][4]uint64, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
||||||
msElapsed := uint64(time.Since(a.netIoStats.Time).Milliseconds())
|
nis, msElapsed := a.loadAndTickNetBaseline(cacheTimeMs)
|
||||||
a.netIoStats.Time = time.Now()
|
totalBytesSent, totalBytesRecv := a.sumAndTrackPerNicDeltas(cacheTimeMs, msElapsed, netIO, systemStats)
|
||||||
totalBytesSent := uint64(0)
|
bytesSentPerSecond, bytesRecvPerSecond := a.computeBytesPerSecond(msElapsed, totalBytesSent, totalBytesRecv, nis)
|
||||||
totalBytesRecv := uint64(0)
|
a.applyNetworkTotals(cacheTimeMs, netIO, systemStats, nis, totalBytesSent, totalBytesRecv, bytesSentPerSecond, bytesRecvPerSecond)
|
||||||
netInterfaceDeltaTracker.Cycle()
|
|
||||||
// sum all bytes sent and received
|
|
||||||
for _, v := range netIO {
|
|
||||||
// skip if not in valid network interfaces list
|
|
||||||
if _, exists := a.netInterfaces[v.Name]; !exists {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
totalBytesSent += v.BytesSent
|
|
||||||
totalBytesRecv += v.BytesRecv
|
|
||||||
|
|
||||||
// track deltas for each network interface
|
|
||||||
var upDelta, downDelta uint64
|
|
||||||
upKey, downKey := fmt.Sprintf("%sup", v.Name), fmt.Sprintf("%sdown", v.Name)
|
|
||||||
netInterfaceDeltaTracker.Set(upKey, v.BytesSent)
|
|
||||||
netInterfaceDeltaTracker.Set(downKey, v.BytesRecv)
|
|
||||||
if msElapsed > 0 {
|
|
||||||
upDelta = netInterfaceDeltaTracker.Delta(upKey) * 1000 / msElapsed
|
|
||||||
downDelta = netInterfaceDeltaTracker.Delta(downKey) * 1000 / msElapsed
|
|
||||||
}
|
|
||||||
// add interface to systemStats
|
|
||||||
systemStats.NetworkInterfaces[v.Name] = [4]uint64{upDelta, downDelta, v.BytesSent, v.BytesRecv}
|
|
||||||
}
|
|
||||||
|
|
||||||
// add to systemStats
|
|
||||||
var bytesSentPerSecond, bytesRecvPerSecond uint64
|
|
||||||
if msElapsed > 0 {
|
|
||||||
bytesSentPerSecond = (totalBytesSent - a.netIoStats.BytesSent) * 1000 / msElapsed
|
|
||||||
bytesRecvPerSecond = (totalBytesRecv - a.netIoStats.BytesRecv) * 1000 / msElapsed
|
|
||||||
}
|
|
||||||
networkSentPs := bytesToMegabytes(float64(bytesSentPerSecond))
|
|
||||||
networkRecvPs := bytesToMegabytes(float64(bytesRecvPerSecond))
|
|
||||||
// add check for issue (#150) where sent is a massive number
|
|
||||||
if networkSentPs > 10_000 || networkRecvPs > 10_000 {
|
|
||||||
slog.Warn("Invalid net stats. Resetting.", "sent", networkSentPs, "recv", networkRecvPs)
|
|
||||||
for _, v := range netIO {
|
|
||||||
if _, exists := a.netInterfaces[v.Name]; !exists {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
slog.Info(v.Name, "recv", v.BytesRecv, "sent", v.BytesSent)
|
|
||||||
}
|
|
||||||
// reset network I/O stats
|
|
||||||
a.initializeNetIoStats()
|
|
||||||
} else {
|
|
||||||
systemStats.NetworkSent = networkSentPs
|
|
||||||
systemStats.NetworkRecv = networkRecvPs
|
|
||||||
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = bytesSentPerSecond, bytesRecvPerSecond
|
|
||||||
// update netIoStats
|
|
||||||
a.netIoStats.BytesSent = totalBytesSent
|
|
||||||
a.netIoStats.BytesRecv = totalBytesRecv
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -160,13 +100,8 @@ func (a *Agent) initializeNetIoStats() {
|
|||||||
nicCfg = newNicConfig(nicsEnvVal)
|
nicCfg = newNicConfig(nicsEnvVal)
|
||||||
}
|
}
|
||||||
|
|
||||||
// reset network I/O stats
|
// get current network I/O stats and record valid interfaces
|
||||||
a.netIoStats.BytesSent = 0
|
|
||||||
a.netIoStats.BytesRecv = 0
|
|
||||||
|
|
||||||
// get intial network I/O stats
|
|
||||||
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
||||||
a.netIoStats.Time = time.Now()
|
|
||||||
for _, v := range netIO {
|
for _, v := range netIO {
|
||||||
if nicsEnvExists && !isValidNic(v.Name, nicCfg) {
|
if nicsEnvExists && !isValidNic(v.Name, nicCfg) {
|
||||||
continue
|
continue
|
||||||
@@ -175,12 +110,136 @@ func (a *Agent) initializeNetIoStats() {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
slog.Info("Detected network interface", "name", v.Name, "sent", v.BytesSent, "recv", v.BytesRecv)
|
slog.Info("Detected network interface", "name", v.Name, "sent", v.BytesSent, "recv", v.BytesRecv)
|
||||||
a.netIoStats.BytesSent += v.BytesSent
|
|
||||||
a.netIoStats.BytesRecv += v.BytesRecv
|
|
||||||
// store as a valid network interface
|
// store as a valid network interface
|
||||||
a.netInterfaces[v.Name] = struct{}{}
|
a.netInterfaces[v.Name] = struct{}{}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Reset per-cache-time trackers and baselines so they will reinitialize on next use
|
||||||
|
a.netInterfaceDeltaTrackers = make(map[uint16]*deltatracker.DeltaTracker[string, uint64])
|
||||||
|
a.netIoStats = make(map[uint16]system.NetIoStats)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensureNetInterfacesInitialized re-initializes NICs if none are currently tracked
|
||||||
|
func (a *Agent) ensureNetInterfacesInitialized() {
|
||||||
|
if len(a.netInterfaces) == 0 {
|
||||||
|
// if no network interfaces, initialize again
|
||||||
|
// this is a fix if agent started before network is online (#466)
|
||||||
|
// maybe refactor this in the future to not cache interface names at all so we
|
||||||
|
// don't miss an interface that's been added after agent started in any circumstance
|
||||||
|
a.initializeNetIoStats()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensureNetworkInterfacesMap ensures systemStats.NetworkInterfaces map exists
|
||||||
|
func (a *Agent) ensureNetworkInterfacesMap(systemStats *system.Stats) {
|
||||||
|
if systemStats.NetworkInterfaces == nil {
|
||||||
|
systemStats.NetworkInterfaces = make(map[string][4]uint64, 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadAndTickNetBaseline returns the NetIoStats baseline and milliseconds elapsed, updating time
|
||||||
|
func (a *Agent) loadAndTickNetBaseline(cacheTimeMs uint16) (netIoStat system.NetIoStats, msElapsed uint64) {
|
||||||
|
netIoStat = a.netIoStats[cacheTimeMs]
|
||||||
|
if netIoStat.Time.IsZero() {
|
||||||
|
netIoStat.Time = time.Now()
|
||||||
|
msElapsed = 0
|
||||||
|
} else {
|
||||||
|
msElapsed = uint64(time.Since(netIoStat.Time).Milliseconds())
|
||||||
|
netIoStat.Time = time.Now()
|
||||||
|
}
|
||||||
|
return netIoStat, msElapsed
|
||||||
|
}
|
||||||
|
|
||||||
|
// sumAndTrackPerNicDeltas accumulates totals and records per-NIC up/down deltas into systemStats
|
||||||
|
func (a *Agent) sumAndTrackPerNicDeltas(cacheTimeMs uint16, msElapsed uint64, netIO []psutilNet.IOCountersStat, systemStats *system.Stats) (totalBytesSent, totalBytesRecv uint64) {
|
||||||
|
tracker := a.netInterfaceDeltaTrackers[cacheTimeMs]
|
||||||
|
if tracker == nil {
|
||||||
|
tracker = deltatracker.NewDeltaTracker[string, uint64]()
|
||||||
|
a.netInterfaceDeltaTrackers[cacheTimeMs] = tracker
|
||||||
|
}
|
||||||
|
tracker.Cycle()
|
||||||
|
|
||||||
|
for _, v := range netIO {
|
||||||
|
if _, exists := a.netInterfaces[v.Name]; !exists {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalBytesSent += v.BytesSent
|
||||||
|
totalBytesRecv += v.BytesRecv
|
||||||
|
|
||||||
|
var upDelta, downDelta uint64
|
||||||
|
upKey, downKey := fmt.Sprintf("%sup", v.Name), fmt.Sprintf("%sdown", v.Name)
|
||||||
|
tracker.Set(upKey, v.BytesSent)
|
||||||
|
tracker.Set(downKey, v.BytesRecv)
|
||||||
|
if msElapsed > 0 {
|
||||||
|
if prevVal, ok := tracker.Previous(upKey); ok {
|
||||||
|
var deltaBytes uint64
|
||||||
|
if v.BytesSent >= prevVal {
|
||||||
|
deltaBytes = v.BytesSent - prevVal
|
||||||
|
} else {
|
||||||
|
deltaBytes = v.BytesSent
|
||||||
|
}
|
||||||
|
upDelta = deltaBytes * 1000 / msElapsed
|
||||||
|
}
|
||||||
|
if prevVal, ok := tracker.Previous(downKey); ok {
|
||||||
|
var deltaBytes uint64
|
||||||
|
if v.BytesRecv >= prevVal {
|
||||||
|
deltaBytes = v.BytesRecv - prevVal
|
||||||
|
} else {
|
||||||
|
deltaBytes = v.BytesRecv
|
||||||
|
}
|
||||||
|
downDelta = deltaBytes * 1000 / msElapsed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
systemStats.NetworkInterfaces[v.Name] = [4]uint64{upDelta, downDelta, v.BytesSent, v.BytesRecv}
|
||||||
|
}
|
||||||
|
|
||||||
|
return totalBytesSent, totalBytesRecv
|
||||||
|
}
|
||||||
|
|
||||||
|
// computeBytesPerSecond calculates per-second totals from elapsed time and totals
|
||||||
|
func (a *Agent) computeBytesPerSecond(msElapsed, totalBytesSent, totalBytesRecv uint64, nis system.NetIoStats) (bytesSentPerSecond, bytesRecvPerSecond uint64) {
|
||||||
|
if msElapsed > 0 {
|
||||||
|
bytesSentPerSecond = (totalBytesSent - nis.BytesSent) * 1000 / msElapsed
|
||||||
|
bytesRecvPerSecond = (totalBytesRecv - nis.BytesRecv) * 1000 / msElapsed
|
||||||
|
}
|
||||||
|
return bytesSentPerSecond, bytesRecvPerSecond
|
||||||
|
}
|
||||||
|
|
||||||
|
// applyNetworkTotals validates and writes computed network stats, or resets on anomaly
|
||||||
|
func (a *Agent) applyNetworkTotals(
|
||||||
|
cacheTimeMs uint16,
|
||||||
|
netIO []psutilNet.IOCountersStat,
|
||||||
|
systemStats *system.Stats,
|
||||||
|
nis system.NetIoStats,
|
||||||
|
totalBytesSent, totalBytesRecv uint64,
|
||||||
|
bytesSentPerSecond, bytesRecvPerSecond uint64,
|
||||||
|
) {
|
||||||
|
networkSentPs := bytesToMegabytes(float64(bytesSentPerSecond))
|
||||||
|
networkRecvPs := bytesToMegabytes(float64(bytesRecvPerSecond))
|
||||||
|
if networkSentPs > 10_000 || networkRecvPs > 10_000 {
|
||||||
|
slog.Warn("Invalid net stats. Resetting.", "sent", networkSentPs, "recv", networkRecvPs)
|
||||||
|
for _, v := range netIO {
|
||||||
|
if _, exists := a.netInterfaces[v.Name]; !exists {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
slog.Info(v.Name, "recv", v.BytesRecv, "sent", v.BytesSent)
|
||||||
|
}
|
||||||
|
a.initializeNetIoStats()
|
||||||
|
delete(a.netIoStats, cacheTimeMs)
|
||||||
|
delete(a.netInterfaceDeltaTrackers, cacheTimeMs)
|
||||||
|
systemStats.NetworkSent = 0
|
||||||
|
systemStats.NetworkRecv = 0
|
||||||
|
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = 0, 0
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
systemStats.NetworkSent = networkSentPs
|
||||||
|
systemStats.NetworkRecv = networkRecvPs
|
||||||
|
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = bytesSentPerSecond, bytesRecvPerSecond
|
||||||
|
nis.BytesSent = totalBytesSent
|
||||||
|
nis.BytesRecv = totalBytesRecv
|
||||||
|
a.netIoStats[cacheTimeMs] = nis
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Agent) skipNetworkInterface(v psutilNet.IOCountersStat) bool {
|
func (a *Agent) skipNetworkInterface(v psutilNet.IOCountersStat) bool {
|
||||||
|
|||||||
@@ -4,7 +4,11 @@ package agent
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/deltatracker"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
psutilNet "github.com/shirou/gopsutil/v4/net"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
@@ -257,3 +261,242 @@ func TestNewNicConfig(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
func TestEnsureNetworkInterfacesMap(t *testing.T) {
|
||||||
|
var a Agent
|
||||||
|
var stats system.Stats
|
||||||
|
|
||||||
|
// Initially nil
|
||||||
|
assert.Nil(t, stats.NetworkInterfaces)
|
||||||
|
// Ensure map is created
|
||||||
|
a.ensureNetworkInterfacesMap(&stats)
|
||||||
|
assert.NotNil(t, stats.NetworkInterfaces)
|
||||||
|
// Idempotent
|
||||||
|
a.ensureNetworkInterfacesMap(&stats)
|
||||||
|
assert.NotNil(t, stats.NetworkInterfaces)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLoadAndTickNetBaseline(t *testing.T) {
|
||||||
|
a := &Agent{netIoStats: make(map[uint16]system.NetIoStats)}
|
||||||
|
|
||||||
|
// First call initializes time and returns 0 elapsed
|
||||||
|
ni, elapsed := a.loadAndTickNetBaseline(100)
|
||||||
|
assert.Equal(t, uint64(0), elapsed)
|
||||||
|
assert.False(t, ni.Time.IsZero())
|
||||||
|
|
||||||
|
// Store back what loadAndTick returns to mimic updateNetworkStats behavior
|
||||||
|
a.netIoStats[100] = ni
|
||||||
|
|
||||||
|
time.Sleep(2 * time.Millisecond)
|
||||||
|
|
||||||
|
// Next call should produce >= 0 elapsed and update time
|
||||||
|
ni2, elapsed2 := a.loadAndTickNetBaseline(100)
|
||||||
|
assert.True(t, elapsed2 > 0)
|
||||||
|
assert.False(t, ni2.Time.IsZero())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestComputeBytesPerSecond(t *testing.T) {
|
||||||
|
a := &Agent{}
|
||||||
|
|
||||||
|
// No elapsed -> zero rate
|
||||||
|
bytesUp, bytesDown := a.computeBytesPerSecond(0, 2000, 3000, system.NetIoStats{BytesSent: 1000, BytesRecv: 1000})
|
||||||
|
assert.Equal(t, uint64(0), bytesUp)
|
||||||
|
assert.Equal(t, uint64(0), bytesDown)
|
||||||
|
|
||||||
|
// With elapsed -> per-second calculation
|
||||||
|
bytesUp, bytesDown = a.computeBytesPerSecond(500, 6000, 11000, system.NetIoStats{BytesSent: 1000, BytesRecv: 1000})
|
||||||
|
// (6000-1000)*1000/500 = 10000; (11000-1000)*1000/500 = 20000
|
||||||
|
assert.Equal(t, uint64(10000), bytesUp)
|
||||||
|
assert.Equal(t, uint64(20000), bytesDown)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSumAndTrackPerNicDeltas(t *testing.T) {
|
||||||
|
a := &Agent{
|
||||||
|
netInterfaces: map[string]struct{}{"eth0": {}, "wlan0": {}},
|
||||||
|
netInterfaceDeltaTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Two samples for same cache interval to verify delta behavior
|
||||||
|
cache := uint16(42)
|
||||||
|
net1 := []psutilNet.IOCountersStat{{Name: "eth0", BytesSent: 1000, BytesRecv: 2000}}
|
||||||
|
stats1 := &system.Stats{}
|
||||||
|
a.ensureNetworkInterfacesMap(stats1)
|
||||||
|
tx1, rx1 := a.sumAndTrackPerNicDeltas(cache, 0, net1, stats1)
|
||||||
|
assert.Equal(t, uint64(1000), tx1)
|
||||||
|
assert.Equal(t, uint64(2000), rx1)
|
||||||
|
|
||||||
|
// Second cycle with elapsed, larger counters -> deltas computed inside
|
||||||
|
net2 := []psutilNet.IOCountersStat{{Name: "eth0", BytesSent: 4000, BytesRecv: 9000}}
|
||||||
|
stats := &system.Stats{}
|
||||||
|
a.ensureNetworkInterfacesMap(stats)
|
||||||
|
tx2, rx2 := a.sumAndTrackPerNicDeltas(cache, 1000, net2, stats)
|
||||||
|
assert.Equal(t, uint64(4000), tx2)
|
||||||
|
assert.Equal(t, uint64(9000), rx2)
|
||||||
|
// Up/Down deltas per second should be (4000-1000)/1s = 3000 and (9000-2000)/1s = 7000
|
||||||
|
ni, ok := stats.NetworkInterfaces["eth0"]
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, uint64(3000), ni[0])
|
||||||
|
assert.Equal(t, uint64(7000), ni[1])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSumAndTrackPerNicDeltasHandlesCounterReset(t *testing.T) {
|
||||||
|
a := &Agent{
|
||||||
|
netInterfaces: map[string]struct{}{"eth0": {}},
|
||||||
|
netInterfaceDeltaTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
}
|
||||||
|
|
||||||
|
cache := uint16(77)
|
||||||
|
|
||||||
|
// First interval establishes baseline values
|
||||||
|
initial := []psutilNet.IOCountersStat{{Name: "eth0", BytesSent: 4_000, BytesRecv: 6_000}}
|
||||||
|
statsInitial := &system.Stats{}
|
||||||
|
a.ensureNetworkInterfacesMap(statsInitial)
|
||||||
|
_, _ = a.sumAndTrackPerNicDeltas(cache, 0, initial, statsInitial)
|
||||||
|
|
||||||
|
// Second interval increments counters normally so previous snapshot gets populated
|
||||||
|
increment := []psutilNet.IOCountersStat{{Name: "eth0", BytesSent: 9_000, BytesRecv: 11_000}}
|
||||||
|
statsIncrement := &system.Stats{}
|
||||||
|
a.ensureNetworkInterfacesMap(statsIncrement)
|
||||||
|
_, _ = a.sumAndTrackPerNicDeltas(cache, 1_000, increment, statsIncrement)
|
||||||
|
|
||||||
|
niIncrement, ok := statsIncrement.NetworkInterfaces["eth0"]
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, uint64(5_000), niIncrement[0])
|
||||||
|
assert.Equal(t, uint64(5_000), niIncrement[1])
|
||||||
|
|
||||||
|
// Third interval simulates counter reset (values drop below previous totals)
|
||||||
|
reset := []psutilNet.IOCountersStat{{Name: "eth0", BytesSent: 1_200, BytesRecv: 1_500}}
|
||||||
|
statsReset := &system.Stats{}
|
||||||
|
a.ensureNetworkInterfacesMap(statsReset)
|
||||||
|
_, _ = a.sumAndTrackPerNicDeltas(cache, 1_000, reset, statsReset)
|
||||||
|
|
||||||
|
niReset, ok := statsReset.NetworkInterfaces["eth0"]
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, uint64(1_200), niReset[0], "upload delta should match new counter value after reset")
|
||||||
|
assert.Equal(t, uint64(1_500), niReset[1], "download delta should match new counter value after reset")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestApplyNetworkTotals(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
bytesSentPerSecond uint64
|
||||||
|
bytesRecvPerSecond uint64
|
||||||
|
totalBytesSent uint64
|
||||||
|
totalBytesRecv uint64
|
||||||
|
expectReset bool
|
||||||
|
expectedNetworkSent float64
|
||||||
|
expectedNetworkRecv float64
|
||||||
|
expectedBandwidthSent uint64
|
||||||
|
expectedBandwidthRecv uint64
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "Valid network stats - normal values",
|
||||||
|
bytesSentPerSecond: 1000000, // 1 MB/s
|
||||||
|
bytesRecvPerSecond: 2000000, // 2 MB/s
|
||||||
|
totalBytesSent: 10000000,
|
||||||
|
totalBytesRecv: 20000000,
|
||||||
|
expectReset: false,
|
||||||
|
expectedNetworkSent: 0.95, // ~1 MB/s rounded to 2 decimals
|
||||||
|
expectedNetworkRecv: 1.91, // ~2 MB/s rounded to 2 decimals
|
||||||
|
expectedBandwidthSent: 1000000,
|
||||||
|
expectedBandwidthRecv: 2000000,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Invalid network stats - sent exceeds threshold",
|
||||||
|
bytesSentPerSecond: 11000000000, // ~10.5 GB/s > 10 GB/s threshold
|
||||||
|
bytesRecvPerSecond: 1000000, // 1 MB/s
|
||||||
|
totalBytesSent: 10000000,
|
||||||
|
totalBytesRecv: 20000000,
|
||||||
|
expectReset: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Invalid network stats - recv exceeds threshold",
|
||||||
|
bytesSentPerSecond: 1000000, // 1 MB/s
|
||||||
|
bytesRecvPerSecond: 11000000000, // ~10.5 GB/s > 10 GB/s threshold
|
||||||
|
totalBytesSent: 10000000,
|
||||||
|
totalBytesRecv: 20000000,
|
||||||
|
expectReset: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Invalid network stats - both exceed threshold",
|
||||||
|
bytesSentPerSecond: 12000000000, // ~11.4 GB/s
|
||||||
|
bytesRecvPerSecond: 13000000000, // ~12.4 GB/s
|
||||||
|
totalBytesSent: 10000000,
|
||||||
|
totalBytesRecv: 20000000,
|
||||||
|
expectReset: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Valid network stats - at threshold boundary",
|
||||||
|
bytesSentPerSecond: 10485750000, // ~9999.99 MB/s (rounds to 9999.99)
|
||||||
|
bytesRecvPerSecond: 10485750000, // ~9999.99 MB/s (rounds to 9999.99)
|
||||||
|
totalBytesSent: 10000000,
|
||||||
|
totalBytesRecv: 20000000,
|
||||||
|
expectReset: false,
|
||||||
|
expectedNetworkSent: 9999.99,
|
||||||
|
expectedNetworkRecv: 9999.99,
|
||||||
|
expectedBandwidthSent: 10485750000,
|
||||||
|
expectedBandwidthRecv: 10485750000,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Zero values",
|
||||||
|
bytesSentPerSecond: 0,
|
||||||
|
bytesRecvPerSecond: 0,
|
||||||
|
totalBytesSent: 0,
|
||||||
|
totalBytesRecv: 0,
|
||||||
|
expectReset: false,
|
||||||
|
expectedNetworkSent: 0.0,
|
||||||
|
expectedNetworkRecv: 0.0,
|
||||||
|
expectedBandwidthSent: 0,
|
||||||
|
expectedBandwidthRecv: 0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
// Setup agent with initialized maps
|
||||||
|
a := &Agent{
|
||||||
|
netInterfaces: make(map[string]struct{}),
|
||||||
|
netIoStats: make(map[uint16]system.NetIoStats),
|
||||||
|
netInterfaceDeltaTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheTimeMs := uint16(100)
|
||||||
|
netIO := []psutilNet.IOCountersStat{
|
||||||
|
{Name: "eth0", BytesSent: 1000, BytesRecv: 2000},
|
||||||
|
}
|
||||||
|
systemStats := &system.Stats{}
|
||||||
|
nis := system.NetIoStats{}
|
||||||
|
|
||||||
|
a.applyNetworkTotals(
|
||||||
|
cacheTimeMs,
|
||||||
|
netIO,
|
||||||
|
systemStats,
|
||||||
|
nis,
|
||||||
|
tt.totalBytesSent,
|
||||||
|
tt.totalBytesRecv,
|
||||||
|
tt.bytesSentPerSecond,
|
||||||
|
tt.bytesRecvPerSecond,
|
||||||
|
)
|
||||||
|
|
||||||
|
if tt.expectReset {
|
||||||
|
// Should have reset network tracking state - maps cleared and stats zeroed
|
||||||
|
assert.NotContains(t, a.netIoStats, cacheTimeMs, "cache entry should be cleared after reset")
|
||||||
|
assert.NotContains(t, a.netInterfaceDeltaTrackers, cacheTimeMs, "tracker should be cleared on reset")
|
||||||
|
assert.Zero(t, systemStats.NetworkSent)
|
||||||
|
assert.Zero(t, systemStats.NetworkRecv)
|
||||||
|
assert.Zero(t, systemStats.Bandwidth[0])
|
||||||
|
assert.Zero(t, systemStats.Bandwidth[1])
|
||||||
|
} else {
|
||||||
|
// Should have applied stats
|
||||||
|
assert.Equal(t, tt.expectedNetworkSent, systemStats.NetworkSent)
|
||||||
|
assert.Equal(t, tt.expectedNetworkRecv, systemStats.NetworkRecv)
|
||||||
|
assert.Equal(t, tt.expectedBandwidthSent, systemStats.Bandwidth[0])
|
||||||
|
assert.Equal(t, tt.expectedBandwidthRecv, systemStats.Bandwidth[1])
|
||||||
|
|
||||||
|
// Should have updated NetIoStats
|
||||||
|
updatedNis := a.netIoStats[cacheTimeMs]
|
||||||
|
assert.Equal(t, tt.totalBytesSent, updatedNis.BytesSent)
|
||||||
|
assert.Equal(t, tt.totalBytesRecv, updatedNis.BytesRecv)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -13,7 +13,9 @@ import (
|
|||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
|
|
||||||
"github.com/blang/semver"
|
"github.com/blang/semver"
|
||||||
"github.com/fxamacker/cbor/v2"
|
"github.com/fxamacker/cbor/v2"
|
||||||
@@ -127,15 +129,81 @@ func (a *Agent) handleSession(s ssh.Session) {
|
|||||||
|
|
||||||
hubVersion := a.getHubVersion(sessionID, sessionCtx)
|
hubVersion := a.getHubVersion(sessionID, sessionCtx)
|
||||||
|
|
||||||
stats := a.gatherStats(sessionID)
|
// Legacy one-shot behavior for older hubs
|
||||||
|
if hubVersion.LT(beszel.MinVersionAgentResponse) {
|
||||||
err := a.writeToSession(s, stats, hubVersion)
|
if err := a.handleLegacyStats(s, hubVersion); err != nil {
|
||||||
if err != nil {
|
slog.Error("Error encoding stats", "err", err)
|
||||||
slog.Error("Error encoding stats", "err", err, "stats", stats)
|
s.Exit(1)
|
||||||
s.Exit(1)
|
return
|
||||||
} else {
|
}
|
||||||
s.Exit(0)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var req common.HubRequest[cbor.RawMessage]
|
||||||
|
if err := cbor.NewDecoder(s).Decode(&req); err != nil {
|
||||||
|
// Fallback to legacy one-shot if the first decode fails
|
||||||
|
if err2 := a.handleLegacyStats(s, hubVersion); err2 != nil {
|
||||||
|
slog.Error("Error encoding stats (fallback)", "err", err2)
|
||||||
|
s.Exit(1)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.Exit(0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := a.handleSSHRequest(s, &req); err != nil {
|
||||||
|
slog.Error("SSH request handling failed", "err", err)
|
||||||
|
s.Exit(1)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s.Exit(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleSSHRequest builds a handler context and dispatches to the shared registry
|
||||||
|
func (a *Agent) handleSSHRequest(w io.Writer, req *common.HubRequest[cbor.RawMessage]) error {
|
||||||
|
// SSH does not support fingerprint auth action
|
||||||
|
if req.Action == common.CheckFingerprint {
|
||||||
|
return cbor.NewEncoder(w).Encode(common.AgentResponse{Error: "unsupported action"})
|
||||||
|
}
|
||||||
|
|
||||||
|
// responder that writes AgentResponse to stdout
|
||||||
|
sshResponder := func(data any, requestID *uint32) error {
|
||||||
|
response := common.AgentResponse{Id: requestID}
|
||||||
|
switch v := data.(type) {
|
||||||
|
case *system.CombinedData:
|
||||||
|
response.SystemData = v
|
||||||
|
case string:
|
||||||
|
response.String = &v
|
||||||
|
case map[string]smart.SmartData:
|
||||||
|
response.SmartData = v
|
||||||
|
case systemd.ServiceDetails:
|
||||||
|
response.ServiceInfo = v
|
||||||
|
default:
|
||||||
|
response.Error = fmt.Sprintf("unsupported response type: %T", data)
|
||||||
|
}
|
||||||
|
return cbor.NewEncoder(w).Encode(response)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := &HandlerContext{
|
||||||
|
Client: nil,
|
||||||
|
Agent: a,
|
||||||
|
Request: req,
|
||||||
|
RequestID: nil,
|
||||||
|
HubVerified: true,
|
||||||
|
SendResponse: sshResponder,
|
||||||
|
}
|
||||||
|
|
||||||
|
if handler, ok := a.handlerRegistry.GetHandler(req.Action); ok {
|
||||||
|
if err := handler.Handle(ctx); err != nil {
|
||||||
|
return cbor.NewEncoder(w).Encode(common.AgentResponse{Error: err.Error()})
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return cbor.NewEncoder(w).Encode(common.AgentResponse{Error: fmt.Sprintf("unknown action: %d", req.Action)})
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleLegacyStats serves the legacy one-shot stats payload for older hubs
|
||||||
|
func (a *Agent) handleLegacyStats(w io.Writer, hubVersion semver.Version) error {
|
||||||
|
stats := a.gatherStats(60_000)
|
||||||
|
return a.writeToSession(w, stats, hubVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
// writeToSession encodes and writes system statistics to the session.
|
// writeToSession encodes and writes system statistics to the session.
|
||||||
|
|||||||
974
agent/smart.go
Normal file
974
agent/smart.go
Normal file
@@ -0,0 +1,974 @@
|
|||||||
|
//go:generate -command fetchsmartctl go run ./tools/fetchsmartctl
|
||||||
|
//go:generate fetchsmartctl -out ./smartmontools/smartctl.exe -url https://static.beszel.dev/bin/smartctl/smartctl-nc.exe -sha 3912249c3b329249aa512ce796fd1b64d7cbd8378b68ad2756b39163d9c30b47
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
|
||||||
|
"golang.org/x/exp/slog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SmartManager manages data collection for SMART devices
|
||||||
|
type SmartManager struct {
|
||||||
|
sync.Mutex
|
||||||
|
SmartDataMap map[string]*smart.SmartData
|
||||||
|
SmartDevices []*DeviceInfo
|
||||||
|
refreshMutex sync.Mutex
|
||||||
|
lastScanTime time.Time
|
||||||
|
binPath string
|
||||||
|
excludedDevices map[string]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type scanOutput struct {
|
||||||
|
Devices []struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
InfoName string `json:"info_name"`
|
||||||
|
Protocol string `json:"protocol"`
|
||||||
|
} `json:"devices"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DeviceInfo struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
InfoName string `json:"info_name"`
|
||||||
|
Protocol string `json:"protocol"`
|
||||||
|
// typeVerified reports whether we have already parsed SMART data for this device
|
||||||
|
// with the stored parserType. When true we can skip re-running the detection logic.
|
||||||
|
typeVerified bool
|
||||||
|
// parserType holds the parser type (nvme, sat, scsi) that last succeeded.
|
||||||
|
parserType string
|
||||||
|
}
|
||||||
|
|
||||||
|
var errNoValidSmartData = fmt.Errorf("no valid SMART data found") // Error for missing data
|
||||||
|
|
||||||
|
// Refresh updates SMART data for all known devices
|
||||||
|
func (sm *SmartManager) Refresh(forceScan bool) error {
|
||||||
|
sm.refreshMutex.Lock()
|
||||||
|
defer sm.refreshMutex.Unlock()
|
||||||
|
|
||||||
|
scanErr := sm.ScanDevices(false)
|
||||||
|
if scanErr != nil {
|
||||||
|
slog.Debug("smartctl scan failed", "err", scanErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
devices := sm.devicesSnapshot()
|
||||||
|
var collectErr error
|
||||||
|
for _, deviceInfo := range devices {
|
||||||
|
if deviceInfo == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := sm.CollectSmart(deviceInfo); err != nil {
|
||||||
|
slog.Debug("smartctl collect failed", "device", deviceInfo.Name, "err", err)
|
||||||
|
collectErr = err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return sm.resolveRefreshError(scanErr, collectErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// devicesSnapshot returns a copy of the current device slice to avoid iterating
|
||||||
|
// while holding the primary mutex for longer than necessary.
|
||||||
|
func (sm *SmartManager) devicesSnapshot() []*DeviceInfo {
|
||||||
|
sm.Lock()
|
||||||
|
defer sm.Unlock()
|
||||||
|
|
||||||
|
devices := make([]*DeviceInfo, len(sm.SmartDevices))
|
||||||
|
copy(devices, sm.SmartDevices)
|
||||||
|
return devices
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasSmartData reports whether any SMART data has been collected.
|
||||||
|
// func (sm *SmartManager) hasSmartData() bool {
|
||||||
|
// sm.Lock()
|
||||||
|
// defer sm.Unlock()
|
||||||
|
|
||||||
|
// return len(sm.SmartDataMap) > 0
|
||||||
|
// }
|
||||||
|
|
||||||
|
// resolveRefreshError determines the proper error to return after a refresh.
|
||||||
|
func (sm *SmartManager) resolveRefreshError(scanErr, collectErr error) error {
|
||||||
|
sm.Lock()
|
||||||
|
noDevices := len(sm.SmartDevices) == 0
|
||||||
|
noData := len(sm.SmartDataMap) == 0
|
||||||
|
sm.Unlock()
|
||||||
|
|
||||||
|
if noDevices {
|
||||||
|
if scanErr != nil {
|
||||||
|
return scanErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !noData {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if collectErr != nil {
|
||||||
|
return collectErr
|
||||||
|
}
|
||||||
|
if scanErr != nil {
|
||||||
|
return scanErr
|
||||||
|
}
|
||||||
|
return errNoValidSmartData
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCurrentData returns the current SMART data
|
||||||
|
func (sm *SmartManager) GetCurrentData() map[string]smart.SmartData {
|
||||||
|
sm.Lock()
|
||||||
|
defer sm.Unlock()
|
||||||
|
result := make(map[string]smart.SmartData, len(sm.SmartDataMap))
|
||||||
|
for key, value := range sm.SmartDataMap {
|
||||||
|
if value != nil {
|
||||||
|
result[key] = *value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScanDevices scans for SMART devices
|
||||||
|
// Scan devices using `smartctl --scan -j`
|
||||||
|
// If scan fails, return error
|
||||||
|
// If scan succeeds, parse the output and update the SmartDevices slice
|
||||||
|
func (sm *SmartManager) ScanDevices(force bool) error {
|
||||||
|
if !force && time.Since(sm.lastScanTime) < 30*time.Minute {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
sm.lastScanTime = time.Now()
|
||||||
|
currentDevices := sm.devicesSnapshot()
|
||||||
|
|
||||||
|
var configuredDevices []*DeviceInfo
|
||||||
|
if configuredRaw, ok := GetEnv("SMART_DEVICES"); ok {
|
||||||
|
slog.Info("SMART_DEVICES", "value", configuredRaw)
|
||||||
|
config := strings.TrimSpace(configuredRaw)
|
||||||
|
if config == "" {
|
||||||
|
return errNoValidSmartData
|
||||||
|
}
|
||||||
|
|
||||||
|
parsedDevices, err := sm.parseConfiguredDevices(config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
configuredDevices = parsedDevices
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(ctx, sm.binPath, "--scan", "-j")
|
||||||
|
output, err := cmd.Output()
|
||||||
|
|
||||||
|
var (
|
||||||
|
scanErr error
|
||||||
|
scannedDevices []*DeviceInfo
|
||||||
|
hasValidScan bool
|
||||||
|
)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
scanErr = err
|
||||||
|
} else {
|
||||||
|
scannedDevices, hasValidScan = sm.parseScan(output)
|
||||||
|
if !hasValidScan {
|
||||||
|
scanErr = errNoValidSmartData
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
finalDevices := mergeDeviceLists(currentDevices, scannedDevices, configuredDevices)
|
||||||
|
finalDevices = sm.filterExcludedDevices(finalDevices)
|
||||||
|
sm.updateSmartDevices(finalDevices)
|
||||||
|
|
||||||
|
if len(finalDevices) == 0 {
|
||||||
|
if scanErr != nil {
|
||||||
|
slog.Debug("smartctl scan failed", "err", scanErr)
|
||||||
|
return scanErr
|
||||||
|
}
|
||||||
|
return errNoValidSmartData
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *SmartManager) parseConfiguredDevices(config string) ([]*DeviceInfo, error) {
|
||||||
|
entries := strings.Split(config, ",")
|
||||||
|
devices := make([]*DeviceInfo, 0, len(entries))
|
||||||
|
for _, entry := range entries {
|
||||||
|
entry = strings.TrimSpace(entry)
|
||||||
|
if entry == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := strings.SplitN(entry, ":", 2)
|
||||||
|
|
||||||
|
name := strings.TrimSpace(parts[0])
|
||||||
|
if name == "" {
|
||||||
|
return nil, fmt.Errorf("invalid SMART_DEVICES entry %q", entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
devType := ""
|
||||||
|
if len(parts) == 2 {
|
||||||
|
devType = strings.ToLower(strings.TrimSpace(parts[1]))
|
||||||
|
}
|
||||||
|
|
||||||
|
devices = append(devices, &DeviceInfo{
|
||||||
|
Name: name,
|
||||||
|
Type: devType,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(devices) == 0 {
|
||||||
|
return nil, errNoValidSmartData
|
||||||
|
}
|
||||||
|
|
||||||
|
return devices, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *SmartManager) refreshExcludedDevices() {
|
||||||
|
rawValue, _ := GetEnv("EXCLUDE_SMART")
|
||||||
|
sm.excludedDevices = make(map[string]struct{})
|
||||||
|
|
||||||
|
for entry := range strings.SplitSeq(rawValue, ",") {
|
||||||
|
device := strings.TrimSpace(entry)
|
||||||
|
if device == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
sm.excludedDevices[device] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *SmartManager) isExcludedDevice(deviceName string) bool {
|
||||||
|
_, exists := sm.excludedDevices[deviceName]
|
||||||
|
return exists
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *SmartManager) filterExcludedDevices(devices []*DeviceInfo) []*DeviceInfo {
|
||||||
|
if devices == nil {
|
||||||
|
return []*DeviceInfo{}
|
||||||
|
}
|
||||||
|
|
||||||
|
excluded := sm.excludedDevices
|
||||||
|
if len(excluded) == 0 {
|
||||||
|
return devices
|
||||||
|
}
|
||||||
|
|
||||||
|
filtered := make([]*DeviceInfo, 0, len(devices))
|
||||||
|
for _, device := range devices {
|
||||||
|
if device == nil || device.Name == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, skip := excluded[device.Name]; skip {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
filtered = append(filtered, device)
|
||||||
|
}
|
||||||
|
return filtered
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectSmartOutputType inspects sections that are unique to each smartctl
|
||||||
|
// JSON schema (NVMe, ATA/SATA, SCSI) to determine which parser should be used
|
||||||
|
// when the reported device type is ambiguous or missing.
|
||||||
|
func detectSmartOutputType(output []byte) string {
|
||||||
|
var hints struct {
|
||||||
|
AtaSmartAttributes json.RawMessage `json:"ata_smart_attributes"`
|
||||||
|
NVMeSmartHealthInformationLog json.RawMessage `json:"nvme_smart_health_information_log"`
|
||||||
|
ScsiErrorCounterLog json.RawMessage `json:"scsi_error_counter_log"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(output, &hints); err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case hasJSONValue(hints.NVMeSmartHealthInformationLog):
|
||||||
|
return "nvme"
|
||||||
|
case hasJSONValue(hints.AtaSmartAttributes):
|
||||||
|
return "sat"
|
||||||
|
case hasJSONValue(hints.ScsiErrorCounterLog):
|
||||||
|
return "scsi"
|
||||||
|
default:
|
||||||
|
return "sat"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasJSONValue reports whether a JSON payload contains a concrete value. The
|
||||||
|
// smartctl output often emits "null" for sections that do not apply, so we
|
||||||
|
// only treat non-null content as a hint.
|
||||||
|
func hasJSONValue(raw json.RawMessage) bool {
|
||||||
|
if len(raw) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
trimmed := strings.TrimSpace(string(raw))
|
||||||
|
return trimmed != "" && trimmed != "null"
|
||||||
|
}
|
||||||
|
|
||||||
|
func normalizeParserType(value string) string {
|
||||||
|
switch strings.ToLower(strings.TrimSpace(value)) {
|
||||||
|
case "nvme", "sntasmedia", "sntrealtek":
|
||||||
|
return "nvme"
|
||||||
|
case "sat", "ata":
|
||||||
|
return "sat"
|
||||||
|
case "scsi":
|
||||||
|
return "scsi"
|
||||||
|
default:
|
||||||
|
return strings.ToLower(strings.TrimSpace(value))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseSmartOutput attempts each SMART parser, optionally detecting the type when
|
||||||
|
// it is not provided, and updates the device info when a parser succeeds.
|
||||||
|
func (sm *SmartManager) parseSmartOutput(deviceInfo *DeviceInfo, output []byte) bool {
|
||||||
|
parsers := []struct {
|
||||||
|
Type string
|
||||||
|
Parse func([]byte) (bool, int)
|
||||||
|
}{
|
||||||
|
{Type: "nvme", Parse: sm.parseSmartForNvme},
|
||||||
|
{Type: "sat", Parse: sm.parseSmartForSata},
|
||||||
|
{Type: "scsi", Parse: sm.parseSmartForScsi},
|
||||||
|
}
|
||||||
|
|
||||||
|
deviceType := normalizeParserType(deviceInfo.parserType)
|
||||||
|
if deviceType == "" {
|
||||||
|
deviceType = normalizeParserType(deviceInfo.Type)
|
||||||
|
}
|
||||||
|
if deviceInfo.parserType == "" {
|
||||||
|
switch deviceType {
|
||||||
|
case "nvme", "sat", "scsi":
|
||||||
|
deviceInfo.parserType = deviceType
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only run the type detection when we do not yet know which parser works
|
||||||
|
// or the previous attempt failed.
|
||||||
|
needsDetection := deviceType == "" || !deviceInfo.typeVerified
|
||||||
|
if needsDetection {
|
||||||
|
structureType := detectSmartOutputType(output)
|
||||||
|
if deviceType != structureType {
|
||||||
|
deviceType = structureType
|
||||||
|
deviceInfo.parserType = structureType
|
||||||
|
deviceInfo.typeVerified = false
|
||||||
|
}
|
||||||
|
if deviceInfo.Type == "" || strings.EqualFold(deviceInfo.Type, structureType) {
|
||||||
|
deviceInfo.Type = structureType
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try the most likely parser first, but keep the remaining parsers in reserve
|
||||||
|
// so an incorrect hint never leaves the device unparsed.
|
||||||
|
selectedParsers := make([]struct {
|
||||||
|
Type string
|
||||||
|
Parse func([]byte) (bool, int)
|
||||||
|
}, 0, len(parsers))
|
||||||
|
if deviceType != "" {
|
||||||
|
for _, parser := range parsers {
|
||||||
|
if parser.Type == deviceType {
|
||||||
|
selectedParsers = append(selectedParsers, parser)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, parser := range parsers {
|
||||||
|
alreadySelected := false
|
||||||
|
for _, selected := range selectedParsers {
|
||||||
|
if selected.Type == parser.Type {
|
||||||
|
alreadySelected = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if alreadySelected {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
selectedParsers = append(selectedParsers, parser)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try the selected parsers in order until we find one that succeeds.
|
||||||
|
for _, parser := range selectedParsers {
|
||||||
|
hasData, _ := parser.Parse(output)
|
||||||
|
if hasData {
|
||||||
|
deviceInfo.parserType = parser.Type
|
||||||
|
if deviceInfo.Type == "" || strings.EqualFold(deviceInfo.Type, parser.Type) {
|
||||||
|
deviceInfo.Type = parser.Type
|
||||||
|
}
|
||||||
|
// Remember that this parser is valid so future refreshes can bypass
|
||||||
|
// detection entirely.
|
||||||
|
deviceInfo.typeVerified = true
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
slog.Debug("parser failed", "device", deviceInfo.Name, "parser", parser.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Leave verification false so the next pass will attempt detection again.
|
||||||
|
deviceInfo.typeVerified = false
|
||||||
|
slog.Debug("parsing failed", "device", deviceInfo.Name)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// CollectSmart collects SMART data for a device
|
||||||
|
// Collect data using `smartctl -d <type> -aj /dev/<device>` when device type is known
|
||||||
|
// Always attempts to parse output even if command fails, as some data may still be available
|
||||||
|
// If collect fails, return error
|
||||||
|
// If collect succeeds, parse the output and update the SmartDataMap
|
||||||
|
// Uses -n standby to avoid waking up sleeping disks, but bypasses standby mode
|
||||||
|
// for initial data collection when no cached data exists
|
||||||
|
func (sm *SmartManager) CollectSmart(deviceInfo *DeviceInfo) error {
|
||||||
|
if deviceInfo != nil && sm.isExcludedDevice(deviceInfo.Name) {
|
||||||
|
return errNoValidSmartData
|
||||||
|
}
|
||||||
|
|
||||||
|
// slog.Info("collecting SMART data", "device", deviceInfo.Name, "type", deviceInfo.Type, "has_existing_data", sm.hasDataForDevice(deviceInfo.Name))
|
||||||
|
|
||||||
|
// Check if we have any existing data for this device
|
||||||
|
hasExistingData := sm.hasDataForDevice(deviceInfo.Name)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Try with -n standby first if we have existing data
|
||||||
|
args := sm.smartctlArgs(deviceInfo, true)
|
||||||
|
cmd := exec.CommandContext(ctx, sm.binPath, args...)
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
|
// Check if device is in standby (exit status 2)
|
||||||
|
if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 2 {
|
||||||
|
if hasExistingData {
|
||||||
|
// Device is in standby and we have cached data, keep using cache
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// No cached data, need to collect initial data by bypassing standby
|
||||||
|
ctx2, cancel2 := context.WithTimeout(context.Background(), 2*time.Second)
|
||||||
|
defer cancel2()
|
||||||
|
args = sm.smartctlArgs(deviceInfo, false)
|
||||||
|
cmd = exec.CommandContext(ctx2, sm.binPath, args...)
|
||||||
|
output, err = cmd.CombinedOutput()
|
||||||
|
}
|
||||||
|
|
||||||
|
hasValidData := sm.parseSmartOutput(deviceInfo, output)
|
||||||
|
|
||||||
|
if !hasValidData {
|
||||||
|
if err != nil {
|
||||||
|
slog.Info("smartctl failed", "device", deviceInfo.Name, "err", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
slog.Info("no valid SMART data found", "device", deviceInfo.Name)
|
||||||
|
return errNoValidSmartData
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// smartctlArgs returns the arguments for the smartctl command
|
||||||
|
// based on the device type and whether to include standby mode
|
||||||
|
func (sm *SmartManager) smartctlArgs(deviceInfo *DeviceInfo, includeStandby bool) []string {
|
||||||
|
args := make([]string, 0, 7)
|
||||||
|
|
||||||
|
if deviceInfo != nil {
|
||||||
|
deviceType := strings.ToLower(deviceInfo.Type)
|
||||||
|
// types sometimes misidentified in scan; see github.com/henrygd/beszel/issues/1345
|
||||||
|
if deviceType != "" && deviceType != "scsi" && deviceType != "ata" {
|
||||||
|
args = append(args, "-d", deviceInfo.Type)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
args = append(args, "-a", "--json=c")
|
||||||
|
|
||||||
|
if includeStandby {
|
||||||
|
args = append(args, "-n", "standby")
|
||||||
|
}
|
||||||
|
|
||||||
|
if deviceInfo != nil {
|
||||||
|
args = append(args, deviceInfo.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
return args
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasDataForDevice checks if we have cached SMART data for a specific device
|
||||||
|
func (sm *SmartManager) hasDataForDevice(deviceName string) bool {
|
||||||
|
sm.Lock()
|
||||||
|
defer sm.Unlock()
|
||||||
|
|
||||||
|
// Check if any cached data has this device name
|
||||||
|
for _, data := range sm.SmartDataMap {
|
||||||
|
if data != nil && data.DiskName == deviceName {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseScan parses the output of smartctl --scan -j and returns the discovered devices.
|
||||||
|
func (sm *SmartManager) parseScan(output []byte) ([]*DeviceInfo, bool) {
|
||||||
|
scan := &scanOutput{}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(output, scan); err != nil {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(scan.Devices) == 0 {
|
||||||
|
slog.Debug("no devices found in smartctl scan")
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
devices := make([]*DeviceInfo, 0, len(scan.Devices))
|
||||||
|
for _, device := range scan.Devices {
|
||||||
|
slog.Debug("smartctl scan", "name", device.Name, "type", device.Type, "protocol", device.Protocol)
|
||||||
|
devices = append(devices, &DeviceInfo{
|
||||||
|
Name: device.Name,
|
||||||
|
Type: device.Type,
|
||||||
|
InfoName: device.InfoName,
|
||||||
|
Protocol: device.Protocol,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return devices, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// mergeDeviceLists combines scanned and configured SMART devices, preferring
|
||||||
|
// configured SMART_DEVICES when both sources reference the same device.
|
||||||
|
func mergeDeviceLists(existing, scanned, configured []*DeviceInfo) []*DeviceInfo {
|
||||||
|
if len(scanned) == 0 && len(configured) == 0 {
|
||||||
|
return existing
|
||||||
|
}
|
||||||
|
|
||||||
|
// preserveVerifiedType copies the verified type/parser metadata from an existing
|
||||||
|
// device record so that subsequent scans/config updates never downgrade a
|
||||||
|
// previously verified device.
|
||||||
|
preserveVerifiedType := func(target, prev *DeviceInfo) {
|
||||||
|
if prev == nil || !prev.typeVerified {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
target.Type = prev.Type
|
||||||
|
target.typeVerified = true
|
||||||
|
target.parserType = prev.parserType
|
||||||
|
}
|
||||||
|
|
||||||
|
existingIndex := make(map[string]*DeviceInfo, len(existing))
|
||||||
|
for _, dev := range existing {
|
||||||
|
if dev == nil || dev.Name == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
existingIndex[dev.Name] = dev
|
||||||
|
}
|
||||||
|
|
||||||
|
finalDevices := make([]*DeviceInfo, 0, len(scanned)+len(configured))
|
||||||
|
deviceIndex := make(map[string]*DeviceInfo, len(scanned)+len(configured))
|
||||||
|
|
||||||
|
// Start with the newly scanned devices so we always surface fresh metadata,
|
||||||
|
// but ensure we retain any previously verified parser assignment.
|
||||||
|
for _, dev := range scanned {
|
||||||
|
if dev == nil || dev.Name == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Work on a copy so we can safely adjust metadata without mutating the
|
||||||
|
// input slices that may be reused elsewhere.
|
||||||
|
copyDev := *dev
|
||||||
|
if prev := existingIndex[copyDev.Name]; prev != nil {
|
||||||
|
preserveVerifiedType(©Dev, prev)
|
||||||
|
}
|
||||||
|
|
||||||
|
finalDevices = append(finalDevices, ©Dev)
|
||||||
|
deviceIndex[copyDev.Name] = finalDevices[len(finalDevices)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge configured devices on top so users can override scan results (except
|
||||||
|
// for verified type information).
|
||||||
|
for _, dev := range configured {
|
||||||
|
if dev == nil || dev.Name == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if existingDev, ok := deviceIndex[dev.Name]; ok {
|
||||||
|
// Only update the type if it has not been verified yet; otherwise we
|
||||||
|
// keep the existing verified metadata intact.
|
||||||
|
if dev.Type != "" && !existingDev.typeVerified {
|
||||||
|
newType := strings.TrimSpace(dev.Type)
|
||||||
|
existingDev.Type = newType
|
||||||
|
existingDev.typeVerified = false
|
||||||
|
existingDev.parserType = normalizeParserType(newType)
|
||||||
|
}
|
||||||
|
if dev.InfoName != "" {
|
||||||
|
existingDev.InfoName = dev.InfoName
|
||||||
|
}
|
||||||
|
if dev.Protocol != "" {
|
||||||
|
existingDev.Protocol = dev.Protocol
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
copyDev := *dev
|
||||||
|
if prev := existingIndex[copyDev.Name]; prev != nil {
|
||||||
|
preserveVerifiedType(©Dev, prev)
|
||||||
|
} else if copyDev.Type != "" {
|
||||||
|
copyDev.parserType = normalizeParserType(copyDev.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
finalDevices = append(finalDevices, ©Dev)
|
||||||
|
deviceIndex[copyDev.Name] = finalDevices[len(finalDevices)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return finalDevices
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateSmartDevices replaces the cached device list and prunes SMART data
|
||||||
|
// entries whose backing device no longer exists.
|
||||||
|
func (sm *SmartManager) updateSmartDevices(devices []*DeviceInfo) {
|
||||||
|
sm.Lock()
|
||||||
|
defer sm.Unlock()
|
||||||
|
|
||||||
|
sm.SmartDevices = devices
|
||||||
|
|
||||||
|
if len(sm.SmartDataMap) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
validNames := make(map[string]struct{}, len(devices))
|
||||||
|
for _, device := range devices {
|
||||||
|
if device == nil || device.Name == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
validNames[device.Name] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, data := range sm.SmartDataMap {
|
||||||
|
if data == nil {
|
||||||
|
delete(sm.SmartDataMap, key)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := validNames[data.DiskName]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(sm.SmartDataMap, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isVirtualDevice checks if a device is a virtual disk that should be filtered out
|
||||||
|
func (sm *SmartManager) isVirtualDevice(data *smart.SmartInfoForSata) bool {
|
||||||
|
vendorUpper := strings.ToUpper(data.ScsiVendor)
|
||||||
|
productUpper := strings.ToUpper(data.ScsiProduct)
|
||||||
|
modelUpper := strings.ToUpper(data.ModelName)
|
||||||
|
|
||||||
|
return sm.isVirtualDeviceFromStrings(vendorUpper, productUpper, modelUpper)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isVirtualDeviceNvme checks if an NVMe device is a virtual disk that should be filtered out
|
||||||
|
func (sm *SmartManager) isVirtualDeviceNvme(data *smart.SmartInfoForNvme) bool {
|
||||||
|
modelUpper := strings.ToUpper(data.ModelName)
|
||||||
|
|
||||||
|
return sm.isVirtualDeviceFromStrings(modelUpper)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isVirtualDeviceScsi checks if a SCSI device is a virtual disk that should be filtered out
|
||||||
|
func (sm *SmartManager) isVirtualDeviceScsi(data *smart.SmartInfoForScsi) bool {
|
||||||
|
vendorUpper := strings.ToUpper(data.ScsiVendor)
|
||||||
|
productUpper := strings.ToUpper(data.ScsiProduct)
|
||||||
|
modelUpper := strings.ToUpper(data.ScsiModelName)
|
||||||
|
|
||||||
|
return sm.isVirtualDeviceFromStrings(vendorUpper, productUpper, modelUpper)
|
||||||
|
}
|
||||||
|
|
||||||
|
// isVirtualDeviceFromStrings checks if any of the provided strings indicate a virtual device
|
||||||
|
func (sm *SmartManager) isVirtualDeviceFromStrings(fields ...string) bool {
|
||||||
|
for _, field := range fields {
|
||||||
|
fieldUpper := strings.ToUpper(field)
|
||||||
|
switch {
|
||||||
|
case strings.Contains(fieldUpper, "IET"), // iSCSI Enterprise Target
|
||||||
|
strings.Contains(fieldUpper, "VIRTUAL"),
|
||||||
|
strings.Contains(fieldUpper, "QEMU"),
|
||||||
|
strings.Contains(fieldUpper, "VBOX"),
|
||||||
|
strings.Contains(fieldUpper, "VMWARE"),
|
||||||
|
strings.Contains(fieldUpper, "MSFT"): // Microsoft Hyper-V
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseSmartForSata parses the output of smartctl --all -j for SATA/ATA devices and updates the SmartDataMap
// Returns hasValidData and exitStatus
func (sm *SmartManager) parseSmartForSata(output []byte) (bool, int) {
	var data smart.SmartInfoForSata

	if err := json.Unmarshal(output, &data); err != nil {
		// Output was not valid JSON; no smartctl exit status is available.
		return false, 0
	}

	// The serial number is the SmartDataMap key, so an entry without one
	// cannot be tracked.
	if data.SerialNumber == "" {
		slog.Debug("no serial number", "device", data.Device.Name)
		return false, data.Smartctl.ExitStatus
	}

	// Skip virtual devices (e.g., Kubernetes PVCs, QEMU, VirtualBox, etc.)
	if sm.isVirtualDevice(&data) {
		slog.Debug("skipping smart", "device", data.Device.Name, "model", data.ModelName)
		return false, data.Smartctl.ExitStatus
	}

	// Guard SmartDataMap for the remainder of the update.
	sm.Lock()
	defer sm.Unlock()

	keyName := data.SerialNumber

	// if device does not exist in SmartDataMap, initialize it
	if _, ok := sm.SmartDataMap[keyName]; !ok {
		sm.SmartDataMap[keyName] = &smart.SmartData{}
	}

	// update SmartData
	smartData := sm.SmartDataMap[keyName]
	// smartData.ModelFamily = data.ModelFamily
	smartData.ModelName = data.ModelName
	smartData.SerialNumber = data.SerialNumber
	smartData.FirmwareVersion = data.FirmwareVersion
	smartData.Capacity = data.UserCapacity.Bytes
	smartData.Temperature = data.Temperature.Current
	smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
	smartData.DiskName = data.Device.Name
	smartData.DiskType = data.Device.Type

	// update SmartAttributes
	smartData.Attributes = make([]*smart.SmartAttribute, 0, len(data.AtaSmartAttributes.Table))
	for _, attr := range data.AtaSmartAttributes.Table {
		rawValue := uint64(attr.Raw.Value)
		// Prefer the decoded raw string when it parses (handles composite
		// values such as "39925 (212 206 0)" where the numeric field packs
		// several sub-values together).
		if parsed, ok := smart.ParseSmartRawValueString(attr.Raw.String); ok {
			rawValue = parsed
		}
		smartAttr := &smart.SmartAttribute{
			ID:         attr.ID,
			Name:       attr.Name,
			Value:      attr.Value,
			Worst:      attr.Worst,
			Threshold:  attr.Thresh,
			RawValue:   rawValue,
			RawString:  attr.Raw.String,
			WhenFailed: attr.WhenFailed,
		}
		smartData.Attributes = append(smartData.Attributes, smartAttr)
	}
	sm.SmartDataMap[keyName] = smartData

	return true, data.Smartctl.ExitStatus
}
|
||||||
|
|
||||||
|
// getSmartStatus derives the reported SMART status string: "PASSED" when the
// self-assessment passed, "FAILED" when it did not pass but a temperature was
// reported (i.e. the device actually responded), and "UNKNOWN" otherwise.
func getSmartStatus(temperature uint8, passed bool) string {
	switch {
	case passed:
		return "PASSED"
	case temperature > 0:
		return "FAILED"
	default:
		return "UNKNOWN"
	}
}
|
||||||
|
|
||||||
|
// parseSmartForScsi parses the output of smartctl --all -j for SCSI devices
// and updates the SmartDataMap. Returns hasValidData and exitStatus.
func (sm *SmartManager) parseSmartForScsi(output []byte) (bool, int) {
	var data smart.SmartInfoForScsi

	if err := json.Unmarshal(output, &data); err != nil {
		// Output was not valid JSON; no smartctl exit status is available.
		return false, 0
	}

	// The serial number is the SmartDataMap key; skip entries without one.
	if data.SerialNumber == "" {
		slog.Debug("no serial number", "device", data.Device.Name)
		return false, data.Smartctl.ExitStatus
	}

	// Skip virtual devices (e.g., Kubernetes PVCs, QEMU, VirtualBox, etc.)
	if sm.isVirtualDeviceScsi(&data) {
		slog.Debug("skipping smart", "device", data.Device.Name, "model", data.ScsiModelName)
		return false, data.Smartctl.ExitStatus
	}

	// Guard SmartDataMap for the remainder of the update.
	sm.Lock()
	defer sm.Unlock()

	keyName := data.SerialNumber
	if _, ok := sm.SmartDataMap[keyName]; !ok {
		sm.SmartDataMap[keyName] = &smart.SmartData{}
	}

	smartData := sm.SmartDataMap[keyName]
	smartData.ModelName = data.ScsiModelName
	smartData.SerialNumber = data.SerialNumber
	smartData.FirmwareVersion = data.ScsiRevision
	smartData.Capacity = data.UserCapacity.Bytes
	smartData.Temperature = data.Temperature.Current
	smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
	smartData.DiskName = data.Device.Name
	smartData.DiskType = data.Device.Type

	// SCSI output has no ATA-style attribute table, so synthesize attributes
	// from the individual counter/log fields.
	attributes := make([]*smart.SmartAttribute, 0, 10)
	attributes = append(attributes, &smart.SmartAttribute{Name: "PowerOnHours", RawValue: data.PowerOnTime.Hours})
	attributes = append(attributes, &smart.SmartAttribute{Name: "PowerOnMinutes", RawValue: data.PowerOnTime.Minutes})
	attributes = append(attributes, &smart.SmartAttribute{Name: "GrownDefectList", RawValue: data.ScsiGrownDefectList})
	attributes = append(attributes, &smart.SmartAttribute{Name: "StartStopCycles", RawValue: data.ScsiStartStopCycleCounter.AccumulatedStartStopCycles})
	attributes = append(attributes, &smart.SmartAttribute{Name: "LoadUnloadCycles", RawValue: data.ScsiStartStopCycleCounter.AccumulatedLoadUnloadCycles})
	attributes = append(attributes, &smart.SmartAttribute{Name: "StartStopSpecified", RawValue: data.ScsiStartStopCycleCounter.SpecifiedCycleCountOverDeviceLifetime})
	attributes = append(attributes, &smart.SmartAttribute{Name: "LoadUnloadSpecified", RawValue: data.ScsiStartStopCycleCounter.SpecifiedLoadUnloadCountOverDeviceLifetime})

	readStats := data.ScsiErrorCounterLog.Read
	writeStats := data.ScsiErrorCounterLog.Write
	verifyStats := data.ScsiErrorCounterLog.Verify

	attributes = append(attributes, &smart.SmartAttribute{Name: "ReadTotalErrorsCorrected", RawValue: readStats.TotalErrorsCorrected})
	attributes = append(attributes, &smart.SmartAttribute{Name: "ReadTotalUncorrectedErrors", RawValue: readStats.TotalUncorrectedErrors})
	attributes = append(attributes, &smart.SmartAttribute{Name: "ReadCorrectionAlgorithmInvocations", RawValue: readStats.CorrectionAlgorithmInvocations})
	// GigabytesProcessed is a formatted string; a negative parse result means
	// the field was missing or unparsable, so the attribute is omitted.
	if val := parseScsiGigabytesProcessed(readStats.GigabytesProcessed); val >= 0 {
		attributes = append(attributes, &smart.SmartAttribute{Name: "ReadGigabytesProcessed", RawValue: uint64(val)})
	}
	attributes = append(attributes, &smart.SmartAttribute{Name: "WriteTotalErrorsCorrected", RawValue: writeStats.TotalErrorsCorrected})
	attributes = append(attributes, &smart.SmartAttribute{Name: "WriteTotalUncorrectedErrors", RawValue: writeStats.TotalUncorrectedErrors})
	attributes = append(attributes, &smart.SmartAttribute{Name: "WriteCorrectionAlgorithmInvocations", RawValue: writeStats.CorrectionAlgorithmInvocations})
	if val := parseScsiGigabytesProcessed(writeStats.GigabytesProcessed); val >= 0 {
		attributes = append(attributes, &smart.SmartAttribute{Name: "WriteGigabytesProcessed", RawValue: uint64(val)})
	}
	attributes = append(attributes, &smart.SmartAttribute{Name: "VerifyTotalErrorsCorrected", RawValue: verifyStats.TotalErrorsCorrected})
	attributes = append(attributes, &smart.SmartAttribute{Name: "VerifyTotalUncorrectedErrors", RawValue: verifyStats.TotalUncorrectedErrors})
	attributes = append(attributes, &smart.SmartAttribute{Name: "VerifyCorrectionAlgorithmInvocations", RawValue: verifyStats.CorrectionAlgorithmInvocations})
	if val := parseScsiGigabytesProcessed(verifyStats.GigabytesProcessed); val >= 0 {
		attributes = append(attributes, &smart.SmartAttribute{Name: "VerifyGigabytesProcessed", RawValue: uint64(val)})
	}

	smartData.Attributes = attributes
	sm.SmartDataMap[keyName] = smartData

	return true, data.Smartctl.ExitStatus
}
|
||||||
|
|
||||||
|
// parseScsiGigabytesProcessed converts smartctl's "gigabytes processed"
// string, which may contain comma thousands separators (e.g. "2,124,590"),
// into an int64. It returns -1 when the value is empty or not a valid number.
func parseScsiGigabytesProcessed(value string) int64 {
	if value == "" {
		return -1
	}
	cleaned := strings.ReplaceAll(value, ",", "")
	if parsed, err := strconv.ParseInt(cleaned, 10, 64); err == nil {
		return parsed
	}
	return -1
}
|
||||||
|
|
||||||
|
// parseSmartForNvme parses the output of smartctl --all -j /dev/nvmeX and updates the SmartDataMap
// Returns hasValidData and exitStatus
func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
	data := &smart.SmartInfoForNvme{}

	if err := json.Unmarshal(output, &data); err != nil {
		// Output was not valid JSON; no smartctl exit status is available.
		return false, 0
	}

	// The serial number is the SmartDataMap key; skip entries without one.
	if data.SerialNumber == "" {
		slog.Debug("no serial number", "device", data.Device.Name)
		return false, data.Smartctl.ExitStatus
	}

	// Skip virtual devices (e.g., Kubernetes PVCs, QEMU, VirtualBox, etc.)
	if sm.isVirtualDeviceNvme(data) {
		slog.Debug("skipping smart", "device", data.Device.Name, "model", data.ModelName)
		return false, data.Smartctl.ExitStatus
	}

	// Guard SmartDataMap for the remainder of the update.
	sm.Lock()
	defer sm.Unlock()

	keyName := data.SerialNumber

	// if device does not exist in SmartDataMap, initialize it
	if _, ok := sm.SmartDataMap[keyName]; !ok {
		sm.SmartDataMap[keyName] = &smart.SmartData{}
	}

	// update SmartData
	smartData := sm.SmartDataMap[keyName]
	smartData.ModelName = data.ModelName
	smartData.SerialNumber = data.SerialNumber
	smartData.FirmwareVersion = data.FirmwareVersion
	smartData.Capacity = data.UserCapacity.Bytes
	smartData.Temperature = data.NVMeSmartHealthInformationLog.Temperature
	smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
	smartData.DiskName = data.Device.Name
	smartData.DiskType = data.Device.Type

	// nvme attributes does not follow the same format as ata attributes,
	// so we manually map each field to SmartAttributes
	log := data.NVMeSmartHealthInformationLog
	smartData.Attributes = []*smart.SmartAttribute{
		{Name: "CriticalWarning", RawValue: uint64(log.CriticalWarning)},
		{Name: "Temperature", RawValue: uint64(log.Temperature)},
		{Name: "AvailableSpare", RawValue: uint64(log.AvailableSpare)},
		{Name: "AvailableSpareThreshold", RawValue: uint64(log.AvailableSpareThreshold)},
		{Name: "PercentageUsed", RawValue: uint64(log.PercentageUsed)},
		{Name: "DataUnitsRead", RawValue: log.DataUnitsRead},
		{Name: "DataUnitsWritten", RawValue: log.DataUnitsWritten},
		{Name: "HostReads", RawValue: uint64(log.HostReads)},
		{Name: "HostWrites", RawValue: uint64(log.HostWrites)},
		{Name: "ControllerBusyTime", RawValue: uint64(log.ControllerBusyTime)},
		{Name: "PowerCycles", RawValue: uint64(log.PowerCycles)},
		{Name: "PowerOnHours", RawValue: uint64(log.PowerOnHours)},
		{Name: "UnsafeShutdowns", RawValue: uint64(log.UnsafeShutdowns)},
		{Name: "MediaErrors", RawValue: uint64(log.MediaErrors)},
		{Name: "NumErrLogEntries", RawValue: uint64(log.NumErrLogEntries)},
		{Name: "WarningTempTime", RawValue: uint64(log.WarningTempTime)},
		{Name: "CriticalCompTime", RawValue: uint64(log.CriticalCompTime)},
	}

	sm.SmartDataMap[keyName] = smartData

	return true, data.Smartctl.ExitStatus
}
|
||||||
|
|
||||||
|
// detectSmartctl checks if smartctl is installed, returns an error if not
|
||||||
|
func (sm *SmartManager) detectSmartctl() (string, error) {
|
||||||
|
isWindows := runtime.GOOS == "windows"
|
||||||
|
|
||||||
|
// Load embedded smartctl.exe for Windows amd64 builds.
|
||||||
|
if isWindows && runtime.GOARCH == "amd64" {
|
||||||
|
if path, err := ensureEmbeddedSmartctl(); err == nil {
|
||||||
|
return path, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if path, err := exec.LookPath("smartctl"); err == nil {
|
||||||
|
return path, nil
|
||||||
|
}
|
||||||
|
locations := []string{}
|
||||||
|
if isWindows {
|
||||||
|
locations = append(locations,
|
||||||
|
"C:\\Program Files\\smartmontools\\bin\\smartctl.exe",
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
locations = append(locations, "/opt/homebrew/bin/smartctl")
|
||||||
|
}
|
||||||
|
for _, location := range locations {
|
||||||
|
if _, err := os.Stat(location); err == nil {
|
||||||
|
return location, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", errors.New("smartctl not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSmartManager creates and initializes a new SmartManager
|
||||||
|
func NewSmartManager() (*SmartManager, error) {
|
||||||
|
sm := &SmartManager{
|
||||||
|
SmartDataMap: make(map[string]*smart.SmartData),
|
||||||
|
}
|
||||||
|
sm.refreshExcludedDevices()
|
||||||
|
path, err := sm.detectSmartctl()
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug(err.Error())
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
slog.Debug("smartctl", "path", path)
|
||||||
|
sm.binPath = path
|
||||||
|
return sm, nil
|
||||||
|
}
|
||||||
9
agent/smart_nonwindows.go
Normal file
9
agent/smart_nonwindows.go
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
//go:build !windows

package agent

import "errors"

// ensureEmbeddedSmartctl is the non-Windows stub: no smartctl binary is
// embedded in these builds, so it always reports errors.ErrUnsupported and
// callers fall back to locating smartctl on the system.
func ensureEmbeddedSmartctl() (string, error) {
	return "", errors.ErrUnsupported
}
|
||||||
782
agent/smart_test.go
Normal file
782
agent/smart_test.go
Normal file
@@ -0,0 +1,782 @@
|
|||||||
|
//go:build testing
|
||||||
|
// +build testing
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestParseSmartForScsi(t *testing.T) {
|
||||||
|
fixturePath := filepath.Join("test-data", "smart", "scsi.json")
|
||||||
|
data, err := os.ReadFile(fixturePath)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed reading fixture: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{
|
||||||
|
SmartDataMap: make(map[string]*smart.SmartData),
|
||||||
|
}
|
||||||
|
|
||||||
|
hasData, exitStatus := sm.parseSmartForScsi(data)
|
||||||
|
if !hasData {
|
||||||
|
t.Fatalf("expected SCSI data to parse successfully")
|
||||||
|
}
|
||||||
|
if exitStatus != 0 {
|
||||||
|
t.Fatalf("expected exit status 0, got %d", exitStatus)
|
||||||
|
}
|
||||||
|
|
||||||
|
deviceData, ok := sm.SmartDataMap["9YHSDH9B"]
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected smart data entry for serial 9YHSDH9B")
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, deviceData.ModelName, "YADRO WUH721414AL4204")
|
||||||
|
assert.Equal(t, deviceData.SerialNumber, "9YHSDH9B")
|
||||||
|
assert.Equal(t, deviceData.FirmwareVersion, "C240")
|
||||||
|
assert.Equal(t, deviceData.DiskName, "/dev/sde")
|
||||||
|
assert.Equal(t, deviceData.DiskType, "scsi")
|
||||||
|
assert.EqualValues(t, deviceData.Temperature, 34)
|
||||||
|
assert.Equal(t, deviceData.SmartStatus, "PASSED")
|
||||||
|
assert.EqualValues(t, deviceData.Capacity, 14000519643136)
|
||||||
|
|
||||||
|
if len(deviceData.Attributes) == 0 {
|
||||||
|
t.Fatalf("expected attributes to be populated")
|
||||||
|
}
|
||||||
|
|
||||||
|
assertAttrValue(t, deviceData.Attributes, "PowerOnHours", 458)
|
||||||
|
assertAttrValue(t, deviceData.Attributes, "PowerOnMinutes", 25)
|
||||||
|
assertAttrValue(t, deviceData.Attributes, "GrownDefectList", 0)
|
||||||
|
assertAttrValue(t, deviceData.Attributes, "StartStopCycles", 2)
|
||||||
|
assertAttrValue(t, deviceData.Attributes, "LoadUnloadCycles", 418)
|
||||||
|
assertAttrValue(t, deviceData.Attributes, "ReadGigabytesProcessed", 3641)
|
||||||
|
assertAttrValue(t, deviceData.Attributes, "WriteGigabytesProcessed", 2124590)
|
||||||
|
assertAttrValue(t, deviceData.Attributes, "VerifyGigabytesProcessed", 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestParseSmartForSata parses the sda.json fixture and checks that the
// device metadata and ATA attributes land in SmartDataMap under the serial.
func TestParseSmartForSata(t *testing.T) {
	fixturePath := filepath.Join("test-data", "smart", "sda.json")
	data, err := os.ReadFile(fixturePath)
	require.NoError(t, err)

	sm := &SmartManager{
		SmartDataMap: make(map[string]*smart.SmartData),
	}

	hasData, exitStatus := sm.parseSmartForSata(data)
	require.True(t, hasData)
	// the fixture's non-zero exit status (64) is surfaced even though the
	// payload itself parses successfully
	assert.Equal(t, 64, exitStatus)

	deviceData, ok := sm.SmartDataMap["9C40918040082"]
	require.True(t, ok, "expected smart data entry for serial 9C40918040082")

	assert.Equal(t, "P3-2TB", deviceData.ModelName)
	assert.Equal(t, "X0104A0", deviceData.FirmwareVersion)
	assert.Equal(t, "/dev/sda", deviceData.DiskName)
	assert.Equal(t, "sat", deviceData.DiskType)
	assert.Equal(t, uint8(31), deviceData.Temperature)
	assert.Equal(t, "PASSED", deviceData.SmartStatus)
	assert.Equal(t, uint64(2048408248320), deviceData.Capacity)
	if assert.NotEmpty(t, deviceData.Attributes) {
		assertAttrValue(t, deviceData.Attributes, "Temperature_Celsius", 31)
	}
}
|
||||||
|
|
||||||
|
// TestParseSmartForSataParentheticalRawValue verifies that when an ATA raw
// value string contains parenthesized sub-values (e.g. "39925 (212 206 0)"),
// the parsed leading number is used as RawValue instead of the packed
// numeric field, while the original string is preserved in RawString.
func TestParseSmartForSataParentheticalRawValue(t *testing.T) {
	jsonPayload := []byte(`{
		"smartctl": {"exit_status": 0},
		"device": {"name": "/dev/sdz", "type": "sat"},
		"model_name": "Example",
		"serial_number": "PARENTHESES123",
		"firmware_version": "1.0",
		"user_capacity": {"bytes": 1024},
		"smart_status": {"passed": true},
		"temperature": {"current": 25},
		"ata_smart_attributes": {
			"table": [
				{
					"id": 9,
					"name": "Power_On_Hours",
					"value": 93,
					"worst": 55,
					"thresh": 0,
					"when_failed": "",
					"raw": {
						"value": 57891864217128,
						"string": "39925 (212 206 0)"
					}
				}
			]
		}
	}`)

	sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}

	hasData, exitStatus := sm.parseSmartForSata(jsonPayload)
	require.True(t, hasData)
	assert.Equal(t, 0, exitStatus)

	data, ok := sm.SmartDataMap["PARENTHESES123"]
	require.True(t, ok)
	require.Len(t, data.Attributes, 1)

	attr := data.Attributes[0]
	assert.Equal(t, uint64(39925), attr.RawValue)
	assert.Equal(t, "39925 (212 206 0)", attr.RawString)
}
|
||||||
|
|
||||||
|
// TestParseSmartForNvme parses the nvme0.json fixture and checks the NVMe
// health-log fields are mapped into SmartDataMap under the serial number.
func TestParseSmartForNvme(t *testing.T) {
	fixturePath := filepath.Join("test-data", "smart", "nvme0.json")
	data, err := os.ReadFile(fixturePath)
	require.NoError(t, err)

	sm := &SmartManager{
		SmartDataMap: make(map[string]*smart.SmartData),
	}

	hasData, exitStatus := sm.parseSmartForNvme(data)
	require.True(t, hasData)
	assert.Equal(t, 0, exitStatus)

	deviceData, ok := sm.SmartDataMap["2024031600129"]
	require.True(t, ok, "expected smart data entry for serial 2024031600129")

	assert.Equal(t, "PELADN 512GB", deviceData.ModelName)
	assert.Equal(t, "VC2S038E", deviceData.FirmwareVersion)
	assert.Equal(t, "/dev/nvme0", deviceData.DiskName)
	assert.Equal(t, "nvme", deviceData.DiskType)
	assert.Equal(t, uint8(61), deviceData.Temperature)
	assert.Equal(t, "PASSED", deviceData.SmartStatus)
	assert.Equal(t, uint64(512110190592), deviceData.Capacity)
	if assert.NotEmpty(t, deviceData.Attributes) {
		assertAttrValue(t, deviceData.Attributes, "PercentageUsed", 0)
		assertAttrValue(t, deviceData.Attributes, "DataUnitsWritten", 16040567)
	}
}
|
||||||
|
|
||||||
|
func TestHasDataForDevice(t *testing.T) {
|
||||||
|
sm := &SmartManager{
|
||||||
|
SmartDataMap: map[string]*smart.SmartData{
|
||||||
|
"serial-1": {DiskName: "/dev/sda"},
|
||||||
|
"serial-2": nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.True(t, sm.hasDataForDevice("/dev/sda"))
|
||||||
|
assert.False(t, sm.hasDataForDevice("/dev/sdb"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestDevicesSnapshotReturnsCopy verifies that devicesSnapshot returns an
// independent slice: mutating either the manager's slice or the snapshot
// must not affect the other.
func TestDevicesSnapshotReturnsCopy(t *testing.T) {
	originalDevice := &DeviceInfo{Name: "/dev/sda"}
	sm := &SmartManager{
		SmartDevices: []*DeviceInfo{
			originalDevice,
			{Name: "/dev/sdb"},
		},
	}

	snapshot := sm.devicesSnapshot()
	require.Len(t, snapshot, 2)

	// replacing an element in the manager must not change the snapshot
	sm.SmartDevices[0] = &DeviceInfo{Name: "/dev/sdz"}
	assert.Equal(t, "/dev/sda", snapshot[0].Name)

	// replacing an element in the snapshot must not change the manager
	snapshot[1] = &DeviceInfo{Name: "/dev/nvme0"}
	assert.Equal(t, "/dev/sdb", sm.SmartDevices[1].Name)

	// growing the manager's slice must not grow the snapshot
	sm.SmartDevices = append(sm.SmartDevices, &DeviceInfo{Name: "/dev/nvme1"})
	assert.Len(t, snapshot, 2)
}
|
||||||
|
|
||||||
|
// TestScanDevicesWithEnvOverride checks that the SMART_DEVICES env var
// overrides scanning: "name:type" entries are parsed and surrounding
// whitespace around entries is tolerated.
func TestScanDevicesWithEnvOverride(t *testing.T) {
	t.Setenv("SMART_DEVICES", "/dev/sda:sat, /dev/nvme0:nvme")

	sm := &SmartManager{
		SmartDataMap: make(map[string]*smart.SmartData),
	}

	err := sm.ScanDevices(true)
	require.NoError(t, err)

	require.Len(t, sm.SmartDevices, 2)
	assert.Equal(t, "/dev/sda", sm.SmartDevices[0].Name)
	assert.Equal(t, "sat", sm.SmartDevices[0].Type)
	assert.Equal(t, "/dev/nvme0", sm.SmartDevices[1].Name)
	assert.Equal(t, "nvme", sm.SmartDevices[1].Type)
}
|
||||||
|
|
||||||
|
func TestScanDevicesWithEnvOverrideInvalid(t *testing.T) {
|
||||||
|
t.Setenv("SMART_DEVICES", ":sat")
|
||||||
|
|
||||||
|
sm := &SmartManager{
|
||||||
|
SmartDataMap: make(map[string]*smart.SmartData),
|
||||||
|
}
|
||||||
|
|
||||||
|
err := sm.ScanDevices(true)
|
||||||
|
require.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestScanDevicesWithEnvOverrideEmpty(t *testing.T) {
|
||||||
|
t.Setenv("SMART_DEVICES", " ")
|
||||||
|
|
||||||
|
sm := &SmartManager{
|
||||||
|
SmartDataMap: make(map[string]*smart.SmartData),
|
||||||
|
}
|
||||||
|
|
||||||
|
err := sm.ScanDevices(true)
|
||||||
|
assert.ErrorIs(t, err, errNoValidSmartData)
|
||||||
|
assert.Empty(t, sm.SmartDevices)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSmartctlArgsWithoutType(t *testing.T) {
|
||||||
|
device := &DeviceInfo{Name: "/dev/sda"}
|
||||||
|
|
||||||
|
sm := &SmartManager{}
|
||||||
|
|
||||||
|
args := sm.smartctlArgs(device, true)
|
||||||
|
assert.Equal(t, []string{"-a", "--json=c", "-n", "standby", "/dev/sda"}, args)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestSmartctlArgs covers argument construction for a typed device with and
// without the standby ("-n standby") flag, and for a nil device.
func TestSmartctlArgs(t *testing.T) {
	sm := &SmartManager{}

	sataDevice := &DeviceInfo{Name: "/dev/sda", Type: "sat"}
	assert.Equal(t,
		[]string{"-d", "sat", "-a", "--json=c", "-n", "standby", "/dev/sda"},
		sm.smartctlArgs(sataDevice, true),
	)

	// without standby checking, "-n standby" is omitted
	assert.Equal(t,
		[]string{"-d", "sat", "-a", "--json=c", "/dev/sda"},
		sm.smartctlArgs(sataDevice, false),
	)

	// nil device: no "-d" flag and no trailing device path
	assert.Equal(t,
		[]string{"-a", "--json=c", "-n", "standby"},
		sm.smartctlArgs(nil, true),
	)
}
|
||||||
|
|
||||||
|
// TestResolveRefreshError verifies the error-priority rules after a refresh:
// any parsed smart data suppresses both errors; otherwise the collect error
// wins over the scan error; and with no errors at all the
// errNoValidSmartData sentinel is returned.
func TestResolveRefreshError(t *testing.T) {
	scanErr := errors.New("scan failed")
	collectErr := errors.New("collect failed")

	tests := []struct {
		name        string
		devices     []*DeviceInfo
		data        map[string]*smart.SmartData
		scanErr     error
		collectErr  error
		expectedErr error
		expectNoErr bool
	}{
		{
			name:        "no devices returns scan error",
			devices:     nil,
			data:        make(map[string]*smart.SmartData),
			scanErr:     scanErr,
			expectedErr: scanErr,
		},
		{
			name:        "has data ignores errors",
			devices:     []*DeviceInfo{{Name: "/dev/sda"}},
			data:        map[string]*smart.SmartData{"serial": {}},
			scanErr:     scanErr,
			collectErr:  collectErr,
			expectNoErr: true,
		},
		{
			name:        "collect error preferred",
			devices:     []*DeviceInfo{{Name: "/dev/sda"}},
			data:        make(map[string]*smart.SmartData),
			collectErr:  collectErr,
			expectedErr: collectErr,
		},
		{
			name:        "scan error returned when no data",
			devices:     []*DeviceInfo{{Name: "/dev/sda"}},
			data:        make(map[string]*smart.SmartData),
			scanErr:     scanErr,
			expectedErr: scanErr,
		},
		{
			name:        "no errors returns sentinel",
			devices:     []*DeviceInfo{{Name: "/dev/sda"}},
			data:        make(map[string]*smart.SmartData),
			expectedErr: errNoValidSmartData,
		},
		{
			name:        "no devices collect error",
			devices:     nil,
			data:        make(map[string]*smart.SmartData),
			collectErr:  collectErr,
			expectedErr: collectErr,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			sm := &SmartManager{
				SmartDevices: tt.devices,
				SmartDataMap: tt.data,
			}

			err := sm.resolveRefreshError(tt.scanErr, tt.collectErr)
			if tt.expectNoErr {
				assert.NoError(t, err)
				return
			}

			if tt.expectedErr == nil {
				assert.NoError(t, err)
			} else {
				assert.Equal(t, tt.expectedErr, err)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestParseScan parses a smartctl --scan JSON payload and verifies that
// updateSmartDevices replaces the device list and evicts stale smart data
// whose disk path no longer appears in the scan results.
func TestParseScan(t *testing.T) {
	sm := &SmartManager{
		SmartDataMap: map[string]*smart.SmartData{
			"serial-active": {DiskName: "/dev/sda"},
			"serial-stale":  {DiskName: "/dev/sdb"},
		},
	}

	scanJSON := []byte(`{
		"devices": [
			{"name": "/dev/sda", "type": "sat", "info_name": "/dev/sda [SAT]", "protocol": "ATA"},
			{"name": "/dev/nvme0", "type": "nvme", "info_name": "/dev/nvme0", "protocol": "NVMe"}
		]
	}`)

	devices, hasData := sm.parseScan(scanJSON)
	assert.True(t, hasData)

	sm.updateSmartDevices(devices)

	require.Len(t, sm.SmartDevices, 2)
	assert.Equal(t, "/dev/sda", sm.SmartDevices[0].Name)
	assert.Equal(t, "sat", sm.SmartDevices[0].Type)
	assert.Equal(t, "/dev/nvme0", sm.SmartDevices[1].Name)
	assert.Equal(t, "nvme", sm.SmartDevices[1].Type)

	_, activeExists := sm.SmartDataMap["serial-active"]
	assert.True(t, activeExists, "active smart data should be preserved when device path remains")

	_, staleExists := sm.SmartDataMap["serial-stale"]
	assert.False(t, staleExists, "stale smart data entry should be removed when device path disappears")
}
|
||||||
|
|
||||||
|
// TestMergeDeviceListsPrefersConfigured verifies merge semantics: configured
// entries override the scanned type, scan-only metadata is kept when config
// omits it, and devices present in only one list survive the merge.
func TestMergeDeviceListsPrefersConfigured(t *testing.T) {
	scanned := []*DeviceInfo{
		{Name: "/dev/sda", Type: "sat", InfoName: "scan-info", Protocol: "ATA"},
		{Name: "/dev/nvme0", Type: "nvme"},
	}

	configured := []*DeviceInfo{
		{Name: "/dev/sda", Type: "sat-override"},
		{Name: "/dev/sdb", Type: "sat"},
	}

	merged := mergeDeviceLists(nil, scanned, configured)
	require.Len(t, merged, 3)

	// index by name since merge order is not part of the contract here
	byName := make(map[string]*DeviceInfo, len(merged))
	for _, dev := range merged {
		byName[dev.Name] = dev
	}

	require.Contains(t, byName, "/dev/sda")
	assert.Equal(t, "sat-override", byName["/dev/sda"].Type, "configured type should override scanned type")
	assert.Equal(t, "scan-info", byName["/dev/sda"].InfoName, "scan metadata should be preserved when config does not provide it")

	require.Contains(t, byName, "/dev/nvme0")
	assert.Equal(t, "nvme", byName["/dev/nvme0"].Type)

	require.Contains(t, byName, "/dev/sdb")
	assert.Equal(t, "sat", byName["/dev/sdb"].Type)
}
|
||||||
|
|
||||||
|
// TestMergeDeviceListsPreservesVerification verifies that a device whose type
// was already verified keeps its Type and parserType even when a later scan
// reports a different type for the same path.
func TestMergeDeviceListsPreservesVerification(t *testing.T) {
	existing := []*DeviceInfo{
		{Name: "/dev/sda", Type: "sat+megaraid", parserType: "sat", typeVerified: true},
	}

	scanned := []*DeviceInfo{
		{Name: "/dev/sda", Type: "nvme"},
	}

	merged := mergeDeviceLists(existing, scanned, nil)
	require.Len(t, merged, 1)

	device := merged[0]
	assert.True(t, device.typeVerified)
	assert.Equal(t, "sat", device.parserType)
	assert.Equal(t, "sat+megaraid", device.Type)
}
|
||||||
|
|
||||||
|
// TestMergeDeviceListsUpdatesTypeWhenUnverified verifies that an unverified
// device adopts the newly scanned type and its remembered parserType is
// cleared.
func TestMergeDeviceListsUpdatesTypeWhenUnverified(t *testing.T) {
	existing := []*DeviceInfo{
		{Name: "/dev/sda", Type: "sat", parserType: "sat", typeVerified: false},
	}

	scanned := []*DeviceInfo{
		{Name: "/dev/sda", Type: "nvme"},
	}

	merged := mergeDeviceLists(existing, scanned, nil)
	require.Len(t, merged, 1)

	device := merged[0]
	assert.False(t, device.typeVerified)
	assert.Equal(t, "nvme", device.Type)
	assert.Equal(t, "", device.parserType)
}
|
||||||
|
|
||||||
|
// TestParseSmartOutputMarksVerified checks that successfully parsing NVMe
// output fills in the device Type/parserType and marks the type verified.
func TestParseSmartOutputMarksVerified(t *testing.T) {
	fixturePath := filepath.Join("test-data", "smart", "nvme0.json")
	data, err := os.ReadFile(fixturePath)
	require.NoError(t, err)

	sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
	device := &DeviceInfo{Name: "/dev/nvme0"}

	require.True(t, sm.parseSmartOutput(device, data))
	assert.Equal(t, "nvme", device.Type)
	assert.Equal(t, "nvme", device.parserType)
	assert.True(t, device.typeVerified)
}
|
||||||
|
|
||||||
|
// TestParseSmartOutputKeepsCustomType checks that a user-supplied composite
// type (sat+megaraid) is kept on the device while parserType reflects the
// parser that actually handled the output.
func TestParseSmartOutputKeepsCustomType(t *testing.T) {
	fixturePath := filepath.Join("test-data", "smart", "sda.json")
	data, err := os.ReadFile(fixturePath)
	require.NoError(t, err)

	sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
	device := &DeviceInfo{Name: "/dev/sda", Type: "sat+megaraid"}

	require.True(t, sm.parseSmartOutput(device, data))
	assert.Equal(t, "sat+megaraid", device.Type)
	assert.Equal(t, "sat", device.parserType)
	assert.True(t, device.typeVerified)
}
|
||||||
|
|
||||||
|
func TestParseSmartOutputResetsVerificationOnFailure(t *testing.T) {
|
||||||
|
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||||
|
device := &DeviceInfo{Name: "/dev/sda", Type: "sat", parserType: "sat", typeVerified: true}
|
||||||
|
|
||||||
|
assert.False(t, sm.parseSmartOutput(device, []byte("not json")))
|
||||||
|
assert.False(t, device.typeVerified)
|
||||||
|
assert.Equal(t, "sat", device.parserType)
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertAttrValue(t *testing.T, attributes []*smart.SmartAttribute, name string, expected uint64) {
|
||||||
|
t.Helper()
|
||||||
|
attr := findAttr(attributes, name)
|
||||||
|
if attr == nil {
|
||||||
|
t.Fatalf("expected attribute %s to be present", name)
|
||||||
|
}
|
||||||
|
if attr.RawValue != expected {
|
||||||
|
t.Fatalf("unexpected attribute %s value: got %d, want %d", name, attr.RawValue, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func findAttr(attributes []*smart.SmartAttribute, name string) *smart.SmartAttribute {
|
||||||
|
for _, attr := range attributes {
|
||||||
|
if attr != nil && attr.Name == name {
|
||||||
|
return attr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsVirtualDevice(t *testing.T) {
|
||||||
|
sm := &SmartManager{}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
vendor string
|
||||||
|
product string
|
||||||
|
model string
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
{"regular drive", "SEAGATE", "ST1000DM003", "ST1000DM003-1CH162", false},
|
||||||
|
{"qemu virtual", "QEMU", "QEMU HARDDISK", "QEMU HARDDISK", true},
|
||||||
|
{"virtualbox virtual", "VBOX", "HARDDISK", "VBOX HARDDISK", true},
|
||||||
|
{"vmware virtual", "VMWARE", "Virtual disk", "VMWARE Virtual disk", true},
|
||||||
|
{"virtual in model", "ATA", "VIRTUAL", "VIRTUAL DISK", true},
|
||||||
|
{"iet virtual", "IET", "VIRTUAL-DISK", "VIRTUAL-DISK", true},
|
||||||
|
{"hyper-v virtual", "MSFT", "VIRTUAL HD", "VIRTUAL HD", true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
data := &smart.SmartInfoForSata{
|
||||||
|
ScsiVendor: tt.vendor,
|
||||||
|
ScsiProduct: tt.product,
|
||||||
|
ModelName: tt.model,
|
||||||
|
}
|
||||||
|
result := sm.isVirtualDevice(data)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsVirtualDeviceNvme(t *testing.T) {
|
||||||
|
sm := &SmartManager{}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
model string
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
{"regular nvme", "Samsung SSD 970 EVO Plus 1TB", false},
|
||||||
|
{"qemu virtual", "QEMU NVMe Ctrl", true},
|
||||||
|
{"virtualbox virtual", "VBOX NVMe", true},
|
||||||
|
{"vmware virtual", "VMWARE NVMe", true},
|
||||||
|
{"virtual in model", "Virtual NVMe Device", true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
data := &smart.SmartInfoForNvme{
|
||||||
|
ModelName: tt.model,
|
||||||
|
}
|
||||||
|
result := sm.isVirtualDeviceNvme(data)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsVirtualDeviceScsi(t *testing.T) {
|
||||||
|
sm := &SmartManager{}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
vendor string
|
||||||
|
product string
|
||||||
|
model string
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
{"regular scsi", "SEAGATE", "ST1000DM003", "ST1000DM003-1CH162", false},
|
||||||
|
{"qemu virtual", "QEMU", "QEMU HARDDISK", "QEMU HARDDISK", true},
|
||||||
|
{"virtualbox virtual", "VBOX", "HARDDISK", "VBOX HARDDISK", true},
|
||||||
|
{"vmware virtual", "VMWARE", "Virtual disk", "VMWARE Virtual disk", true},
|
||||||
|
{"virtual in model", "ATA", "VIRTUAL", "VIRTUAL DISK", true},
|
||||||
|
{"iet virtual", "IET", "VIRTUAL-DISK", "VIRTUAL-DISK", true},
|
||||||
|
{"hyper-v virtual", "MSFT", "VIRTUAL HD", "VIRTUAL HD", true},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
data := &smart.SmartInfoForScsi{
|
||||||
|
ScsiVendor: tt.vendor,
|
||||||
|
ScsiProduct: tt.product,
|
||||||
|
ScsiModelName: tt.model,
|
||||||
|
}
|
||||||
|
result := sm.isVirtualDeviceScsi(data)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRefreshExcludedDevices(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
envValue string
|
||||||
|
expectedDevs map[string]struct{}
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "empty env",
|
||||||
|
envValue: "",
|
||||||
|
expectedDevs: map[string]struct{}{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "single device",
|
||||||
|
envValue: "/dev/sda",
|
||||||
|
expectedDevs: map[string]struct{}{
|
||||||
|
"/dev/sda": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "multiple devices",
|
||||||
|
envValue: "/dev/sda,/dev/sdb,/dev/nvme0",
|
||||||
|
expectedDevs: map[string]struct{}{
|
||||||
|
"/dev/sda": {},
|
||||||
|
"/dev/sdb": {},
|
||||||
|
"/dev/nvme0": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "devices with whitespace",
|
||||||
|
envValue: " /dev/sda , /dev/sdb , /dev/nvme0 ",
|
||||||
|
expectedDevs: map[string]struct{}{
|
||||||
|
"/dev/sda": {},
|
||||||
|
"/dev/sdb": {},
|
||||||
|
"/dev/nvme0": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "duplicate devices",
|
||||||
|
envValue: "/dev/sda,/dev/sdb,/dev/sda",
|
||||||
|
expectedDevs: map[string]struct{}{
|
||||||
|
"/dev/sda": {},
|
||||||
|
"/dev/sdb": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty entries and whitespace",
|
||||||
|
envValue: "/dev/sda,, /dev/sdb , , ",
|
||||||
|
expectedDevs: map[string]struct{}{
|
||||||
|
"/dev/sda": {},
|
||||||
|
"/dev/sdb": {},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if tt.envValue != "" {
|
||||||
|
t.Setenv("EXCLUDE_SMART", tt.envValue)
|
||||||
|
} else {
|
||||||
|
// Ensure env var is not set for empty test
|
||||||
|
os.Unsetenv("EXCLUDE_SMART")
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{}
|
||||||
|
sm.refreshExcludedDevices()
|
||||||
|
|
||||||
|
assert.Equal(t, tt.expectedDevs, sm.excludedDevices)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsExcludedDevice(t *testing.T) {
|
||||||
|
sm := &SmartManager{
|
||||||
|
excludedDevices: map[string]struct{}{
|
||||||
|
"/dev/sda": {},
|
||||||
|
"/dev/nvme0": {},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
deviceName string
|
||||||
|
expectedBool bool
|
||||||
|
}{
|
||||||
|
{"excluded device sda", "/dev/sda", true},
|
||||||
|
{"excluded device nvme0", "/dev/nvme0", true},
|
||||||
|
{"non-excluded device sdb", "/dev/sdb", false},
|
||||||
|
{"non-excluded device nvme1", "/dev/nvme1", false},
|
||||||
|
{"empty device name", "", false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
result := sm.isExcludedDevice(tt.deviceName)
|
||||||
|
assert.Equal(t, tt.expectedBool, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFilterExcludedDevices(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
excludedDevs map[string]struct{}
|
||||||
|
inputDevices []*DeviceInfo
|
||||||
|
expectedDevs []*DeviceInfo
|
||||||
|
expectedLength int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "no exclusions",
|
||||||
|
excludedDevs: map[string]struct{}{},
|
||||||
|
inputDevices: []*DeviceInfo{
|
||||||
|
{Name: "/dev/sda"},
|
||||||
|
{Name: "/dev/sdb"},
|
||||||
|
{Name: "/dev/nvme0"},
|
||||||
|
},
|
||||||
|
expectedDevs: []*DeviceInfo{
|
||||||
|
{Name: "/dev/sda"},
|
||||||
|
{Name: "/dev/sdb"},
|
||||||
|
{Name: "/dev/nvme0"},
|
||||||
|
},
|
||||||
|
expectedLength: 3,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "some devices excluded",
|
||||||
|
excludedDevs: map[string]struct{}{
|
||||||
|
"/dev/sda": {},
|
||||||
|
"/dev/nvme0": {},
|
||||||
|
},
|
||||||
|
inputDevices: []*DeviceInfo{
|
||||||
|
{Name: "/dev/sda"},
|
||||||
|
{Name: "/dev/sdb"},
|
||||||
|
{Name: "/dev/nvme0"},
|
||||||
|
{Name: "/dev/nvme1"},
|
||||||
|
},
|
||||||
|
expectedDevs: []*DeviceInfo{
|
||||||
|
{Name: "/dev/sdb"},
|
||||||
|
{Name: "/dev/nvme1"},
|
||||||
|
},
|
||||||
|
expectedLength: 2,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "all devices excluded",
|
||||||
|
excludedDevs: map[string]struct{}{
|
||||||
|
"/dev/sda": {},
|
||||||
|
"/dev/sdb": {},
|
||||||
|
},
|
||||||
|
inputDevices: []*DeviceInfo{
|
||||||
|
{Name: "/dev/sda"},
|
||||||
|
{Name: "/dev/sdb"},
|
||||||
|
},
|
||||||
|
expectedDevs: []*DeviceInfo{},
|
||||||
|
expectedLength: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "nil devices",
|
||||||
|
excludedDevs: map[string]struct{}{},
|
||||||
|
inputDevices: nil,
|
||||||
|
expectedDevs: []*DeviceInfo{},
|
||||||
|
expectedLength: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "filter nil and empty name devices",
|
||||||
|
excludedDevs: map[string]struct{}{
|
||||||
|
"/dev/sda": {},
|
||||||
|
},
|
||||||
|
inputDevices: []*DeviceInfo{
|
||||||
|
{Name: "/dev/sda"},
|
||||||
|
nil,
|
||||||
|
{Name: ""},
|
||||||
|
{Name: "/dev/sdb"},
|
||||||
|
},
|
||||||
|
expectedDevs: []*DeviceInfo{
|
||||||
|
{Name: "/dev/sdb"},
|
||||||
|
},
|
||||||
|
expectedLength: 1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
sm := &SmartManager{
|
||||||
|
excludedDevices: tt.excludedDevs,
|
||||||
|
}
|
||||||
|
|
||||||
|
result := sm.filterExcludedDevices(tt.inputDevices)
|
||||||
|
|
||||||
|
assert.Len(t, result, tt.expectedLength)
|
||||||
|
assert.Equal(t, tt.expectedDevs, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
40
agent/smart_windows.go
Normal file
40
agent/smart_windows.go
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "embed"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:embed smartmontools/smartctl.exe
|
||||||
|
var embeddedSmartctl []byte
|
||||||
|
|
||||||
|
var (
|
||||||
|
smartctlOnce sync.Once
|
||||||
|
smartctlPath string
|
||||||
|
smartctlErr error
|
||||||
|
)
|
||||||
|
|
||||||
|
func ensureEmbeddedSmartctl() (string, error) {
|
||||||
|
smartctlOnce.Do(func() {
|
||||||
|
destDir := filepath.Join(os.TempDir(), "beszel", "smartmontools")
|
||||||
|
if err := os.MkdirAll(destDir, 0o755); err != nil {
|
||||||
|
smartctlErr = fmt.Errorf("failed to create smartctl directory: %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
destPath := filepath.Join(destDir, "smartctl.exe")
|
||||||
|
if err := os.WriteFile(destPath, embeddedSmartctl, 0o755); err != nil {
|
||||||
|
smartctlErr = fmt.Errorf("failed to write embedded smartctl: %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
smartctlPath = destPath
|
||||||
|
})
|
||||||
|
|
||||||
|
return smartctlPath, smartctlErr
|
||||||
|
}
|
||||||
@@ -14,12 +14,18 @@ import (
|
|||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/shirou/gopsutil/v4/cpu"
|
"github.com/shirou/gopsutil/v4/cpu"
|
||||||
"github.com/shirou/gopsutil/v4/disk"
|
|
||||||
"github.com/shirou/gopsutil/v4/host"
|
"github.com/shirou/gopsutil/v4/host"
|
||||||
"github.com/shirou/gopsutil/v4/load"
|
"github.com/shirou/gopsutil/v4/load"
|
||||||
"github.com/shirou/gopsutil/v4/mem"
|
"github.com/shirou/gopsutil/v4/mem"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// prevDisk stores previous per-device disk counters for a given cache interval
|
||||||
|
type prevDisk struct {
|
||||||
|
readBytes uint64
|
||||||
|
writeBytes uint64
|
||||||
|
at time.Time
|
||||||
|
}
|
||||||
|
|
||||||
// Sets initial / non-changing values about the host system
|
// Sets initial / non-changing values about the host system
|
||||||
func (a *Agent) initializeSystemInfo() {
|
func (a *Agent) initializeSystemInfo() {
|
||||||
a.systemInfo.AgentVersion = beszel.Version
|
a.systemInfo.AgentVersion = beszel.Version
|
||||||
@@ -68,20 +74,33 @@ func (a *Agent) initializeSystemInfo() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Returns current info, stats about the host system
|
// Returns current info, stats about the host system
|
||||||
func (a *Agent) getSystemStats() system.Stats {
|
func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
||||||
var systemStats system.Stats
|
var systemStats system.Stats
|
||||||
|
|
||||||
// battery
|
// battery
|
||||||
if battery.HasReadableBattery() {
|
if batteryPercent, batteryState, err := battery.GetBatteryStats(); err == nil {
|
||||||
systemStats.Battery[0], systemStats.Battery[1], _ = battery.GetBatteryStats()
|
systemStats.Battery[0] = batteryPercent
|
||||||
|
systemStats.Battery[1] = batteryState
|
||||||
}
|
}
|
||||||
|
|
||||||
// cpu percent
|
// cpu metrics
|
||||||
cpuPct, err := cpu.Percent(0, false)
|
cpuMetrics, err := getCpuMetrics(cacheTimeMs)
|
||||||
if err != nil {
|
if err == nil {
|
||||||
slog.Error("Error getting cpu percent", "err", err)
|
systemStats.Cpu = twoDecimals(cpuMetrics.Total)
|
||||||
} else if len(cpuPct) > 0 {
|
systemStats.CpuBreakdown = []float64{
|
||||||
systemStats.Cpu = twoDecimals(cpuPct[0])
|
twoDecimals(cpuMetrics.User),
|
||||||
|
twoDecimals(cpuMetrics.System),
|
||||||
|
twoDecimals(cpuMetrics.Iowait),
|
||||||
|
twoDecimals(cpuMetrics.Steal),
|
||||||
|
twoDecimals(cpuMetrics.Idle),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
slog.Error("Error getting cpu metrics", "err", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// per-core cpu usage
|
||||||
|
if perCoreUsage, err := getPerCoreCpuUsage(cacheTimeMs); err == nil {
|
||||||
|
systemStats.CpuCoresUsage = perCoreUsage
|
||||||
}
|
}
|
||||||
|
|
||||||
// load average
|
// load average
|
||||||
@@ -131,56 +150,13 @@ func (a *Agent) getSystemStats() system.Stats {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// disk usage
|
// disk usage
|
||||||
for _, stats := range a.fsStats {
|
a.updateDiskUsage(&systemStats)
|
||||||
if d, err := disk.Usage(stats.Mountpoint); err == nil {
|
|
||||||
stats.DiskTotal = bytesToGigabytes(d.Total)
|
|
||||||
stats.DiskUsed = bytesToGigabytes(d.Used)
|
|
||||||
if stats.Root {
|
|
||||||
systemStats.DiskTotal = bytesToGigabytes(d.Total)
|
|
||||||
systemStats.DiskUsed = bytesToGigabytes(d.Used)
|
|
||||||
systemStats.DiskPct = twoDecimals(d.UsedPercent)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// reset stats if error (likely unmounted)
|
|
||||||
slog.Error("Error getting disk stats", "name", stats.Mountpoint, "err", err)
|
|
||||||
stats.DiskTotal = 0
|
|
||||||
stats.DiskUsed = 0
|
|
||||||
stats.TotalRead = 0
|
|
||||||
stats.TotalWrite = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// disk i/o
|
// disk i/o (cache-aware per interval)
|
||||||
if ioCounters, err := disk.IOCounters(a.fsNames...); err == nil {
|
a.updateDiskIo(cacheTimeMs, &systemStats)
|
||||||
for _, d := range ioCounters {
|
|
||||||
stats := a.fsStats[d.Name]
|
|
||||||
if stats == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
secondsElapsed := time.Since(stats.Time).Seconds()
|
|
||||||
readPerSecond := bytesToMegabytes(float64(d.ReadBytes-stats.TotalRead) / secondsElapsed)
|
|
||||||
writePerSecond := bytesToMegabytes(float64(d.WriteBytes-stats.TotalWrite) / secondsElapsed)
|
|
||||||
// check for invalid values and reset stats if so
|
|
||||||
if readPerSecond < 0 || writePerSecond < 0 || readPerSecond > 50_000 || writePerSecond > 50_000 {
|
|
||||||
slog.Warn("Invalid disk I/O. Resetting.", "name", d.Name, "read", readPerSecond, "write", writePerSecond)
|
|
||||||
a.initializeDiskIoStats(ioCounters)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
stats.Time = time.Now()
|
|
||||||
stats.DiskReadPs = readPerSecond
|
|
||||||
stats.DiskWritePs = writePerSecond
|
|
||||||
stats.TotalRead = d.ReadBytes
|
|
||||||
stats.TotalWrite = d.WriteBytes
|
|
||||||
// if root filesystem, update system stats
|
|
||||||
if stats.Root {
|
|
||||||
systemStats.DiskReadPs = stats.DiskReadPs
|
|
||||||
systemStats.DiskWritePs = stats.DiskWritePs
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// network stats
|
// network stats (per cache interval)
|
||||||
a.updateNetworkStats(&systemStats)
|
a.updateNetworkStats(cacheTimeMs, &systemStats)
|
||||||
|
|
||||||
// temperatures
|
// temperatures
|
||||||
// TODO: maybe refactor to methods on systemStats
|
// TODO: maybe refactor to methods on systemStats
|
||||||
@@ -191,7 +167,7 @@ func (a *Agent) getSystemStats() system.Stats {
|
|||||||
// reset high gpu percent
|
// reset high gpu percent
|
||||||
a.systemInfo.GpuPct = 0
|
a.systemInfo.GpuPct = 0
|
||||||
// get current GPU data
|
// get current GPU data
|
||||||
if gpuData := a.gpuManager.GetCurrentData(); len(gpuData) > 0 {
|
if gpuData := a.gpuManager.GetCurrentData(cacheTimeMs); len(gpuData) > 0 {
|
||||||
systemStats.GPUData = gpuData
|
systemStats.GPUData = gpuData
|
||||||
|
|
||||||
// add temperatures
|
// add temperatures
|
||||||
|
|||||||
272
agent/systemd.go
Normal file
272
agent/systemd.go
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"log/slog"
|
||||||
|
"maps"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/coreos/go-systemd/v22/dbus"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errNoActiveTime = errors.New("no active time")
|
||||||
|
)
|
||||||
|
|
||||||
|
// systemdManager manages the collection of systemd service statistics.
|
||||||
|
type systemdManager struct {
|
||||||
|
sync.Mutex
|
||||||
|
serviceStatsMap map[string]*systemd.Service
|
||||||
|
isRunning bool
|
||||||
|
hasFreshStats bool
|
||||||
|
patterns []string
|
||||||
|
}
|
||||||
|
|
||||||
|
// newSystemdManager creates a new systemdManager.
|
||||||
|
func newSystemdManager() (*systemdManager, error) {
|
||||||
|
conn, err := dbus.NewSystemConnectionContext(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
slog.Warn("Error connecting to systemd", "err", err, "ref", "https://beszel.dev/guide/systemd")
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
manager := &systemdManager{
|
||||||
|
serviceStatsMap: make(map[string]*systemd.Service),
|
||||||
|
patterns: getServicePatterns(),
|
||||||
|
}
|
||||||
|
|
||||||
|
manager.startWorker(conn)
|
||||||
|
|
||||||
|
return manager, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *systemdManager) startWorker(conn *dbus.Conn) {
|
||||||
|
if sm.isRunning {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sm.isRunning = true
|
||||||
|
// prime the service stats map with the current services
|
||||||
|
_ = sm.getServiceStats(conn, true)
|
||||||
|
// update the services every 10 minutes
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
time.Sleep(time.Minute * 10)
|
||||||
|
_ = sm.getServiceStats(nil, true)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// getServiceStatsCount returns the number of systemd services.
|
||||||
|
func (sm *systemdManager) getServiceStatsCount() int {
|
||||||
|
return len(sm.serviceStatsMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getFailedServiceCount returns the number of systemd services in a failed state.
|
||||||
|
func (sm *systemdManager) getFailedServiceCount() uint16 {
|
||||||
|
sm.Lock()
|
||||||
|
defer sm.Unlock()
|
||||||
|
count := uint16(0)
|
||||||
|
for _, service := range sm.serviceStatsMap {
|
||||||
|
if service.State == systemd.StatusFailed {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// getServiceStats collects statistics for all running systemd services.
|
||||||
|
func (sm *systemdManager) getServiceStats(conn *dbus.Conn, refresh bool) []*systemd.Service {
|
||||||
|
// start := time.Now()
|
||||||
|
// defer func() {
|
||||||
|
// slog.Info("systemdManager.getServiceStats", "duration", time.Since(start))
|
||||||
|
// }()
|
||||||
|
|
||||||
|
var services []*systemd.Service
|
||||||
|
var err error
|
||||||
|
|
||||||
|
if !refresh {
|
||||||
|
// return nil
|
||||||
|
sm.Lock()
|
||||||
|
defer sm.Unlock()
|
||||||
|
for _, service := range sm.serviceStatsMap {
|
||||||
|
services = append(services, service)
|
||||||
|
}
|
||||||
|
sm.hasFreshStats = false
|
||||||
|
return services
|
||||||
|
}
|
||||||
|
|
||||||
|
if conn == nil || !conn.Connected() {
|
||||||
|
conn, err = dbus.NewSystemConnectionContext(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
units, err := conn.ListUnitsByPatternsContext(context.Background(), []string{"loaded"}, sm.patterns)
|
||||||
|
if err != nil {
|
||||||
|
slog.Error("Error listing systemd service units", "err", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, unit := range units {
|
||||||
|
service, err := sm.updateServiceStats(conn, unit)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
services = append(services, service)
|
||||||
|
}
|
||||||
|
sm.hasFreshStats = true
|
||||||
|
return services
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateServiceStats updates the statistics for a single systemd service.
|
||||||
|
func (sm *systemdManager) updateServiceStats(conn *dbus.Conn, unit dbus.UnitStatus) (*systemd.Service, error) {
|
||||||
|
sm.Lock()
|
||||||
|
defer sm.Unlock()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// if service has never been active (no active since time), skip it
|
||||||
|
if activeEnterTsProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Unit", "ActiveEnterTimestamp"); err == nil {
|
||||||
|
if ts, ok := activeEnterTsProp.Value.Value().(uint64); !ok || ts == 0 || ts == math.MaxUint64 {
|
||||||
|
return nil, errNoActiveTime
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
service, serviceExists := sm.serviceStatsMap[unit.Name]
|
||||||
|
if !serviceExists {
|
||||||
|
service = &systemd.Service{Name: unescapeServiceName(strings.TrimSuffix(unit.Name, ".service"))}
|
||||||
|
sm.serviceStatsMap[unit.Name] = service
|
||||||
|
}
|
||||||
|
|
||||||
|
memPeak := service.MemPeak
|
||||||
|
if memPeakProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Service", "MemoryPeak"); err == nil {
|
||||||
|
// If memPeak is MaxUint64 the api is saying it's not available
|
||||||
|
if v, ok := memPeakProp.Value.Value().(uint64); ok && v != math.MaxUint64 {
|
||||||
|
memPeak = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var memUsage uint64
|
||||||
|
if memProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Service", "MemoryCurrent"); err == nil {
|
||||||
|
// If memUsage is MaxUint64 the api is saying it's not available
|
||||||
|
if v, ok := memProp.Value.Value().(uint64); ok && v != math.MaxUint64 {
|
||||||
|
memUsage = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
service.State = systemd.ParseServiceStatus(unit.ActiveState)
|
||||||
|
service.Sub = systemd.ParseServiceSubState(unit.SubState)
|
||||||
|
|
||||||
|
// some systems always return 0 for mem peak, so we should update the peak if the current usage is greater
|
||||||
|
if memUsage > memPeak {
|
||||||
|
memPeak = memUsage
|
||||||
|
}
|
||||||
|
|
||||||
|
var cpuUsage uint64
|
||||||
|
if cpuProp, err := conn.GetUnitTypePropertyContext(ctx, unit.Name, "Service", "CPUUsageNSec"); err == nil {
|
||||||
|
if v, ok := cpuProp.Value.Value().(uint64); ok {
|
||||||
|
cpuUsage = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
service.Mem = memUsage
|
||||||
|
if memPeak > service.MemPeak {
|
||||||
|
service.MemPeak = memPeak
|
||||||
|
}
|
||||||
|
service.UpdateCPUPercent(cpuUsage)
|
||||||
|
|
||||||
|
return service, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getServiceDetails collects extended information for a specific systemd service.
|
||||||
|
func (sm *systemdManager) getServiceDetails(serviceName string) (systemd.ServiceDetails, error) {
|
||||||
|
conn, err := dbus.NewSystemConnectionContext(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
unitName := serviceName
|
||||||
|
if !strings.HasSuffix(unitName, ".service") {
|
||||||
|
unitName += ".service"
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
props, err := conn.GetUnitPropertiesContext(ctx, unitName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start with all unit properties
|
||||||
|
details := make(systemd.ServiceDetails)
|
||||||
|
maps.Copy(details, props)
|
||||||
|
|
||||||
|
// // Add service-specific properties
|
||||||
|
servicePropNames := []string{
|
||||||
|
"MainPID", "ExecMainPID", "TasksCurrent", "TasksMax",
|
||||||
|
"MemoryCurrent", "MemoryPeak", "MemoryLimit", "CPUUsageNSec",
|
||||||
|
"NRestarts", "ExecMainStartTimestampRealtime", "Result",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, propName := range servicePropNames {
|
||||||
|
if variant, err := conn.GetUnitTypePropertyContext(ctx, unitName, "Service", propName); err == nil {
|
||||||
|
value := variant.Value.Value()
|
||||||
|
// Check if the value is MaxUint64, which indicates unlimited/infinite
|
||||||
|
if uint64Value, ok := value.(uint64); ok && uint64Value == math.MaxUint64 {
|
||||||
|
// Set to nil to indicate unlimited - frontend will handle this appropriately
|
||||||
|
details[propName] = nil
|
||||||
|
} else {
|
||||||
|
details[propName] = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return details, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// unescapeServiceName unescapes systemd service names that contain C-style escape sequences like \x2d
|
||||||
|
func unescapeServiceName(name string) string {
|
||||||
|
if !strings.Contains(name, "\\x") {
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
unescaped, err := strconv.Unquote("\"" + name + "\"")
|
||||||
|
if err != nil {
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
return unescaped
|
||||||
|
}
|
||||||
|
|
||||||
|
// getServicePatterns returns the list of service patterns to match.
|
||||||
|
// It reads from the SERVICE_PATTERNS environment variable if set,
|
||||||
|
// otherwise defaults to "*service".
|
||||||
|
func getServicePatterns() []string {
|
||||||
|
patterns := []string{}
|
||||||
|
if envPatterns, _ := GetEnv("SERVICE_PATTERNS"); envPatterns != "" {
|
||||||
|
for pattern := range strings.SplitSeq(envPatterns, ",") {
|
||||||
|
pattern = strings.TrimSpace(pattern)
|
||||||
|
if pattern == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !strings.HasSuffix(pattern, ".service") {
|
||||||
|
pattern += ".service"
|
||||||
|
}
|
||||||
|
patterns = append(patterns, pattern)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(patterns) == 0 {
|
||||||
|
patterns = []string{"*.service"}
|
||||||
|
}
|
||||||
|
return patterns
|
||||||
|
}
|
||||||
38
agent/systemd_nonlinux.go
Normal file
38
agent/systemd_nonlinux.go
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
//go:build !linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
|
)
|
||||||
|
|
||||||
|
// systemdManager manages the collection of systemd service statistics.
|
||||||
|
type systemdManager struct {
|
||||||
|
hasFreshStats bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// newSystemdManager creates a new systemdManager.
|
||||||
|
func newSystemdManager() (*systemdManager, error) {
|
||||||
|
return &systemdManager{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getServiceStats returns nil for non-linux systems.
|
||||||
|
func (sm *systemdManager) getServiceStats(conn any, refresh bool) []*systemd.Service {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getServiceStatsCount returns 0 for non-linux systems.
|
||||||
|
func (sm *systemdManager) getServiceStatsCount() int {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// getFailedServiceCount returns 0 for non-linux systems.
|
||||||
|
func (sm *systemdManager) getFailedServiceCount() uint16 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *systemdManager) getServiceDetails(string) (systemd.ServiceDetails, error) {
|
||||||
|
return nil, errors.New("systemd manager unavailable")
|
||||||
|
}
|
||||||
53
agent/systemd_nonlinux_test.go
Normal file
53
agent/systemd_nonlinux_test.go
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
//go:build !linux && testing
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewSystemdManager(t *testing.T) {
|
||||||
|
manager, err := newSystemdManager()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.NotNil(t, manager)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemdManagerGetServiceStats(t *testing.T) {
|
||||||
|
manager, err := newSystemdManager()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Test with refresh = true
|
||||||
|
result := manager.getServiceStats(true)
|
||||||
|
assert.Nil(t, result)
|
||||||
|
|
||||||
|
// Test with refresh = false
|
||||||
|
result = manager.getServiceStats(false)
|
||||||
|
assert.Nil(t, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemdManagerGetServiceDetails(t *testing.T) {
|
||||||
|
manager, err := newSystemdManager()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
result, err := manager.getServiceDetails("any-service")
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.Equal(t, "systemd manager unavailable", err.Error())
|
||||||
|
assert.Nil(t, result)
|
||||||
|
|
||||||
|
// Test with empty service name
|
||||||
|
result, err = manager.getServiceDetails("")
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.Equal(t, "systemd manager unavailable", err.Error())
|
||||||
|
assert.Nil(t, result)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemdManagerFields(t *testing.T) {
|
||||||
|
manager, err := newSystemdManager()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// The non-linux manager should be a simple struct with no special fields
|
||||||
|
// We can't test private fields directly, but we can test the methods work
|
||||||
|
assert.NotNil(t, manager)
|
||||||
|
}
|
||||||
158
agent/systemd_test.go
Normal file
158
agent/systemd_test.go
Normal file
@@ -0,0 +1,158 @@
|
|||||||
|
//go:build linux && testing
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestUnescapeServiceName(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
input string
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{"nginx.service", "nginx.service"}, // No escaping needed
|
||||||
|
{"test\\x2dwith\\x2ddashes.service", "test-with-dashes.service"}, // \x2d is dash
|
||||||
|
{"service\\x20with\\x20spaces.service", "service with spaces.service"}, // \x20 is space
|
||||||
|
{"mixed\\x2dand\\x2dnormal", "mixed-and-normal"}, // Mixed escaped and normal
|
||||||
|
{"no-escape-here", "no-escape-here"}, // No escape sequences
|
||||||
|
{"", ""}, // Empty string
|
||||||
|
{"\\x2d\\x2d", "--"}, // Multiple escapes
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.input, func(t *testing.T) {
|
||||||
|
result := unescapeServiceName(test.input)
|
||||||
|
assert.Equal(t, test.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUnescapeServiceNameInvalid(t *testing.T) {
|
||||||
|
// Test invalid escape sequences - should return original string
|
||||||
|
invalidInputs := []string{
|
||||||
|
"invalid\\x", // Incomplete escape
|
||||||
|
"invalid\\xZZ", // Invalid hex
|
||||||
|
"invalid\\x2", // Incomplete hex
|
||||||
|
"invalid\\xyz", // Not a valid escape
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, input := range invalidInputs {
|
||||||
|
t.Run(input, func(t *testing.T) {
|
||||||
|
result := unescapeServiceName(input)
|
||||||
|
assert.Equal(t, input, result, "Invalid escape sequences should return original string")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetServicePatterns(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
prefixedEnv string
|
||||||
|
unprefixedEnv string
|
||||||
|
expected []string
|
||||||
|
cleanupEnvVars bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "default when no env var set",
|
||||||
|
prefixedEnv: "",
|
||||||
|
unprefixedEnv: "",
|
||||||
|
expected: []string{"*.service"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "single pattern with prefixed env",
|
||||||
|
prefixedEnv: "nginx",
|
||||||
|
unprefixedEnv: "",
|
||||||
|
expected: []string{"nginx.service"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "single pattern with unprefixed env",
|
||||||
|
prefixedEnv: "",
|
||||||
|
unprefixedEnv: "nginx",
|
||||||
|
expected: []string{"nginx.service"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "prefixed env takes precedence",
|
||||||
|
prefixedEnv: "nginx",
|
||||||
|
unprefixedEnv: "apache",
|
||||||
|
expected: []string{"nginx.service"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "multiple patterns",
|
||||||
|
prefixedEnv: "nginx,apache,postgresql",
|
||||||
|
unprefixedEnv: "",
|
||||||
|
expected: []string{"nginx.service", "apache.service", "postgresql.service"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "patterns with .service suffix",
|
||||||
|
prefixedEnv: "nginx.service,apache.service",
|
||||||
|
unprefixedEnv: "",
|
||||||
|
expected: []string{"nginx.service", "apache.service"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "mixed patterns with and without suffix",
|
||||||
|
prefixedEnv: "nginx.service,apache,postgresql.service",
|
||||||
|
unprefixedEnv: "",
|
||||||
|
expected: []string{"nginx.service", "apache.service", "postgresql.service"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "patterns with whitespace",
|
||||||
|
prefixedEnv: " nginx , apache , postgresql ",
|
||||||
|
unprefixedEnv: "",
|
||||||
|
expected: []string{"nginx.service", "apache.service", "postgresql.service"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "empty patterns are skipped",
|
||||||
|
prefixedEnv: "nginx,,apache, ,postgresql",
|
||||||
|
unprefixedEnv: "",
|
||||||
|
expected: []string{"nginx.service", "apache.service", "postgresql.service"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "wildcard pattern",
|
||||||
|
prefixedEnv: "*nginx*,*apache*",
|
||||||
|
unprefixedEnv: "",
|
||||||
|
expected: []string{"*nginx*.service", "*apache*.service"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
// Clean up any existing env vars
|
||||||
|
os.Unsetenv("BESZEL_AGENT_SERVICE_PATTERNS")
|
||||||
|
os.Unsetenv("SERVICE_PATTERNS")
|
||||||
|
|
||||||
|
// Set up environment variables
|
||||||
|
if tt.prefixedEnv != "" {
|
||||||
|
os.Setenv("BESZEL_AGENT_SERVICE_PATTERNS", tt.prefixedEnv)
|
||||||
|
}
|
||||||
|
if tt.unprefixedEnv != "" {
|
||||||
|
os.Setenv("SERVICE_PATTERNS", tt.unprefixedEnv)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run the function
|
||||||
|
result := getServicePatterns()
|
||||||
|
|
||||||
|
// Verify results
|
||||||
|
assert.Equal(t, tt.expected, result, "Patterns should match expected values")
|
||||||
|
|
||||||
|
// Cleanup
|
||||||
|
if tt.cleanupEnvVars {
|
||||||
|
os.Unsetenv("BESZEL_AGENT_SERVICE_PATTERNS")
|
||||||
|
os.Unsetenv("SERVICE_PATTERNS")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
24
agent/test-data/container.json
Normal file
24
agent/test-data/container.json
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
{
|
||||||
|
"cpu_stats": {
|
||||||
|
"cpu_usage": {
|
||||||
|
"total_usage": 312055276000
|
||||||
|
},
|
||||||
|
"system_cpu_usage": 1366399830000000
|
||||||
|
},
|
||||||
|
"memory_stats": {
|
||||||
|
"usage": 507400192,
|
||||||
|
"stats": {
|
||||||
|
"inactive_file": 165130240
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"networks": {
|
||||||
|
"eth0": {
|
||||||
|
"tx_bytes": 20376558,
|
||||||
|
"rx_bytes": 537029455
|
||||||
|
},
|
||||||
|
"eth1": {
|
||||||
|
"tx_bytes": 2003766,
|
||||||
|
"rx_bytes": 6241
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
24
agent/test-data/container2.json
Normal file
24
agent/test-data/container2.json
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
{
|
||||||
|
"cpu_stats": {
|
||||||
|
"cpu_usage": {
|
||||||
|
"total_usage": 314891801000
|
||||||
|
},
|
||||||
|
"system_cpu_usage": 1368474900000000
|
||||||
|
},
|
||||||
|
"memory_stats": {
|
||||||
|
"usage": 507400192,
|
||||||
|
"stats": {
|
||||||
|
"inactive_file": 165130240
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"networks": {
|
||||||
|
"eth0": {
|
||||||
|
"tx_bytes": 20376558,
|
||||||
|
"rx_bytes": 537029455
|
||||||
|
},
|
||||||
|
"eth1": {
|
||||||
|
"tx_bytes": 2003766,
|
||||||
|
"rx_bytes": 6241
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
272
agent/test-data/smart/nvme0.json
Normal file
272
agent/test-data/smart/nvme0.json
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
{
|
||||||
|
"json_format_version": [
|
||||||
|
1,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"smartctl": {
|
||||||
|
"version": [
|
||||||
|
7,
|
||||||
|
5
|
||||||
|
],
|
||||||
|
"pre_release": false,
|
||||||
|
"svn_revision": "5714",
|
||||||
|
"platform_info": "x86_64-linux-6.17.1-2-cachyos",
|
||||||
|
"build_info": "(local build)",
|
||||||
|
"argv": [
|
||||||
|
"smartctl",
|
||||||
|
"-aj",
|
||||||
|
"/dev/nvme0"
|
||||||
|
],
|
||||||
|
"exit_status": 0
|
||||||
|
},
|
||||||
|
"local_time": {
|
||||||
|
"time_t": 1761507494,
|
||||||
|
"asctime": "Sun Oct 26 15:38:14 2025 EDT"
|
||||||
|
},
|
||||||
|
"device": {
|
||||||
|
"name": "/dev/nvme0",
|
||||||
|
"info_name": "/dev/nvme0",
|
||||||
|
"type": "nvme",
|
||||||
|
"protocol": "NVMe"
|
||||||
|
},
|
||||||
|
"model_name": "PELADN 512GB",
|
||||||
|
"serial_number": "2024031600129",
|
||||||
|
"firmware_version": "VC2S038E",
|
||||||
|
"nvme_pci_vendor": {
|
||||||
|
"id": 4332,
|
||||||
|
"subsystem_id": 4332
|
||||||
|
},
|
||||||
|
"nvme_ieee_oui_identifier": 57420,
|
||||||
|
"nvme_controller_id": 1,
|
||||||
|
"nvme_version": {
|
||||||
|
"string": "1.4",
|
||||||
|
"value": 66560
|
||||||
|
},
|
||||||
|
"nvme_number_of_namespaces": 1,
|
||||||
|
"nvme_namespaces": [
|
||||||
|
{
|
||||||
|
"id": 1,
|
||||||
|
"size": {
|
||||||
|
"blocks": 1000215216,
|
||||||
|
"bytes": 512110190592
|
||||||
|
},
|
||||||
|
"capacity": {
|
||||||
|
"blocks": 1000215216,
|
||||||
|
"bytes": 512110190592
|
||||||
|
},
|
||||||
|
"utilization": {
|
||||||
|
"blocks": 1000215216,
|
||||||
|
"bytes": 512110190592
|
||||||
|
},
|
||||||
|
"formatted_lba_size": 512,
|
||||||
|
"eui64": {
|
||||||
|
"oui": 57420,
|
||||||
|
"ext_id": 112094110470
|
||||||
|
},
|
||||||
|
"features": {
|
||||||
|
"value": 0,
|
||||||
|
"thin_provisioning": false,
|
||||||
|
"na_fields": false,
|
||||||
|
"dealloc_or_unwritten_block_error": false,
|
||||||
|
"uid_reuse": false,
|
||||||
|
"np_fields": false,
|
||||||
|
"other": 0
|
||||||
|
},
|
||||||
|
"lba_formats": [
|
||||||
|
{
|
||||||
|
"formatted": true,
|
||||||
|
"data_bytes": 512,
|
||||||
|
"metadata_bytes": 0,
|
||||||
|
"relative_performance": 0
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"user_capacity": {
|
||||||
|
"blocks": 1000215216,
|
||||||
|
"bytes": 512110190592
|
||||||
|
},
|
||||||
|
"logical_block_size": 512,
|
||||||
|
"smart_support": {
|
||||||
|
"available": true,
|
||||||
|
"enabled": true
|
||||||
|
},
|
||||||
|
"nvme_firmware_update_capabilities": {
|
||||||
|
"value": 2,
|
||||||
|
"slots": 1,
|
||||||
|
"first_slot_is_read_only": false,
|
||||||
|
"activiation_without_reset": false,
|
||||||
|
"multiple_update_detection": false,
|
||||||
|
"other": 0
|
||||||
|
},
|
||||||
|
"nvme_optional_admin_commands": {
|
||||||
|
"value": 23,
|
||||||
|
"security_send_receive": true,
|
||||||
|
"format_nvm": true,
|
||||||
|
"firmware_download": true,
|
||||||
|
"namespace_management": false,
|
||||||
|
"self_test": true,
|
||||||
|
"directives": false,
|
||||||
|
"mi_send_receive": false,
|
||||||
|
"virtualization_management": false,
|
||||||
|
"doorbell_buffer_config": false,
|
||||||
|
"get_lba_status": false,
|
||||||
|
"command_and_feature_lockdown": false,
|
||||||
|
"other": 0
|
||||||
|
},
|
||||||
|
"nvme_optional_nvm_commands": {
|
||||||
|
"value": 94,
|
||||||
|
"compare": false,
|
||||||
|
"write_uncorrectable": true,
|
||||||
|
"dataset_management": true,
|
||||||
|
"write_zeroes": true,
|
||||||
|
"save_select_feature_nonzero": true,
|
||||||
|
"reservations": false,
|
||||||
|
"timestamp": true,
|
||||||
|
"verify": false,
|
||||||
|
"copy": false,
|
||||||
|
"other": 0
|
||||||
|
},
|
||||||
|
"nvme_log_page_attributes": {
|
||||||
|
"value": 2,
|
||||||
|
"smart_health_per_namespace": false,
|
||||||
|
"commands_effects_log": true,
|
||||||
|
"extended_get_log_page_cmd": false,
|
||||||
|
"telemetry_log": false,
|
||||||
|
"persistent_event_log": false,
|
||||||
|
"supported_log_pages_log": false,
|
||||||
|
"telemetry_data_area_4": false,
|
||||||
|
"other": 0
|
||||||
|
},
|
||||||
|
"nvme_maximum_data_transfer_pages": 32,
|
||||||
|
"nvme_composite_temperature_threshold": {
|
||||||
|
"warning": 100,
|
||||||
|
"critical": 110
|
||||||
|
},
|
||||||
|
"temperature": {
|
||||||
|
"op_limit_max": 100,
|
||||||
|
"critical_limit_max": 110,
|
||||||
|
"current": 61
|
||||||
|
},
|
||||||
|
"nvme_power_states": [
|
||||||
|
{
|
||||||
|
"non_operational_state": false,
|
||||||
|
"relative_read_latency": 0,
|
||||||
|
"relative_read_throughput": 0,
|
||||||
|
"relative_write_latency": 0,
|
||||||
|
"relative_write_throughput": 0,
|
||||||
|
"entry_latency_us": 230000,
|
||||||
|
"exit_latency_us": 50000,
|
||||||
|
"max_power": {
|
||||||
|
"value": 800,
|
||||||
|
"scale": 2,
|
||||||
|
"units_per_watt": 100
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"non_operational_state": false,
|
||||||
|
"relative_read_latency": 1,
|
||||||
|
"relative_read_throughput": 1,
|
||||||
|
"relative_write_latency": 1,
|
||||||
|
"relative_write_throughput": 1,
|
||||||
|
"entry_latency_us": 4000,
|
||||||
|
"exit_latency_us": 50000,
|
||||||
|
"max_power": {
|
||||||
|
"value": 400,
|
||||||
|
"scale": 2,
|
||||||
|
"units_per_watt": 100
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"non_operational_state": false,
|
||||||
|
"relative_read_latency": 2,
|
||||||
|
"relative_read_throughput": 2,
|
||||||
|
"relative_write_latency": 2,
|
||||||
|
"relative_write_throughput": 2,
|
||||||
|
"entry_latency_us": 4000,
|
||||||
|
"exit_latency_us": 250000,
|
||||||
|
"max_power": {
|
||||||
|
"value": 300,
|
||||||
|
"scale": 2,
|
||||||
|
"units_per_watt": 100
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"non_operational_state": true,
|
||||||
|
"relative_read_latency": 3,
|
||||||
|
"relative_read_throughput": 3,
|
||||||
|
"relative_write_latency": 3,
|
||||||
|
"relative_write_throughput": 3,
|
||||||
|
"entry_latency_us": 5000,
|
||||||
|
"exit_latency_us": 10000,
|
||||||
|
"max_power": {
|
||||||
|
"value": 300,
|
||||||
|
"scale": 1,
|
||||||
|
"units_per_watt": 10000
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"non_operational_state": true,
|
||||||
|
"relative_read_latency": 4,
|
||||||
|
"relative_read_throughput": 4,
|
||||||
|
"relative_write_latency": 4,
|
||||||
|
"relative_write_throughput": 4,
|
||||||
|
"entry_latency_us": 54000,
|
||||||
|
"exit_latency_us": 45000,
|
||||||
|
"max_power": {
|
||||||
|
"value": 50,
|
||||||
|
"scale": 1,
|
||||||
|
"units_per_watt": 10000
|
||||||
|
}
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"smart_status": {
|
||||||
|
"passed": true,
|
||||||
|
"nvme": {
|
||||||
|
"value": 0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nvme_smart_health_information_log": {
|
||||||
|
"nsid": -1,
|
||||||
|
"critical_warning": 0,
|
||||||
|
"temperature": 61,
|
||||||
|
"available_spare": 100,
|
||||||
|
"available_spare_threshold": 32,
|
||||||
|
"percentage_used": 0,
|
||||||
|
"data_units_read": 6573104,
|
||||||
|
"data_units_written": 16040567,
|
||||||
|
"host_reads": 63241130,
|
||||||
|
"host_writes": 253050006,
|
||||||
|
"controller_busy_time": 0,
|
||||||
|
"power_cycles": 430,
|
||||||
|
"power_on_hours": 4399,
|
||||||
|
"unsafe_shutdowns": 44,
|
||||||
|
"media_errors": 0,
|
||||||
|
"num_err_log_entries": 0,
|
||||||
|
"warning_temp_time": 0,
|
||||||
|
"critical_comp_time": 0
|
||||||
|
},
|
||||||
|
"spare_available": {
|
||||||
|
"current_percent": 100,
|
||||||
|
"threshold_percent": 32
|
||||||
|
},
|
||||||
|
"endurance_used": {
|
||||||
|
"current_percent": 0
|
||||||
|
},
|
||||||
|
"power_cycle_count": 430,
|
||||||
|
"power_on_time": {
|
||||||
|
"hours": 4399
|
||||||
|
},
|
||||||
|
"nvme_error_information_log": {
|
||||||
|
"size": 8,
|
||||||
|
"read": 8,
|
||||||
|
"unread": 0
|
||||||
|
},
|
||||||
|
"nvme_self_test_log": {
|
||||||
|
"nsid": -1,
|
||||||
|
"current_self_test_operation": {
|
||||||
|
"value": 0,
|
||||||
|
"string": "No self-test in progress"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
36
agent/test-data/smart/scan.json
Normal file
36
agent/test-data/smart/scan.json
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
{
|
||||||
|
"json_format_version": [
|
||||||
|
1,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"smartctl": {
|
||||||
|
"version": [
|
||||||
|
7,
|
||||||
|
5
|
||||||
|
],
|
||||||
|
"pre_release": false,
|
||||||
|
"svn_revision": "5714",
|
||||||
|
"platform_info": "x86_64-linux-6.17.1-2-cachyos",
|
||||||
|
"build_info": "(local build)",
|
||||||
|
"argv": [
|
||||||
|
"smartctl",
|
||||||
|
"--scan",
|
||||||
|
"-j"
|
||||||
|
],
|
||||||
|
"exit_status": 0
|
||||||
|
},
|
||||||
|
"devices": [
|
||||||
|
{
|
||||||
|
"name": "/dev/sda",
|
||||||
|
"info_name": "/dev/sda [SAT]",
|
||||||
|
"type": "sat",
|
||||||
|
"protocol": "ATA"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "/dev/nvme0",
|
||||||
|
"info_name": "/dev/nvme0",
|
||||||
|
"type": "nvme",
|
||||||
|
"protocol": "NVMe"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
125
agent/test-data/smart/scsi.json
Normal file
125
agent/test-data/smart/scsi.json
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
{
|
||||||
|
"json_format_version": [
|
||||||
|
1,
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"smartctl": {
|
||||||
|
"version": [
|
||||||
|
7,
|
||||||
|
3
|
||||||
|
],
|
||||||
|
"svn_revision": "5338",
|
||||||
|
"platform_info": "x86_64-linux-6.12.43+deb12-amd64",
|
||||||
|
"build_info": "(local build)",
|
||||||
|
"argv": [
|
||||||
|
"smartctl",
|
||||||
|
"-aj",
|
||||||
|
"/dev/sde"
|
||||||
|
],
|
||||||
|
"exit_status": 0
|
||||||
|
},
|
||||||
|
"local_time": {
|
||||||
|
"time_t": 1761502142,
|
||||||
|
"asctime": "Sun Oct 21 21:09:02 2025 MSK"
|
||||||
|
},
|
||||||
|
"device": {
|
||||||
|
"name": "/dev/sde",
|
||||||
|
"info_name": "/dev/sde",
|
||||||
|
"type": "scsi",
|
||||||
|
"protocol": "SCSI"
|
||||||
|
},
|
||||||
|
"scsi_vendor": "YADRO",
|
||||||
|
"scsi_product": "WUH721414AL4204",
|
||||||
|
"scsi_model_name": "YADRO WUH721414AL4204",
|
||||||
|
"scsi_revision": "C240",
|
||||||
|
"scsi_version": "SPC-4",
|
||||||
|
"user_capacity": {
|
||||||
|
"blocks": 3418095616,
|
||||||
|
"bytes": 14000519643136
|
||||||
|
},
|
||||||
|
"logical_block_size": 4096,
|
||||||
|
"scsi_lb_provisioning": {
|
||||||
|
"name": "fully provisioned",
|
||||||
|
"value": 0,
|
||||||
|
"management_enabled": {
|
||||||
|
"name": "LBPME",
|
||||||
|
"value": 0
|
||||||
|
},
|
||||||
|
"read_zeros": {
|
||||||
|
"name": "LBPRZ",
|
||||||
|
"value": 0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"rotation_rate": 7200,
|
||||||
|
"form_factor": {
|
||||||
|
"scsi_value": 2,
|
||||||
|
"name": "3.5 inches"
|
||||||
|
},
|
||||||
|
"logical_unit_id": "0x5000cca29063dc00",
|
||||||
|
"serial_number": "9YHSDH9B",
|
||||||
|
"device_type": {
|
||||||
|
"scsi_terminology": "Peripheral Device Type [PDT]",
|
||||||
|
"scsi_value": 0,
|
||||||
|
"name": "disk"
|
||||||
|
},
|
||||||
|
"scsi_transport_protocol": {
|
||||||
|
"name": "SAS (SPL-4)",
|
||||||
|
"value": 6
|
||||||
|
},
|
||||||
|
"smart_support": {
|
||||||
|
"available": true,
|
||||||
|
"enabled": true
|
||||||
|
},
|
||||||
|
"temperature_warning": {
|
||||||
|
"enabled": true
|
||||||
|
},
|
||||||
|
"smart_status": {
|
||||||
|
"passed": true
|
||||||
|
},
|
||||||
|
"temperature": {
|
||||||
|
"current": 34,
|
||||||
|
"drive_trip": 85
|
||||||
|
},
|
||||||
|
"power_on_time": {
|
||||||
|
"hours": 458,
|
||||||
|
"minutes": 25
|
||||||
|
},
|
||||||
|
"scsi_start_stop_cycle_counter": {
|
||||||
|
"year_of_manufacture": "2022",
|
||||||
|
"week_of_manufacture": "41",
|
||||||
|
"specified_cycle_count_over_device_lifetime": 50000,
|
||||||
|
"accumulated_start_stop_cycles": 2,
|
||||||
|
"specified_load_unload_count_over_device_lifetime": 600000,
|
||||||
|
"accumulated_load_unload_cycles": 418
|
||||||
|
},
|
||||||
|
"scsi_grown_defect_list": 0,
|
||||||
|
"scsi_error_counter_log": {
|
||||||
|
"read": {
|
||||||
|
"errors_corrected_by_eccfast": 0,
|
||||||
|
"errors_corrected_by_eccdelayed": 0,
|
||||||
|
"errors_corrected_by_rereads_rewrites": 0,
|
||||||
|
"total_errors_corrected": 0,
|
||||||
|
"correction_algorithm_invocations": 346,
|
||||||
|
"gigabytes_processed": "3,641",
|
||||||
|
"total_uncorrected_errors": 0
|
||||||
|
},
|
||||||
|
"write": {
|
||||||
|
"errors_corrected_by_eccfast": 0,
|
||||||
|
"errors_corrected_by_eccdelayed": 0,
|
||||||
|
"errors_corrected_by_rereads_rewrites": 0,
|
||||||
|
"total_errors_corrected": 0,
|
||||||
|
"correction_algorithm_invocations": 4052,
|
||||||
|
"gigabytes_processed": "2124,590",
|
||||||
|
"total_uncorrected_errors": 0
|
||||||
|
},
|
||||||
|
"verify": {
|
||||||
|
"errors_corrected_by_eccfast": 0,
|
||||||
|
"errors_corrected_by_eccdelayed": 0,
|
||||||
|
"errors_corrected_by_rereads_rewrites": 0,
|
||||||
|
"total_errors_corrected": 0,
|
||||||
|
"correction_algorithm_invocations": 223,
|
||||||
|
"gigabytes_processed": "0,000",
|
||||||
|
"total_uncorrected_errors": 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
1013
agent/test-data/smart/sda.json
Normal file
1013
agent/test-data/smart/sda.json
Normal file
File diff suppressed because it is too large
Load Diff
130
agent/tools/fetchsmartctl/main.go
Normal file
130
agent/tools/fetchsmartctl/main.go
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha1"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Download smartctl.exe from the given URL and save it to the given destination.
|
||||||
|
// This is used to embed smartctl.exe in the Windows build.
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
url := flag.String("url", "", "URL to download smartctl.exe from (required)")
|
||||||
|
out := flag.String("out", "", "Destination path for smartctl.exe (required)")
|
||||||
|
sha := flag.String("sha", "", "Optional SHA1/SHA256 checksum for integrity validation")
|
||||||
|
force := flag.Bool("force", false, "Force re-download even if destination exists")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if *url == "" || *out == "" {
|
||||||
|
fatalf("-url and -out are required")
|
||||||
|
}
|
||||||
|
|
||||||
|
if !*force {
|
||||||
|
if info, err := os.Stat(*out); err == nil && info.Size() > 0 {
|
||||||
|
fmt.Println("smartctl.exe already present, skipping download")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := downloadFile(*url, *out, *sha); err != nil {
|
||||||
|
fatalf("download failed: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func downloadFile(url, dest, shaHex string) error {
|
||||||
|
// Prepare destination
|
||||||
|
if err := os.MkdirAll(filepath.Dir(dest), 0o755); err != nil {
|
||||||
|
return fmt.Errorf("create dir: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTP client
|
||||||
|
client := &http.Client{Timeout: 60 * time.Second}
|
||||||
|
req, err := http.NewRequest(http.MethodGet, url, nil)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("new request: %w", err)
|
||||||
|
}
|
||||||
|
req.Header.Set("User-Agent", "beszel-fetchsmartctl/1.0")
|
||||||
|
|
||||||
|
resp, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("http get: %w", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||||
|
return fmt.Errorf("unexpected HTTP status: %s", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
tmp := dest + ".tmp"
|
||||||
|
f, err := os.OpenFile(tmp, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("open tmp: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine hash algorithm based on length (SHA1=40, SHA256=64)
|
||||||
|
var hasher hash.Hash
|
||||||
|
if shaHex := strings.TrimSpace(shaHex); shaHex != "" {
|
||||||
|
cleanSha := strings.ToLower(strings.ReplaceAll(shaHex, " ", ""))
|
||||||
|
switch len(cleanSha) {
|
||||||
|
case 40:
|
||||||
|
hasher = sha1.New()
|
||||||
|
case 64:
|
||||||
|
hasher = sha256.New()
|
||||||
|
default:
|
||||||
|
f.Close()
|
||||||
|
os.Remove(tmp)
|
||||||
|
return fmt.Errorf("unsupported hash length: %d (expected 40 for SHA1 or 64 for SHA256)", len(cleanSha))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var mw io.Writer = f
|
||||||
|
if hasher != nil {
|
||||||
|
mw = io.MultiWriter(f, hasher)
|
||||||
|
}
|
||||||
|
if _, err := io.Copy(mw, resp.Body); err != nil {
|
||||||
|
f.Close()
|
||||||
|
os.Remove(tmp)
|
||||||
|
return fmt.Errorf("write tmp: %w", err)
|
||||||
|
}
|
||||||
|
if err := f.Close(); err != nil {
|
||||||
|
os.Remove(tmp)
|
||||||
|
return fmt.Errorf("close tmp: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if hasher != nil && shaHex != "" {
|
||||||
|
cleanSha := strings.ToLower(strings.ReplaceAll(strings.TrimSpace(shaHex), " ", ""))
|
||||||
|
got := strings.ToLower(hex.EncodeToString(hasher.Sum(nil)))
|
||||||
|
if got != cleanSha {
|
||||||
|
os.Remove(tmp)
|
||||||
|
return fmt.Errorf("hash mismatch: got %s want %s", got, cleanSha)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make executable and move into place
|
||||||
|
if err := os.Chmod(tmp, 0o755); err != nil {
|
||||||
|
os.Remove(tmp)
|
||||||
|
return fmt.Errorf("chmod: %w", err)
|
||||||
|
}
|
||||||
|
if err := os.Rename(tmp, dest); err != nil {
|
||||||
|
os.Remove(tmp)
|
||||||
|
return fmt.Errorf("rename: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("smartctl.exe downloaded to", dest)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func fatalf(format string, a ...any) {
|
||||||
|
fmt.Fprintf(os.Stderr, format+"\n", a...)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
@@ -40,11 +40,12 @@ func (o *openRCRestarter) Restart() error {
|
|||||||
type openWRTRestarter struct{ cmd string }
|
type openWRTRestarter struct{ cmd string }
|
||||||
|
|
||||||
func (w *openWRTRestarter) Restart() error {
|
func (w *openWRTRestarter) Restart() error {
|
||||||
if err := exec.Command(w.cmd, "running", "beszel-agent").Run(); err != nil {
|
// https://openwrt.org/docs/guide-user/base-system/managing_services?s[]=service
|
||||||
|
if err := exec.Command("/etc/init.d/beszel-agent", "running").Run(); err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
ghupdate.ColorPrint(ghupdate.ColorYellow, "Restarting beszel-agent via procd…")
|
ghupdate.ColorPrint(ghupdate.ColorYellow, "Restarting beszel-agent via procd…")
|
||||||
return exec.Command(w.cmd, "restart", "beszel-agent").Run()
|
return exec.Command("/etc/init.d/beszel-agent", "restart").Run()
|
||||||
}
|
}
|
||||||
|
|
||||||
type freeBSDRestarter struct{ cmd string }
|
type freeBSDRestarter struct{ cmd string }
|
||||||
@@ -64,11 +65,13 @@ func detectRestarter() restarter {
|
|||||||
if path, err := exec.LookPath("rc-service"); err == nil {
|
if path, err := exec.LookPath("rc-service"); err == nil {
|
||||||
return &openRCRestarter{cmd: path}
|
return &openRCRestarter{cmd: path}
|
||||||
}
|
}
|
||||||
|
if path, err := exec.LookPath("procd"); err == nil {
|
||||||
|
return &openWRTRestarter{cmd: path}
|
||||||
|
}
|
||||||
if path, err := exec.LookPath("service"); err == nil {
|
if path, err := exec.LookPath("service"); err == nil {
|
||||||
if runtime.GOOS == "freebsd" {
|
if runtime.GOOS == "freebsd" {
|
||||||
return &freeBSDRestarter{cmd: path}
|
return &freeBSDRestarter{cmd: path}
|
||||||
}
|
}
|
||||||
return &openWRTRestarter{cmd: path}
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,10 +6,13 @@ import "github.com/blang/semver"
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// Version is the current version of the application.
|
// Version is the current version of the application.
|
||||||
Version = "0.12.12"
|
Version = "0.16.1"
|
||||||
// AppName is the name of the application.
|
// AppName is the name of the application.
|
||||||
AppName = "beszel"
|
AppName = "beszel"
|
||||||
)
|
)
|
||||||
|
|
||||||
// MinVersionCbor is the minimum supported version for CBOR compatibility.
|
// MinVersionCbor is the minimum supported version for CBOR compatibility.
|
||||||
var MinVersionCbor = semver.MustParse("0.12.0")
|
var MinVersionCbor = semver.MustParse("0.12.0")
|
||||||
|
|
||||||
|
// MinVersionAgentResponse is the minimum supported version for AgentResponse compatibility.
|
||||||
|
var MinVersionAgentResponse = semver.MustParse("0.13.0")
|
||||||
|
|||||||
49
go.mod
49
go.mod
@@ -1,27 +1,25 @@
|
|||||||
module github.com/henrygd/beszel
|
module github.com/henrygd/beszel
|
||||||
|
|
||||||
go 1.25.1
|
go 1.25.3
|
||||||
|
|
||||||
// lock shoutrrr to specific version to allow review before updating
|
|
||||||
replace github.com/nicholas-fedor/shoutrrr => github.com/nicholas-fedor/shoutrrr v0.9.1
|
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/blang/semver v3.5.1+incompatible
|
github.com/blang/semver v3.5.1+incompatible
|
||||||
|
github.com/coreos/go-systemd/v22 v22.6.0
|
||||||
github.com/distatus/battery v0.11.0
|
github.com/distatus/battery v0.11.0
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0
|
github.com/fxamacker/cbor/v2 v2.9.0
|
||||||
github.com/gliderlabs/ssh v0.3.8
|
github.com/gliderlabs/ssh v0.3.8
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/lxzan/gws v1.8.9
|
github.com/lxzan/gws v1.8.9
|
||||||
github.com/nicholas-fedor/shoutrrr v0.9.1
|
github.com/nicholas-fedor/shoutrrr v0.12.0
|
||||||
github.com/pocketbase/dbx v1.11.0
|
github.com/pocketbase/dbx v1.11.0
|
||||||
github.com/pocketbase/pocketbase v0.30.0
|
github.com/pocketbase/pocketbase v0.33.0
|
||||||
github.com/shirou/gopsutil/v4 v4.25.8
|
github.com/shirou/gopsutil/v4 v4.25.10
|
||||||
github.com/spf13/cast v1.10.0
|
github.com/spf13/cast v1.10.0
|
||||||
github.com/spf13/cobra v1.10.1
|
github.com/spf13/cobra v1.10.1
|
||||||
github.com/spf13/pflag v1.0.10
|
github.com/spf13/pflag v1.0.10
|
||||||
github.com/stretchr/testify v1.11.1
|
github.com/stretchr/testify v1.11.1
|
||||||
golang.org/x/crypto v0.42.0
|
golang.org/x/crypto v0.44.0
|
||||||
golang.org/x/exp v0.0.0-20250911091902-df9299821621
|
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -33,37 +31,38 @@ require (
|
|||||||
github.com/dolthub/maphash v0.1.0 // indirect
|
github.com/dolthub/maphash v0.1.0 // indirect
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
|
github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
|
||||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/ebitengine/purego v0.9.0 // indirect
|
github.com/ebitengine/purego v0.9.1 // indirect
|
||||||
github.com/fatih/color v1.18.0 // indirect
|
github.com/fatih/color v1.18.0 // indirect
|
||||||
github.com/gabriel-vasile/mimetype v1.4.10 // indirect
|
github.com/gabriel-vasile/mimetype v1.4.11 // indirect
|
||||||
github.com/ganigeorgiev/fexpr v0.5.0 // indirect
|
github.com/ganigeorgiev/fexpr v0.5.0 // indirect
|
||||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||||
github.com/go-ozzo/ozzo-validation/v4 v4.3.0 // indirect
|
github.com/go-ozzo/ozzo-validation/v4 v4.3.0 // indirect
|
||||||
github.com/go-sql-driver/mysql v1.9.1 // indirect
|
github.com/go-sql-driver/mysql v1.9.1 // indirect
|
||||||
|
github.com/godbus/dbus/v5 v5.2.0 // indirect
|
||||||
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
|
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/klauspost/compress v1.18.0 // indirect
|
github.com/klauspost/compress v1.18.1 // indirect
|
||||||
github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 // indirect
|
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect
|
||||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/ncruces/go-strftime v0.1.9 // indirect
|
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||||
github.com/tklauser/go-sysconf v0.3.15 // indirect
|
github.com/tklauser/go-sysconf v0.3.16 // indirect
|
||||||
github.com/tklauser/numcpus v0.10.0 // indirect
|
github.com/tklauser/numcpus v0.11.0 // indirect
|
||||||
github.com/x448/float16 v0.8.4 // indirect
|
github.com/x448/float16 v0.8.4 // indirect
|
||||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||||
golang.org/x/image v0.31.0 // indirect
|
golang.org/x/image v0.33.0 // indirect
|
||||||
golang.org/x/net v0.44.0 // indirect
|
golang.org/x/net v0.47.0 // indirect
|
||||||
golang.org/x/oauth2 v0.31.0 // indirect
|
golang.org/x/oauth2 v0.33.0 // indirect
|
||||||
golang.org/x/sync v0.17.0 // indirect
|
golang.org/x/sync v0.18.0 // indirect
|
||||||
golang.org/x/sys v0.36.0 // indirect
|
golang.org/x/sys v0.38.0 // indirect
|
||||||
golang.org/x/text v0.29.0 // indirect
|
golang.org/x/term v0.37.0 // indirect
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
|
golang.org/x/text v0.31.0 // indirect
|
||||||
howett.net/plist v1.0.1 // indirect
|
howett.net/plist v1.0.1 // indirect
|
||||||
modernc.org/libc v1.66.3 // indirect
|
modernc.org/libc v1.66.10 // indirect
|
||||||
modernc.org/mathutil v1.7.1 // indirect
|
modernc.org/mathutil v1.7.1 // indirect
|
||||||
modernc.org/memory v1.11.0 // indirect
|
modernc.org/memory v1.11.0 // indirect
|
||||||
modernc.org/sqlite v1.38.2 // indirect
|
modernc.org/sqlite v1.40.0 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
134
go.sum
134
go.sum
@@ -9,6 +9,8 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
|
|||||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||||
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
||||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||||
|
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
|
||||||
|
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
@@ -23,16 +25,16 @@ github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCO
|
|||||||
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
github.com/ebitengine/purego v0.9.0 h1:mh0zpKBIXDceC63hpvPuGLiJ8ZAa3DfrFTudmfi8A4k=
|
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
|
||||||
github.com/ebitengine/purego v0.9.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
|
github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
|
github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
|
||||||
github.com/ganigeorgiev/fexpr v0.5.0 h1:XA9JxtTE/Xm+g/JFI6RfZEHSiQlk+1glLvRK1Lpv/Tk=
|
github.com/ganigeorgiev/fexpr v0.5.0 h1:XA9JxtTE/Xm+g/JFI6RfZEHSiQlk+1glLvRK1Lpv/Tk=
|
||||||
github.com/ganigeorgiev/fexpr v0.5.0/go.mod h1:RyGiGqmeXhEQ6+mlGdnUleLHgtzzu/VGO2WtJkF5drE=
|
github.com/ganigeorgiev/fexpr v0.5.0/go.mod h1:RyGiGqmeXhEQ6+mlGdnUleLHgtzzu/VGO2WtJkF5drE=
|
||||||
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
|
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
|
||||||
@@ -49,40 +51,44 @@ github.com/go-sql-driver/mysql v1.9.1 h1:FrjNGn/BsJQjVRuSa8CBrM5BWA9BWoXXat3KrtS
|
|||||||
github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||||
|
github.com/godbus/dbus/v5 v5.2.0 h1:3WexO+U+yg9T70v9FdHr9kCxYlazaAXUhx2VMkbfax8=
|
||||||
|
github.com/godbus/dbus/v5 v5.2.0/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
|
||||||
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||||
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6 h1:EEHtgt9IwisQ2AZ4pIsMjahcegHh6rmhqxzIRQIyepY=
|
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
|
||||||
github.com/google/pprof v0.0.0-20250820193118-f64d9cf942d6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
|
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||||
|
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
|
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
|
||||||
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
|
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
|
||||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
|
||||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54 h1:mFWunSatvkQQDhpdyuFAYwyAan3hzCuma+Pz8sqvOfg=
|
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k=
|
||||||
github.com/lufia/plan9stats v0.0.0-20250827001030-24949be3fa54/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||||
github.com/lxzan/gws v1.8.9 h1:VU3SGUeWlQrEwfUSfokcZep8mdg/BrUF+y73YYshdBM=
|
github.com/lxzan/gws v1.8.9 h1:VU3SGUeWlQrEwfUSfokcZep8mdg/BrUF+y73YYshdBM=
|
||||||
github.com/lxzan/gws v1.8.9/go.mod h1:d9yHaR1eDTBHagQC6KY7ycUOaz5KWeqQtP3xu7aMK8Y=
|
github.com/lxzan/gws v1.8.9/go.mod h1:d9yHaR1eDTBHagQC6KY7ycUOaz5KWeqQtP3xu7aMK8Y=
|
||||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||||
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.9.1 h1:SEBhM6P1favzILO0f55CY3P9JwvM9RZ7B1ZMCl+Injs=
|
github.com/nicholas-fedor/shoutrrr v0.12.0 h1:8mwJdfU+uBEybSymwQJMGl/grG7lvVUKbVSNxn3XvUI=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.9.1/go.mod h1:khue5m8LYyMzdPWuJxDTJeT89l9gjwjA+a+r0e8qxxk=
|
github.com/nicholas-fedor/shoutrrr v0.12.0/go.mod h1:WYiRalR4C43Qmd2zhPWGIFIxu633NB1hDM6Ap/DQcsA=
|
||||||
github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw=
|
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
|
||||||
github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE=
|
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
|
||||||
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
|
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
|
||||||
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
|
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
@@ -90,8 +96,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI
|
|||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pocketbase/dbx v1.11.0 h1:LpZezioMfT3K4tLrqA55wWFw1EtH1pM4tzSVa7kgszU=
|
github.com/pocketbase/dbx v1.11.0 h1:LpZezioMfT3K4tLrqA55wWFw1EtH1pM4tzSVa7kgszU=
|
||||||
github.com/pocketbase/dbx v1.11.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
github.com/pocketbase/dbx v1.11.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
||||||
github.com/pocketbase/pocketbase v0.30.0 h1:7v9O3hBYyHyptnnFjdP8tEJIuyHEfjhG6PC4gjf5eoE=
|
github.com/pocketbase/pocketbase v0.33.0 h1:v2EfiY3hxigzRJ/BwFuwVn0vUv7d2QQoD5zUFPaKR9o=
|
||||||
github.com/pocketbase/pocketbase v0.30.0/go.mod h1:gZIwampw4VqMcEdGHwBZgSa54xWIDgVJb4uINUMXLmA=
|
github.com/pocketbase/pocketbase v0.33.0/go.mod h1:9BEs+CRV7CrS+X5LfBh4bdJQsbzQAIklft3ovGe/c5A=
|
||||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||||
@@ -99,8 +105,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
|
|||||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/shirou/gopsutil/v4 v4.25.8 h1:NnAsw9lN7587WHxjJA9ryDnqhJpFH6A+wagYWTOH970=
|
github.com/shirou/gopsutil/v4 v4.25.10 h1:at8lk/5T1OgtuCp+AwrDofFRjnvosn0nkN2OLQ6g8tA=
|
||||||
github.com/shirou/gopsutil/v4 v4.25.8/go.mod h1:q9QdMmfAOVIw7a+eF86P7ISEU6ka+NLgkUxlopV4RwI=
|
github.com/shirou/gopsutil/v4 v4.25.10/go.mod h1:+kSwyC8DRUD9XXEHCAFjK+0nuArFJM0lva+StQAcskM=
|
||||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||||
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
||||||
@@ -112,77 +118,77 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
|
|||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
|
github.com/tklauser/go-sysconf v0.3.16 h1:frioLaCQSsF5Cy1jgRBrzr6t502KIIwQ0MArYICU0nA=
|
||||||
github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
|
github.com/tklauser/go-sysconf v0.3.16/go.mod h1:/qNL9xxDhc7tx3HSRsLWNnuzbVfh3e7gh/BmM179nYI=
|
||||||
github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
|
github.com/tklauser/numcpus v0.11.0 h1:nSTwhKH5e1dMNsCdVBukSZrURJRoHbSEQjdEbY+9RXw=
|
||||||
github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
|
github.com/tklauser/numcpus v0.11.0/go.mod h1:z+LwcLq54uWZTX0u/bGobaV34u6V7KNlTZejzM6/3MQ=
|
||||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||||
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
|
|
||||||
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
|
|
||||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
|
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
|
||||||
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
|
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
|
||||||
golang.org/x/exp v0.0.0-20250911091902-df9299821621 h1:2id6c1/gto0kaHYyrixvknJ8tUK/Qs5IsmBtrc+FtgU=
|
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
|
||||||
golang.org/x/exp v0.0.0-20250911091902-df9299821621/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
|
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
|
||||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/image v0.31.0 h1:mLChjE2MV6g1S7oqbXC0/UcKijjm5fnJLUYKIYrLESA=
|
golang.org/x/image v0.33.0 h1:LXRZRnv1+zGd5XBUVRFmYEphyyKJjQjCRiOuAP3sZfQ=
|
||||||
golang.org/x/image v0.31.0/go.mod h1:R9ec5Lcp96v9FTF+ajwaH3uGxPH4fKfHHAVbUILxghA=
|
golang.org/x/image v0.33.0/go.mod h1:DD3OsTYT9chzuzTQt+zMcOlBHgfoKQb1gry8p76Y1sc=
|
||||||
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
|
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||||
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
|
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
|
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||||
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
|
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||||
golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo=
|
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
|
||||||
golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||||
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
|
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||||
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk=
|
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||||
golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4=
|
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
|
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||||
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
|
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
|
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||||
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
|
gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
|
||||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM=
|
howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM=
|
||||||
howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||||
modernc.org/cc/v4 v4.26.4 h1:jPhG8oNjtTYuP2FA4YefTJ/wioNUGALmGuEWt7SUR6s=
|
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||||
modernc.org/cc/v4 v4.26.4/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||||
modernc.org/ccgo/v4 v4.28.1 h1:wPKYn5EC/mYTqBO373jKjvX2n+3+aK7+sICCv4Fjy1A=
|
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
||||||
modernc.org/ccgo/v4 v4.28.1/go.mod h1:uD+4RnfrVgE6ec9NGguUNdhqzNIeeomeXf6CL0GTE5Q=
|
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
||||||
modernc.org/fileutil v1.3.28 h1:Vp156KUA2nPu9F1NEv036x9UGOjg2qsi5QlWTjZmtMk=
|
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
||||||
modernc.org/fileutil v1.3.28/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||||
|
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
||||||
|
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||||
modernc.org/libc v1.66.3 h1:cfCbjTUcdsKyyZZfEUKfoHcP3S0Wkvz3jgSzByEWVCQ=
|
modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A=
|
||||||
modernc.org/libc v1.66.3/go.mod h1:XD9zO8kt59cANKvHPXpx7yS2ELPheAey0vjIuZOhOU8=
|
modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I=
|
||||||
modernc.org/libc v1.66.9 h1:YkHp7E1EWrN2iyNav7JE/nHasmshPvlGkon1VxGqOw0=
|
modernc.org/libc v1.67.0 h1:QzL4IrKab2OFmxA3/vRYl0tLXrIamwrhD6CKD4WBVjQ=
|
||||||
modernc.org/libc v1.66.9/go.mod h1:aVdcY7udcawRqauu0HukYYxtBSizV+R80n/6aQe9D5k=
|
modernc.org/libc v1.67.0/go.mod h1:QvvnnJ5P7aitu0ReNpVIEyesuhmDLQ8kaEoyMjIFZJA=
|
||||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||||
@@ -191,8 +197,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
|||||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||||
modernc.org/sqlite v1.38.2 h1:Aclu7+tgjgcQVShZqim41Bbw9Cho0y/7WzYptXqkEek=
|
modernc.org/sqlite v1.40.0 h1:bNWEDlYhNPAUdUdBzjAvn8icAs/2gaKlj4vM+tQ6KdQ=
|
||||||
modernc.org/sqlite v1.38.2/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E=
|
modernc.org/sqlite v1.40.0/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE=
|
||||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||||
|
|||||||
@@ -40,13 +40,18 @@ type UserNotificationSettings struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type SystemAlertStats struct {
|
type SystemAlertStats struct {
|
||||||
Cpu float64 `json:"cpu"`
|
Cpu float64 `json:"cpu"`
|
||||||
Mem float64 `json:"mp"`
|
Mem float64 `json:"mp"`
|
||||||
Disk float64 `json:"dp"`
|
Disk float64 `json:"dp"`
|
||||||
NetSent float64 `json:"ns"`
|
NetSent float64 `json:"ns"`
|
||||||
NetRecv float64 `json:"nr"`
|
NetRecv float64 `json:"nr"`
|
||||||
Temperatures map[string]float32 `json:"t"`
|
GPU map[string]SystemAlertGPUData `json:"g"`
|
||||||
LoadAvg [3]float64 `json:"la"`
|
Temperatures map[string]float32 `json:"t"`
|
||||||
|
LoadAvg [3]float64 `json:"la"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SystemAlertGPUData struct {
|
||||||
|
Usage float64 `json:"u"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type SystemAlertData struct {
|
type SystemAlertData struct {
|
||||||
@@ -72,7 +77,6 @@ var supportsTitle = map[string]struct{}{
|
|||||||
"ifttt": {},
|
"ifttt": {},
|
||||||
"join": {},
|
"join": {},
|
||||||
"lark": {},
|
"lark": {},
|
||||||
"matrix": {},
|
|
||||||
"ntfy": {},
|
"ntfy": {},
|
||||||
"opsgenie": {},
|
"opsgenie": {},
|
||||||
"pushbullet": {},
|
"pushbullet": {},
|
||||||
|
|||||||
@@ -161,19 +161,14 @@ func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, a
|
|||||||
title := fmt.Sprintf("Connection to %s is %s %v", systemName, alertStatus, emoji)
|
title := fmt.Sprintf("Connection to %s is %s %v", systemName, alertStatus, emoji)
|
||||||
message := strings.TrimSuffix(title, emoji)
|
message := strings.TrimSuffix(title, emoji)
|
||||||
|
|
||||||
// if errs := am.hub.ExpandRecord(alertRecord, []string{"user"}, nil); len(errs) > 0 {
|
// Get system ID for the link
|
||||||
// return errs["user"]
|
systemID := alertRecord.GetString("system")
|
||||||
// }
|
|
||||||
// user := alertRecord.ExpandedOne("user")
|
|
||||||
// if user == nil {
|
|
||||||
// return nil
|
|
||||||
// }
|
|
||||||
|
|
||||||
return am.SendAlert(AlertMessageData{
|
return am.SendAlert(AlertMessageData{
|
||||||
UserID: alertRecord.GetString("user"),
|
UserID: alertRecord.GetString("user"),
|
||||||
Title: title,
|
Title: title,
|
||||||
Message: message,
|
Message: message,
|
||||||
Link: am.hub.MakeLink("system", systemName),
|
Link: am.hub.MakeLink("system", systemID),
|
||||||
LinkText: "View " + systemName,
|
LinkText: "View " + systemName,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -64,6 +64,8 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
case "LoadAvg15":
|
case "LoadAvg15":
|
||||||
val = data.Info.LoadAvg[2]
|
val = data.Info.LoadAvg[2]
|
||||||
unit = ""
|
unit = ""
|
||||||
|
case "GPU":
|
||||||
|
val = data.Info.GpuPct
|
||||||
}
|
}
|
||||||
|
|
||||||
triggered := alertRecord.GetBool("triggered")
|
triggered := alertRecord.GetBool("triggered")
|
||||||
@@ -206,6 +208,17 @@ func (am *AlertManager) HandleSystemAlerts(systemRecord *core.Record, data *syst
|
|||||||
alert.val += stats.LoadAvg[1]
|
alert.val += stats.LoadAvg[1]
|
||||||
case "LoadAvg15":
|
case "LoadAvg15":
|
||||||
alert.val += stats.LoadAvg[2]
|
alert.val += stats.LoadAvg[2]
|
||||||
|
case "GPU":
|
||||||
|
if len(stats.GPU) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
maxUsage := 0.0
|
||||||
|
for _, gpu := range stats.GPU {
|
||||||
|
if gpu.Usage > maxUsage {
|
||||||
|
maxUsage = gpu.Usage
|
||||||
|
}
|
||||||
|
}
|
||||||
|
alert.val += maxUsage
|
||||||
default:
|
default:
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -268,9 +281,9 @@ func (am *AlertManager) sendSystemAlert(alert SystemAlertData) {
|
|||||||
alert.name = after + "m Load"
|
alert.name = after + "m Load"
|
||||||
}
|
}
|
||||||
|
|
||||||
// make title alert name lowercase if not CPU
|
// make title alert name lowercase if not CPU or GPU
|
||||||
titleAlertName := alert.name
|
titleAlertName := alert.name
|
||||||
if titleAlertName != "CPU" {
|
if titleAlertName != "CPU" && titleAlertName != "GPU" {
|
||||||
titleAlertName = strings.ToLower(titleAlertName)
|
titleAlertName = strings.ToLower(titleAlertName)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -298,7 +311,7 @@ func (am *AlertManager) sendSystemAlert(alert SystemAlertData) {
|
|||||||
UserID: alert.alertRecord.GetString("user"),
|
UserID: alert.alertRecord.GetString("user"),
|
||||||
Title: subject,
|
Title: subject,
|
||||||
Message: body,
|
Message: body,
|
||||||
Link: am.hub.MakeLink("system", systemName),
|
Link: am.hub.MakeLink("system", alert.systemRecord.Id),
|
||||||
LinkText: "View " + systemName,
|
LinkText: "View " + systemName,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,22 +1,47 @@
|
|||||||
package common
|
package common
|
||||||
|
|
||||||
type WebSocketAction = uint8
|
import (
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
|
)
|
||||||
|
|
||||||
// Not implemented yet
|
type WebSocketAction = uint8
|
||||||
// type AgentError = uint8
|
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// Request system data from agent
|
// Request system data from agent
|
||||||
GetData WebSocketAction = iota
|
GetData WebSocketAction = iota
|
||||||
// Check the fingerprint of the agent
|
// Check the fingerprint of the agent
|
||||||
CheckFingerprint
|
CheckFingerprint
|
||||||
|
// Request container logs from agent
|
||||||
|
GetContainerLogs
|
||||||
|
// Request container info from agent
|
||||||
|
GetContainerInfo
|
||||||
|
// Request SMART data from agent
|
||||||
|
GetSmartData
|
||||||
|
// Request detailed systemd service info from agent
|
||||||
|
GetSystemdInfo
|
||||||
|
// Add new actions here...
|
||||||
)
|
)
|
||||||
|
|
||||||
// HubRequest defines the structure for requests sent from hub to agent.
|
// HubRequest defines the structure for requests sent from hub to agent.
|
||||||
type HubRequest[T any] struct {
|
type HubRequest[T any] struct {
|
||||||
Action WebSocketAction `cbor:"0,keyasint"`
|
Action WebSocketAction `cbor:"0,keyasint"`
|
||||||
Data T `cbor:"1,keyasint,omitempty,omitzero"`
|
Data T `cbor:"1,keyasint,omitempty,omitzero"`
|
||||||
// Error AgentError `cbor:"error,omitempty,omitzero"`
|
Id *uint32 `cbor:"2,keyasint,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// AgentResponse defines the structure for responses sent from agent to hub.
|
||||||
|
type AgentResponse struct {
|
||||||
|
Id *uint32 `cbor:"0,keyasint,omitempty"`
|
||||||
|
SystemData *system.CombinedData `cbor:"1,keyasint,omitempty,omitzero"`
|
||||||
|
Fingerprint *FingerprintResponse `cbor:"2,keyasint,omitempty,omitzero"`
|
||||||
|
Error string `cbor:"3,keyasint,omitempty,omitzero"`
|
||||||
|
String *string `cbor:"4,keyasint,omitempty,omitzero"`
|
||||||
|
SmartData map[string]smart.SmartData `cbor:"5,keyasint,omitempty,omitzero"`
|
||||||
|
ServiceInfo systemd.ServiceDetails `cbor:"6,keyasint,omitempty,omitzero"`
|
||||||
|
// Logs *LogsPayload `cbor:"4,keyasint,omitempty,omitzero"`
|
||||||
|
// RawBytes []byte `cbor:"4,keyasint,omitempty,omitzero"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type FingerprintRequest struct {
|
type FingerprintRequest struct {
|
||||||
@@ -27,6 +52,24 @@ type FingerprintRequest struct {
|
|||||||
type FingerprintResponse struct {
|
type FingerprintResponse struct {
|
||||||
Fingerprint string `cbor:"0,keyasint"`
|
Fingerprint string `cbor:"0,keyasint"`
|
||||||
// Optional system info for universal token system creation
|
// Optional system info for universal token system creation
|
||||||
Hostname string `cbor:"1,keyasint,omitempty,omitzero"`
|
Hostname string `cbor:"1,keyasint,omitzero"`
|
||||||
Port string `cbor:"2,keyasint,omitempty,omitzero"`
|
Port string `cbor:"2,keyasint,omitzero"`
|
||||||
|
Name string `cbor:"3,keyasint,omitzero"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DataRequestOptions struct {
|
||||||
|
CacheTimeMs uint16 `cbor:"0,keyasint"`
|
||||||
|
// ResourceType uint8 `cbor:"1,keyasint,omitempty,omitzero"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContainerLogsRequest struct {
|
||||||
|
ContainerID string `cbor:"0,keyasint"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ContainerInfoRequest struct {
|
||||||
|
ContainerID string `cbor:"0,keyasint"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SystemdInfoRequest struct {
|
||||||
|
ServiceName string `cbor:"0,keyasint"`
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -23,4 +23,7 @@ COPY --from=builder /agent /agent
|
|||||||
# this is so we don't need to create the /tmp directory in the scratch container
|
# this is so we don't need to create the /tmp directory in the scratch container
|
||||||
COPY --from=builder /tmp /tmp
|
COPY --from=builder /tmp /tmp
|
||||||
|
|
||||||
|
# Ensure data persistence across container recreations
|
||||||
|
VOLUME ["/var/lib/beszel-agent"]
|
||||||
|
|
||||||
ENTRYPOINT ["/agent"]
|
ENTRYPOINT ["/agent"]
|
||||||
28
internal/dockerfile_agent_alpine
Normal file
28
internal/dockerfile_agent_alpine
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
FROM --platform=$BUILDPLATFORM golang:alpine AS builder
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
COPY ../go.mod ../go.sum ./
|
||||||
|
RUN go mod download
|
||||||
|
|
||||||
|
# Copy source files
|
||||||
|
COPY . ./
|
||||||
|
|
||||||
|
# Build
|
||||||
|
ARG TARGETOS TARGETARCH
|
||||||
|
RUN CGO_ENABLED=0 GOGC=75 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -ldflags "-w -s" -o /agent ./internal/cmd/agent
|
||||||
|
|
||||||
|
RUN rm -rf /tmp/*
|
||||||
|
|
||||||
|
# --------------------------
|
||||||
|
# Final image: default scratch-based agent
|
||||||
|
# --------------------------
|
||||||
|
FROM alpine:latest
|
||||||
|
COPY --from=builder /agent /agent
|
||||||
|
|
||||||
|
RUN apk add --no-cache smartmontools
|
||||||
|
|
||||||
|
# Ensure data persistence across container recreations
|
||||||
|
VOLUME ["/var/lib/beszel-agent"]
|
||||||
|
|
||||||
|
ENTRYPOINT ["/agent"]
|
||||||
@@ -20,6 +20,9 @@ FROM alpine:edge
|
|||||||
|
|
||||||
COPY --from=builder /agent /agent
|
COPY --from=builder /agent /agent
|
||||||
|
|
||||||
RUN apk add --no-cache -X https://dl-cdn.alpinelinux.org/alpine/edge/testing igt-gpu-tools
|
RUN apk add --no-cache -X https://dl-cdn.alpinelinux.org/alpine/edge/testing igt-gpu-tools smartmontools
|
||||||
|
|
||||||
|
# Ensure data persistence across container recreations
|
||||||
|
VOLUME ["/var/lib/beszel-agent"]
|
||||||
|
|
||||||
ENTRYPOINT ["/agent"]
|
ENTRYPOINT ["/agent"]
|
||||||
@@ -2,7 +2,6 @@ FROM --platform=$BUILDPLATFORM golang:alpine AS builder
|
|||||||
|
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
|
|
||||||
COPY ../go.mod ../go.sum ./
|
COPY ../go.mod ../go.sum ./
|
||||||
RUN go mod download
|
RUN go mod download
|
||||||
|
|
||||||
@@ -13,7 +12,24 @@ COPY . ./
|
|||||||
ARG TARGETOS TARGETARCH
|
ARG TARGETOS TARGETARCH
|
||||||
RUN CGO_ENABLED=0 GOGC=75 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -ldflags "-w -s" -o /agent ./internal/cmd/agent
|
RUN CGO_ENABLED=0 GOGC=75 GOOS=$TARGETOS GOARCH=$TARGETARCH go build -ldflags "-w -s" -o /agent ./internal/cmd/agent
|
||||||
|
|
||||||
RUN rm -rf /tmp/*
|
# --------------------------
|
||||||
|
# Smartmontools builder stage
|
||||||
|
# --------------------------
|
||||||
|
FROM nvidia/cuda:12.2.2-base-ubuntu22.04 AS smartmontools-builder
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install -y \
|
||||||
|
wget \
|
||||||
|
build-essential \
|
||||||
|
&& wget https://downloads.sourceforge.net/project/smartmontools/smartmontools/7.5/smartmontools-7.5.tar.gz \
|
||||||
|
&& tar zxvf smartmontools-7.5.tar.gz \
|
||||||
|
&& cd smartmontools-7.5 \
|
||||||
|
&& ./configure --prefix=/usr --sysconfdir=/etc \
|
||||||
|
&& make \
|
||||||
|
&& make install \
|
||||||
|
&& rm -rf /smartmontools-7.5* \
|
||||||
|
&& apt-get remove -y wget build-essential \
|
||||||
|
&& apt-get autoremove -y \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# --------------------------
|
# --------------------------
|
||||||
# Final image: GPU-enabled agent with nvidia-smi
|
# Final image: GPU-enabled agent with nvidia-smi
|
||||||
@@ -21,7 +37,10 @@ RUN rm -rf /tmp/*
|
|||||||
FROM nvidia/cuda:12.2.2-base-ubuntu22.04
|
FROM nvidia/cuda:12.2.2-base-ubuntu22.04
|
||||||
COPY --from=builder /agent /agent
|
COPY --from=builder /agent /agent
|
||||||
|
|
||||||
# this is so we don't need to create the /tmp directory in the scratch container
|
# Copy smartmontools binaries and config files
|
||||||
COPY --from=builder /tmp /tmp
|
COPY --from=smartmontools-builder /usr/sbin/smartctl /usr/sbin/smartctl
|
||||||
|
|
||||||
|
# Ensure data persistence across container recreations
|
||||||
|
VOLUME ["/var/lib/beszel-agent"]
|
||||||
|
|
||||||
ENTRYPOINT ["/agent"]
|
ENTRYPOINT ["/agent"]
|
||||||
|
|||||||
@@ -25,6 +25,9 @@ FROM scratch
|
|||||||
COPY --from=builder /beszel /
|
COPY --from=builder /beszel /
|
||||||
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||||
|
|
||||||
|
# Ensure data persistence across container recreations
|
||||||
|
VOLUME ["/beszel_data"]
|
||||||
|
|
||||||
EXPOSE 8090
|
EXPOSE 8090
|
||||||
|
|
||||||
ENTRYPOINT [ "/beszel" ]
|
ENTRYPOINT [ "/beszel" ]
|
||||||
|
|||||||
@@ -8,7 +8,8 @@ type ApiInfo struct {
|
|||||||
IdShort string
|
IdShort string
|
||||||
Names []string
|
Names []string
|
||||||
Status string
|
Status string
|
||||||
// Image string
|
State string
|
||||||
|
Image string
|
||||||
// ImageID string
|
// ImageID string
|
||||||
// Command string
|
// Command string
|
||||||
// Created int64
|
// Created int64
|
||||||
@@ -16,7 +17,6 @@ type ApiInfo struct {
|
|||||||
// SizeRw int64 `json:",omitempty"`
|
// SizeRw int64 `json:",omitempty"`
|
||||||
// SizeRootFs int64 `json:",omitempty"`
|
// SizeRootFs int64 `json:",omitempty"`
|
||||||
// Labels map[string]string
|
// Labels map[string]string
|
||||||
// State string
|
|
||||||
// HostConfig struct {
|
// HostConfig struct {
|
||||||
// NetworkMode string `json:",omitempty"`
|
// NetworkMode string `json:",omitempty"`
|
||||||
// Annotations map[string]string `json:",omitempty"`
|
// Annotations map[string]string `json:",omitempty"`
|
||||||
@@ -103,6 +103,22 @@ type prevNetStats struct {
|
|||||||
Recv uint64
|
Recv uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type DockerHealth = uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
DockerHealthNone DockerHealth = iota
|
||||||
|
DockerHealthStarting
|
||||||
|
DockerHealthHealthy
|
||||||
|
DockerHealthUnhealthy
|
||||||
|
)
|
||||||
|
|
||||||
|
var DockerHealthStrings = map[string]DockerHealth{
|
||||||
|
"none": DockerHealthNone,
|
||||||
|
"starting": DockerHealthStarting,
|
||||||
|
"healthy": DockerHealthHealthy,
|
||||||
|
"unhealthy": DockerHealthUnhealthy,
|
||||||
|
}
|
||||||
|
|
||||||
// Docker container stats
|
// Docker container stats
|
||||||
type Stats struct {
|
type Stats struct {
|
||||||
Name string `json:"n" cbor:"0,keyasint"`
|
Name string `json:"n" cbor:"0,keyasint"`
|
||||||
@@ -110,6 +126,11 @@ type Stats struct {
|
|||||||
Mem float64 `json:"m" cbor:"2,keyasint"`
|
Mem float64 `json:"m" cbor:"2,keyasint"`
|
||||||
NetworkSent float64 `json:"ns" cbor:"3,keyasint"`
|
NetworkSent float64 `json:"ns" cbor:"3,keyasint"`
|
||||||
NetworkRecv float64 `json:"nr" cbor:"4,keyasint"`
|
NetworkRecv float64 `json:"nr" cbor:"4,keyasint"`
|
||||||
|
|
||||||
|
Health DockerHealth `json:"-" cbor:"5,keyasint"`
|
||||||
|
Status string `json:"-" cbor:"6,keyasint"`
|
||||||
|
Id string `json:"-" cbor:"7,keyasint"`
|
||||||
|
Image string `json:"-" cbor:"8,keyasint"`
|
||||||
// PrevCpu [2]uint64 `json:"-"`
|
// PrevCpu [2]uint64 `json:"-"`
|
||||||
CpuSystem uint64 `json:"-"`
|
CpuSystem uint64 `json:"-"`
|
||||||
CpuContainer uint64 `json:"-"`
|
CpuContainer uint64 `json:"-"`
|
||||||
|
|||||||
529
internal/entities/smart/smart.go
Normal file
529
internal/entities/smart/smart.go
Normal file
@@ -0,0 +1,529 @@
|
|||||||
|
package smart
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Common types
|
||||||
|
type VersionInfo [2]int
|
||||||
|
|
||||||
|
type SmartctlInfo struct {
|
||||||
|
Version VersionInfo `json:"version"`
|
||||||
|
SvnRevision string `json:"svn_revision"`
|
||||||
|
PlatformInfo string `json:"platform_info"`
|
||||||
|
BuildInfo string `json:"build_info"`
|
||||||
|
Argv []string `json:"argv"`
|
||||||
|
ExitStatus int `json:"exit_status"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type DeviceInfo struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
InfoName string `json:"info_name"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Protocol string `json:"protocol"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type UserCapacity struct {
|
||||||
|
Blocks uint64 `json:"blocks"`
|
||||||
|
Bytes uint64 `json:"bytes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// type LocalTime struct {
|
||||||
|
// TimeT int64 `json:"time_t"`
|
||||||
|
// Asctime string `json:"asctime"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type WwnInfo struct {
|
||||||
|
// Naa int `json:"naa"`
|
||||||
|
// Oui int `json:"oui"`
|
||||||
|
// ID int `json:"id"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type FormFactorInfo struct {
|
||||||
|
// AtaValue int `json:"ata_value"`
|
||||||
|
// Name string `json:"name"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type TrimInfo struct {
|
||||||
|
// Supported bool `json:"supported"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type AtaVersionInfo struct {
|
||||||
|
// String string `json:"string"`
|
||||||
|
// MajorValue int `json:"major_value"`
|
||||||
|
// MinorValue int `json:"minor_value"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type VersionStringInfo struct {
|
||||||
|
// String string `json:"string"`
|
||||||
|
// Value int `json:"value"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type SpeedInfo struct {
|
||||||
|
// SataValue int `json:"sata_value"`
|
||||||
|
// String string `json:"string"`
|
||||||
|
// UnitsPerSecond int `json:"units_per_second"`
|
||||||
|
// BitsPerUnit int `json:"bits_per_unit"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type InterfaceSpeedInfo struct {
|
||||||
|
// Max SpeedInfo `json:"max"`
|
||||||
|
// Current SpeedInfo `json:"current"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
type SmartStatusInfo struct {
|
||||||
|
Passed bool `json:"passed"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type StatusInfo struct {
|
||||||
|
Value int `json:"value"`
|
||||||
|
String string `json:"string"`
|
||||||
|
Passed bool `json:"passed"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PollingMinutes struct {
|
||||||
|
Short int `json:"short"`
|
||||||
|
Extended int `json:"extended"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type CapabilitiesInfo struct {
|
||||||
|
Values []int `json:"values"`
|
||||||
|
ExecOfflineImmediateSupported bool `json:"exec_offline_immediate_supported"`
|
||||||
|
OfflineIsAbortedUponNewCmd bool `json:"offline_is_aborted_upon_new_cmd"`
|
||||||
|
OfflineSurfaceScanSupported bool `json:"offline_surface_scan_supported"`
|
||||||
|
SelfTestsSupported bool `json:"self_tests_supported"`
|
||||||
|
ConveyanceSelfTestSupported bool `json:"conveyance_self_test_supported"`
|
||||||
|
SelectiveSelfTestSupported bool `json:"selective_self_test_supported"`
|
||||||
|
AttributeAutosaveEnabled bool `json:"attribute_autosave_enabled"`
|
||||||
|
ErrorLoggingSupported bool `json:"error_logging_supported"`
|
||||||
|
GpLoggingSupported bool `json:"gp_logging_supported"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// type AtaSmartData struct {
|
||||||
|
// OfflineDataCollection OfflineDataCollectionInfo `json:"offline_data_collection"`
|
||||||
|
// SelfTest SelfTestInfo `json:"self_test"`
|
||||||
|
// Capabilities CapabilitiesInfo `json:"capabilities"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type OfflineDataCollectionInfo struct {
|
||||||
|
// Status StatusInfo `json:"status"`
|
||||||
|
// CompletionSeconds int `json:"completion_seconds"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type SelfTestInfo struct {
|
||||||
|
// Status StatusInfo `json:"status"`
|
||||||
|
// PollingMinutes PollingMinutes `json:"polling_minutes"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type AtaSctCapabilities struct {
|
||||||
|
// Value int `json:"value"`
|
||||||
|
// ErrorRecoveryControlSupported bool `json:"error_recovery_control_supported"`
|
||||||
|
// FeatureControlSupported bool `json:"feature_control_supported"`
|
||||||
|
// DataTableSupported bool `json:"data_table_supported"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
type SummaryInfo struct {
|
||||||
|
Revision int `json:"revision"`
|
||||||
|
Count int `json:"count"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type AtaSmartAttributes struct {
|
||||||
|
// Revision int `json:"revision"`
|
||||||
|
Table []AtaSmartAttribute `json:"table"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type AtaSmartAttribute struct {
|
||||||
|
ID uint16 `json:"id"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
Value uint16 `json:"value"`
|
||||||
|
Worst uint16 `json:"worst"`
|
||||||
|
Thresh uint16 `json:"thresh"`
|
||||||
|
WhenFailed string `json:"when_failed"`
|
||||||
|
// Flags AttributeFlags `json:"flags"`
|
||||||
|
Raw RawValue `json:"raw"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// type AttributeFlags struct {
|
||||||
|
// Value int `json:"value"`
|
||||||
|
// String string `json:"string"`
|
||||||
|
// Prefailure bool `json:"prefailure"`
|
||||||
|
// UpdatedOnline bool `json:"updated_online"`
|
||||||
|
// Performance bool `json:"performance"`
|
||||||
|
// ErrorRate bool `json:"error_rate"`
|
||||||
|
// EventCount bool `json:"event_count"`
|
||||||
|
// AutoKeep bool `json:"auto_keep"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
type RawValue struct {
|
||||||
|
Value SmartRawValue `json:"value"`
|
||||||
|
String string `json:"string"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *RawValue) UnmarshalJSON(data []byte) error {
|
||||||
|
var tmp struct {
|
||||||
|
Value json.RawMessage `json:"value"`
|
||||||
|
String string `json:"string"`
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(data, &tmp); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(tmp.Value) > 0 {
|
||||||
|
if err := r.Value.UnmarshalJSON(tmp.Value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
r.Value = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
r.String = tmp.String
|
||||||
|
|
||||||
|
if parsed, ok := ParseSmartRawValueString(tmp.String); ok {
|
||||||
|
r.Value = SmartRawValue(parsed)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type SmartRawValue uint64
|
||||||
|
|
||||||
|
// handles when drives report strings like "0h+0m+0.000s" or "7344 (253d 8h)" for power on hours
|
||||||
|
func (v *SmartRawValue) UnmarshalJSON(data []byte) error {
|
||||||
|
trimmed := strings.TrimSpace(string(data))
|
||||||
|
if len(trimmed) == 0 || trimmed == "null" {
|
||||||
|
*v = 0
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if trimmed[0] == '"' {
|
||||||
|
valueStr, err := strconv.Unquote(trimmed)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
parsed, ok := ParseSmartRawValueString(valueStr)
|
||||||
|
if ok {
|
||||||
|
*v = SmartRawValue(parsed)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
*v = 0
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if parsed, err := strconv.ParseUint(trimmed, 0, 64); err == nil {
|
||||||
|
*v = SmartRawValue(parsed)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if parsed, ok := ParseSmartRawValueString(trimmed); ok {
|
||||||
|
*v = SmartRawValue(parsed)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
*v = 0
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseSmartRawValueString attempts to extract a numeric value from the raw value
|
||||||
|
// strings emitted by smartctl, which sometimes include human-friendly annotations
|
||||||
|
// like "7344 (253d 8h)" or "0h+0m+0.000s". It returns the parsed value and a
|
||||||
|
// boolean indicating success.
|
||||||
|
func ParseSmartRawValueString(value string) (uint64, bool) {
|
||||||
|
value = strings.TrimSpace(value)
|
||||||
|
if value == "" {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
if parsed, err := strconv.ParseUint(value, 0, 64); err == nil {
|
||||||
|
return parsed, true
|
||||||
|
}
|
||||||
|
|
||||||
|
if idx := strings.IndexRune(value, 'h'); idx > 0 {
|
||||||
|
hoursPart := strings.TrimSpace(value[:idx])
|
||||||
|
if hoursPart != "" {
|
||||||
|
if parsed, err := strconv.ParseFloat(hoursPart, 64); err == nil {
|
||||||
|
return uint64(parsed), true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < len(value); i++ {
|
||||||
|
if value[i] < '0' || value[i] > '9' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
end := i + 1
|
||||||
|
for end < len(value) && value[end] >= '0' && value[end] <= '9' {
|
||||||
|
end++
|
||||||
|
}
|
||||||
|
digits := value[i:end]
|
||||||
|
if parsed, err := strconv.ParseUint(digits, 10, 64); err == nil {
|
||||||
|
return parsed, true
|
||||||
|
}
|
||||||
|
i = end
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// type PowerOnTimeInfo struct {
|
||||||
|
// Hours uint32 `json:"hours"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
type TemperatureInfo struct {
|
||||||
|
Current uint8 `json:"current"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type TemperatureInfoScsi struct {
|
||||||
|
Current uint8 `json:"current"`
|
||||||
|
DriveTrip uint8 `json:"drive_trip"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// type SelectiveSelfTestTable struct {
|
||||||
|
// LbaMin int `json:"lba_min"`
|
||||||
|
// LbaMax int `json:"lba_max"`
|
||||||
|
// Status StatusInfo `json:"status"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type SelectiveSelfTestFlags struct {
|
||||||
|
// Value int `json:"value"`
|
||||||
|
// RemainderScanEnabled bool `json:"remainder_scan_enabled"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type AtaSmartSelectiveSelfTestLog struct {
|
||||||
|
// Revision int `json:"revision"`
|
||||||
|
// Table []SelectiveSelfTestTable `json:"table"`
|
||||||
|
// Flags SelectiveSelfTestFlags `json:"flags"`
|
||||||
|
// PowerUpScanResumeMinutes int `json:"power_up_scan_resume_minutes"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// BaseSmartInfo contains common fields shared between SATA and NVMe drives
|
||||||
|
// type BaseSmartInfo struct {
|
||||||
|
// Device DeviceInfo `json:"device"`
|
||||||
|
// ModelName string `json:"model_name"`
|
||||||
|
// SerialNumber string `json:"serial_number"`
|
||||||
|
// FirmwareVersion string `json:"firmware_version"`
|
||||||
|
// UserCapacity UserCapacity `json:"user_capacity"`
|
||||||
|
// LogicalBlockSize int `json:"logical_block_size"`
|
||||||
|
// LocalTime LocalTime `json:"local_time"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
type SmartctlInfoLegacy struct {
|
||||||
|
Version VersionInfo `json:"version"`
|
||||||
|
SvnRevision string `json:"svn_revision"`
|
||||||
|
PlatformInfo string `json:"platform_info"`
|
||||||
|
BuildInfo string `json:"build_info"`
|
||||||
|
Argv []string `json:"argv"`
|
||||||
|
ExitStatus int `json:"exit_status"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SmartInfoForSata struct {
|
||||||
|
// JSONFormatVersion VersionInfo `json:"json_format_version"`
|
||||||
|
Smartctl SmartctlInfoLegacy `json:"smartctl"`
|
||||||
|
Device DeviceInfo `json:"device"`
|
||||||
|
// ModelFamily string `json:"model_family"`
|
||||||
|
ModelName string `json:"model_name"`
|
||||||
|
SerialNumber string `json:"serial_number"`
|
||||||
|
// Wwn WwnInfo `json:"wwn"`
|
||||||
|
FirmwareVersion string `json:"firmware_version"`
|
||||||
|
UserCapacity UserCapacity `json:"user_capacity"`
|
||||||
|
ScsiVendor string `json:"scsi_vendor"`
|
||||||
|
ScsiProduct string `json:"scsi_product"`
|
||||||
|
// LogicalBlockSize int `json:"logical_block_size"`
|
||||||
|
// PhysicalBlockSize int `json:"physical_block_size"`
|
||||||
|
// RotationRate int `json:"rotation_rate"`
|
||||||
|
// FormFactor FormFactorInfo `json:"form_factor"`
|
||||||
|
// Trim TrimInfo `json:"trim"`
|
||||||
|
// InSmartctlDatabase bool `json:"in_smartctl_database"`
|
||||||
|
// AtaVersion AtaVersionInfo `json:"ata_version"`
|
||||||
|
// SataVersion VersionStringInfo `json:"sata_version"`
|
||||||
|
// InterfaceSpeed InterfaceSpeedInfo `json:"interface_speed"`
|
||||||
|
// LocalTime LocalTime `json:"local_time"`
|
||||||
|
SmartStatus SmartStatusInfo `json:"smart_status"`
|
||||||
|
// AtaSmartData AtaSmartData `json:"ata_smart_data"`
|
||||||
|
// AtaSctCapabilities AtaSctCapabilities `json:"ata_sct_capabilities"`
|
||||||
|
AtaSmartAttributes AtaSmartAttributes `json:"ata_smart_attributes"`
|
||||||
|
// PowerOnTime PowerOnTimeInfo `json:"power_on_time"`
|
||||||
|
// PowerCycleCount uint16 `json:"power_cycle_count"`
|
||||||
|
Temperature TemperatureInfo `json:"temperature"`
|
||||||
|
// AtaSmartErrorLog AtaSmartErrorLog `json:"ata_smart_error_log"`
|
||||||
|
// AtaSmartSelfTestLog AtaSmartSelfTestLog `json:"ata_smart_self_test_log"`
|
||||||
|
// AtaSmartSelectiveSelfTestLog AtaSmartSelectiveSelfTestLog `json:"ata_smart_selective_self_test_log"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ScsiErrorCounter struct {
|
||||||
|
ErrorsCorrectedByECCFast uint64 `json:"errors_corrected_by_eccfast"`
|
||||||
|
ErrorsCorrectedByECCDelayed uint64 `json:"errors_corrected_by_eccdelayed"`
|
||||||
|
ErrorsCorrectedByRereadsRewrites uint64 `json:"errors_corrected_by_rereads_rewrites"`
|
||||||
|
TotalErrorsCorrected uint64 `json:"total_errors_corrected"`
|
||||||
|
CorrectionAlgorithmInvocations uint64 `json:"correction_algorithm_invocations"`
|
||||||
|
GigabytesProcessed string `json:"gigabytes_processed"`
|
||||||
|
TotalUncorrectedErrors uint64 `json:"total_uncorrected_errors"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ScsiErrorCounterLog struct {
|
||||||
|
Read ScsiErrorCounter `json:"read"`
|
||||||
|
Write ScsiErrorCounter `json:"write"`
|
||||||
|
Verify ScsiErrorCounter `json:"verify"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type ScsiStartStopCycleCounter struct {
|
||||||
|
YearOfManufacture string `json:"year_of_manufacture"`
|
||||||
|
WeekOfManufacture string `json:"week_of_manufacture"`
|
||||||
|
SpecifiedCycleCountOverDeviceLifetime uint64 `json:"specified_cycle_count_over_device_lifetime"`
|
||||||
|
AccumulatedStartStopCycles uint64 `json:"accumulated_start_stop_cycles"`
|
||||||
|
SpecifiedLoadUnloadCountOverDeviceLifetime uint64 `json:"specified_load_unload_count_over_device_lifetime"`
|
||||||
|
AccumulatedLoadUnloadCycles uint64 `json:"accumulated_load_unload_cycles"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PowerOnTimeScsi struct {
|
||||||
|
Hours uint64 `json:"hours"`
|
||||||
|
Minutes uint64 `json:"minutes"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SmartInfoForScsi struct {
|
||||||
|
Smartctl SmartctlInfoLegacy `json:"smartctl"`
|
||||||
|
Device DeviceInfo `json:"device"`
|
||||||
|
ScsiVendor string `json:"scsi_vendor"`
|
||||||
|
ScsiProduct string `json:"scsi_product"`
|
||||||
|
ScsiModelName string `json:"scsi_model_name"`
|
||||||
|
ScsiRevision string `json:"scsi_revision"`
|
||||||
|
ScsiVersion string `json:"scsi_version"`
|
||||||
|
SerialNumber string `json:"serial_number"`
|
||||||
|
UserCapacity UserCapacity `json:"user_capacity"`
|
||||||
|
Temperature TemperatureInfoScsi `json:"temperature"`
|
||||||
|
SmartStatus SmartStatusInfo `json:"smart_status"`
|
||||||
|
PowerOnTime PowerOnTimeScsi `json:"power_on_time"`
|
||||||
|
ScsiStartStopCycleCounter ScsiStartStopCycleCounter `json:"scsi_start_stop_cycle_counter"`
|
||||||
|
ScsiGrownDefectList uint64 `json:"scsi_grown_defect_list"`
|
||||||
|
ScsiErrorCounterLog ScsiErrorCounterLog `json:"scsi_error_counter_log"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// type AtaSmartErrorLog struct {
|
||||||
|
// Summary SummaryInfo `json:"summary"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type AtaSmartSelfTestLog struct {
|
||||||
|
// Standard SummaryInfo `json:"standard"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
type SmartctlInfoNvme struct {
|
||||||
|
Version VersionInfo `json:"version"`
|
||||||
|
SVNRevision string `json:"svn_revision"`
|
||||||
|
PlatformInfo string `json:"platform_info"`
|
||||||
|
BuildInfo string `json:"build_info"`
|
||||||
|
Argv []string `json:"argv"`
|
||||||
|
ExitStatus int `json:"exit_status"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// type NVMePCIVendor struct {
|
||||||
|
// ID int `json:"id"`
|
||||||
|
// SubsystemID int `json:"subsystem_id"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type SizeCapacityInfo struct {
|
||||||
|
// Blocks uint64 `json:"blocks"`
|
||||||
|
// Bytes uint64 `json:"bytes"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type EUI64Info struct {
|
||||||
|
// OUI int `json:"oui"`
|
||||||
|
// ExtID int `json:"ext_id"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
// type NVMeNamespace struct {
|
||||||
|
// ID uint32 `json:"id"`
|
||||||
|
// Size SizeCapacityInfo `json:"size"`
|
||||||
|
// Capacity SizeCapacityInfo `json:"capacity"`
|
||||||
|
// Utilization SizeCapacityInfo `json:"utilization"`
|
||||||
|
// FormattedLBASize uint32 `json:"formatted_lba_size"`
|
||||||
|
// EUI64 EUI64Info `json:"eui64"`
|
||||||
|
// }
|
||||||
|
|
||||||
|
type SmartStatusInfoNvme struct {
|
||||||
|
Passed bool `json:"passed"`
|
||||||
|
NVMe SmartStatusNVMe `json:"nvme"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SmartStatusNVMe struct {
|
||||||
|
Value int `json:"value"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type NVMeSmartHealthInformationLog struct {
|
||||||
|
CriticalWarning uint `json:"critical_warning"`
|
||||||
|
Temperature uint8 `json:"temperature"`
|
||||||
|
AvailableSpare uint `json:"available_spare"`
|
||||||
|
AvailableSpareThreshold uint `json:"available_spare_threshold"`
|
||||||
|
PercentageUsed uint8 `json:"percentage_used"`
|
||||||
|
DataUnitsRead uint64 `json:"data_units_read"`
|
||||||
|
DataUnitsWritten uint64 `json:"data_units_written"`
|
||||||
|
HostReads uint `json:"host_reads"`
|
||||||
|
HostWrites uint `json:"host_writes"`
|
||||||
|
ControllerBusyTime uint `json:"controller_busy_time"`
|
||||||
|
PowerCycles uint16 `json:"power_cycles"`
|
||||||
|
PowerOnHours uint32 `json:"power_on_hours"`
|
||||||
|
UnsafeShutdowns uint16 `json:"unsafe_shutdowns"`
|
||||||
|
MediaErrors uint `json:"media_errors"`
|
||||||
|
NumErrLogEntries uint `json:"num_err_log_entries"`
|
||||||
|
WarningTempTime uint `json:"warning_temp_time"`
|
||||||
|
CriticalCompTime uint `json:"critical_comp_time"`
|
||||||
|
TemperatureSensors []uint8 `json:"temperature_sensors"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SmartInfoForNvme struct {
|
||||||
|
// JSONFormatVersion VersionInfo `json:"json_format_version"`
|
||||||
|
Smartctl SmartctlInfoNvme `json:"smartctl"`
|
||||||
|
Device DeviceInfo `json:"device"`
|
||||||
|
ModelName string `json:"model_name"`
|
||||||
|
SerialNumber string `json:"serial_number"`
|
||||||
|
FirmwareVersion string `json:"firmware_version"`
|
||||||
|
// NVMePCIVendor NVMePCIVendor `json:"nvme_pci_vendor"`
|
||||||
|
// NVMeIEEEOUIIdentifier uint32 `json:"nvme_ieee_oui_identifier"`
|
||||||
|
// NVMeTotalCapacity uint64 `json:"nvme_total_capacity"`
|
||||||
|
// NVMeUnallocatedCapacity uint64 `json:"nvme_unallocated_capacity"`
|
||||||
|
// NVMeControllerID uint16 `json:"nvme_controller_id"`
|
||||||
|
// NVMeVersion VersionStringInfo `json:"nvme_version"`
|
||||||
|
// NVMeNumberOfNamespaces uint8 `json:"nvme_number_of_namespaces"`
|
||||||
|
// NVMeNamespaces []NVMeNamespace `json:"nvme_namespaces"`
|
||||||
|
UserCapacity UserCapacity `json:"user_capacity"`
|
||||||
|
// LogicalBlockSize int `json:"logical_block_size"`
|
||||||
|
// LocalTime LocalTime `json:"local_time"`
|
||||||
|
SmartStatus SmartStatusInfoNvme `json:"smart_status"`
|
||||||
|
NVMeSmartHealthInformationLog NVMeSmartHealthInformationLog `json:"nvme_smart_health_information_log"`
|
||||||
|
Temperature TemperatureInfoNvme `json:"temperature"`
|
||||||
|
PowerCycleCount uint16 `json:"power_cycle_count"`
|
||||||
|
PowerOnTime PowerOnTimeInfoNvme `json:"power_on_time"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type TemperatureInfoNvme struct {
|
||||||
|
Current int `json:"current"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PowerOnTimeInfoNvme struct {
|
||||||
|
Hours int `json:"hours"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SmartData struct {
|
||||||
|
// ModelFamily string `json:"mf,omitempty" cbor:"0,keyasint,omitempty"`
|
||||||
|
ModelName string `json:"mn,omitempty" cbor:"1,keyasint,omitempty"`
|
||||||
|
SerialNumber string `json:"sn,omitempty" cbor:"2,keyasint,omitempty"`
|
||||||
|
FirmwareVersion string `json:"fv,omitempty" cbor:"3,keyasint,omitempty"`
|
||||||
|
Capacity uint64 `json:"c,omitempty" cbor:"4,keyasint,omitempty"`
|
||||||
|
SmartStatus string `json:"s,omitempty" cbor:"5,keyasint,omitempty"`
|
||||||
|
DiskName string `json:"dn,omitempty" cbor:"6,keyasint,omitempty"`
|
||||||
|
DiskType string `json:"dt,omitempty" cbor:"7,keyasint,omitempty"`
|
||||||
|
Temperature uint8 `json:"t,omitempty" cbor:"8,keyasint,omitempty"`
|
||||||
|
Attributes []*SmartAttribute `json:"a,omitempty" cbor:"9,keyasint,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type SmartAttribute struct {
|
||||||
|
ID uint16 `json:"id,omitempty" cbor:"0,keyasint,omitempty"`
|
||||||
|
Name string `json:"n" cbor:"1,keyasint"`
|
||||||
|
Value uint16 `json:"v,omitempty" cbor:"2,keyasint,omitempty"`
|
||||||
|
Worst uint16 `json:"w,omitempty" cbor:"3,keyasint,omitempty"`
|
||||||
|
Threshold uint16 `json:"t,omitempty" cbor:"4,keyasint,omitempty"`
|
||||||
|
RawValue uint64 `json:"rv" cbor:"5,keyasint"`
|
||||||
|
RawString string `json:"rs,omitempty" cbor:"6,keyasint,omitempty"`
|
||||||
|
WhenFailed string `json:"wf,omitempty" cbor:"7,keyasint,omitempty"`
|
||||||
|
}
|
||||||
62
internal/entities/smart/smart_test.go
Normal file
62
internal/entities/smart/smart_test.go
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
package smart
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSmartRawValueUnmarshalDuration(t *testing.T) {
|
||||||
|
input := []byte(`{"value":"62312h+33m+50.907s","string":"62312h+33m+50.907s"}`)
|
||||||
|
var raw RawValue
|
||||||
|
err := json.Unmarshal(input, &raw)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.EqualValues(t, 62312, raw.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSmartRawValueUnmarshalNumericString(t *testing.T) {
|
||||||
|
input := []byte(`{"value":"7344","string":"7344"}`)
|
||||||
|
var raw RawValue
|
||||||
|
err := json.Unmarshal(input, &raw)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.EqualValues(t, 7344, raw.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSmartRawValueUnmarshalParenthetical(t *testing.T) {
|
||||||
|
input := []byte(`{"value":"39925 (212 206 0)","string":"39925 (212 206 0)"}`)
|
||||||
|
var raw RawValue
|
||||||
|
err := json.Unmarshal(input, &raw)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.EqualValues(t, 39925, raw.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSmartRawValueUnmarshalDurationWithFractions(t *testing.T) {
|
||||||
|
input := []byte(`{"value":"2748h+31m+49.560s","string":"2748h+31m+49.560s"}`)
|
||||||
|
var raw RawValue
|
||||||
|
err := json.Unmarshal(input, &raw)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.EqualValues(t, 2748, raw.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSmartRawValueUnmarshalParentheticalRawValue(t *testing.T) {
|
||||||
|
input := []byte(`{"value":57891864217128,"string":"39925 (212 206 0)"}`)
|
||||||
|
var raw RawValue
|
||||||
|
err := json.Unmarshal(input, &raw)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.EqualValues(t, 39925, raw.Value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSmartRawValueUnmarshalDurationRawValue(t *testing.T) {
|
||||||
|
input := []byte(`{"value":57891864217128,"string":"2748h+31m+49.560s"}`)
|
||||||
|
var raw RawValue
|
||||||
|
err := json.Unmarshal(input, &raw)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.EqualValues(t, 2748, raw.Value)
|
||||||
|
}
|
||||||
@@ -3,9 +3,11 @@ package system
|
|||||||
// TODO: this is confusing, make common package with common/types common/helpers etc
|
// TODO: this is confusing, make common package with common/types common/helpers etc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/container"
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Stats struct {
|
type Stats struct {
|
||||||
@@ -41,7 +43,28 @@ type Stats struct {
|
|||||||
LoadAvg [3]float64 `json:"la,omitempty" cbor:"28,keyasint"`
|
LoadAvg [3]float64 `json:"la,omitempty" cbor:"28,keyasint"`
|
||||||
Battery [2]uint8 `json:"bat,omitzero" cbor:"29,keyasint,omitzero"` // [percent, charge state, current]
|
Battery [2]uint8 `json:"bat,omitzero" cbor:"29,keyasint,omitzero"` // [percent, charge state, current]
|
||||||
MaxMem float64 `json:"mm,omitempty" cbor:"30,keyasint,omitempty"`
|
MaxMem float64 `json:"mm,omitempty" cbor:"30,keyasint,omitempty"`
|
||||||
NetworkInterfaces map[string][4]uint64 `json:"ni,omitempty" cbor:"31,keyasint,omitempty"` // [upload bytes, download bytes, total upload, total download]
|
NetworkInterfaces map[string][4]uint64 `json:"ni,omitempty" cbor:"31,keyasint,omitempty"` // [upload bytes, download bytes, total upload, total download]
|
||||||
|
DiskIO [2]uint64 `json:"dio,omitzero" cbor:"32,keyasint,omitzero"` // [read bytes, write bytes]
|
||||||
|
MaxDiskIO [2]uint64 `json:"diom,omitzero" cbor:"-"` // [max read bytes, max write bytes]
|
||||||
|
CpuBreakdown []float64 `json:"cpub,omitempty" cbor:"33,keyasint,omitempty"` // [user, system, iowait, steal, idle]
|
||||||
|
CpuCoresUsage Uint8Slice `json:"cpus,omitempty" cbor:"34,keyasint,omitempty"` // per-core busy usage [CPU0..]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Uint8Slice wraps []uint8 to customize JSON encoding while keeping CBOR efficient.
|
||||||
|
// JSON: encodes as array of numbers (avoids base64 string).
|
||||||
|
// CBOR: falls back to default handling for []uint8 (byte string), keeping payload small.
|
||||||
|
type Uint8Slice []uint8
|
||||||
|
|
||||||
|
func (s Uint8Slice) MarshalJSON() ([]byte, error) {
|
||||||
|
if s == nil {
|
||||||
|
return []byte("null"), nil
|
||||||
|
}
|
||||||
|
// Convert to wider ints to force array-of-numbers encoding.
|
||||||
|
arr := make([]uint16, len(s))
|
||||||
|
for i, v := range s {
|
||||||
|
arr[i] = uint16(v)
|
||||||
|
}
|
||||||
|
return json.Marshal(arr)
|
||||||
}
|
}
|
||||||
|
|
||||||
type GPUData struct {
|
type GPUData struct {
|
||||||
@@ -60,6 +83,7 @@ type FsStats struct {
|
|||||||
Time time.Time `json:"-"`
|
Time time.Time `json:"-"`
|
||||||
Root bool `json:"-"`
|
Root bool `json:"-"`
|
||||||
Mountpoint string `json:"-"`
|
Mountpoint string `json:"-"`
|
||||||
|
Name string `json:"-"`
|
||||||
DiskTotal float64 `json:"d" cbor:"0,keyasint"`
|
DiskTotal float64 `json:"d" cbor:"0,keyasint"`
|
||||||
DiskUsed float64 `json:"du" cbor:"1,keyasint"`
|
DiskUsed float64 `json:"du" cbor:"1,keyasint"`
|
||||||
TotalRead uint64 `json:"-"`
|
TotalRead uint64 `json:"-"`
|
||||||
@@ -68,6 +92,11 @@ type FsStats struct {
|
|||||||
DiskWritePs float64 `json:"w" cbor:"3,keyasint"`
|
DiskWritePs float64 `json:"w" cbor:"3,keyasint"`
|
||||||
MaxDiskReadPS float64 `json:"rm,omitempty" cbor:"4,keyasint,omitempty"`
|
MaxDiskReadPS float64 `json:"rm,omitempty" cbor:"4,keyasint,omitempty"`
|
||||||
MaxDiskWritePS float64 `json:"wm,omitempty" cbor:"5,keyasint,omitempty"`
|
MaxDiskWritePS float64 `json:"wm,omitempty" cbor:"5,keyasint,omitempty"`
|
||||||
|
// TODO: remove DiskReadPs and DiskWritePs in future release in favor of DiskReadBytes and DiskWriteBytes
|
||||||
|
DiskReadBytes uint64 `json:"rb" cbor:"6,keyasint,omitempty"`
|
||||||
|
DiskWriteBytes uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
|
||||||
|
MaxDiskReadBytes uint64 `json:"rbm,omitempty" cbor:"-"`
|
||||||
|
MaxDiskWriteBytes uint64 `json:"wbm,omitempty" cbor:"-"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type NetIoStats struct {
|
type NetIoStats struct {
|
||||||
@@ -115,13 +144,16 @@ type Info struct {
|
|||||||
LoadAvg15 float64 `json:"l15,omitempty" cbor:"17,keyasint,omitempty"`
|
LoadAvg15 float64 `json:"l15,omitempty" cbor:"17,keyasint,omitempty"`
|
||||||
BandwidthBytes uint64 `json:"bb" cbor:"18,keyasint"`
|
BandwidthBytes uint64 `json:"bb" cbor:"18,keyasint"`
|
||||||
// TODO: remove load fields in future release in favor of load avg array
|
// TODO: remove load fields in future release in favor of load avg array
|
||||||
LoadAvg [3]float64 `json:"la,omitempty" cbor:"19,keyasint"`
|
LoadAvg [3]float64 `json:"la,omitempty" cbor:"19,keyasint"`
|
||||||
ConnectionType ConnectionType `json:"ct,omitempty" cbor:"20,keyasint,omitempty,omitzero"`
|
ConnectionType ConnectionType `json:"ct,omitempty" cbor:"20,keyasint,omitempty,omitzero"`
|
||||||
|
ExtraFsPct map[string]float64 `json:"efs,omitempty" cbor:"21,keyasint,omitempty"`
|
||||||
|
Services []uint16 `json:"sv,omitempty" cbor:"22,keyasint,omitempty"` // [totalServices, numFailedServices]
|
||||||
}
|
}
|
||||||
|
|
||||||
// Final data structure to return to the hub
|
// Final data structure to return to the hub
|
||||||
type CombinedData struct {
|
type CombinedData struct {
|
||||||
Stats Stats `json:"stats" cbor:"0,keyasint"`
|
Stats Stats `json:"stats" cbor:"0,keyasint"`
|
||||||
Info Info `json:"info" cbor:"1,keyasint"`
|
Info Info `json:"info" cbor:"1,keyasint"`
|
||||||
Containers []*container.Stats `json:"container" cbor:"2,keyasint"`
|
Containers []*container.Stats `json:"container" cbor:"2,keyasint"`
|
||||||
|
SystemdServices []*systemd.Service `json:"systemd,omitempty" cbor:"3,keyasint,omitempty"`
|
||||||
}
|
}
|
||||||
|
|||||||
127
internal/entities/systemd/systemd.go
Normal file
127
internal/entities/systemd/systemd.go
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
package systemd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"runtime"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ServiceState represents the status of a systemd service
|
||||||
|
type ServiceState uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
StatusActive ServiceState = iota
|
||||||
|
StatusInactive
|
||||||
|
StatusFailed
|
||||||
|
StatusActivating
|
||||||
|
StatusDeactivating
|
||||||
|
StatusReloading
|
||||||
|
)
|
||||||
|
|
||||||
|
// ServiceSubState represents the sub status of a systemd service
|
||||||
|
type ServiceSubState uint8
|
||||||
|
|
||||||
|
const (
|
||||||
|
SubStateDead ServiceSubState = iota
|
||||||
|
SubStateRunning
|
||||||
|
SubStateExited
|
||||||
|
SubStateFailed
|
||||||
|
SubStateUnknown
|
||||||
|
)
|
||||||
|
|
||||||
|
// ParseServiceStatus converts a string status to a ServiceStatus enum value
|
||||||
|
func ParseServiceStatus(status string) ServiceState {
|
||||||
|
switch status {
|
||||||
|
case "active":
|
||||||
|
return StatusActive
|
||||||
|
case "inactive":
|
||||||
|
return StatusInactive
|
||||||
|
case "failed":
|
||||||
|
return StatusFailed
|
||||||
|
case "activating":
|
||||||
|
return StatusActivating
|
||||||
|
case "deactivating":
|
||||||
|
return StatusDeactivating
|
||||||
|
case "reloading":
|
||||||
|
return StatusReloading
|
||||||
|
default:
|
||||||
|
return StatusInactive
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseServiceSubState converts a string sub status to a ServiceSubState enum value
|
||||||
|
func ParseServiceSubState(subState string) ServiceSubState {
|
||||||
|
switch subState {
|
||||||
|
case "dead":
|
||||||
|
return SubStateDead
|
||||||
|
case "running":
|
||||||
|
return SubStateRunning
|
||||||
|
case "exited":
|
||||||
|
return SubStateExited
|
||||||
|
case "failed":
|
||||||
|
return SubStateFailed
|
||||||
|
default:
|
||||||
|
return SubStateUnknown
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Service represents a single systemd service with its stats.
|
||||||
|
type Service struct {
|
||||||
|
Name string `json:"n" cbor:"0,keyasint"`
|
||||||
|
State ServiceState `json:"s" cbor:"1,keyasint"`
|
||||||
|
Cpu float64 `json:"c" cbor:"2,keyasint"`
|
||||||
|
Mem uint64 `json:"m" cbor:"3,keyasint"`
|
||||||
|
MemPeak uint64 `json:"mp" cbor:"4,keyasint"`
|
||||||
|
Sub ServiceSubState `json:"ss" cbor:"5,keyasint"`
|
||||||
|
CpuPeak float64 `json:"cp" cbor:"6,keyasint"`
|
||||||
|
PrevCpuUsage uint64 `json:"-"`
|
||||||
|
PrevReadTime time.Time `json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateCPUPercent calculates the CPU usage percentage for the service.
|
||||||
|
func (s *Service) UpdateCPUPercent(cpuUsage uint64) {
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
if s.PrevReadTime.IsZero() || cpuUsage < s.PrevCpuUsage {
|
||||||
|
s.Cpu = 0
|
||||||
|
s.PrevCpuUsage = cpuUsage
|
||||||
|
s.PrevReadTime = now
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
duration := now.Sub(s.PrevReadTime).Nanoseconds()
|
||||||
|
if duration <= 0 {
|
||||||
|
s.PrevCpuUsage = cpuUsage
|
||||||
|
s.PrevReadTime = now
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
coreCount := int64(runtime.NumCPU())
|
||||||
|
duration *= coreCount
|
||||||
|
|
||||||
|
usageDelta := cpuUsage - s.PrevCpuUsage
|
||||||
|
cpuPercent := float64(usageDelta) / float64(duration)
|
||||||
|
s.Cpu = twoDecimals(cpuPercent * 100)
|
||||||
|
|
||||||
|
if s.Cpu > s.CpuPeak {
|
||||||
|
s.CpuPeak = s.Cpu
|
||||||
|
}
|
||||||
|
|
||||||
|
s.PrevCpuUsage = cpuUsage
|
||||||
|
s.PrevReadTime = now
|
||||||
|
}
|
||||||
|
|
||||||
|
func twoDecimals(value float64) float64 {
|
||||||
|
return math.Round(value*100) / 100
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceDependency represents a unit that the service depends on.
|
||||||
|
type ServiceDependency struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Description string `json:"description,omitempty"`
|
||||||
|
ActiveState string `json:"activeState,omitempty"`
|
||||||
|
SubState string `json:"subState,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceDetails contains extended information about a systemd service.
|
||||||
|
type ServiceDetails map[string]any
|
||||||
113
internal/entities/systemd/systemd_test.go
Normal file
113
internal/entities/systemd/systemd_test.go
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package systemd_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestParseServiceStatus(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
input string
|
||||||
|
expected systemd.ServiceState
|
||||||
|
}{
|
||||||
|
{"active", systemd.StatusActive},
|
||||||
|
{"inactive", systemd.StatusInactive},
|
||||||
|
{"failed", systemd.StatusFailed},
|
||||||
|
{"activating", systemd.StatusActivating},
|
||||||
|
{"deactivating", systemd.StatusDeactivating},
|
||||||
|
{"reloading", systemd.StatusReloading},
|
||||||
|
{"unknown", systemd.StatusInactive}, // default case
|
||||||
|
{"", systemd.StatusInactive}, // default case
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.input, func(t *testing.T) {
|
||||||
|
result := systemd.ParseServiceStatus(test.input)
|
||||||
|
assert.Equal(t, test.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseServiceSubState(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
input string
|
||||||
|
expected systemd.ServiceSubState
|
||||||
|
}{
|
||||||
|
{"dead", systemd.SubStateDead},
|
||||||
|
{"running", systemd.SubStateRunning},
|
||||||
|
{"exited", systemd.SubStateExited},
|
||||||
|
{"failed", systemd.SubStateFailed},
|
||||||
|
{"unknown", systemd.SubStateUnknown},
|
||||||
|
{"other", systemd.SubStateUnknown}, // default case
|
||||||
|
{"", systemd.SubStateUnknown}, // default case
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range tests {
|
||||||
|
t.Run(test.input, func(t *testing.T) {
|
||||||
|
result := systemd.ParseServiceSubState(test.input)
|
||||||
|
assert.Equal(t, test.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestServiceUpdateCPUPercent(t *testing.T) {
|
||||||
|
t.Run("initial call sets CPU to 0", func(t *testing.T) {
|
||||||
|
service := &systemd.Service{}
|
||||||
|
service.UpdateCPUPercent(1000)
|
||||||
|
assert.Equal(t, 0.0, service.Cpu)
|
||||||
|
assert.Equal(t, uint64(1000), service.PrevCpuUsage)
|
||||||
|
assert.False(t, service.PrevReadTime.IsZero())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("subsequent call calculates CPU percentage", func(t *testing.T) {
|
||||||
|
service := &systemd.Service{}
|
||||||
|
service.PrevCpuUsage = 1000
|
||||||
|
service.PrevReadTime = time.Now().Add(-time.Second)
|
||||||
|
|
||||||
|
service.UpdateCPUPercent(8000000000) // 8 seconds of CPU time
|
||||||
|
|
||||||
|
// CPU usage should be positive and reasonable
|
||||||
|
assert.Greater(t, service.Cpu, 0.0, "CPU usage should be positive")
|
||||||
|
assert.LessOrEqual(t, service.Cpu, 100.0, "CPU usage should not exceed 100%")
|
||||||
|
assert.Equal(t, uint64(8000000000), service.PrevCpuUsage)
|
||||||
|
assert.Greater(t, service.CpuPeak, 0.0, "CPU peak should be set")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("CPU peak updates only when higher", func(t *testing.T) {
|
||||||
|
service := &systemd.Service{}
|
||||||
|
service.PrevCpuUsage = 1000
|
||||||
|
service.PrevReadTime = time.Now().Add(-time.Second)
|
||||||
|
service.UpdateCPUPercent(8000000000) // Set initial peak to ~50%
|
||||||
|
initialPeak := service.CpuPeak
|
||||||
|
|
||||||
|
// Now try with much lower CPU usage - should not update peak
|
||||||
|
service.PrevReadTime = time.Now().Add(-time.Second)
|
||||||
|
service.UpdateCPUPercent(1000000) // Much lower usage
|
||||||
|
assert.Equal(t, initialPeak, service.CpuPeak, "Peak should not update for lower CPU usage")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("handles zero duration", func(t *testing.T) {
|
||||||
|
service := &systemd.Service{}
|
||||||
|
service.PrevCpuUsage = 1000
|
||||||
|
now := time.Now()
|
||||||
|
service.PrevReadTime = now
|
||||||
|
// Mock time.Now() to return the same time to ensure zero duration
|
||||||
|
// Since we can't mock time in Go easily, we'll check the logic manually
|
||||||
|
// The zero duration case happens when duration <= 0
|
||||||
|
assert.Equal(t, 0.0, service.Cpu, "CPU should start at 0")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("handles CPU usage wraparound", func(t *testing.T) {
|
||||||
|
service := &systemd.Service{}
|
||||||
|
// Simulate wraparound where new usage is less than previous
|
||||||
|
service.PrevCpuUsage = 1000
|
||||||
|
service.PrevReadTime = time.Now().Add(-time.Second)
|
||||||
|
service.UpdateCPUPercent(500) // Less than previous, should reset
|
||||||
|
assert.Equal(t, 0.0, service.Cpu)
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
package hub
|
package hub
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
@@ -93,7 +94,7 @@ func (acr *agentConnectRequest) agentConnect() (err error) {
|
|||||||
// verifyWsConn verifies the WebSocket connection using the agent's fingerprint and
|
// verifyWsConn verifies the WebSocket connection using the agent's fingerprint and
|
||||||
// SSH key signature, then adds the system to the system manager.
|
// SSH key signature, then adds the system to the system manager.
|
||||||
func (acr *agentConnectRequest) verifyWsConn(conn *gws.Conn, fpRecords []ws.FingerprintRecord) (err error) {
|
func (acr *agentConnectRequest) verifyWsConn(conn *gws.Conn, fpRecords []ws.FingerprintRecord) (err error) {
|
||||||
wsConn := ws.NewWsConnection(conn)
|
wsConn := ws.NewWsConnection(conn, acr.agentSemVer)
|
||||||
|
|
||||||
// must set wsConn in connection store before the read loop
|
// must set wsConn in connection store before the read loop
|
||||||
conn.Session().Store("wsConn", wsConn)
|
conn.Session().Store("wsConn", wsConn)
|
||||||
@@ -112,7 +113,7 @@ func (acr *agentConnectRequest) verifyWsConn(conn *gws.Conn, fpRecords []ws.Fing
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
agentFingerprint, err := wsConn.GetFingerprint(acr.token, signer, acr.isUniversalToken)
|
agentFingerprint, err := wsConn.GetFingerprint(context.Background(), acr.token, signer, acr.isUniversalToken)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -267,9 +268,12 @@ func (acr *agentConnectRequest) createSystem(agentFingerprint common.Fingerprint
|
|||||||
if agentFingerprint.Port == "" {
|
if agentFingerprint.Port == "" {
|
||||||
agentFingerprint.Port = "45876"
|
agentFingerprint.Port = "45876"
|
||||||
}
|
}
|
||||||
|
if agentFingerprint.Name == "" {
|
||||||
|
agentFingerprint.Name = agentFingerprint.Hostname
|
||||||
|
}
|
||||||
// create new record
|
// create new record
|
||||||
systemRecord := core.NewRecord(systemsCollection)
|
systemRecord := core.NewRecord(systemsCollection)
|
||||||
systemRecord.Set("name", agentFingerprint.Hostname)
|
systemRecord.Set("name", agentFingerprint.Name)
|
||||||
systemRecord.Set("host", remoteAddr)
|
systemRecord.Set("host", remoteAddr)
|
||||||
systemRecord.Set("port", agentFingerprint.Port)
|
systemRecord.Set("port", agentFingerprint.Port)
|
||||||
systemRecord.Set("users", []string{acr.userId})
|
systemRecord.Set("users", []string{acr.userId})
|
||||||
|
|||||||
@@ -120,18 +120,27 @@ func (h *Hub) initialize(e *core.ServeEvent) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// set auth settings
|
// set auth settings
|
||||||
usersCollection, err := e.App.FindCollectionByNameOrId("users")
|
if err := setCollectionAuthSettings(e.App); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// setCollectionAuthSettings sets up default authentication settings for the app
|
||||||
|
func setCollectionAuthSettings(app core.App) error {
|
||||||
|
usersCollection, err := app.FindCollectionByNameOrId("users")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
superusersCollection, err := app.FindCollectionByNameOrId(core.CollectionNameSuperusers)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// disable email auth if DISABLE_PASSWORD_AUTH env var is set
|
// disable email auth if DISABLE_PASSWORD_AUTH env var is set
|
||||||
disablePasswordAuth, _ := GetEnv("DISABLE_PASSWORD_AUTH")
|
disablePasswordAuth, _ := GetEnv("DISABLE_PASSWORD_AUTH")
|
||||||
usersCollection.PasswordAuth.Enabled = disablePasswordAuth != "true"
|
usersCollection.PasswordAuth.Enabled = disablePasswordAuth != "true"
|
||||||
usersCollection.PasswordAuth.IdentityFields = []string{"email"}
|
usersCollection.PasswordAuth.IdentityFields = []string{"email"}
|
||||||
// disable oauth if no providers are configured (todo: remove this in post 0.9.0 release)
|
|
||||||
if usersCollection.OAuth2.Enabled {
|
|
||||||
usersCollection.OAuth2.Enabled = len(usersCollection.OAuth2.Providers) > 0
|
|
||||||
}
|
|
||||||
// allow oauth user creation if USER_CREATION is set
|
// allow oauth user creation if USER_CREATION is set
|
||||||
if userCreation, _ := GetEnv("USER_CREATION"); userCreation == "true" {
|
if userCreation, _ := GetEnv("USER_CREATION"); userCreation == "true" {
|
||||||
cr := "@request.context = 'oauth2'"
|
cr := "@request.context = 'oauth2'"
|
||||||
@@ -139,29 +148,52 @@ func (h *Hub) initialize(e *core.ServeEvent) error {
|
|||||||
} else {
|
} else {
|
||||||
usersCollection.CreateRule = nil
|
usersCollection.CreateRule = nil
|
||||||
}
|
}
|
||||||
if err := e.App.Save(usersCollection); err != nil {
|
|
||||||
|
// enable mfaOtp mfa if MFA_OTP env var is set
|
||||||
|
mfaOtp, _ := GetEnv("MFA_OTP")
|
||||||
|
usersCollection.OTP.Length = 6
|
||||||
|
superusersCollection.OTP.Length = 6
|
||||||
|
usersCollection.OTP.Enabled = mfaOtp == "true"
|
||||||
|
usersCollection.MFA.Enabled = mfaOtp == "true"
|
||||||
|
superusersCollection.OTP.Enabled = mfaOtp == "true" || mfaOtp == "superusers"
|
||||||
|
superusersCollection.MFA.Enabled = mfaOtp == "true" || mfaOtp == "superusers"
|
||||||
|
if err := app.Save(superusersCollection); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err := app.Save(usersCollection); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
shareAllSystems, _ := GetEnv("SHARE_ALL_SYSTEMS")
|
||||||
|
|
||||||
// allow all users to access systems if SHARE_ALL_SYSTEMS is set
|
// allow all users to access systems if SHARE_ALL_SYSTEMS is set
|
||||||
systemsCollection, err := e.App.FindCachedCollectionByNameOrId("systems")
|
systemsCollection, err := app.FindCollectionByNameOrId("systems")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
shareAllSystems, _ := GetEnv("SHARE_ALL_SYSTEMS")
|
var systemsReadRule string
|
||||||
systemsReadRule := "@request.auth.id != \"\""
|
if shareAllSystems == "true" {
|
||||||
if shareAllSystems != "true" {
|
systemsReadRule = "@request.auth.id != \"\""
|
||||||
// default is to only show systems that the user id is assigned to
|
} else {
|
||||||
systemsReadRule += " && users.id ?= @request.auth.id"
|
systemsReadRule = "@request.auth.id != \"\" && users.id ?= @request.auth.id"
|
||||||
}
|
}
|
||||||
updateDeleteRule := systemsReadRule + " && @request.auth.role != \"readonly\""
|
updateDeleteRule := systemsReadRule + " && @request.auth.role != \"readonly\""
|
||||||
systemsCollection.ListRule = &systemsReadRule
|
systemsCollection.ListRule = &systemsReadRule
|
||||||
systemsCollection.ViewRule = &systemsReadRule
|
systemsCollection.ViewRule = &systemsReadRule
|
||||||
systemsCollection.UpdateRule = &updateDeleteRule
|
systemsCollection.UpdateRule = &updateDeleteRule
|
||||||
systemsCollection.DeleteRule = &updateDeleteRule
|
systemsCollection.DeleteRule = &updateDeleteRule
|
||||||
if err := e.App.Save(systemsCollection); err != nil {
|
if err := app.Save(systemsCollection); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
// allow all users to access all containers if SHARE_ALL_SYSTEMS is set
|
||||||
|
containersCollection, err := app.FindCollectionByNameOrId("containers")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
containersListRule := strings.Replace(systemsReadRule, "users.id", "system.users.id", 1)
|
||||||
|
containersCollection.ListRule = &containersListRule
|
||||||
|
return app.Save(containersCollection)
|
||||||
}
|
}
|
||||||
|
|
||||||
// registerCronJobs sets up scheduled tasks
|
// registerCronJobs sets up scheduled tasks
|
||||||
@@ -236,7 +268,17 @@ func (h *Hub) registerApiRoutes(se *core.ServeEvent) error {
|
|||||||
// update / delete user alerts
|
// update / delete user alerts
|
||||||
apiAuth.POST("/user-alerts", alerts.UpsertUserAlerts)
|
apiAuth.POST("/user-alerts", alerts.UpsertUserAlerts)
|
||||||
apiAuth.DELETE("/user-alerts", alerts.DeleteUserAlerts)
|
apiAuth.DELETE("/user-alerts", alerts.DeleteUserAlerts)
|
||||||
|
// get SMART data
|
||||||
|
apiAuth.GET("/smart", h.getSmartData)
|
||||||
|
// get systemd service details
|
||||||
|
apiAuth.GET("/systemd/info", h.getSystemdInfo)
|
||||||
|
// /containers routes
|
||||||
|
if enabled, _ := GetEnv("CONTAINER_DETAILS"); enabled != "false" {
|
||||||
|
// get container logs
|
||||||
|
apiAuth.GET("/containers/logs", h.getContainerLogs)
|
||||||
|
// get container info
|
||||||
|
apiAuth.GET("/containers/info", h.getContainerInfo)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -267,6 +309,80 @@ func (h *Hub) getUniversalToken(e *core.RequestEvent) error {
|
|||||||
return e.JSON(http.StatusOK, response)
|
return e.JSON(http.StatusOK, response)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// containerRequestHandler handles both container logs and info requests
|
||||||
|
func (h *Hub) containerRequestHandler(e *core.RequestEvent, fetchFunc func(*systems.System, string) (string, error), responseKey string) error {
|
||||||
|
systemID := e.Request.URL.Query().Get("system")
|
||||||
|
containerID := e.Request.URL.Query().Get("container")
|
||||||
|
|
||||||
|
if systemID == "" || containerID == "" {
|
||||||
|
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and container parameters are required"})
|
||||||
|
}
|
||||||
|
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||||
|
}
|
||||||
|
|
||||||
|
data, err := fetchFunc(system, containerID)
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
||||||
|
}
|
||||||
|
|
||||||
|
return e.JSON(http.StatusOK, map[string]string{responseKey: data})
|
||||||
|
}
|
||||||
|
|
||||||
|
// getContainerLogs handles GET /api/beszel/containers/logs requests
|
||||||
|
func (h *Hub) getContainerLogs(e *core.RequestEvent) error {
|
||||||
|
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
||||||
|
return system.FetchContainerLogsFromAgent(containerID)
|
||||||
|
}, "logs")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Hub) getContainerInfo(e *core.RequestEvent) error {
|
||||||
|
return h.containerRequestHandler(e, func(system *systems.System, containerID string) (string, error) {
|
||||||
|
return system.FetchContainerInfoFromAgent(containerID)
|
||||||
|
}, "info")
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSystemdInfo handles GET /api/beszel/systemd/info requests
|
||||||
|
func (h *Hub) getSystemdInfo(e *core.RequestEvent) error {
|
||||||
|
query := e.Request.URL.Query()
|
||||||
|
systemID := query.Get("system")
|
||||||
|
serviceName := query.Get("service")
|
||||||
|
|
||||||
|
if systemID == "" || serviceName == "" {
|
||||||
|
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system and service parameters are required"})
|
||||||
|
}
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||||
|
}
|
||||||
|
details, err := system.FetchSystemdInfoFromAgent(serviceName)
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
||||||
|
}
|
||||||
|
e.Response.Header().Set("Cache-Control", "public, max-age=60")
|
||||||
|
return e.JSON(http.StatusOK, map[string]any{"details": details})
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSmartData handles GET /api/beszel/smart requests
|
||||||
|
func (h *Hub) getSmartData(e *core.RequestEvent) error {
|
||||||
|
systemID := e.Request.URL.Query().Get("system")
|
||||||
|
if systemID == "" {
|
||||||
|
return e.JSON(http.StatusBadRequest, map[string]string{"error": "system parameter is required"})
|
||||||
|
}
|
||||||
|
system, err := h.sm.GetSystem(systemID)
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(http.StatusNotFound, map[string]string{"error": "system not found"})
|
||||||
|
}
|
||||||
|
data, err := system.FetchSmartDataFromAgent()
|
||||||
|
if err != nil {
|
||||||
|
return e.JSON(http.StatusNotFound, map[string]string{"error": err.Error()})
|
||||||
|
}
|
||||||
|
e.Response.Header().Set("Cache-Control", "public, max-age=60")
|
||||||
|
return e.JSON(http.StatusOK, data)
|
||||||
|
}
|
||||||
|
|
||||||
// generates key pair if it doesn't exist and returns signer
|
// generates key pair if it doesn't exist and returns signer
|
||||||
func (h *Hub) GetSSHKey(dataDir string) (ssh.Signer, error) {
|
func (h *Hub) GetSSHKey(dataDir string) (ssh.Signer, error) {
|
||||||
if h.signer != nil {
|
if h.signer != nil {
|
||||||
|
|||||||
@@ -449,6 +449,47 @@ func TestApiRoutesAuthentication(t *testing.T) {
|
|||||||
})
|
})
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - no auth should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=test-system&container=test-container",
|
||||||
|
ExpectedStatus: 401,
|
||||||
|
ExpectedContent: []string{"requires valid"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but missing system param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?container=test-container",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"system and container parameters are required"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but missing container param should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=test-system",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 400,
|
||||||
|
ExpectedContent: []string{"system and container parameters are required"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "GET /containers/logs - with auth but invalid system should fail",
|
||||||
|
Method: http.MethodGet,
|
||||||
|
URL: "/api/beszel/containers/logs?system=invalid-system&container=test-container",
|
||||||
|
Headers: map[string]string{
|
||||||
|
"Authorization": userToken,
|
||||||
|
},
|
||||||
|
ExpectedStatus: 404,
|
||||||
|
ExpectedContent: []string{"system not found"},
|
||||||
|
TestAppFactory: testAppFactory,
|
||||||
|
},
|
||||||
|
|
||||||
// Auth Optional Routes - Should work without authentication
|
// Auth Optional Routes - Should work without authentication
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -5,19 +5,24 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"hash/fnv"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net"
|
"net"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
"github.com/henrygd/beszel/internal/hub/ws"
|
"github.com/henrygd/beszel/internal/hub/ws"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
|
|
||||||
"github.com/blang/semver"
|
"github.com/blang/semver"
|
||||||
"github.com/fxamacker/cbor/v2"
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
"golang.org/x/crypto/ssh"
|
"golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
@@ -107,7 +112,7 @@ func (sys *System) update() error {
|
|||||||
sys.handlePaused()
|
sys.handlePaused()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
data, err := sys.fetchDataFromAgent()
|
data, err := sys.fetchDataFromAgent(common.DataRequestOptions{CacheTimeMs: uint16(interval)})
|
||||||
if err == nil {
|
if err == nil {
|
||||||
_, err = sys.createRecords(data)
|
_, err = sys.createRecords(data)
|
||||||
}
|
}
|
||||||
@@ -134,41 +139,128 @@ func (sys *System) createRecords(data *system.CombinedData) (*core.Record, error
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
hub := sys.manager.hub
|
hub := sys.manager.hub
|
||||||
// add system_stats and container_stats records
|
err = hub.RunInTransaction(func(txApp core.App) error {
|
||||||
systemStatsCollection, err := hub.FindCachedCollectionByNameOrId("system_stats")
|
// add system_stats and container_stats records
|
||||||
if err != nil {
|
systemStatsCollection, err := txApp.FindCachedCollectionByNameOrId("system_stats")
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
systemStatsRecord := core.NewRecord(systemStatsCollection)
|
|
||||||
systemStatsRecord.Set("system", systemRecord.Id)
|
|
||||||
systemStatsRecord.Set("stats", data.Stats)
|
|
||||||
systemStatsRecord.Set("type", "1m")
|
|
||||||
if err := hub.SaveNoValidate(systemStatsRecord); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// add new container_stats record
|
|
||||||
if len(data.Containers) > 0 {
|
|
||||||
containerStatsCollection, err := hub.FindCachedCollectionByNameOrId("container_stats")
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
containerStatsRecord := core.NewRecord(containerStatsCollection)
|
|
||||||
containerStatsRecord.Set("system", systemRecord.Id)
|
|
||||||
containerStatsRecord.Set("stats", data.Containers)
|
|
||||||
containerStatsRecord.Set("type", "1m")
|
|
||||||
if err := hub.SaveNoValidate(containerStatsRecord); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// update system record (do this last because it triggers alerts and we need above records to be inserted first)
|
|
||||||
systemRecord.Set("status", up)
|
|
||||||
|
|
||||||
systemRecord.Set("info", data.Info)
|
systemStatsRecord := core.NewRecord(systemStatsCollection)
|
||||||
if err := hub.SaveNoValidate(systemRecord); err != nil {
|
systemStatsRecord.Set("system", systemRecord.Id)
|
||||||
return nil, err
|
systemStatsRecord.Set("stats", data.Stats)
|
||||||
|
systemStatsRecord.Set("type", "1m")
|
||||||
|
if err := txApp.SaveNoValidate(systemStatsRecord); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(data.Containers) > 0 {
|
||||||
|
// add / update containers records
|
||||||
|
if data.Containers[0].Id != "" {
|
||||||
|
if err := createContainerRecords(txApp, data.Containers, sys.Id); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// add new container_stats record
|
||||||
|
containerStatsCollection, err := txApp.FindCachedCollectionByNameOrId("container_stats")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
containerStatsRecord := core.NewRecord(containerStatsCollection)
|
||||||
|
containerStatsRecord.Set("system", systemRecord.Id)
|
||||||
|
containerStatsRecord.Set("stats", data.Containers)
|
||||||
|
containerStatsRecord.Set("type", "1m")
|
||||||
|
if err := txApp.SaveNoValidate(containerStatsRecord); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// add new systemd_stats record
|
||||||
|
if len(data.SystemdServices) > 0 {
|
||||||
|
if err := createSystemdStatsRecords(txApp, data.SystemdServices, sys.Id); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// update system record (do this last because it triggers alerts and we need above records to be inserted first)
|
||||||
|
systemRecord.Set("status", up)
|
||||||
|
|
||||||
|
systemRecord.Set("info", data.Info)
|
||||||
|
if err := txApp.SaveNoValidate(systemRecord); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
return systemRecord, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func createSystemdStatsRecords(app core.App, data []*systemd.Service, systemId string) error {
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
return systemRecord, nil
|
// shared params for all records
|
||||||
|
params := dbx.Params{
|
||||||
|
"system": systemId,
|
||||||
|
"updated": time.Now().UTC().UnixMilli(),
|
||||||
|
}
|
||||||
|
|
||||||
|
valueStrings := make([]string, 0, len(data))
|
||||||
|
for i, service := range data {
|
||||||
|
suffix := fmt.Sprintf("%d", i)
|
||||||
|
valueStrings = append(valueStrings, fmt.Sprintf("({:id%[1]s}, {:system}, {:name%[1]s}, {:state%[1]s}, {:sub%[1]s}, {:cpu%[1]s}, {:cpuPeak%[1]s}, {:memory%[1]s}, {:memPeak%[1]s}, {:updated})", suffix))
|
||||||
|
params["id"+suffix] = getSystemdServiceId(systemId, service.Name)
|
||||||
|
params["name"+suffix] = service.Name
|
||||||
|
params["state"+suffix] = service.State
|
||||||
|
params["sub"+suffix] = service.Sub
|
||||||
|
params["cpu"+suffix] = service.Cpu
|
||||||
|
params["cpuPeak"+suffix] = service.CpuPeak
|
||||||
|
params["memory"+suffix] = service.Mem
|
||||||
|
params["memPeak"+suffix] = service.MemPeak
|
||||||
|
}
|
||||||
|
queryString := fmt.Sprintf(
|
||||||
|
"INSERT INTO systemd_services (id, system, name, state, sub, cpu, cpuPeak, memory, memPeak, updated) VALUES %s ON CONFLICT(id) DO UPDATE SET system = excluded.system, name = excluded.name, state = excluded.state, sub = excluded.sub, cpu = excluded.cpu, cpuPeak = excluded.cpuPeak, memory = excluded.memory, memPeak = excluded.memPeak, updated = excluded.updated",
|
||||||
|
strings.Join(valueStrings, ","),
|
||||||
|
)
|
||||||
|
_, err := app.DB().NewQuery(queryString).Bind(params).Execute()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// getSystemdServiceId generates a deterministic unique id for a systemd service
|
||||||
|
func getSystemdServiceId(systemId string, serviceName string) string {
|
||||||
|
hash := fnv.New32a()
|
||||||
|
hash.Write([]byte(systemId + serviceName))
|
||||||
|
return fmt.Sprintf("%x", hash.Sum32())
|
||||||
|
}
|
||||||
|
|
||||||
|
// createContainerRecords creates container records
|
||||||
|
func createContainerRecords(app core.App, data []*container.Stats, systemId string) error {
|
||||||
|
if len(data) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// shared params for all records
|
||||||
|
params := dbx.Params{
|
||||||
|
"system": systemId,
|
||||||
|
"updated": time.Now().UTC().UnixMilli(),
|
||||||
|
}
|
||||||
|
valueStrings := make([]string, 0, len(data))
|
||||||
|
for i, container := range data {
|
||||||
|
suffix := fmt.Sprintf("%d", i)
|
||||||
|
valueStrings = append(valueStrings, fmt.Sprintf("({:id%[1]s}, {:system}, {:name%[1]s}, {:image%[1]s}, {:status%[1]s}, {:health%[1]s}, {:cpu%[1]s}, {:memory%[1]s}, {:net%[1]s}, {:updated})", suffix))
|
||||||
|
params["id"+suffix] = container.Id
|
||||||
|
params["name"+suffix] = container.Name
|
||||||
|
params["image"+suffix] = container.Image
|
||||||
|
params["status"+suffix] = container.Status
|
||||||
|
params["health"+suffix] = container.Health
|
||||||
|
params["cpu"+suffix] = container.Cpu
|
||||||
|
params["memory"+suffix] = container.Mem
|
||||||
|
params["net"+suffix] = container.NetworkSent + container.NetworkRecv
|
||||||
|
}
|
||||||
|
queryString := fmt.Sprintf(
|
||||||
|
"INSERT INTO containers (id, system, name, image, status, health, cpu, memory, net, updated) VALUES %s ON CONFLICT(id) DO UPDATE SET system = excluded.system, name = excluded.name, image = excluded.image, status = excluded.status, health = excluded.health, cpu = excluded.cpu, memory = excluded.memory, net = excluded.net, updated = excluded.updated",
|
||||||
|
strings.Join(valueStrings, ","),
|
||||||
|
)
|
||||||
|
_, err := app.DB().NewQuery(queryString).Bind(params).Execute()
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// getRecord retrieves the system record from the database.
|
// getRecord retrieves the system record from the database.
|
||||||
@@ -209,13 +301,13 @@ func (sys *System) getContext() (context.Context, context.CancelFunc) {
|
|||||||
|
|
||||||
// fetchDataFromAgent attempts to fetch data from the agent,
|
// fetchDataFromAgent attempts to fetch data from the agent,
|
||||||
// prioritizing WebSocket if available.
|
// prioritizing WebSocket if available.
|
||||||
func (sys *System) fetchDataFromAgent() (*system.CombinedData, error) {
|
func (sys *System) fetchDataFromAgent(options common.DataRequestOptions) (*system.CombinedData, error) {
|
||||||
if sys.data == nil {
|
if sys.data == nil {
|
||||||
sys.data = &system.CombinedData{}
|
sys.data = &system.CombinedData{}
|
||||||
}
|
}
|
||||||
|
|
||||||
if sys.WsConn != nil && sys.WsConn.IsConnected() {
|
if sys.WsConn != nil && sys.WsConn.IsConnected() {
|
||||||
wsData, err := sys.fetchDataViaWebSocket()
|
wsData, err := sys.fetchDataViaWebSocket(options)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return wsData, nil
|
return wsData, nil
|
||||||
}
|
}
|
||||||
@@ -223,82 +315,260 @@ func (sys *System) fetchDataFromAgent() (*system.CombinedData, error) {
|
|||||||
sys.closeWebSocketConnection()
|
sys.closeWebSocketConnection()
|
||||||
}
|
}
|
||||||
|
|
||||||
sshData, err := sys.fetchDataViaSSH()
|
sshData, err := sys.fetchDataViaSSH(options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return sshData, nil
|
return sshData, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (sys *System) fetchDataViaWebSocket() (*system.CombinedData, error) {
|
func (sys *System) fetchDataViaWebSocket(options common.DataRequestOptions) (*system.CombinedData, error) {
|
||||||
if sys.WsConn == nil || !sys.WsConn.IsConnected() {
|
if sys.WsConn == nil || !sys.WsConn.IsConnected() {
|
||||||
return nil, errors.New("no websocket connection")
|
return nil, errors.New("no websocket connection")
|
||||||
}
|
}
|
||||||
err := sys.WsConn.RequestSystemData(sys.data)
|
err := sys.WsConn.RequestSystemData(context.Background(), sys.data, options)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return sys.data, nil
|
return sys.data, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// fetchStringFromAgentViaSSH is a generic function to fetch strings via SSH
|
||||||
|
func (sys *System) fetchStringFromAgentViaSSH(action common.WebSocketAction, requestData any, errorMsg string) (string, error) {
|
||||||
|
var result string
|
||||||
|
err := sys.runSSHOperation(4*time.Second, 1, func(session *ssh.Session) (bool, error) {
|
||||||
|
stdout, err := session.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
stdin, stdinErr := session.StdinPipe()
|
||||||
|
if stdinErr != nil {
|
||||||
|
return false, stdinErr
|
||||||
|
}
|
||||||
|
if err := session.Shell(); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
req := common.HubRequest[any]{Action: action, Data: requestData}
|
||||||
|
_ = cbor.NewEncoder(stdin).Encode(req)
|
||||||
|
_ = stdin.Close()
|
||||||
|
var resp common.AgentResponse
|
||||||
|
err = cbor.NewDecoder(stdout).Decode(&resp)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if resp.String == nil {
|
||||||
|
return false, errors.New(errorMsg)
|
||||||
|
}
|
||||||
|
result = *resp.String
|
||||||
|
return false, nil
|
||||||
|
})
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchContainerInfoFromAgent fetches container info from the agent
|
||||||
|
func (sys *System) FetchContainerInfoFromAgent(containerID string) (string, error) {
|
||||||
|
// fetch via websocket
|
||||||
|
if sys.WsConn != nil && sys.WsConn.IsConnected() {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
return sys.WsConn.RequestContainerInfo(ctx, containerID)
|
||||||
|
}
|
||||||
|
// fetch via SSH
|
||||||
|
return sys.fetchStringFromAgentViaSSH(common.GetContainerInfo, common.ContainerInfoRequest{ContainerID: containerID}, "no info in response")
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchContainerLogsFromAgent fetches container logs from the agent
|
||||||
|
func (sys *System) FetchContainerLogsFromAgent(containerID string) (string, error) {
|
||||||
|
// fetch via websocket
|
||||||
|
if sys.WsConn != nil && sys.WsConn.IsConnected() {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
return sys.WsConn.RequestContainerLogs(ctx, containerID)
|
||||||
|
}
|
||||||
|
// fetch via SSH
|
||||||
|
return sys.fetchStringFromAgentViaSSH(common.GetContainerLogs, common.ContainerLogsRequest{ContainerID: containerID}, "no logs in response")
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchSystemdInfoFromAgent fetches detailed systemd service information from the agent
|
||||||
|
func (sys *System) FetchSystemdInfoFromAgent(serviceName string) (systemd.ServiceDetails, error) {
|
||||||
|
// fetch via websocket
|
||||||
|
if sys.WsConn != nil && sys.WsConn.IsConnected() {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
return sys.WsConn.RequestSystemdInfo(ctx, serviceName)
|
||||||
|
}
|
||||||
|
|
||||||
|
var result systemd.ServiceDetails
|
||||||
|
err := sys.runSSHOperation(5*time.Second, 1, func(session *ssh.Session) (bool, error) {
|
||||||
|
stdout, err := session.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
stdin, stdinErr := session.StdinPipe()
|
||||||
|
if stdinErr != nil {
|
||||||
|
return false, stdinErr
|
||||||
|
}
|
||||||
|
if err := session.Shell(); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req := common.HubRequest[any]{Action: common.GetSystemdInfo, Data: common.SystemdInfoRequest{ServiceName: serviceName}}
|
||||||
|
if err := cbor.NewEncoder(stdin).Encode(req); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
_ = stdin.Close()
|
||||||
|
|
||||||
|
var resp common.AgentResponse
|
||||||
|
if err := cbor.NewDecoder(stdout).Decode(&resp); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if resp.ServiceInfo == nil {
|
||||||
|
if resp.Error != "" {
|
||||||
|
return false, errors.New(resp.Error)
|
||||||
|
}
|
||||||
|
return false, errors.New("no systemd info in response")
|
||||||
|
}
|
||||||
|
result = resp.ServiceInfo
|
||||||
|
return false, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// FetchSmartDataFromAgent fetches SMART data from the agent
|
||||||
|
func (sys *System) FetchSmartDataFromAgent() (map[string]any, error) {
|
||||||
|
// fetch via websocket
|
||||||
|
if sys.WsConn != nil && sys.WsConn.IsConnected() {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
return sys.WsConn.RequestSmartData(ctx)
|
||||||
|
}
|
||||||
|
// fetch via SSH
|
||||||
|
var result map[string]any
|
||||||
|
err := sys.runSSHOperation(5*time.Second, 1, func(session *ssh.Session) (bool, error) {
|
||||||
|
stdout, err := session.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
stdin, stdinErr := session.StdinPipe()
|
||||||
|
if stdinErr != nil {
|
||||||
|
return false, stdinErr
|
||||||
|
}
|
||||||
|
if err := session.Shell(); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
req := common.HubRequest[any]{Action: common.GetSmartData}
|
||||||
|
_ = cbor.NewEncoder(stdin).Encode(req)
|
||||||
|
_ = stdin.Close()
|
||||||
|
var resp common.AgentResponse
|
||||||
|
if err := cbor.NewDecoder(stdout).Decode(&resp); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
// Convert to generic map for JSON response
|
||||||
|
result = make(map[string]any, len(resp.SmartData))
|
||||||
|
for k, v := range resp.SmartData {
|
||||||
|
result[k] = v
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
})
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
|
|
||||||
// fetchDataViaSSH handles fetching data using SSH.
|
// fetchDataViaSSH handles fetching data using SSH.
|
||||||
// This function encapsulates the original SSH logic.
|
// This function encapsulates the original SSH logic.
|
||||||
// It updates sys.data directly upon successful fetch.
|
// It updates sys.data directly upon successful fetch.
|
||||||
func (sys *System) fetchDataViaSSH() (*system.CombinedData, error) {
|
func (sys *System) fetchDataViaSSH(options common.DataRequestOptions) (*system.CombinedData, error) {
|
||||||
maxRetries := 1
|
err := sys.runSSHOperation(4*time.Second, 1, func(session *ssh.Session) (bool, error) {
|
||||||
for attempt := 0; attempt <= maxRetries; attempt++ {
|
|
||||||
if sys.client == nil || sys.Status == down {
|
|
||||||
if err := sys.createSSHClient(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
session, err := sys.createSessionWithTimeout(4 * time.Second)
|
|
||||||
if err != nil {
|
|
||||||
if attempt >= maxRetries {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
sys.manager.hub.Logger().Warn("Session closed. Retrying...", "host", sys.Host, "port", sys.Port, "err", err)
|
|
||||||
sys.closeSSHConnection()
|
|
||||||
// Reset format detection on connection failure - agent might have been upgraded
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
defer session.Close()
|
|
||||||
|
|
||||||
stdout, err := session.StdoutPipe()
|
stdout, err := session.StdoutPipe()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
stdin, stdinErr := session.StdinPipe()
|
||||||
if err := session.Shell(); err != nil {
|
if err := session.Shell(); err != nil {
|
||||||
return nil, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
*sys.data = system.CombinedData{}
|
*sys.data = system.CombinedData{}
|
||||||
|
|
||||||
if sys.agentVersion.GTE(beszel.MinVersionCbor) {
|
if sys.agentVersion.GTE(beszel.MinVersionAgentResponse) && stdinErr == nil {
|
||||||
err = cbor.NewDecoder(stdout).Decode(sys.data)
|
req := common.HubRequest[any]{Action: common.GetData, Data: options}
|
||||||
} else {
|
_ = cbor.NewEncoder(stdin).Encode(req)
|
||||||
err = json.NewDecoder(stdout).Decode(sys.data)
|
_ = stdin.Close()
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
var resp common.AgentResponse
|
||||||
sys.closeSSHConnection()
|
if decErr := cbor.NewDecoder(stdout).Decode(&resp); decErr == nil && resp.SystemData != nil {
|
||||||
if attempt < maxRetries {
|
*sys.data = *resp.SystemData
|
||||||
continue
|
if err := session.Wait(); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
}
|
}
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// wait for the session to complete
|
var decodeErr error
|
||||||
|
if sys.agentVersion.GTE(beszel.MinVersionCbor) {
|
||||||
|
decodeErr = cbor.NewDecoder(stdout).Decode(sys.data)
|
||||||
|
} else {
|
||||||
|
decodeErr = json.NewDecoder(stdout).Decode(sys.data)
|
||||||
|
}
|
||||||
|
|
||||||
|
if decodeErr != nil {
|
||||||
|
return true, decodeErr
|
||||||
|
}
|
||||||
|
|
||||||
if err := session.Wait(); err != nil {
|
if err := session.Wait(); err != nil {
|
||||||
return nil, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return sys.data, nil
|
return false, nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// this should never be reached due to the return in the loop
|
return sys.data, nil
|
||||||
return nil, fmt.Errorf("failed to fetch data")
|
}
|
||||||
|
|
||||||
|
// runSSHOperation establishes an SSH session and executes the provided operation.
|
||||||
|
// The operation can request a retry by returning true as the first return value.
|
||||||
|
func (sys *System) runSSHOperation(timeout time.Duration, retries int, operation func(*ssh.Session) (bool, error)) error {
|
||||||
|
for attempt := 0; attempt <= retries; attempt++ {
|
||||||
|
if sys.client == nil || sys.Status == down {
|
||||||
|
if err := sys.createSSHClient(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
session, err := sys.createSessionWithTimeout(timeout)
|
||||||
|
if err != nil {
|
||||||
|
if attempt >= retries {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
sys.manager.hub.Logger().Warn("Session closed. Retrying...", "host", sys.Host, "port", sys.Port, "err", err)
|
||||||
|
sys.closeSSHConnection()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
retry, opErr := func() (bool, error) {
|
||||||
|
defer session.Close()
|
||||||
|
return operation(session)
|
||||||
|
}()
|
||||||
|
|
||||||
|
if opErr == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if retry {
|
||||||
|
sys.closeSSHConnection()
|
||||||
|
if attempt < retries {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return opErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("ssh operation failed")
|
||||||
}
|
}
|
||||||
|
|
||||||
// createSSHClient creates a new SSH client for the system
|
// createSSHClient creates a new SSH client for the system
|
||||||
@@ -379,11 +649,11 @@ func extractAgentVersion(versionString string) (semver.Version, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// getJitter returns a channel that will be triggered after a random delay
|
// getJitter returns a channel that will be triggered after a random delay
|
||||||
// between 40% and 90% of the interval.
|
// between 51% and 95% of the interval.
|
||||||
// This is used to stagger the initial WebSocket connections to prevent clustering.
|
// This is used to stagger the initial WebSocket connections to prevent clustering.
|
||||||
func getJitter() <-chan time.Time {
|
func getJitter() <-chan time.Time {
|
||||||
minPercent := 40
|
minPercent := 51
|
||||||
maxPercent := 90
|
maxPercent := 95
|
||||||
jitterRange := maxPercent - minPercent
|
jitterRange := maxPercent - minPercent
|
||||||
msDelay := (interval * minPercent / 100) + rand.Intn(interval*jitterRange/100)
|
msDelay := (interval * minPercent / 100) + rand.Intn(interval*jitterRange/100)
|
||||||
return time.After(time.Duration(msDelay) * time.Millisecond)
|
return time.After(time.Duration(msDelay) * time.Millisecond)
|
||||||
|
|||||||
@@ -63,6 +63,15 @@ func NewSystemManager(hub hubLike) *SystemManager {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetSystem returns a system by ID from the store
|
||||||
|
func (sm *SystemManager) GetSystem(systemID string) (*System, error) {
|
||||||
|
sys, ok := sm.systems.GetOk(systemID)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("system not found")
|
||||||
|
}
|
||||||
|
return sys, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Initialize sets up the system manager by binding event hooks and starting existing systems.
|
// Initialize sets up the system manager by binding event hooks and starting existing systems.
|
||||||
// It configures SSH client settings and begins monitoring all non-paused systems from the database.
|
// It configures SSH client settings and begins monitoring all non-paused systems from the database.
|
||||||
// Systems are started with staggered delays to prevent overwhelming the hub during startup.
|
// Systems are started with staggered delays to prevent overwhelming the hub during startup.
|
||||||
@@ -106,6 +115,8 @@ func (sm *SystemManager) bindEventHooks() {
|
|||||||
sm.hub.OnRecordAfterUpdateSuccess("systems").BindFunc(sm.onRecordAfterUpdateSuccess)
|
sm.hub.OnRecordAfterUpdateSuccess("systems").BindFunc(sm.onRecordAfterUpdateSuccess)
|
||||||
sm.hub.OnRecordAfterDeleteSuccess("systems").BindFunc(sm.onRecordAfterDeleteSuccess)
|
sm.hub.OnRecordAfterDeleteSuccess("systems").BindFunc(sm.onRecordAfterDeleteSuccess)
|
||||||
sm.hub.OnRecordAfterUpdateSuccess("fingerprints").BindFunc(sm.onTokenRotated)
|
sm.hub.OnRecordAfterUpdateSuccess("fingerprints").BindFunc(sm.onTokenRotated)
|
||||||
|
sm.hub.OnRealtimeSubscribeRequest().BindFunc(sm.onRealtimeSubscribeRequest)
|
||||||
|
sm.hub.OnRealtimeConnectRequest().BindFunc(sm.onRealtimeConnectRequest)
|
||||||
}
|
}
|
||||||
|
|
||||||
// onTokenRotated handles fingerprint token rotation events.
|
// onTokenRotated handles fingerprint token rotation events.
|
||||||
|
|||||||
188
internal/hub/systems/system_realtime.go
Normal file
188
internal/hub/systems/system_realtime.go
Normal file
@@ -0,0 +1,188 @@
|
|||||||
|
package systems
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
"github.com/pocketbase/pocketbase/tools/subscriptions"
|
||||||
|
)
|
||||||
|
|
||||||
|
type subscriptionInfo struct {
|
||||||
|
subscription string
|
||||||
|
connectedClients uint8
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
activeSubscriptions = make(map[string]*subscriptionInfo)
|
||||||
|
workerRunning bool
|
||||||
|
realtimeTicker *time.Ticker
|
||||||
|
tickerStopChan chan struct{}
|
||||||
|
realtimeMutex sync.Mutex
|
||||||
|
)
|
||||||
|
|
||||||
|
// onRealtimeConnectRequest handles client connection events for realtime subscriptions.
|
||||||
|
// It cleans up existing subscriptions when a client connects.
|
||||||
|
func (sm *SystemManager) onRealtimeConnectRequest(e *core.RealtimeConnectRequestEvent) error {
|
||||||
|
// after e.Next() is the client disconnection
|
||||||
|
e.Next()
|
||||||
|
subscriptions := e.Client.Subscriptions()
|
||||||
|
for k := range subscriptions {
|
||||||
|
sm.removeRealtimeSubscription(k, subscriptions[k])
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// onRealtimeSubscribeRequest handles client subscription events for realtime metrics.
|
||||||
|
// It tracks new subscriptions and unsubscriptions to manage the realtime worker lifecycle.
|
||||||
|
func (sm *SystemManager) onRealtimeSubscribeRequest(e *core.RealtimeSubscribeRequestEvent) error {
|
||||||
|
oldSubs := e.Client.Subscriptions()
|
||||||
|
// after e.Next() is the result of the subscribe request
|
||||||
|
err := e.Next()
|
||||||
|
newSubs := e.Client.Subscriptions()
|
||||||
|
|
||||||
|
// handle new subscriptions
|
||||||
|
for k, options := range newSubs {
|
||||||
|
if _, ok := oldSubs[k]; !ok {
|
||||||
|
if strings.HasPrefix(k, "rt_metrics") {
|
||||||
|
systemId := options.Query["system"]
|
||||||
|
if _, ok := activeSubscriptions[systemId]; !ok {
|
||||||
|
activeSubscriptions[systemId] = &subscriptionInfo{
|
||||||
|
subscription: k,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
activeSubscriptions[systemId].connectedClients += 1
|
||||||
|
sm.onRealtimeSubscriptionAdded()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// handle unsubscriptions
|
||||||
|
for k := range oldSubs {
|
||||||
|
if _, ok := newSubs[k]; !ok {
|
||||||
|
sm.removeRealtimeSubscription(k, oldSubs[k])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// onRealtimeSubscriptionAdded initializes or starts the realtime worker when the first subscription is added.
|
||||||
|
// It ensures only one worker runs at a time and creates the ticker for periodic data fetching.
|
||||||
|
func (sm *SystemManager) onRealtimeSubscriptionAdded() {
|
||||||
|
realtimeMutex.Lock()
|
||||||
|
defer realtimeMutex.Unlock()
|
||||||
|
|
||||||
|
// Start the worker if it's not already running
|
||||||
|
if !workerRunning {
|
||||||
|
workerRunning = true
|
||||||
|
// Create a new stop channel for this worker instance
|
||||||
|
tickerStopChan = make(chan struct{})
|
||||||
|
go sm.startRealtimeWorker()
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no ticker exists, create one
|
||||||
|
if realtimeTicker == nil {
|
||||||
|
realtimeTicker = time.NewTicker(1 * time.Second)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// checkSubscriptions stops the realtime worker when there are no active subscriptions.
|
||||||
|
// This prevents unnecessary resource usage when no clients are listening for realtime data.
|
||||||
|
func (sm *SystemManager) checkSubscriptions() {
|
||||||
|
if !workerRunning || len(activeSubscriptions) > 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
realtimeMutex.Lock()
|
||||||
|
defer realtimeMutex.Unlock()
|
||||||
|
|
||||||
|
// Signal the worker to stop
|
||||||
|
if tickerStopChan != nil {
|
||||||
|
select {
|
||||||
|
case tickerStopChan <- struct{}{}:
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if realtimeTicker != nil {
|
||||||
|
realtimeTicker.Stop()
|
||||||
|
realtimeTicker = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mark worker as stopped (will be reset when next subscription comes in)
|
||||||
|
workerRunning = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// removeRealtimeSubscription removes a realtime subscription and checks if the worker should be stopped.
|
||||||
|
// It only processes subscriptions with the "rt_metrics" prefix and triggers cleanup when subscriptions are removed.
|
||||||
|
func (sm *SystemManager) removeRealtimeSubscription(subscription string, options subscriptions.SubscriptionOptions) {
|
||||||
|
if strings.HasPrefix(subscription, "rt_metrics") {
|
||||||
|
systemId := options.Query["system"]
|
||||||
|
if info, ok := activeSubscriptions[systemId]; ok {
|
||||||
|
info.connectedClients -= 1
|
||||||
|
if info.connectedClients <= 0 {
|
||||||
|
delete(activeSubscriptions, systemId)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sm.checkSubscriptions()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// startRealtimeWorker runs the main loop for fetching realtime data from agents.
|
||||||
|
// It continuously fetches system data and broadcasts it to subscribed clients via WebSocket.
|
||||||
|
func (sm *SystemManager) startRealtimeWorker() {
|
||||||
|
sm.fetchRealtimeDataAndNotify()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-tickerStopChan:
|
||||||
|
return
|
||||||
|
case <-realtimeTicker.C:
|
||||||
|
// Check if ticker is still valid (might have been stopped)
|
||||||
|
if realtimeTicker == nil || len(activeSubscriptions) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// slog.Debug("activeSubscriptions", "count", len(activeSubscriptions))
|
||||||
|
sm.fetchRealtimeDataAndNotify()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// fetchRealtimeDataAndNotify fetches realtime data for all active subscriptions and notifies the clients.
|
||||||
|
func (sm *SystemManager) fetchRealtimeDataAndNotify() {
|
||||||
|
for systemId, info := range activeSubscriptions {
|
||||||
|
system, err := sm.GetSystem(systemId)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
go func() {
|
||||||
|
data, err := system.fetchDataFromAgent(common.DataRequestOptions{CacheTimeMs: 1000})
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
bytes, err := json.Marshal(data)
|
||||||
|
if err == nil {
|
||||||
|
notify(sm.hub, info.subscription, bytes)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// notify broadcasts realtime data to all clients subscribed to a specific subscription.
|
||||||
|
// It iterates through all connected clients and sends the data only to those with matching subscriptions.
|
||||||
|
func notify(app core.App, subscription string, data []byte) error {
|
||||||
|
message := subscriptions.Message{
|
||||||
|
Name: subscription,
|
||||||
|
Data: data,
|
||||||
|
}
|
||||||
|
for _, client := range app.SubscriptionsBroker().Clients() {
|
||||||
|
if !client.HasSubscription(subscription) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
client.Send(message)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
75
internal/hub/systems/system_systemd_test.go
Normal file
75
internal/hub/systems/system_systemd_test.go
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package systems
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGetSystemdServiceId(t *testing.T) {
|
||||||
|
t.Run("deterministic output", func(t *testing.T) {
|
||||||
|
systemId := "sys-123"
|
||||||
|
serviceName := "nginx.service"
|
||||||
|
|
||||||
|
// Call multiple times and ensure same result
|
||||||
|
id1 := getSystemdServiceId(systemId, serviceName)
|
||||||
|
id2 := getSystemdServiceId(systemId, serviceName)
|
||||||
|
id3 := getSystemdServiceId(systemId, serviceName)
|
||||||
|
|
||||||
|
assert.Equal(t, id1, id2)
|
||||||
|
assert.Equal(t, id2, id3)
|
||||||
|
assert.NotEmpty(t, id1)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("different inputs produce different ids", func(t *testing.T) {
|
||||||
|
systemId1 := "sys-123"
|
||||||
|
systemId2 := "sys-456"
|
||||||
|
serviceName1 := "nginx.service"
|
||||||
|
serviceName2 := "apache.service"
|
||||||
|
|
||||||
|
id1 := getSystemdServiceId(systemId1, serviceName1)
|
||||||
|
id2 := getSystemdServiceId(systemId2, serviceName1)
|
||||||
|
id3 := getSystemdServiceId(systemId1, serviceName2)
|
||||||
|
id4 := getSystemdServiceId(systemId2, serviceName2)
|
||||||
|
|
||||||
|
// All IDs should be different
|
||||||
|
assert.NotEqual(t, id1, id2)
|
||||||
|
assert.NotEqual(t, id1, id3)
|
||||||
|
assert.NotEqual(t, id1, id4)
|
||||||
|
assert.NotEqual(t, id2, id3)
|
||||||
|
assert.NotEqual(t, id2, id4)
|
||||||
|
assert.NotEqual(t, id3, id4)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("consistent length", func(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
systemId string
|
||||||
|
serviceName string
|
||||||
|
}{
|
||||||
|
{"short", "short.service"},
|
||||||
|
{"very-long-system-id-that-might-be-used-in-practice", "very-long-service-name.service"},
|
||||||
|
{"", "empty-system.service"},
|
||||||
|
{"empty-service", ""},
|
||||||
|
{"", ""},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
id := getSystemdServiceId(tc.systemId, tc.serviceName)
|
||||||
|
// FNV-32 produces 8 hex characters
|
||||||
|
assert.Len(t, id, 8, "ID should be 8 characters for systemId='%s', serviceName='%s'", tc.systemId, tc.serviceName)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("hexadecimal output", func(t *testing.T) {
|
||||||
|
id := getSystemdServiceId("test-system", "test-service")
|
||||||
|
assert.NotEmpty(t, id)
|
||||||
|
|
||||||
|
// Should only contain hexadecimal characters
|
||||||
|
for _, char := range id {
|
||||||
|
assert.True(t, (char >= '0' && char <= '9') || (char >= 'a' && char <= 'f'),
|
||||||
|
"ID should only contain hexadecimal characters, got: %s", id)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
238
internal/hub/ws/handlers.go
Normal file
238
internal/hub/ws/handlers.go
Normal file
@@ -0,0 +1,238 @@
|
|||||||
|
package ws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
|
"github.com/lxzan/gws"
|
||||||
|
"golang.org/x/crypto/ssh"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ResponseHandler defines interface for handling agent responses
|
||||||
|
type ResponseHandler interface {
|
||||||
|
Handle(agentResponse common.AgentResponse) error
|
||||||
|
HandleLegacy(rawData []byte) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// BaseHandler provides a default implementation that can be embedded to make HandleLegacy optional
|
||||||
|
type BaseHandler struct{}
|
||||||
|
|
||||||
|
func (h *BaseHandler) HandleLegacy(rawData []byte) error {
|
||||||
|
return errors.New("legacy format not supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// systemDataHandler implements ResponseHandler for system data requests
|
||||||
|
type systemDataHandler struct {
|
||||||
|
data *system.CombinedData
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *systemDataHandler) HandleLegacy(rawData []byte) error {
|
||||||
|
return cbor.Unmarshal(rawData, h.data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *systemDataHandler) Handle(agentResponse common.AgentResponse) error {
|
||||||
|
if agentResponse.SystemData != nil {
|
||||||
|
*h.data = *agentResponse.SystemData
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestSystemData requests system metrics from the agent and unmarshals the response.
|
||||||
|
func (ws *WsConn) RequestSystemData(ctx context.Context, data *system.CombinedData, options common.DataRequestOptions) error {
|
||||||
|
if !ws.IsConnected() {
|
||||||
|
return gws.ErrConnClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := ws.requestManager.SendRequest(ctx, common.GetData, options)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
handler := &systemDataHandler{data: data}
|
||||||
|
return ws.handleAgentRequest(req, handler)
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// stringResponseHandler is a generic handler for string responses from agents
|
||||||
|
type stringResponseHandler struct {
|
||||||
|
BaseHandler
|
||||||
|
value string
|
||||||
|
errorMsg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *stringResponseHandler) Handle(agentResponse common.AgentResponse) error {
|
||||||
|
if agentResponse.String == nil {
|
||||||
|
return errors.New(h.errorMsg)
|
||||||
|
}
|
||||||
|
h.value = *agentResponse.String
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// requestContainerStringViaWS is a generic function to request container-related strings via WebSocket
|
||||||
|
func (ws *WsConn) requestContainerStringViaWS(ctx context.Context, action common.WebSocketAction, requestData any, errorMsg string) (string, error) {
|
||||||
|
if !ws.IsConnected() {
|
||||||
|
return "", gws.ErrConnClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := ws.requestManager.SendRequest(ctx, action, requestData)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
handler := &stringResponseHandler{errorMsg: errorMsg}
|
||||||
|
if err := ws.handleAgentRequest(req, handler); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return handler.value, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestContainerLogs requests logs for a specific container via WebSocket.
|
||||||
|
func (ws *WsConn) RequestContainerLogs(ctx context.Context, containerID string) (string, error) {
|
||||||
|
return ws.requestContainerStringViaWS(ctx, common.GetContainerLogs, common.ContainerLogsRequest{ContainerID: containerID}, "no logs in response")
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestContainerInfo requests information about a specific container via WebSocket.
|
||||||
|
func (ws *WsConn) RequestContainerInfo(ctx context.Context, containerID string) (string, error) {
|
||||||
|
return ws.requestContainerStringViaWS(ctx, common.GetContainerInfo, common.ContainerInfoRequest{ContainerID: containerID}, "no info in response")
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// RequestSystemdInfo requests detailed information about a systemd service via WebSocket.
|
||||||
|
func (ws *WsConn) RequestSystemdInfo(ctx context.Context, serviceName string) (systemd.ServiceDetails, error) {
|
||||||
|
if !ws.IsConnected() {
|
||||||
|
return nil, gws.ErrConnClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := ws.requestManager.SendRequest(ctx, common.GetSystemdInfo, common.SystemdInfoRequest{ServiceName: serviceName})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var result systemd.ServiceDetails
|
||||||
|
handler := &systemdInfoHandler{result: &result}
|
||||||
|
if err := ws.handleAgentRequest(req, handler); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// systemdInfoHandler parses ServiceDetails from AgentResponse
|
||||||
|
type systemdInfoHandler struct {
|
||||||
|
BaseHandler
|
||||||
|
result *systemd.ServiceDetails
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *systemdInfoHandler) Handle(agentResponse common.AgentResponse) error {
|
||||||
|
if agentResponse.ServiceInfo == nil {
|
||||||
|
return errors.New("no systemd info in response")
|
||||||
|
}
|
||||||
|
*h.result = agentResponse.ServiceInfo
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// RequestSmartData requests SMART data via WebSocket.
|
||||||
|
func (ws *WsConn) RequestSmartData(ctx context.Context) (map[string]any, error) {
|
||||||
|
if !ws.IsConnected() {
|
||||||
|
return nil, gws.ErrConnClosed
|
||||||
|
}
|
||||||
|
req, err := ws.requestManager.SendRequest(ctx, common.GetSmartData, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var result map[string]any
|
||||||
|
handler := ResponseHandler(&smartDataHandler{result: &result})
|
||||||
|
if err := ws.handleAgentRequest(req, handler); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// smartDataHandler parses SMART data map from AgentResponse
|
||||||
|
type smartDataHandler struct {
|
||||||
|
BaseHandler
|
||||||
|
result *map[string]any
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *smartDataHandler) Handle(agentResponse common.AgentResponse) error {
|
||||||
|
if agentResponse.SmartData == nil {
|
||||||
|
return errors.New("no SMART data in response")
|
||||||
|
}
|
||||||
|
// convert to map[string]any for transport convenience in hub layer
|
||||||
|
out := make(map[string]any, len(agentResponse.SmartData))
|
||||||
|
for k, v := range agentResponse.SmartData {
|
||||||
|
out[k] = v
|
||||||
|
}
|
||||||
|
*h.result = out
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
|
// fingerprintHandler implements ResponseHandler for fingerprint requests
|
||||||
|
type fingerprintHandler struct {
|
||||||
|
result *common.FingerprintResponse
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *fingerprintHandler) HandleLegacy(rawData []byte) error {
|
||||||
|
return cbor.Unmarshal(rawData, h.result)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *fingerprintHandler) Handle(agentResponse common.AgentResponse) error {
|
||||||
|
if agentResponse.Fingerprint != nil {
|
||||||
|
*h.result = *agentResponse.Fingerprint
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errors.New("no fingerprint data in response")
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFingerprint authenticates with the agent using SSH signature and returns the agent's fingerprint.
|
||||||
|
func (ws *WsConn) GetFingerprint(ctx context.Context, token string, signer ssh.Signer, needSysInfo bool) (common.FingerprintResponse, error) {
|
||||||
|
if !ws.IsConnected() {
|
||||||
|
return common.FingerprintResponse{}, gws.ErrConnClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
challenge := []byte(token)
|
||||||
|
signature, err := signer.Sign(nil, challenge)
|
||||||
|
if err != nil {
|
||||||
|
return common.FingerprintResponse{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
req, err := ws.requestManager.SendRequest(ctx, common.CheckFingerprint, common.FingerprintRequest{
|
||||||
|
Signature: signature.Blob,
|
||||||
|
NeedSysInfo: needSysInfo,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return common.FingerprintResponse{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var result common.FingerprintResponse
|
||||||
|
handler := &fingerprintHandler{result: &result}
|
||||||
|
err = ws.handleAgentRequest(req, handler)
|
||||||
|
return result, err
|
||||||
|
}
|
||||||
75
internal/hub/ws/handlers_test.go
Normal file
75
internal/hub/ws/handlers_test.go
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package ws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSystemdInfoHandlerSuccess(t *testing.T) {
|
||||||
|
handler := &systemdInfoHandler{
|
||||||
|
result: &systemd.ServiceDetails{},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test successful handling with valid ServiceInfo
|
||||||
|
testDetails := systemd.ServiceDetails{
|
||||||
|
"Id": "nginx.service",
|
||||||
|
"ActiveState": "active",
|
||||||
|
"SubState": "running",
|
||||||
|
"Description": "A high performance web server",
|
||||||
|
"ExecMainPID": 1234,
|
||||||
|
"MemoryCurrent": 1024000,
|
||||||
|
}
|
||||||
|
|
||||||
|
response := common.AgentResponse{
|
||||||
|
ServiceInfo: testDetails,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := handler.Handle(response)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, testDetails, *handler.result)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemdInfoHandlerError(t *testing.T) {
|
||||||
|
handler := &systemdInfoHandler{
|
||||||
|
result: &systemd.ServiceDetails{},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test error handling when ServiceInfo is nil
|
||||||
|
response := common.AgentResponse{
|
||||||
|
ServiceInfo: nil,
|
||||||
|
Error: "service not found",
|
||||||
|
}
|
||||||
|
|
||||||
|
err := handler.Handle(response)
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.Equal(t, "no systemd info in response", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemdInfoHandlerEmptyResponse(t *testing.T) {
|
||||||
|
handler := &systemdInfoHandler{
|
||||||
|
result: &systemd.ServiceDetails{},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test with completely empty response
|
||||||
|
response := common.AgentResponse{}
|
||||||
|
|
||||||
|
err := handler.Handle(response)
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.Equal(t, "no systemd info in response", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSystemdInfoHandlerLegacyNotSupported(t *testing.T) {
|
||||||
|
handler := &systemdInfoHandler{
|
||||||
|
result: &systemd.ServiceDetails{},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that legacy format is not supported
|
||||||
|
err := handler.HandleLegacy([]byte("some data"))
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.Equal(t, "legacy format not supported", err.Error())
|
||||||
|
}
|
||||||
186
internal/hub/ws/request_manager.go
Normal file
186
internal/hub/ws/request_manager.go
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
package ws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/lxzan/gws"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RequestID uniquely identifies a request
|
||||||
|
type RequestID uint32
|
||||||
|
|
||||||
|
// PendingRequest tracks an in-flight request
|
||||||
|
type PendingRequest struct {
|
||||||
|
ID RequestID
|
||||||
|
ResponseCh chan *gws.Message
|
||||||
|
Context context.Context
|
||||||
|
Cancel context.CancelFunc
|
||||||
|
CreatedAt time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// RequestManager handles concurrent requests to an agent
|
||||||
|
type RequestManager struct {
|
||||||
|
sync.RWMutex
|
||||||
|
conn *gws.Conn
|
||||||
|
pendingReqs map[RequestID]*PendingRequest
|
||||||
|
nextID atomic.Uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewRequestManager creates a new request manager for a WebSocket connection
|
||||||
|
func NewRequestManager(conn *gws.Conn) *RequestManager {
|
||||||
|
rm := &RequestManager{
|
||||||
|
conn: conn,
|
||||||
|
pendingReqs: make(map[RequestID]*PendingRequest),
|
||||||
|
}
|
||||||
|
return rm
|
||||||
|
}
|
||||||
|
|
||||||
|
// SendRequest sends a request and returns a channel for the response
|
||||||
|
func (rm *RequestManager) SendRequest(ctx context.Context, action common.WebSocketAction, data any) (*PendingRequest, error) {
|
||||||
|
reqID := RequestID(rm.nextID.Add(1))
|
||||||
|
|
||||||
|
reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||||
|
|
||||||
|
req := &PendingRequest{
|
||||||
|
ID: reqID,
|
||||||
|
ResponseCh: make(chan *gws.Message, 1),
|
||||||
|
Context: reqCtx,
|
||||||
|
Cancel: cancel,
|
||||||
|
CreatedAt: time.Now(),
|
||||||
|
}
|
||||||
|
|
||||||
|
rm.Lock()
|
||||||
|
rm.pendingReqs[reqID] = req
|
||||||
|
rm.Unlock()
|
||||||
|
|
||||||
|
hubReq := common.HubRequest[any]{
|
||||||
|
Id: (*uint32)(&reqID),
|
||||||
|
Action: action,
|
||||||
|
Data: data,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send the request
|
||||||
|
if err := rm.sendMessage(hubReq); err != nil {
|
||||||
|
rm.cancelRequest(reqID)
|
||||||
|
return nil, fmt.Errorf("failed to send request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start cleanup watcher for timeout/cancellation
|
||||||
|
go rm.cleanupRequest(req)
|
||||||
|
|
||||||
|
return req, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendMessage encodes and sends a message over WebSocket
|
||||||
|
func (rm *RequestManager) sendMessage(data any) error {
|
||||||
|
if rm.conn == nil {
|
||||||
|
return gws.ErrConnClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
bytes, err := cbor.Marshal(data)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to marshal request: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return rm.conn.WriteMessage(gws.OpcodeBinary, bytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleResponse processes a single response message
|
||||||
|
func (rm *RequestManager) handleResponse(message *gws.Message) {
|
||||||
|
var response common.AgentResponse
|
||||||
|
if err := cbor.Unmarshal(message.Data.Bytes(), &response); err != nil {
|
||||||
|
// Legacy response without ID - route to first pending request of any type
|
||||||
|
rm.routeLegacyResponse(message)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
reqID := RequestID(*response.Id)
|
||||||
|
|
||||||
|
rm.RLock()
|
||||||
|
req, exists := rm.pendingReqs[reqID]
|
||||||
|
rm.RUnlock()
|
||||||
|
|
||||||
|
if !exists {
|
||||||
|
// Request not found (might have timed out) - close the message
|
||||||
|
message.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case req.ResponseCh <- message:
|
||||||
|
// Message successfully delivered - the receiver will close it
|
||||||
|
rm.deleteRequest(reqID)
|
||||||
|
case <-req.Context.Done():
|
||||||
|
// Request was cancelled/timed out - close the message
|
||||||
|
message.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// routeLegacyResponse handles responses that don't have request IDs (backwards compatibility)
|
||||||
|
func (rm *RequestManager) routeLegacyResponse(message *gws.Message) {
|
||||||
|
// Snapshot the oldest pending request without holding the lock during send
|
||||||
|
rm.RLock()
|
||||||
|
var oldestReq *PendingRequest
|
||||||
|
for _, req := range rm.pendingReqs {
|
||||||
|
if oldestReq == nil || req.CreatedAt.Before(oldestReq.CreatedAt) {
|
||||||
|
oldestReq = req
|
||||||
|
}
|
||||||
|
}
|
||||||
|
rm.RUnlock()
|
||||||
|
|
||||||
|
if oldestReq != nil {
|
||||||
|
select {
|
||||||
|
case oldestReq.ResponseCh <- message:
|
||||||
|
// Message successfully delivered - the receiver will close it
|
||||||
|
rm.deleteRequest(oldestReq.ID)
|
||||||
|
case <-oldestReq.Context.Done():
|
||||||
|
// Request was cancelled - close the message
|
||||||
|
message.Close()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// No pending requests - close the message
|
||||||
|
message.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// cleanupRequest handles request timeout and cleanup
|
||||||
|
func (rm *RequestManager) cleanupRequest(req *PendingRequest) {
|
||||||
|
<-req.Context.Done()
|
||||||
|
rm.cancelRequest(req.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// cancelRequest removes a request and cancels its context
|
||||||
|
func (rm *RequestManager) cancelRequest(reqID RequestID) {
|
||||||
|
rm.Lock()
|
||||||
|
defer rm.Unlock()
|
||||||
|
|
||||||
|
if req, exists := rm.pendingReqs[reqID]; exists {
|
||||||
|
req.Cancel()
|
||||||
|
delete(rm.pendingReqs, reqID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteRequest removes a request from the pending map without cancelling its context.
|
||||||
|
func (rm *RequestManager) deleteRequest(reqID RequestID) {
|
||||||
|
rm.Lock()
|
||||||
|
defer rm.Unlock()
|
||||||
|
delete(rm.pendingReqs, reqID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close shuts down the request manager
|
||||||
|
func (rm *RequestManager) Close() {
|
||||||
|
rm.Lock()
|
||||||
|
defer rm.Unlock()
|
||||||
|
|
||||||
|
// Cancel all pending requests
|
||||||
|
for _, req := range rm.pendingReqs {
|
||||||
|
req.Cancel()
|
||||||
|
}
|
||||||
|
rm.pendingReqs = make(map[RequestID]*PendingRequest)
|
||||||
|
}
|
||||||
81
internal/hub/ws/request_manager_test.go
Normal file
81
internal/hub/ws/request_manager_test.go
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
//go:build testing
|
||||||
|
// +build testing
|
||||||
|
|
||||||
|
package ws
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestRequestManager_BasicFunctionality tests the request manager without mocking gws.Conn
|
||||||
|
func TestRequestManager_BasicFunctionality(t *testing.T) {
|
||||||
|
// We'll test the core logic without mocking the connection
|
||||||
|
// since the gws.Conn interface is complex to mock properly
|
||||||
|
|
||||||
|
t.Run("request ID generation", func(t *testing.T) {
|
||||||
|
// Test that request IDs are generated sequentially and uniquely
|
||||||
|
rm := &RequestManager{}
|
||||||
|
|
||||||
|
// Simulate multiple ID generations
|
||||||
|
id1 := rm.nextID.Add(1)
|
||||||
|
id2 := rm.nextID.Add(1)
|
||||||
|
id3 := rm.nextID.Add(1)
|
||||||
|
|
||||||
|
assert.NotEqual(t, id1, id2)
|
||||||
|
assert.NotEqual(t, id2, id3)
|
||||||
|
assert.Greater(t, id2, id1)
|
||||||
|
assert.Greater(t, id3, id2)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("pending request tracking", func(t *testing.T) {
|
||||||
|
rm := &RequestManager{
|
||||||
|
pendingReqs: make(map[RequestID]*PendingRequest),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initially no pending requests
|
||||||
|
assert.Equal(t, 0, rm.GetPendingCount())
|
||||||
|
|
||||||
|
// Add some fake pending requests
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
req1 := &PendingRequest{
|
||||||
|
ID: RequestID(1),
|
||||||
|
Context: ctx,
|
||||||
|
Cancel: cancel,
|
||||||
|
}
|
||||||
|
req2 := &PendingRequest{
|
||||||
|
ID: RequestID(2),
|
||||||
|
Context: ctx,
|
||||||
|
Cancel: cancel,
|
||||||
|
}
|
||||||
|
|
||||||
|
rm.pendingReqs[req1.ID] = req1
|
||||||
|
rm.pendingReqs[req2.ID] = req2
|
||||||
|
|
||||||
|
assert.Equal(t, 2, rm.GetPendingCount())
|
||||||
|
|
||||||
|
// Remove one
|
||||||
|
delete(rm.pendingReqs, req1.ID)
|
||||||
|
assert.Equal(t, 1, rm.GetPendingCount())
|
||||||
|
|
||||||
|
// Remove all
|
||||||
|
delete(rm.pendingReqs, req2.ID)
|
||||||
|
assert.Equal(t, 0, rm.GetPendingCount())
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("context cancellation", func(t *testing.T) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Wait for context to timeout
|
||||||
|
<-ctx.Done()
|
||||||
|
|
||||||
|
// Verify context was cancelled
|
||||||
|
assert.Equal(t, context.DeadlineExceeded, ctx.Err())
|
||||||
|
})
|
||||||
|
}
|
||||||
@@ -5,13 +5,13 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
"weak"
|
"weak"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/blang/semver"
|
||||||
|
"github.com/henrygd/beszel"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
|
||||||
"github.com/fxamacker/cbor/v2"
|
"github.com/fxamacker/cbor/v2"
|
||||||
"github.com/lxzan/gws"
|
"github.com/lxzan/gws"
|
||||||
"golang.org/x/crypto/ssh"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -25,9 +25,10 @@ type Handler struct {
|
|||||||
|
|
||||||
// WsConn represents a WebSocket connection to an agent.
|
// WsConn represents a WebSocket connection to an agent.
|
||||||
type WsConn struct {
|
type WsConn struct {
|
||||||
conn *gws.Conn
|
conn *gws.Conn
|
||||||
responseChan chan *gws.Message
|
requestManager *RequestManager
|
||||||
DownChan chan struct{}
|
DownChan chan struct{}
|
||||||
|
agentVersion semver.Version
|
||||||
}
|
}
|
||||||
|
|
||||||
// FingerprintRecord is fingerprints collection record data in the hub
|
// FingerprintRecord is fingerprints collection record data in the hub
|
||||||
@@ -50,21 +51,22 @@ func GetUpgrader() *gws.Upgrader {
|
|||||||
return upgrader
|
return upgrader
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewWsConnection creates a new WebSocket connection wrapper.
|
// NewWsConnection creates a new WebSocket connection wrapper with agent version.
|
||||||
func NewWsConnection(conn *gws.Conn) *WsConn {
|
func NewWsConnection(conn *gws.Conn, agentVersion semver.Version) *WsConn {
|
||||||
return &WsConn{
|
return &WsConn{
|
||||||
conn: conn,
|
conn: conn,
|
||||||
responseChan: make(chan *gws.Message, 1),
|
requestManager: NewRequestManager(conn),
|
||||||
DownChan: make(chan struct{}, 1),
|
DownChan: make(chan struct{}, 1),
|
||||||
|
agentVersion: agentVersion,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// OnOpen sets a deadline for the WebSocket connection.
|
// OnOpen sets a deadline for the WebSocket connection and extracts agent version.
|
||||||
func (h *Handler) OnOpen(conn *gws.Conn) {
|
func (h *Handler) OnOpen(conn *gws.Conn) {
|
||||||
conn.SetDeadline(time.Now().Add(deadline))
|
conn.SetDeadline(time.Now().Add(deadline))
|
||||||
}
|
}
|
||||||
|
|
||||||
// OnMessage routes incoming WebSocket messages to the response channel.
|
// OnMessage routes incoming WebSocket messages to the request manager.
|
||||||
func (h *Handler) OnMessage(conn *gws.Conn, message *gws.Message) {
|
func (h *Handler) OnMessage(conn *gws.Conn, message *gws.Message) {
|
||||||
conn.SetDeadline(time.Now().Add(deadline))
|
conn.SetDeadline(time.Now().Add(deadline))
|
||||||
if message.Opcode != gws.OpcodeBinary || message.Data.Len() == 0 {
|
if message.Opcode != gws.OpcodeBinary || message.Data.Len() == 0 {
|
||||||
@@ -75,12 +77,7 @@ func (h *Handler) OnMessage(conn *gws.Conn, message *gws.Message) {
|
|||||||
_ = conn.WriteClose(1000, nil)
|
_ = conn.WriteClose(1000, nil)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
select {
|
wsConn.(*WsConn).requestManager.handleResponse(message)
|
||||||
case wsConn.(*WsConn).responseChan <- message:
|
|
||||||
default:
|
|
||||||
// close if the connection is not expecting a response
|
|
||||||
wsConn.(*WsConn).Close(nil)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// OnClose handles WebSocket connection closures and triggers system down status after delay.
|
// OnClose handles WebSocket connection closures and triggers system down status after delay.
|
||||||
@@ -106,6 +103,9 @@ func (ws *WsConn) Close(msg []byte) {
|
|||||||
if ws.IsConnected() {
|
if ws.IsConnected() {
|
||||||
ws.conn.WriteClose(1000, msg)
|
ws.conn.WriteClose(1000, msg)
|
||||||
}
|
}
|
||||||
|
if ws.requestManager != nil {
|
||||||
|
ws.requestManager.Close()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ping sends a ping frame to keep the connection alive.
|
// Ping sends a ping frame to keep the connection alive.
|
||||||
@@ -115,6 +115,7 @@ func (ws *WsConn) Ping() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// sendMessage encodes data to CBOR and sends it as a binary message to the agent.
|
// sendMessage encodes data to CBOR and sends it as a binary message to the agent.
|
||||||
|
// This is kept for backwards compatibility but new actions should use RequestManager.
|
||||||
func (ws *WsConn) sendMessage(data common.HubRequest[any]) error {
|
func (ws *WsConn) sendMessage(data common.HubRequest[any]) error {
|
||||||
if ws.conn == nil {
|
if ws.conn == nil {
|
||||||
return gws.ErrConnClosed
|
return gws.ErrConnClosed
|
||||||
@@ -126,54 +127,34 @@ func (ws *WsConn) sendMessage(data common.HubRequest[any]) error {
|
|||||||
return ws.conn.WriteMessage(gws.OpcodeBinary, bytes)
|
return ws.conn.WriteMessage(gws.OpcodeBinary, bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RequestSystemData requests system metrics from the agent and unmarshals the response.
|
// handleAgentRequest processes a request to the agent, handling both legacy and new formats.
|
||||||
func (ws *WsConn) RequestSystemData(data *system.CombinedData) error {
|
func (ws *WsConn) handleAgentRequest(req *PendingRequest, handler ResponseHandler) error {
|
||||||
var message *gws.Message
|
// Wait for response
|
||||||
|
|
||||||
ws.sendMessage(common.HubRequest[any]{
|
|
||||||
Action: common.GetData,
|
|
||||||
})
|
|
||||||
select {
|
select {
|
||||||
case <-time.After(10 * time.Second):
|
case message := <-req.ResponseCh:
|
||||||
ws.Close(nil)
|
defer message.Close()
|
||||||
return gws.ErrConnClosed
|
// Cancel request context to stop timeout watcher promptly
|
||||||
case message = <-ws.responseChan:
|
defer req.Cancel()
|
||||||
|
data := message.Data.Bytes()
|
||||||
|
|
||||||
|
// Legacy format - unmarshal directly
|
||||||
|
if ws.agentVersion.LT(beszel.MinVersionAgentResponse) {
|
||||||
|
return handler.HandleLegacy(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
// New format with AgentResponse wrapper
|
||||||
|
var agentResponse common.AgentResponse
|
||||||
|
if err := cbor.Unmarshal(data, &agentResponse); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if agentResponse.Error != "" {
|
||||||
|
return errors.New(agentResponse.Error)
|
||||||
|
}
|
||||||
|
return handler.Handle(agentResponse)
|
||||||
|
|
||||||
|
case <-req.Context.Done():
|
||||||
|
return req.Context.Err()
|
||||||
}
|
}
|
||||||
defer message.Close()
|
|
||||||
return cbor.Unmarshal(message.Data.Bytes(), data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetFingerprint authenticates with the agent using SSH signature and returns the agent's fingerprint.
|
|
||||||
func (ws *WsConn) GetFingerprint(token string, signer ssh.Signer, needSysInfo bool) (common.FingerprintResponse, error) {
|
|
||||||
var clientFingerprint common.FingerprintResponse
|
|
||||||
challenge := []byte(token)
|
|
||||||
|
|
||||||
signature, err := signer.Sign(nil, challenge)
|
|
||||||
if err != nil {
|
|
||||||
return clientFingerprint, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = ws.sendMessage(common.HubRequest[any]{
|
|
||||||
Action: common.CheckFingerprint,
|
|
||||||
Data: common.FingerprintRequest{
|
|
||||||
Signature: signature.Blob,
|
|
||||||
NeedSysInfo: needSysInfo,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return clientFingerprint, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var message *gws.Message
|
|
||||||
select {
|
|
||||||
case message = <-ws.responseChan:
|
|
||||||
case <-time.After(10 * time.Second):
|
|
||||||
return clientFingerprint, errors.New("request expired")
|
|
||||||
}
|
|
||||||
defer message.Close()
|
|
||||||
|
|
||||||
err = cbor.Unmarshal(message.Data.Bytes(), &clientFingerprint)
|
|
||||||
return clientFingerprint, err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsConnected returns true if the WebSocket connection is active.
|
// IsConnected returns true if the WebSocket connection is active.
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/blang/semver"
|
||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
|
||||||
"github.com/fxamacker/cbor/v2"
|
"github.com/fxamacker/cbor/v2"
|
||||||
@@ -36,26 +37,25 @@ func TestGetUpgrader(t *testing.T) {
|
|||||||
// TestNewWsConnection tests WebSocket connection creation
|
// TestNewWsConnection tests WebSocket connection creation
|
||||||
func TestNewWsConnection(t *testing.T) {
|
func TestNewWsConnection(t *testing.T) {
|
||||||
// We can't easily mock gws.Conn, so we'll pass nil and test the structure
|
// We can't easily mock gws.Conn, so we'll pass nil and test the structure
|
||||||
wsConn := NewWsConnection(nil)
|
wsConn := NewWsConnection(nil, semver.MustParse("0.12.10"))
|
||||||
|
|
||||||
assert.NotNil(t, wsConn, "WebSocket connection should not be nil")
|
assert.NotNil(t, wsConn, "WebSocket connection should not be nil")
|
||||||
assert.Nil(t, wsConn.conn, "Connection should be nil as passed")
|
assert.Nil(t, wsConn.conn, "Connection should be nil as passed")
|
||||||
assert.NotNil(t, wsConn.responseChan, "Response channel should be initialized")
|
assert.NotNil(t, wsConn.requestManager, "Request manager should be initialized")
|
||||||
assert.NotNil(t, wsConn.DownChan, "Down channel should be initialized")
|
assert.NotNil(t, wsConn.DownChan, "Down channel should be initialized")
|
||||||
assert.Equal(t, 1, cap(wsConn.responseChan), "Response channel should have capacity of 1")
|
|
||||||
assert.Equal(t, 1, cap(wsConn.DownChan), "Down channel should have capacity of 1")
|
assert.Equal(t, 1, cap(wsConn.DownChan), "Down channel should have capacity of 1")
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestWsConn_IsConnected tests the connection status check
|
// TestWsConn_IsConnected tests the connection status check
|
||||||
func TestWsConn_IsConnected(t *testing.T) {
|
func TestWsConn_IsConnected(t *testing.T) {
|
||||||
// Test with nil connection
|
// Test with nil connection
|
||||||
wsConn := NewWsConnection(nil)
|
wsConn := NewWsConnection(nil, semver.MustParse("0.12.10"))
|
||||||
assert.False(t, wsConn.IsConnected(), "Should not be connected when conn is nil")
|
assert.False(t, wsConn.IsConnected(), "Should not be connected when conn is nil")
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestWsConn_Close tests the connection closing with nil connection
|
// TestWsConn_Close tests the connection closing with nil connection
|
||||||
func TestWsConn_Close(t *testing.T) {
|
func TestWsConn_Close(t *testing.T) {
|
||||||
wsConn := NewWsConnection(nil)
|
wsConn := NewWsConnection(nil, semver.MustParse("0.12.10"))
|
||||||
|
|
||||||
// Should handle nil connection gracefully
|
// Should handle nil connection gracefully
|
||||||
assert.NotPanics(t, func() {
|
assert.NotPanics(t, func() {
|
||||||
@@ -65,7 +65,7 @@ func TestWsConn_Close(t *testing.T) {
|
|||||||
|
|
||||||
// TestWsConn_SendMessage_CBOR tests CBOR encoding in sendMessage
|
// TestWsConn_SendMessage_CBOR tests CBOR encoding in sendMessage
|
||||||
func TestWsConn_SendMessage_CBOR(t *testing.T) {
|
func TestWsConn_SendMessage_CBOR(t *testing.T) {
|
||||||
wsConn := NewWsConnection(nil)
|
wsConn := NewWsConnection(nil, semver.MustParse("0.12.10"))
|
||||||
|
|
||||||
testData := common.HubRequest[any]{
|
testData := common.HubRequest[any]{
|
||||||
Action: common.GetData,
|
Action: common.GetData,
|
||||||
@@ -181,6 +181,17 @@ func TestCommonActions(t *testing.T) {
|
|||||||
// Test that the actions we use exist and have expected values
|
// Test that the actions we use exist and have expected values
|
||||||
assert.Equal(t, common.WebSocketAction(0), common.GetData, "GetData should be action 0")
|
assert.Equal(t, common.WebSocketAction(0), common.GetData, "GetData should be action 0")
|
||||||
assert.Equal(t, common.WebSocketAction(1), common.CheckFingerprint, "CheckFingerprint should be action 1")
|
assert.Equal(t, common.WebSocketAction(1), common.CheckFingerprint, "CheckFingerprint should be action 1")
|
||||||
|
assert.Equal(t, common.WebSocketAction(2), common.GetContainerLogs, "GetLogs should be action 2")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLogsHandler(t *testing.T) {
|
||||||
|
h := &stringResponseHandler{errorMsg: "no logs in response"}
|
||||||
|
|
||||||
|
logValue := "test logs"
|
||||||
|
resp := common.AgentResponse{String: &logValue}
|
||||||
|
err := h.Handle(resp)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, logValue, h.value)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestHandler tests that we can create a Handler
|
// TestHandler tests that we can create a Handler
|
||||||
@@ -194,7 +205,7 @@ func TestHandler(t *testing.T) {
|
|||||||
|
|
||||||
// TestWsConnChannelBehavior tests channel behavior without WebSocket connections
|
// TestWsConnChannelBehavior tests channel behavior without WebSocket connections
|
||||||
func TestWsConnChannelBehavior(t *testing.T) {
|
func TestWsConnChannelBehavior(t *testing.T) {
|
||||||
wsConn := NewWsConnection(nil)
|
wsConn := NewWsConnection(nil, semver.MustParse("0.12.10"))
|
||||||
|
|
||||||
// Test that channels are properly initialized and can be used
|
// Test that channels are properly initialized and can be used
|
||||||
select {
|
select {
|
||||||
@@ -212,11 +223,6 @@ func TestWsConnChannelBehavior(t *testing.T) {
|
|||||||
t.Error("Should be able to read from DownChan")
|
t.Error("Should be able to read from DownChan")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Response channel should be empty initially
|
// Request manager should have no pending requests initially
|
||||||
select {
|
assert.Equal(t, 0, wsConn.requestManager.GetPendingCount(), "Should have no pending requests initially")
|
||||||
case <-wsConn.responseChan:
|
|
||||||
t.Error("Response channel should be empty initially")
|
|
||||||
default:
|
|
||||||
// Expected - channel should be empty
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
11
internal/hub/ws/ws_test_helpers.go
Normal file
11
internal/hub/ws/ws_test_helpers.go
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
//go:build testing
|
||||||
|
// +build testing
|
||||||
|
|
||||||
|
package ws
|
||||||
|
|
||||||
|
// GetPendingCount returns the number of pending requests (for monitoring)
|
||||||
|
func (rm *RequestManager) GetPendingCount() int {
|
||||||
|
rm.RLock()
|
||||||
|
defer rm.RUnlock()
|
||||||
|
return len(rm.pendingReqs)
|
||||||
|
}
|
||||||
@@ -1,7 +1,6 @@
|
|||||||
package migrations
|
package migrations
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/google/uuid"
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
m "github.com/pocketbase/pocketbase/migrations"
|
m "github.com/pocketbase/pocketbase/migrations"
|
||||||
)
|
)
|
||||||
@@ -76,6 +75,7 @@ func init() {
|
|||||||
"Disk",
|
"Disk",
|
||||||
"Temperature",
|
"Temperature",
|
||||||
"Bandwidth",
|
"Bandwidth",
|
||||||
|
"GPU",
|
||||||
"LoadAvg1",
|
"LoadAvg1",
|
||||||
"LoadAvg5",
|
"LoadAvg5",
|
||||||
"LoadAvg15"
|
"LoadAvg15"
|
||||||
@@ -719,7 +719,9 @@ func init() {
|
|||||||
"type": "autodate"
|
"type": "autodate"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"indexes": [],
|
"indexes": [
|
||||||
|
"CREATE INDEX ` + "`" + `idx_systems_status` + "`" + ` ON ` + "`" + `systems` + "`" + ` (` + "`" + `status` + "`" + `)"
|
||||||
|
],
|
||||||
"system": false
|
"system": false
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -860,6 +862,294 @@ func init() {
|
|||||||
"system": false,
|
"system": false,
|
||||||
"authRule": "verified=true",
|
"authRule": "verified=true",
|
||||||
"manageRule": null
|
"manageRule": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"id": "pbc_1864144027",
|
||||||
|
"listRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
||||||
|
"viewRule": null,
|
||||||
|
"createRule": null,
|
||||||
|
"updateRule": null,
|
||||||
|
"deleteRule": null,
|
||||||
|
"name": "containers",
|
||||||
|
"type": "base",
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"autogeneratePattern": "[a-f0-9]{6}",
|
||||||
|
"hidden": false,
|
||||||
|
"id": "text3208210256",
|
||||||
|
"max": 12,
|
||||||
|
"min": 6,
|
||||||
|
"name": "id",
|
||||||
|
"pattern": "^[a-f0-9]+$",
|
||||||
|
"presentable": false,
|
||||||
|
"primaryKey": true,
|
||||||
|
"required": true,
|
||||||
|
"system": true,
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cascadeDelete": false,
|
||||||
|
"collectionId": "2hz5ncl8tizk5nx",
|
||||||
|
"hidden": false,
|
||||||
|
"id": "relation3377271179",
|
||||||
|
"maxSelect": 1,
|
||||||
|
"minSelect": 0,
|
||||||
|
"name": "system",
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "relation"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"autogeneratePattern": "",
|
||||||
|
"hidden": false,
|
||||||
|
"id": "text1579384326",
|
||||||
|
"max": 0,
|
||||||
|
"min": 0,
|
||||||
|
"name": "name",
|
||||||
|
"pattern": "",
|
||||||
|
"presentable": false,
|
||||||
|
"primaryKey": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"autogeneratePattern": "",
|
||||||
|
"hidden": false,
|
||||||
|
"id": "text2063623452",
|
||||||
|
"max": 0,
|
||||||
|
"min": 0,
|
||||||
|
"name": "status",
|
||||||
|
"pattern": "",
|
||||||
|
"presentable": false,
|
||||||
|
"primaryKey": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number3470402323",
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"name": "health",
|
||||||
|
"onlyInt": false,
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number3128971310",
|
||||||
|
"max": 100,
|
||||||
|
"min": 0,
|
||||||
|
"name": "cpu",
|
||||||
|
"onlyInt": false,
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number3933025333",
|
||||||
|
"max": null,
|
||||||
|
"min": 0,
|
||||||
|
"name": "memory",
|
||||||
|
"onlyInt": false,
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number4075427327",
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"name": "net",
|
||||||
|
"onlyInt": false,
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number3332085495",
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"name": "updated",
|
||||||
|
"onlyInt": true,
|
||||||
|
"presentable": false,
|
||||||
|
"required": true,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"autogeneratePattern": "",
|
||||||
|
"hidden": false,
|
||||||
|
"id": "text3309110367",
|
||||||
|
"max": 0,
|
||||||
|
"min": 0,
|
||||||
|
"name": "image",
|
||||||
|
"pattern": "",
|
||||||
|
"presentable": false,
|
||||||
|
"primaryKey": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "text"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"indexes": [
|
||||||
|
"CREATE INDEX ` + "`" + `idx_JxWirjdhyO` + "`" + ` ON ` + "`" + `containers` + "`" + ` (` + "`" + `updated` + "`" + `)",
|
||||||
|
"CREATE INDEX ` + "`" + `idx_r3Ja0rs102` + "`" + ` ON ` + "`" + `containers` + "`" + ` (` + "`" + `system` + "`" + `)"
|
||||||
|
],
|
||||||
|
"system": false
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"createRule": null,
|
||||||
|
"deleteRule": null,
|
||||||
|
"fields": [
|
||||||
|
{
|
||||||
|
"autogeneratePattern": "[a-z0-9]{10}",
|
||||||
|
"hidden": false,
|
||||||
|
"id": "text3208210256",
|
||||||
|
"max": 10,
|
||||||
|
"min": 6,
|
||||||
|
"name": "id",
|
||||||
|
"pattern": "^[a-z0-9]+$",
|
||||||
|
"presentable": false,
|
||||||
|
"primaryKey": true,
|
||||||
|
"required": true,
|
||||||
|
"system": true,
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"autogeneratePattern": "",
|
||||||
|
"hidden": false,
|
||||||
|
"id": "text1579384326",
|
||||||
|
"max": 0,
|
||||||
|
"min": 0,
|
||||||
|
"name": "name",
|
||||||
|
"pattern": "",
|
||||||
|
"presentable": false,
|
||||||
|
"primaryKey": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"cascadeDelete": true,
|
||||||
|
"collectionId": "2hz5ncl8tizk5nx",
|
||||||
|
"hidden": false,
|
||||||
|
"id": "relation3377271179",
|
||||||
|
"maxSelect": 1,
|
||||||
|
"minSelect": 0,
|
||||||
|
"name": "system",
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "relation"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number2063623452",
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"name": "state",
|
||||||
|
"onlyInt": true,
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number1476559580",
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"name": "sub",
|
||||||
|
"onlyInt": true,
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number3128971310",
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"name": "cpu",
|
||||||
|
"onlyInt": false,
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number1052053287",
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"name": "cpuPeak",
|
||||||
|
"onlyInt": false,
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number3933025333",
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"name": "memory",
|
||||||
|
"onlyInt": false,
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number1828797201",
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"name": "memPeak",
|
||||||
|
"onlyInt": false,
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"hidden": false,
|
||||||
|
"id": "number3332085495",
|
||||||
|
"max": null,
|
||||||
|
"min": null,
|
||||||
|
"name": "updated",
|
||||||
|
"onlyInt": false,
|
||||||
|
"presentable": false,
|
||||||
|
"required": false,
|
||||||
|
"system": false,
|
||||||
|
"type": "number"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"id": "pbc_3494996990",
|
||||||
|
"indexes": [
|
||||||
|
"CREATE INDEX ` + "`" + `idx_4Z7LuLNdQb` + "`" + ` ON ` + "`" + `systemd_services` + "`" + ` (` + "`" + `system` + "`" + `)",
|
||||||
|
"CREATE INDEX ` + "`" + `idx_pBp1fF837e` + "`" + ` ON ` + "`" + `systemd_services` + "`" + ` (` + "`" + `updated` + "`" + `)"
|
||||||
|
],
|
||||||
|
"listRule": "@request.auth.id != \"\" && system.users.id ?= @request.auth.id",
|
||||||
|
"name": "systemd_services",
|
||||||
|
"system": false,
|
||||||
|
"type": "base",
|
||||||
|
"updateRule": null,
|
||||||
|
"viewRule": null
|
||||||
}
|
}
|
||||||
]`
|
]`
|
||||||
|
|
||||||
@@ -868,31 +1158,6 @@ func init() {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get all systems that don't have fingerprint records
|
|
||||||
var systemIds []string
|
|
||||||
err = app.DB().NewQuery(`
|
|
||||||
SELECT s.id FROM systems s
|
|
||||||
LEFT JOIN fingerprints f ON s.id = f.system
|
|
||||||
WHERE f.system IS NULL
|
|
||||||
`).Column(&systemIds)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Create fingerprint records with unique UUID tokens for each system
|
|
||||||
for _, systemId := range systemIds {
|
|
||||||
token := uuid.New().String()
|
|
||||||
_, err = app.DB().NewQuery(`
|
|
||||||
INSERT INTO fingerprints (system, token)
|
|
||||||
VALUES ({:system}, {:token})
|
|
||||||
`).Bind(map[string]any{
|
|
||||||
"system": systemId,
|
|
||||||
"token": token,
|
|
||||||
}).Execute()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}, func(app core.App) error {
|
}, func(app core.App) error {
|
||||||
return nil
|
return nil
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
package migrations
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
|
||||||
m "github.com/pocketbase/pocketbase/migrations"
|
|
||||||
)
|
|
||||||
|
|
||||||
// This can be deleted after Nov 2025 or so
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
m.Register(func(app core.App) error {
|
|
||||||
app.RunInTransaction(func(txApp core.App) error {
|
|
||||||
var systemIds []string
|
|
||||||
txApp.DB().NewQuery("SELECT id FROM systems").Column(&systemIds)
|
|
||||||
|
|
||||||
for _, systemId := range systemIds {
|
|
||||||
var statRecordIds []string
|
|
||||||
txApp.DB().NewQuery("SELECT id FROM system_stats WHERE system = {:system} AND created > {:created}").Bind(map[string]any{"system": systemId, "created": "2025-09-21"}).Column(&statRecordIds)
|
|
||||||
|
|
||||||
for _, statRecordId := range statRecordIds {
|
|
||||||
statRecord, err := txApp.FindRecordById("system_stats", statRecordId)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var systemStats system.Stats
|
|
||||||
err = statRecord.UnmarshalJSONField("stats", &systemStats)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// if mem buff cache is less than total mem, we don't need to fix it
|
|
||||||
if systemStats.MemBuffCache < systemStats.Mem {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
systemStats.MemBuffCache = 0
|
|
||||||
statRecord.Set("stats", systemStats)
|
|
||||||
err = txApp.SaveNoValidate(statRecord)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}, func(app core.App) error {
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -177,6 +177,10 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
stats := &tempStats
|
stats := &tempStats
|
||||||
// necessary because uint8 is not big enough for the sum
|
// necessary because uint8 is not big enough for the sum
|
||||||
batterySum := 0
|
batterySum := 0
|
||||||
|
// accumulate per-core usage across records
|
||||||
|
var cpuCoresSums []uint64
|
||||||
|
// accumulate cpu breakdown [user, system, iowait, steal, idle]
|
||||||
|
var cpuBreakdownSums []float64
|
||||||
|
|
||||||
count := float64(len(records))
|
count := float64(len(records))
|
||||||
tempCount := float64(0)
|
tempCount := float64(0)
|
||||||
@@ -194,6 +198,15 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
}
|
}
|
||||||
|
|
||||||
sum.Cpu += stats.Cpu
|
sum.Cpu += stats.Cpu
|
||||||
|
// accumulate cpu time breakdowns if present
|
||||||
|
if stats.CpuBreakdown != nil {
|
||||||
|
if len(cpuBreakdownSums) < len(stats.CpuBreakdown) {
|
||||||
|
cpuBreakdownSums = append(cpuBreakdownSums, make([]float64, len(stats.CpuBreakdown)-len(cpuBreakdownSums))...)
|
||||||
|
}
|
||||||
|
for i, v := range stats.CpuBreakdown {
|
||||||
|
cpuBreakdownSums[i] += v
|
||||||
|
}
|
||||||
|
}
|
||||||
sum.Mem += stats.Mem
|
sum.Mem += stats.Mem
|
||||||
sum.MemUsed += stats.MemUsed
|
sum.MemUsed += stats.MemUsed
|
||||||
sum.MemPct += stats.MemPct
|
sum.MemPct += stats.MemPct
|
||||||
@@ -213,8 +226,21 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
sum.LoadAvg[2] += stats.LoadAvg[2]
|
sum.LoadAvg[2] += stats.LoadAvg[2]
|
||||||
sum.Bandwidth[0] += stats.Bandwidth[0]
|
sum.Bandwidth[0] += stats.Bandwidth[0]
|
||||||
sum.Bandwidth[1] += stats.Bandwidth[1]
|
sum.Bandwidth[1] += stats.Bandwidth[1]
|
||||||
|
sum.DiskIO[0] += stats.DiskIO[0]
|
||||||
|
sum.DiskIO[1] += stats.DiskIO[1]
|
||||||
batterySum += int(stats.Battery[0])
|
batterySum += int(stats.Battery[0])
|
||||||
sum.Battery[1] = stats.Battery[1]
|
sum.Battery[1] = stats.Battery[1]
|
||||||
|
|
||||||
|
// accumulate per-core usage if present
|
||||||
|
if stats.CpuCoresUsage != nil {
|
||||||
|
if len(cpuCoresSums) < len(stats.CpuCoresUsage) {
|
||||||
|
// extend slices to accommodate core count
|
||||||
|
cpuCoresSums = append(cpuCoresSums, make([]uint64, len(stats.CpuCoresUsage)-len(cpuCoresSums))...)
|
||||||
|
}
|
||||||
|
for i, v := range stats.CpuCoresUsage {
|
||||||
|
cpuCoresSums[i] += uint64(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
// Set peak values
|
// Set peak values
|
||||||
sum.MaxCpu = max(sum.MaxCpu, stats.MaxCpu, stats.Cpu)
|
sum.MaxCpu = max(sum.MaxCpu, stats.MaxCpu, stats.Cpu)
|
||||||
sum.MaxMem = max(sum.MaxMem, stats.MaxMem, stats.MemUsed)
|
sum.MaxMem = max(sum.MaxMem, stats.MaxMem, stats.MemUsed)
|
||||||
@@ -224,6 +250,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
sum.MaxDiskWritePs = max(sum.MaxDiskWritePs, stats.MaxDiskWritePs, stats.DiskWritePs)
|
sum.MaxDiskWritePs = max(sum.MaxDiskWritePs, stats.MaxDiskWritePs, stats.DiskWritePs)
|
||||||
sum.MaxBandwidth[0] = max(sum.MaxBandwidth[0], stats.MaxBandwidth[0], stats.Bandwidth[0])
|
sum.MaxBandwidth[0] = max(sum.MaxBandwidth[0], stats.MaxBandwidth[0], stats.Bandwidth[0])
|
||||||
sum.MaxBandwidth[1] = max(sum.MaxBandwidth[1], stats.MaxBandwidth[1], stats.Bandwidth[1])
|
sum.MaxBandwidth[1] = max(sum.MaxBandwidth[1], stats.MaxBandwidth[1], stats.Bandwidth[1])
|
||||||
|
sum.MaxDiskIO[0] = max(sum.MaxDiskIO[0], stats.MaxDiskIO[0], stats.DiskIO[0])
|
||||||
|
sum.MaxDiskIO[1] = max(sum.MaxDiskIO[1], stats.MaxDiskIO[1], stats.DiskIO[1])
|
||||||
|
|
||||||
// Accumulate network interfaces
|
// Accumulate network interfaces
|
||||||
if sum.NetworkInterfaces == nil {
|
if sum.NetworkInterfaces == nil {
|
||||||
@@ -265,6 +293,10 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
fs.DiskReadPs += value.DiskReadPs
|
fs.DiskReadPs += value.DiskReadPs
|
||||||
fs.MaxDiskReadPS = max(fs.MaxDiskReadPS, value.MaxDiskReadPS, value.DiskReadPs)
|
fs.MaxDiskReadPS = max(fs.MaxDiskReadPS, value.MaxDiskReadPS, value.DiskReadPs)
|
||||||
fs.MaxDiskWritePS = max(fs.MaxDiskWritePS, value.MaxDiskWritePS, value.DiskWritePs)
|
fs.MaxDiskWritePS = max(fs.MaxDiskWritePS, value.MaxDiskWritePS, value.DiskWritePs)
|
||||||
|
fs.DiskReadBytes += value.DiskReadBytes
|
||||||
|
fs.DiskWriteBytes += value.DiskWriteBytes
|
||||||
|
fs.MaxDiskReadBytes = max(fs.MaxDiskReadBytes, value.MaxDiskReadBytes, value.DiskReadBytes)
|
||||||
|
fs.MaxDiskWriteBytes = max(fs.MaxDiskWriteBytes, value.MaxDiskWriteBytes, value.DiskWriteBytes)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -314,6 +346,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
sum.DiskPct = twoDecimals(sum.DiskPct / count)
|
sum.DiskPct = twoDecimals(sum.DiskPct / count)
|
||||||
sum.DiskReadPs = twoDecimals(sum.DiskReadPs / count)
|
sum.DiskReadPs = twoDecimals(sum.DiskReadPs / count)
|
||||||
sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
|
sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
|
||||||
|
sum.DiskIO[0] = sum.DiskIO[0] / uint64(count)
|
||||||
|
sum.DiskIO[1] = sum.DiskIO[1] / uint64(count)
|
||||||
sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
|
sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
|
||||||
sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
|
sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
|
||||||
sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
|
sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
|
||||||
@@ -350,6 +384,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
fs.DiskUsed = twoDecimals(fs.DiskUsed / count)
|
fs.DiskUsed = twoDecimals(fs.DiskUsed / count)
|
||||||
fs.DiskWritePs = twoDecimals(fs.DiskWritePs / count)
|
fs.DiskWritePs = twoDecimals(fs.DiskWritePs / count)
|
||||||
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
|
fs.DiskReadPs = twoDecimals(fs.DiskReadPs / count)
|
||||||
|
fs.DiskReadBytes = fs.DiskReadBytes / uint64(count)
|
||||||
|
fs.DiskWriteBytes = fs.DiskWriteBytes / uint64(count)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -373,6 +409,25 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
|
|||||||
sum.GPUData[id] = gpu
|
sum.GPUData[id] = gpu
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Average per-core usage
|
||||||
|
if len(cpuCoresSums) > 0 {
|
||||||
|
avg := make(system.Uint8Slice, len(cpuCoresSums))
|
||||||
|
for i := range cpuCoresSums {
|
||||||
|
v := math.Round(float64(cpuCoresSums[i]) / count)
|
||||||
|
avg[i] = uint8(v)
|
||||||
|
}
|
||||||
|
sum.CpuCoresUsage = avg
|
||||||
|
}
|
||||||
|
|
||||||
|
// Average CPU breakdown
|
||||||
|
if len(cpuBreakdownSums) > 0 {
|
||||||
|
avg := make([]float64, len(cpuBreakdownSums))
|
||||||
|
for i := range cpuBreakdownSums {
|
||||||
|
avg[i] = twoDecimals(cpuBreakdownSums[i] / count)
|
||||||
|
}
|
||||||
|
sum.CpuBreakdown = avg
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return sum
|
return sum
|
||||||
@@ -431,6 +486,14 @@ func (rm *RecordManager) DeleteOldRecords() {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
err = deleteOldContainerRecords(txApp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = deleteOldSystemdServiceRecords(txApp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
err = deleteOldAlertsHistory(txApp, 200, 250)
|
err = deleteOldAlertsHistory(txApp, 200, 250)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -500,6 +563,34 @@ func deleteOldSystemStats(app core.App) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Deletes systemd service records that haven't been updated in the last 20 minutes
|
||||||
|
func deleteOldSystemdServiceRecords(app core.App) error {
|
||||||
|
now := time.Now().UTC()
|
||||||
|
twentyMinutesAgo := now.Add(-20 * time.Minute)
|
||||||
|
|
||||||
|
// Delete systemd service records where updated < twentyMinutesAgo
|
||||||
|
_, err := app.DB().NewQuery("DELETE FROM systemd_services WHERE updated < {:updated}").Bind(dbx.Params{"updated": twentyMinutesAgo.UnixMilli()}).Execute()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to delete old systemd service records: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deletes container records that haven't been updated in the last 10 minutes
|
||||||
|
func deleteOldContainerRecords(app core.App) error {
|
||||||
|
now := time.Now().UTC()
|
||||||
|
tenMinutesAgo := now.Add(-10 * time.Minute)
|
||||||
|
|
||||||
|
// Delete container records where updated < tenMinutesAgo
|
||||||
|
_, err := app.DB().NewQuery("DELETE FROM containers WHERE updated < {:updated}").Bind(dbx.Params{"updated": tenMinutesAgo.UnixMilli()}).Execute()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to delete old container records: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
/* Round float to two decimals */
|
/* Round float to two decimals */
|
||||||
func twoDecimals(value float64) float64 {
|
func twoDecimals(value float64) float64 {
|
||||||
return math.Round(value*100) / 100
|
return math.Round(value*100) / 100
|
||||||
|
|||||||
@@ -351,6 +351,83 @@ func TestDeleteOldAlertsHistoryEdgeCases(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestDeleteOldSystemdServiceRecords tests systemd service cleanup via DeleteOldRecords
|
||||||
|
func TestDeleteOldSystemdServiceRecords(t *testing.T) {
|
||||||
|
hub, err := tests.NewTestHub(t.TempDir())
|
||||||
|
require.NoError(t, err)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
rm := records.NewRecordManager(hub)
|
||||||
|
|
||||||
|
// Create test user and system
|
||||||
|
user, err := tests.CreateUser(hub, "test@example.com", "testtesttest")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
system, err := tests.CreateRecord(hub, "systems", map[string]any{
|
||||||
|
"name": "test-system",
|
||||||
|
"host": "localhost",
|
||||||
|
"port": "45876",
|
||||||
|
"status": "up",
|
||||||
|
"users": []string{user.Id},
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
now := time.Now().UTC()
|
||||||
|
|
||||||
|
// Create old systemd service records that should be deleted (older than 20 minutes)
|
||||||
|
oldRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||||
|
"system": system.Id,
|
||||||
|
"name": "nginx.service",
|
||||||
|
"state": 0, // Active
|
||||||
|
"sub": 1, // Running
|
||||||
|
"cpu": 5.0,
|
||||||
|
"cpuPeak": 10.0,
|
||||||
|
"memory": 1024000,
|
||||||
|
"memPeak": 2048000,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
// Set updated time to 25 minutes ago (should be deleted)
|
||||||
|
oldRecord.SetRaw("updated", now.Add(-25*time.Minute).UnixMilli())
|
||||||
|
err = hub.SaveNoValidate(oldRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create recent systemd service record that should be kept (within 20 minutes)
|
||||||
|
recentRecord, err := tests.CreateRecord(hub, "systemd_services", map[string]any{
|
||||||
|
"system": system.Id,
|
||||||
|
"name": "apache.service",
|
||||||
|
"state": 1, // Inactive
|
||||||
|
"sub": 0, // Dead
|
||||||
|
"cpu": 2.0,
|
||||||
|
"cpuPeak": 3.0,
|
||||||
|
"memory": 512000,
|
||||||
|
"memPeak": 1024000,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
// Set updated time to 10 minutes ago (should be kept)
|
||||||
|
recentRecord.SetRaw("updated", now.Add(-10*time.Minute).UnixMilli())
|
||||||
|
err = hub.SaveNoValidate(recentRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Count records before deletion
|
||||||
|
countBefore, err := hub.CountRecords("systemd_services")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, int64(2), countBefore, "Should have 2 systemd service records initially")
|
||||||
|
|
||||||
|
// Run deletion via RecordManager
|
||||||
|
rm.DeleteOldRecords()
|
||||||
|
|
||||||
|
// Count records after deletion
|
||||||
|
countAfter, err := hub.CountRecords("systemd_services")
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, int64(1), countAfter, "Should have 1 systemd service record after deletion")
|
||||||
|
|
||||||
|
// Verify the correct record was kept
|
||||||
|
remainingRecords, err := hub.FindRecordsByFilter("systemd_services", "", "", 10, 0, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Len(t, remainingRecords, 1, "Should have exactly 1 record remaining")
|
||||||
|
assert.Equal(t, "apache.service", remainingRecords[0].Get("name"), "The recent record should be kept")
|
||||||
|
}
|
||||||
|
|
||||||
// TestRecordManagerCreation tests RecordManager creation
|
// TestRecordManagerCreation tests RecordManager creation
|
||||||
func TestRecordManagerCreation(t *testing.T) {
|
func TestRecordManagerCreation(t *testing.T) {
|
||||||
hub, err := tests.NewTestHub(t.TempDir())
|
hub, err := tests.NewTestHub(t.TempDir())
|
||||||
|
|||||||
@@ -1,41 +1,83 @@
|
|||||||
{
|
{
|
||||||
"$schema": "https://biomejs.dev/schemas/2.2.3/schema.json",
|
"$schema": "https://biomejs.dev/schemas/2.2.3/schema.json",
|
||||||
"vcs": {
|
"vcs": {
|
||||||
"enabled": false,
|
"enabled": true,
|
||||||
"clientKind": "git",
|
"clientKind": "git",
|
||||||
"useIgnoreFile": false
|
"useIgnoreFile": true,
|
||||||
},
|
"defaultBranch": "main"
|
||||||
"files": {
|
|
||||||
"ignoreUnknown": false
|
|
||||||
},
|
},
|
||||||
"formatter": {
|
"formatter": {
|
||||||
"enabled": true,
|
"enabled": true,
|
||||||
"indentStyle": "tab",
|
"indentStyle": "tab",
|
||||||
"indentWidth": 2,
|
"lineWidth": 120,
|
||||||
"lineWidth": 120
|
"formatWithErrors": true
|
||||||
},
|
},
|
||||||
|
"assist": { "actions": { "source": { "organizeImports": "on" } } },
|
||||||
"linter": {
|
"linter": {
|
||||||
"enabled": true,
|
"enabled": true,
|
||||||
"rules": {
|
"rules": {
|
||||||
"recommended": true,
|
"recommended": true,
|
||||||
|
"complexity": {
|
||||||
|
"noUselessStringConcat": "error",
|
||||||
|
"noUselessUndefinedInitialization": "error",
|
||||||
|
"noVoid": "error",
|
||||||
|
"useDateNow": "error"
|
||||||
|
},
|
||||||
"correctness": {
|
"correctness": {
|
||||||
"useUniqueElementIds": "off"
|
"noConstantMathMinMaxClamp": "error",
|
||||||
|
"noUndeclaredVariables": "error",
|
||||||
|
"noUnusedImports": "error",
|
||||||
|
"noUnusedFunctionParameters": "error",
|
||||||
|
"noUnusedPrivateClassMembers": "error",
|
||||||
|
"useExhaustiveDependencies": {
|
||||||
|
"level": "error",
|
||||||
|
"options": {
|
||||||
|
"reportUnnecessaryDependencies": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"noUnusedVariables": "error"
|
||||||
|
},
|
||||||
|
"style": {
|
||||||
|
"noParameterProperties": "error",
|
||||||
|
"noYodaExpression": "error",
|
||||||
|
"useConsistentBuiltinInstantiation": "error",
|
||||||
|
"useFragmentSyntax": "error",
|
||||||
|
"useShorthandAssign": "error",
|
||||||
|
"useArrayLiterals": "error"
|
||||||
|
},
|
||||||
|
"suspicious": {
|
||||||
|
"useAwait": "error",
|
||||||
|
"noEvolvingTypes": "error"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"javascript": {
|
"javascript": {
|
||||||
"formatter": {
|
"formatter": {
|
||||||
"quoteStyle": "double",
|
"quoteStyle": "double",
|
||||||
"semicolons": "asNeeded",
|
"trailingCommas": "es5",
|
||||||
"trailingCommas": "es5"
|
"semicolons": "asNeeded"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"assist": {
|
"overrides": [
|
||||||
"enabled": true,
|
{
|
||||||
"actions": {
|
"includes": ["**/*.jsx", "**/*.tsx"],
|
||||||
"source": {
|
"linter": {
|
||||||
"organizeImports": "on"
|
"rules": {
|
||||||
|
"style": {
|
||||||
|
"noParameterAssign": "error"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"includes": ["**/*.ts", "**/*.tsx"],
|
||||||
|
"linter": {
|
||||||
|
"rules": {
|
||||||
|
"correctness": {
|
||||||
|
"noUnusedVariables": "off"
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
1008
internal/site/bun.lock
Normal file
1008
internal/site/bun.lock
Normal file
File diff suppressed because it is too large
Load Diff
Binary file not shown.
@@ -3,8 +3,9 @@
|
|||||||
<head>
|
<head>
|
||||||
<meta charset="UTF-8" />
|
<meta charset="UTF-8" />
|
||||||
<link rel="manifest" href="./static/manifest.json" />
|
<link rel="manifest" href="./static/manifest.json" />
|
||||||
<link rel="icon" type="image/svg+xml" href="./static/favicon.svg" />
|
<link rel="icon" type="image/svg+xml" href="./static/icon.svg" />
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0,maximum-scale=1.0, user-scalable=no, viewport-fit=cover" />
|
<meta name="viewport" content="width=device-width, initial-scale=1.0,maximum-scale=1.0, user-scalable=no, viewport-fit=cover" />
|
||||||
|
<meta name="robots" content="noindex, nofollow" />
|
||||||
<title>Beszel</title>
|
<title>Beszel</title>
|
||||||
<script>
|
<script>
|
||||||
globalThis.BESZEL = {
|
globalThis.BESZEL = {
|
||||||
|
|||||||
@@ -11,10 +11,10 @@ export default defineConfig({
|
|||||||
"es",
|
"es",
|
||||||
"fa",
|
"fa",
|
||||||
"fr",
|
"fr",
|
||||||
|
"he",
|
||||||
"hr",
|
"hr",
|
||||||
"hu",
|
"hu",
|
||||||
"it",
|
"it",
|
||||||
"is",
|
|
||||||
"ja",
|
"ja",
|
||||||
"ko",
|
"ko",
|
||||||
"nl",
|
"nl",
|
||||||
|
|||||||
767
internal/site/package-lock.json
generated
767
internal/site/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -1,7 +1,7 @@
|
|||||||
{
|
{
|
||||||
"name": "beszel",
|
"name": "beszel",
|
||||||
"private": true,
|
"private": true,
|
||||||
"version": "0.12.12",
|
"version": "0.16.1",
|
||||||
"type": "module",
|
"type": "module",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"dev": "vite --host",
|
"dev": "vite --host",
|
||||||
@@ -49,11 +49,12 @@
|
|||||||
"react": "^19.1.1",
|
"react": "^19.1.1",
|
||||||
"react-dom": "^19.1.1",
|
"react-dom": "^19.1.1",
|
||||||
"recharts": "^2.15.4",
|
"recharts": "^2.15.4",
|
||||||
|
"shiki": "^3.13.0",
|
||||||
"tailwind-merge": "^3.3.1",
|
"tailwind-merge": "^3.3.1",
|
||||||
"valibot": "^0.42.1"
|
"valibot": "^0.42.1"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@biomejs/biome": "2.2.3",
|
"@biomejs/biome": "2.2.4",
|
||||||
"@lingui/cli": "^5.4.1",
|
"@lingui/cli": "^5.4.1",
|
||||||
"@lingui/swc-plugin": "^5.6.1",
|
"@lingui/swc-plugin": "^5.6.1",
|
||||||
"@lingui/vite-plugin": "^5.4.1",
|
"@lingui/vite-plugin": "^5.4.1",
|
||||||
@@ -76,4 +77,4 @@
|
|||||||
"optionalDependencies": {
|
"optionalDependencies": {
|
||||||
"@esbuild/linux-arm64": "^0.21.5"
|
"@esbuild/linux-arm64": "^0.21.5"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1 +0,0 @@
|
|||||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 56 70" fill="#22c55e"><path d="M35 70H0V0h35q4.4 0 8.2 1.7a21.4 21.4 0 0 1 6.6 4.5q2.9 2.8 4.5 6.6Q56 16.7 56 21a15.4 15.4 0 0 1-.3 3.2 17.6 17.6 0 0 1-.2.8 19.4 19.4 0 0 1-1.5 4 17 17 0 0 1-2.4 3.4 13.5 13.5 0 0 1-2.6 2.3 12.5 12.5 0 0 1-.4.3q1.7 1 3 2.5Q53 39.1 54 41a18.3 18.3 0 0 1 1.5 4 17.4 17.4 0 0 1 .5 3 15.3 15.3 0 0 1 0 1q0 4.4-1.7 8.2a21.4 21.4 0 0 1-4.5 6.6q-2.8 2.9-6.6 4.6Q39.4 70 35 70ZM14 14v14h21a7 7 0 0 0 2.3-.3 6.6 6.6 0 0 0 .4-.2Q39 27 40 26a6.9 6.9 0 0 0 1.5-2.2q.5-1.3.5-2.8a7 7 0 0 0-.4-2.3 6.6 6.6 0 0 0-.1-.4Q40.9 17 40 16a7 7 0 0 0-2.3-1.4 6.9 6.9 0 0 0-2.5-.6 7.9 7.9 0 0 0-.2 0H14Zm0 28v14h21a7 7 0 0 0 2.3-.4 6.6 6.6 0 0 0 .4-.1Q39 54.9 40 54a7 7 0 0 0 1.5-2.2 6.9 6.9 0 0 0 .5-2.6 7.9 7.9 0 0 0 0-.2 7 7 0 0 0-.4-2.3 6.6 6.6 0 0 0-.1-.4Q40.9 45 40 44a7 7 0 0 0-2.3-1.5 6.9 6.9 0 0 0-2.5-.6 7.9 7.9 0 0 0-.2 0H14Z"/></svg>
|
|
||||||
|
Before Width: | Height: | Size: 906 B |
@@ -1 +0,0 @@
|
|||||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 56 70" fill="#dc2626"><path d="M35 70H0V0h35q4.4 0 8.2 1.7a21.4 21.4 0 0 1 6.6 4.5q2.9 2.8 4.5 6.6Q56 16.7 56 21a15.4 15.4 0 0 1-.3 3.2 17.6 17.6 0 0 1-.2.8 19.4 19.4 0 0 1-1.5 4 17 17 0 0 1-2.4 3.4 13.5 13.5 0 0 1-2.6 2.3 12.5 12.5 0 0 1-.4.3q1.7 1 3 2.5Q53 39.1 54 41a18.3 18.3 0 0 1 1.5 4 17.4 17.4 0 0 1 .5 3 15.3 15.3 0 0 1 0 1q0 4.4-1.7 8.2a21.4 21.4 0 0 1-4.5 6.6q-2.8 2.9-6.6 4.6Q39.4 70 35 70ZM14 14v14h21a7 7 0 0 0 2.3-.3 6.6 6.6 0 0 0 .4-.2Q39 27 40 26a6.9 6.9 0 0 0 1.5-2.2q.5-1.3.5-2.8a7 7 0 0 0-.4-2.3 6.6 6.6 0 0 0-.1-.4Q40.9 17 40 16a7 7 0 0 0-2.3-1.4 6.9 6.9 0 0 0-2.5-.6 7.9 7.9 0 0 0-.2 0H14Zm0 28v14h21a7 7 0 0 0 2.3-.4 6.6 6.6 0 0 0 .4-.1Q39 54.9 40 54a7 7 0 0 0 1.5-2.2 6.9 6.9 0 0 0 .5-2.6 7.9 7.9 0 0 0 0-.2 7 7 0 0 0-.4-2.3 6.6 6.6 0 0 0-.1-.4Q40.9 45 40 44a7 7 0 0 0-2.3-1.5 6.9 6.9 0 0 0-2.5-.6 7.9 7.9 0 0 0-.2 0H14Z"/></svg>
|
|
||||||
|
Before Width: | Height: | Size: 906 B |
@@ -1 +0,0 @@
|
|||||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 56 70" fill="#888"><path d="M35 70H0V0h35q4.4 0 8.2 1.7a21.4 21.4 0 0 1 6.6 4.5q2.9 2.8 4.5 6.6Q56 16.7 56 21a15.4 15.4 0 0 1-.3 3.2 17.6 17.6 0 0 1-.2.8 19.4 19.4 0 0 1-1.5 4 17 17 0 0 1-2.4 3.4 13.5 13.5 0 0 1-2.6 2.3 12.5 12.5 0 0 1-.4.3q1.7 1 3 2.5Q53 39.1 54 41a18.3 18.3 0 0 1 1.5 4 17.4 17.4 0 0 1 .5 3 15.3 15.3 0 0 1 0 1q0 4.4-1.7 8.2a21.4 21.4 0 0 1-4.5 6.6q-2.8 2.9-6.6 4.6Q39.4 70 35 70ZM14 14v14h21a7 7 0 0 0 2.3-.3 6.6 6.6 0 0 0 .4-.2Q39 27 40 26a6.9 6.9 0 0 0 1.5-2.2q.5-1.3.5-2.8a7 7 0 0 0-.4-2.3 6.6 6.6 0 0 0-.1-.4Q40.9 17 40 16a7 7 0 0 0-2.3-1.4 6.9 6.9 0 0 0-2.5-.6 7.9 7.9 0 0 0-.2 0H14Zm0 28v14h21a7 7 0 0 0 2.3-.4 6.6 6.6 0 0 0 .4-.1Q39 54.9 40 54a7 7 0 0 0 1.5-2.2 6.9 6.9 0 0 0 .5-2.6 7.9 7.9 0 0 0 0-.2 7 7 0 0 0-.4-2.3 6.6 6.6 0 0 0-.1-.4Q40.9 45 40 44a7 7 0 0 0-2.3-1.5 6.9 6.9 0 0 0-2.5-.6 7.9 7.9 0 0 0-.2 0H14Z"/></svg>
|
|
||||||
|
Before Width: | Height: | Size: 903 B |
9
internal/site/public/static/icon.svg
Normal file
9
internal/site/public/static/icon.svg
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 56 70">
|
||||||
|
<defs>
|
||||||
|
<linearGradient id="gradient" x1="0%" y1="20%" x2="100%" y2="120%">
|
||||||
|
<stop offset="0%" style="stop-color:#747bff"/>
|
||||||
|
<stop offset="100%" style="stop-color:#24eb5c"/>
|
||||||
|
</linearGradient>
|
||||||
|
</defs>
|
||||||
|
<path fill="url(#gradient)" d="M35 70H0V0h35q4.4 0 8.2 1.7a21.4 21.4 0 0 1 6.6 4.5q2.9 2.8 4.5 6.6Q56 16.7 56 21a15.4 15.4 0 0 1-.3 3.2 17.6 17.6 0 0 1-.2.8 19.4 19.4 0 0 1-1.5 4 17 17 0 0 1-2.4 3.4 13.5 13.5 0 0 1-2.6 2.3 12.5 12.5 0 0 1-.4.3q1.7 1 3 2.5Q53 39.1 54 41a18.3 18.3 0 0 1 1.5 4 17.4 17.4 0 0 1 .5 3 15.3 15.3 0 0 1 0 1q0 4.4-1.7 8.2a21.4 21.4 0 0 1-4.5 6.6q-2.8 2.9-6.6 4.6Q39.4 70 35 70ZM14 14v14h21a7 7 0 0 0 2.3-.3 6.6 6.6 0 0 0 .4-.2Q39 27 40 26a6.9 6.9 0 0 0 1.5-2.2q.5-1.3.5-2.8a7 7 0 0 0-.4-2.3 6.6 6.6 0 0 0-.1-.4Q40.9 17 40 16a7 7 0 0 0-2.3-1.4 6.9 6.9 0 0 0-2.5-.6 7.9 7.9 0 0 0-.2 0H14Zm0 28v14h21a7 7 0 0 0 2.3-.4 6.6 6.6 0 0 0 .4-.1Q39 54.9 40 54a7 7 0 0 0 1.5-2.2 6.9 6.9 0 0 0 .5-2.6 7.9 7.9 0 0 0 0-.2 7 7 0 0 0-.4-2.3 6.6 6.6 0 0 0-.1-.4Q40.9 45 40 44a7 7 0 0 0-2.3-1.5 6.9 6.9 0 0 0-2.5-.6 7.9 7.9 0 0 0-.2 0H14Z"/>
|
||||||
|
</svg>
|
||||||
|
After Width: | Height: | Size: 1.1 KiB |
85
internal/site/src/components/active-alerts.tsx
Normal file
85
internal/site/src/components/active-alerts.tsx
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
import { alertInfo } from "@/lib/alerts"
|
||||||
|
import { $alerts, $allSystemsById } from "@/lib/stores"
|
||||||
|
import type { AlertRecord } from "@/types"
|
||||||
|
import { Plural, Trans } from "@lingui/react/macro"
|
||||||
|
import { useStore } from "@nanostores/react"
|
||||||
|
import { getPagePath } from "@nanostores/router"
|
||||||
|
import { useMemo } from "react"
|
||||||
|
import { $router, Link } from "./router"
|
||||||
|
import { Alert, AlertTitle, AlertDescription } from "./ui/alert"
|
||||||
|
import { Card, CardHeader, CardTitle, CardContent } from "./ui/card"
|
||||||
|
|
||||||
|
export const ActiveAlerts = () => {
|
||||||
|
const alerts = useStore($alerts)
|
||||||
|
const systems = useStore($allSystemsById)
|
||||||
|
|
||||||
|
const { activeAlerts, alertsKey } = useMemo(() => {
|
||||||
|
const activeAlerts: AlertRecord[] = []
|
||||||
|
// key to prevent re-rendering if alerts change but active alerts didn't
|
||||||
|
const alertsKey: string[] = []
|
||||||
|
|
||||||
|
for (const systemId of Object.keys(alerts)) {
|
||||||
|
for (const alert of alerts[systemId].values()) {
|
||||||
|
if (alert.triggered && alert.name in alertInfo) {
|
||||||
|
activeAlerts.push(alert)
|
||||||
|
alertsKey.push(`${alert.system}${alert.value}${alert.min}`)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return { activeAlerts, alertsKey }
|
||||||
|
}, [alerts])
|
||||||
|
|
||||||
|
// biome-ignore lint/correctness/useExhaustiveDependencies: alertsKey is inclusive
|
||||||
|
return useMemo(() => {
|
||||||
|
if (activeAlerts.length === 0) {
|
||||||
|
return null
|
||||||
|
}
|
||||||
|
return (
|
||||||
|
<Card>
|
||||||
|
<CardHeader className="pb-4 px-2 sm:px-6 max-sm:pt-5 max-sm:pb-1">
|
||||||
|
<div className="px-2 sm:px-1">
|
||||||
|
<CardTitle>
|
||||||
|
<Trans>Active Alerts</Trans>
|
||||||
|
</CardTitle>
|
||||||
|
</div>
|
||||||
|
</CardHeader>
|
||||||
|
<CardContent className="max-sm:p-2">
|
||||||
|
{activeAlerts.length > 0 && (
|
||||||
|
<div className="grid sm:grid-cols-2 lg:grid-cols-3 2xl:grid-cols-4 gap-3">
|
||||||
|
{activeAlerts.map((alert) => {
|
||||||
|
const info = alertInfo[alert.name as keyof typeof alertInfo]
|
||||||
|
return (
|
||||||
|
<Alert
|
||||||
|
key={alert.id}
|
||||||
|
className="hover:-translate-y-px duration-200 bg-transparent border-foreground/10 hover:shadow-md shadow-black/5"
|
||||||
|
>
|
||||||
|
<info.icon className="h-4 w-4" />
|
||||||
|
<AlertTitle>
|
||||||
|
{systems[alert.system]?.name} {info.name().toLowerCase().replace("cpu", "CPU")}
|
||||||
|
</AlertTitle>
|
||||||
|
<AlertDescription>
|
||||||
|
{alert.name === "Status" ? (
|
||||||
|
<Trans>Connection is down</Trans>
|
||||||
|
) : (
|
||||||
|
<Trans>
|
||||||
|
Exceeds {alert.value}
|
||||||
|
{info.unit} in last <Plural value={alert.min} one="# minute" other="# minutes" />
|
||||||
|
</Trans>
|
||||||
|
)}
|
||||||
|
</AlertDescription>
|
||||||
|
<Link
|
||||||
|
href={getPagePath($router, "system", { id: systems[alert.system]?.id })}
|
||||||
|
className="absolute inset-0 w-full h-full"
|
||||||
|
aria-label="View system"
|
||||||
|
></Link>
|
||||||
|
</Alert>
|
||||||
|
)
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</CardContent>
|
||||||
|
</Card>
|
||||||
|
)
|
||||||
|
}, [alertsKey.join("")])
|
||||||
|
}
|
||||||
@@ -26,7 +26,7 @@ export default memo(function AlertsButton({ system }: { system: SystemRecord })
|
|||||||
/>
|
/>
|
||||||
</Button>
|
</Button>
|
||||||
</SheetTrigger>
|
</SheetTrigger>
|
||||||
<SheetContent className="max-h-full overflow-auto w-145 !max-w-full p-4 sm:p-6">
|
<SheetContent className="max-h-full overflow-auto w-150 !max-w-full p-4 sm:p-6">
|
||||||
{opened && <AlertDialogContent system={system} />}
|
{opened && <AlertDialogContent system={system} />}
|
||||||
</SheetContent>
|
</SheetContent>
|
||||||
</Sheet>
|
</Sheet>
|
||||||
|
|||||||
@@ -11,12 +11,14 @@ import {
|
|||||||
import { chartMargin, cn, formatShortDate } from "@/lib/utils"
|
import { chartMargin, cn, formatShortDate } from "@/lib/utils"
|
||||||
import type { ChartData, SystemStatsRecord } from "@/types"
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
import { useYAxisWidth } from "./hooks"
|
import { useYAxisWidth } from "./hooks"
|
||||||
|
import { AxisDomain } from "recharts/types/util/types"
|
||||||
|
|
||||||
export type DataPoint = {
|
export type DataPoint = {
|
||||||
label: string
|
label: string
|
||||||
dataKey: (data: SystemStatsRecord) => number | undefined
|
dataKey: (data: SystemStatsRecord) => number | undefined
|
||||||
color: number | string
|
color: number | string
|
||||||
opacity: number
|
opacity: number
|
||||||
|
stackId?: string | number
|
||||||
}
|
}
|
||||||
|
|
||||||
export default function AreaChartDefault({
|
export default function AreaChartDefault({
|
||||||
@@ -29,19 +31,25 @@ export default function AreaChartDefault({
|
|||||||
domain,
|
domain,
|
||||||
legend,
|
legend,
|
||||||
itemSorter,
|
itemSorter,
|
||||||
|
showTotal = false,
|
||||||
|
reverseStackOrder = false,
|
||||||
|
hideYAxis = false,
|
||||||
}: // logRender = false,
|
}: // logRender = false,
|
||||||
{
|
{
|
||||||
chartData: ChartData
|
chartData: ChartData
|
||||||
max?: number
|
max?: number
|
||||||
maxToggled?: boolean
|
maxToggled?: boolean
|
||||||
tickFormatter: (value: number, index: number) => string
|
tickFormatter: (value: number, index: number) => string
|
||||||
contentFormatter: ({ value, payload }: { value: number; payload: SystemStatsRecord }) => string
|
contentFormatter: ({ value, payload }: { value: number; payload: SystemStatsRecord }) => string
|
||||||
dataPoints?: DataPoint[]
|
dataPoints?: DataPoint[]
|
||||||
domain?: [number, number]
|
domain?: AxisDomain
|
||||||
legend?: boolean
|
legend?: boolean
|
||||||
itemSorter?: (a: any, b: any) => number
|
showTotal?: boolean
|
||||||
// logRender?: boolean
|
itemSorter?: (a: any, b: any) => number
|
||||||
}) {
|
reverseStackOrder?: boolean
|
||||||
|
hideYAxis?: boolean
|
||||||
|
// logRender?: boolean
|
||||||
|
}) {
|
||||||
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
const { yAxisWidth, updateYAxisWidth } = useYAxisWidth()
|
||||||
|
|
||||||
// biome-ignore lint/correctness/useExhaustiveDependencies: ignore
|
// biome-ignore lint/correctness/useExhaustiveDependencies: ignore
|
||||||
@@ -56,21 +64,29 @@ export default function AreaChartDefault({
|
|||||||
<div>
|
<div>
|
||||||
<ChartContainer
|
<ChartContainer
|
||||||
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
className={cn("h-full w-full absolute aspect-auto bg-card opacity-0 transition-opacity", {
|
||||||
"opacity-100": yAxisWidth,
|
"opacity-100": yAxisWidth || hideYAxis,
|
||||||
|
"ps-4": hideYAxis,
|
||||||
})}
|
})}
|
||||||
>
|
>
|
||||||
<AreaChart accessibilityLayer data={chartData.systemStats} margin={chartMargin}>
|
<AreaChart
|
||||||
|
reverseStackOrder={reverseStackOrder}
|
||||||
|
accessibilityLayer
|
||||||
|
data={chartData.systemStats}
|
||||||
|
margin={hideYAxis ? { ...chartMargin, left: 5 } : chartMargin}
|
||||||
|
>
|
||||||
<CartesianGrid vertical={false} />
|
<CartesianGrid vertical={false} />
|
||||||
<YAxis
|
{!hideYAxis && (
|
||||||
direction="ltr"
|
<YAxis
|
||||||
orientation={chartData.orientation}
|
direction="ltr"
|
||||||
className="tracking-tighter"
|
orientation={chartData.orientation}
|
||||||
width={yAxisWidth}
|
className="tracking-tighter"
|
||||||
domain={domain ?? [0, max ?? "auto"]}
|
width={yAxisWidth}
|
||||||
tickFormatter={(value, index) => updateYAxisWidth(tickFormatter(value, index))}
|
domain={domain ?? [0, max ?? "auto"]}
|
||||||
tickLine={false}
|
tickFormatter={(value, index) => updateYAxisWidth(tickFormatter(value, index))}
|
||||||
axisLine={false}
|
tickLine={false}
|
||||||
/>
|
axisLine={false}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
{xAxis(chartData)}
|
{xAxis(chartData)}
|
||||||
<ChartTooltip
|
<ChartTooltip
|
||||||
animationEasing="ease-out"
|
animationEasing="ease-out"
|
||||||
@@ -81,6 +97,7 @@ export default function AreaChartDefault({
|
|||||||
<ChartTooltipContent
|
<ChartTooltipContent
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
||||||
contentFormatter={contentFormatter}
|
contentFormatter={contentFormatter}
|
||||||
|
showTotal={showTotal}
|
||||||
/>
|
/>
|
||||||
}
|
}
|
||||||
/>
|
/>
|
||||||
@@ -99,13 +116,14 @@ export default function AreaChartDefault({
|
|||||||
fillOpacity={dataPoint.opacity}
|
fillOpacity={dataPoint.opacity}
|
||||||
stroke={color}
|
stroke={color}
|
||||||
isAnimationActive={false}
|
isAnimationActive={false}
|
||||||
|
stackId={dataPoint.stackId}
|
||||||
/>
|
/>
|
||||||
)
|
)
|
||||||
})}
|
})}
|
||||||
{legend && <ChartLegend content={<ChartLegendContent />} />}
|
{legend && <ChartLegend content={<ChartLegendContent reverse={reverseStackOrder} />} />}
|
||||||
</AreaChart>
|
</AreaChart>
|
||||||
</ChartContainer>
|
</ChartContainer>
|
||||||
</div>
|
</div>
|
||||||
)
|
)
|
||||||
}, [chartData.systemStats.at(-1), yAxisWidth, maxToggled])
|
}, [chartData.systemStats.at(-1), yAxisWidth, maxToggled, showTotal])
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,12 +2,27 @@ import { useStore } from "@nanostores/react"
|
|||||||
import { HistoryIcon } from "lucide-react"
|
import { HistoryIcon } from "lucide-react"
|
||||||
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
|
import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
|
||||||
import { $chartTime } from "@/lib/stores"
|
import { $chartTime } from "@/lib/stores"
|
||||||
import { chartTimeData, cn } from "@/lib/utils"
|
import { chartTimeData, cn, compareSemVer, parseSemVer } from "@/lib/utils"
|
||||||
import type { ChartTimes } from "@/types"
|
import type { ChartTimes, SemVer } from "@/types"
|
||||||
|
import { memo } from "react"
|
||||||
|
|
||||||
export default function ChartTimeSelect({ className }: { className?: string }) {
|
export default memo(function ChartTimeSelect({
|
||||||
|
className,
|
||||||
|
agentVersion,
|
||||||
|
}: {
|
||||||
|
className?: string
|
||||||
|
agentVersion: SemVer
|
||||||
|
}) {
|
||||||
const chartTime = useStore($chartTime)
|
const chartTime = useStore($chartTime)
|
||||||
|
|
||||||
|
// remove chart times that are not supported by the system agent version
|
||||||
|
const availableChartTimes = Object.entries(chartTimeData).filter(([_, { minVersion }]) => {
|
||||||
|
if (!minVersion) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return compareSemVer(agentVersion, parseSemVer(minVersion)) >= 0
|
||||||
|
})
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<Select defaultValue="1h" value={chartTime} onValueChange={(value: ChartTimes) => $chartTime.set(value)}>
|
<Select defaultValue="1h" value={chartTime} onValueChange={(value: ChartTimes) => $chartTime.set(value)}>
|
||||||
<SelectTrigger className={cn(className, "relative ps-10 pe-5")}>
|
<SelectTrigger className={cn(className, "relative ps-10 pe-5")}>
|
||||||
@@ -15,7 +30,7 @@ export default function ChartTimeSelect({ className }: { className?: string }) {
|
|||||||
<SelectValue />
|
<SelectValue />
|
||||||
</SelectTrigger>
|
</SelectTrigger>
|
||||||
<SelectContent>
|
<SelectContent>
|
||||||
{Object.entries(chartTimeData).map(([value, { label }]) => (
|
{availableChartTimes.map(([value, { label }]) => (
|
||||||
<SelectItem key={value} value={value}>
|
<SelectItem key={value} value={value}>
|
||||||
{label()}
|
{label()}
|
||||||
</SelectItem>
|
</SelectItem>
|
||||||
@@ -23,4 +38,4 @@ export default function ChartTimeSelect({ className }: { className?: string }) {
|
|||||||
</SelectContent>
|
</SelectContent>
|
||||||
</Select>
|
</Select>
|
||||||
)
|
)
|
||||||
}
|
})
|
||||||
|
|||||||
@@ -2,7 +2,7 @@
|
|||||||
import { useStore } from "@nanostores/react"
|
import { useStore } from "@nanostores/react"
|
||||||
import { memo, useMemo } from "react"
|
import { memo, useMemo } from "react"
|
||||||
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
||||||
import { type ChartConfig, ChartContainer, ChartTooltip, ChartTooltipContent, xAxis } from "@/components/ui/chart"
|
import { type ChartConfig, ChartContainer, ChartTooltip, ChartTooltipContent, pinnedAxisDomain, xAxis } from "@/components/ui/chart"
|
||||||
import { ChartType, Unit } from "@/lib/enums"
|
import { ChartType, Unit } from "@/lib/enums"
|
||||||
import { $containerFilter, $userSettings } from "@/lib/stores"
|
import { $containerFilter, $userSettings } from "@/lib/stores"
|
||||||
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
||||||
@@ -41,7 +41,7 @@ export default memo(function ContainerChart({
|
|||||||
// tick formatter
|
// tick formatter
|
||||||
if (chartType === ChartType.CPU) {
|
if (chartType === ChartType.CPU) {
|
||||||
obj.tickFormatter = (value) => {
|
obj.tickFormatter = (value) => {
|
||||||
const val = toFixedFloat(value, 2) + unit
|
const val = `${toFixedFloat(value, 2)}%`
|
||||||
return updateYAxisWidth(val)
|
return updateYAxisWidth(val)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -78,7 +78,7 @@ export default memo(function ContainerChart({
|
|||||||
return `${decimalString(value)} ${unit}`
|
return `${decimalString(value)} ${unit}`
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
obj.toolTipFormatter = (item: any) => `${decimalString(item.value)} ${unit}`
|
obj.toolTipFormatter = (item: any) => `${decimalString(item.value)}${unit}`
|
||||||
}
|
}
|
||||||
// data function
|
// data function
|
||||||
if (isNetChart) {
|
if (isNetChart) {
|
||||||
@@ -94,8 +94,11 @@ export default memo(function ContainerChart({
|
|||||||
if (!filter) {
|
if (!filter) {
|
||||||
return new Set<string>()
|
return new Set<string>()
|
||||||
}
|
}
|
||||||
const filterLower = filter.toLowerCase()
|
const filterTerms = filter.toLowerCase().split(" ").filter(term => term.length > 0)
|
||||||
return new Set(Object.keys(chartConfig).filter((key) => !key.toLowerCase().includes(filterLower)))
|
return new Set(Object.keys(chartConfig).filter((key) => {
|
||||||
|
const keyLower = key.toLowerCase()
|
||||||
|
return !filterTerms.some(term => keyLower.includes(term))
|
||||||
|
}))
|
||||||
}, [chartConfig, filter])
|
}, [chartConfig, filter])
|
||||||
|
|
||||||
// console.log('rendered at', new Date())
|
// console.log('rendered at', new Date())
|
||||||
@@ -121,6 +124,7 @@ export default memo(function ContainerChart({
|
|||||||
<CartesianGrid vertical={false} />
|
<CartesianGrid vertical={false} />
|
||||||
<YAxis
|
<YAxis
|
||||||
direction="ltr"
|
direction="ltr"
|
||||||
|
domain={pinnedAxisDomain()}
|
||||||
orientation={chartData.orientation}
|
orientation={chartData.orientation}
|
||||||
className="tracking-tighter"
|
className="tracking-tighter"
|
||||||
width={yAxisWidth}
|
width={yAxisWidth}
|
||||||
@@ -136,7 +140,7 @@ export default memo(function ContainerChart({
|
|||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
||||||
// @ts-expect-error
|
// @ts-expect-error
|
||||||
itemSorter={(a, b) => b.value - a.value}
|
itemSorter={(a, b) => b.value - a.value}
|
||||||
content={<ChartTooltipContent filter={filter} contentFormatter={toolTipFormatter} />}
|
content={<ChartTooltipContent filter={filter} contentFormatter={toolTipFormatter} showTotal={true} />}
|
||||||
/>
|
/>
|
||||||
{Object.keys(chartConfig).map((key) => {
|
{Object.keys(chartConfig).map((key) => {
|
||||||
const filtered = filteredKeys.has(key)
|
const filtered = filteredKeys.has(key)
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ import { Area, AreaChart, CartesianGrid, YAxis } from "recharts"
|
|||||||
import { ChartContainer, ChartTooltip, ChartTooltipContent, xAxis } from "@/components/ui/chart"
|
import { ChartContainer, ChartTooltip, ChartTooltipContent, xAxis } from "@/components/ui/chart"
|
||||||
import { Unit } from "@/lib/enums"
|
import { Unit } from "@/lib/enums"
|
||||||
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
import { chartMargin, cn, decimalString, formatBytes, formatShortDate, toFixedFloat } from "@/lib/utils"
|
||||||
import type { ChartData } from "@/types"
|
import type { ChartData, SystemStatsRecord } from "@/types"
|
||||||
import { useYAxisWidth } from "./hooks"
|
import { useYAxisWidth } from "./hooks"
|
||||||
|
|
||||||
export default memo(function DiskChart({
|
export default memo(function DiskChart({
|
||||||
@@ -12,7 +12,7 @@ export default memo(function DiskChart({
|
|||||||
diskSize,
|
diskSize,
|
||||||
chartData,
|
chartData,
|
||||||
}: {
|
}: {
|
||||||
dataKey: string
|
dataKey: string | ((data: SystemStatsRecord) => number | undefined)
|
||||||
diskSize: number
|
diskSize: number
|
||||||
chartData: ChartData
|
chartData: ChartData
|
||||||
}) {
|
}) {
|
||||||
|
|||||||
@@ -69,7 +69,7 @@ export function useContainerChartConfigs(containerData: ChartData["containerData
|
|||||||
const hue = ((i * 360) / count) % 360
|
const hue = ((i * 360) / count) % 360
|
||||||
chartConfig[containerName] = {
|
chartConfig[containerName] = {
|
||||||
label: containerName,
|
label: containerName,
|
||||||
color: `hsl(${hue}, 60%, 55%)`,
|
color: `hsl(${hue}, var(--chart-saturation), var(--chart-lightness))`,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -59,8 +59,6 @@ export default memo(function LoadAverageChart({ chartData }: { chartData: ChartD
|
|||||||
<ChartTooltip
|
<ChartTooltip
|
||||||
animationEasing="ease-out"
|
animationEasing="ease-out"
|
||||||
animationDuration={150}
|
animationDuration={150}
|
||||||
// @ts-expect-error
|
|
||||||
// itemSorter={(a, b) => b.value - a.value}
|
|
||||||
content={
|
content={
|
||||||
<ChartTooltipContent
|
<ChartTooltipContent
|
||||||
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}
|
||||||
@@ -70,14 +68,15 @@ export default memo(function LoadAverageChart({ chartData }: { chartData: ChartD
|
|||||||
/>
|
/>
|
||||||
{keys.map(({ legacy, color, label }, i) => {
|
{keys.map(({ legacy, color, label }, i) => {
|
||||||
const dataKey = (value: { stats: SystemStats }) => {
|
const dataKey = (value: { stats: SystemStats }) => {
|
||||||
if (chartData.agentVersion.patch < 1) {
|
const { minor, patch } = chartData.agentVersion
|
||||||
|
if (minor <= 12 && patch < 1) {
|
||||||
return value.stats?.[legacy]
|
return value.stats?.[legacy]
|
||||||
}
|
}
|
||||||
return value.stats?.la?.[i] ?? value.stats?.[legacy]
|
return value.stats?.la?.[i] ?? value.stats?.[legacy]
|
||||||
}
|
}
|
||||||
return (
|
return (
|
||||||
<Line
|
<Line
|
||||||
key={i}
|
key={label}
|
||||||
dataKey={dataKey}
|
dataKey={dataKey}
|
||||||
name={label}
|
name={label}
|
||||||
type="monotoneX"
|
type="monotoneX"
|
||||||
|
|||||||
@@ -61,6 +61,7 @@ export default memo(function MemChart({ chartData, showMax }: { chartData: Chart
|
|||||||
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
const { value: convertedValue, unit } = formatBytes(value * 1024, false, Unit.Bytes, true)
|
||||||
return decimalString(convertedValue, convertedValue >= 100 ? 1 : 2) + " " + unit
|
return decimalString(convertedValue, convertedValue >= 100 ? 1 : 2) + " " + unit
|
||||||
}}
|
}}
|
||||||
|
showTotal={true}
|
||||||
/>
|
/>
|
||||||
}
|
}
|
||||||
/>
|
/>
|
||||||
|
|||||||
@@ -64,7 +64,7 @@ export default memo(function TemperatureChart({ chartData }: { chartData: ChartD
|
|||||||
direction="ltr"
|
direction="ltr"
|
||||||
orientation={chartData.orientation}
|
orientation={chartData.orientation}
|
||||||
className="tracking-tighter"
|
className="tracking-tighter"
|
||||||
domain={[0, "auto"]}
|
domain={["auto", "auto"]}
|
||||||
width={yAxisWidth}
|
width={yAxisWidth}
|
||||||
tickFormatter={(val) => {
|
tickFormatter={(val) => {
|
||||||
const { value, unit } = formatTemperature(val, userSettings.unitTemp)
|
const { value, unit } = formatTemperature(val, userSettings.unitTemp)
|
||||||
@@ -91,7 +91,8 @@ export default memo(function TemperatureChart({ chartData }: { chartData: ChartD
|
|||||||
}
|
}
|
||||||
/>
|
/>
|
||||||
{colors.map((key) => {
|
{colors.map((key) => {
|
||||||
const filtered = filter && !key.toLowerCase().includes(filter.toLowerCase())
|
const filterTerms = filter ? filter.toLowerCase().split(" ").filter(term => term.length > 0) : []
|
||||||
|
const filtered = filterTerms.length > 0 && !filterTerms.some(term => key.toLowerCase().includes(term))
|
||||||
const strokeOpacity = filtered ? 0.1 : 1
|
const strokeOpacity = filtered ? 0.1 : 1
|
||||||
return (
|
return (
|
||||||
<Line
|
<Line
|
||||||
@@ -113,4 +114,4 @@ export default memo(function TemperatureChart({ chartData }: { chartData: ChartD
|
|||||||
</ChartContainer>
|
</ChartContainer>
|
||||||
</div>
|
</div>
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
@@ -5,6 +5,7 @@ import { DialogDescription } from "@radix-ui/react-dialog"
|
|||||||
import {
|
import {
|
||||||
AlertOctagonIcon,
|
AlertOctagonIcon,
|
||||||
BookIcon,
|
BookIcon,
|
||||||
|
ContainerIcon,
|
||||||
DatabaseBackupIcon,
|
DatabaseBackupIcon,
|
||||||
FingerprintIcon,
|
FingerprintIcon,
|
||||||
LayoutDashboard,
|
LayoutDashboard,
|
||||||
@@ -65,7 +66,7 @@ export default memo(function CommandPalette({ open, setOpen }: { open: boolean;
|
|||||||
<CommandItem
|
<CommandItem
|
||||||
key={system.id}
|
key={system.id}
|
||||||
onSelect={() => {
|
onSelect={() => {
|
||||||
navigate(getPagePath($router, "system", { name: system.name }))
|
navigate(getPagePath($router, "system", { id: system.id }))
|
||||||
setOpen(false)
|
setOpen(false)
|
||||||
}}
|
}}
|
||||||
>
|
>
|
||||||
@@ -80,7 +81,7 @@ export default memo(function CommandPalette({ open, setOpen }: { open: boolean;
|
|||||||
)}
|
)}
|
||||||
<CommandGroup heading={t`Pages / Settings`}>
|
<CommandGroup heading={t`Pages / Settings`}>
|
||||||
<CommandItem
|
<CommandItem
|
||||||
keywords={["home"]}
|
keywords={["home", t`All Systems`]}
|
||||||
onSelect={() => {
|
onSelect={() => {
|
||||||
navigate(basePath)
|
navigate(basePath)
|
||||||
setOpen(false)
|
setOpen(false)
|
||||||
@@ -94,6 +95,20 @@ export default memo(function CommandPalette({ open, setOpen }: { open: boolean;
|
|||||||
<Trans>Page</Trans>
|
<Trans>Page</Trans>
|
||||||
</CommandShortcut>
|
</CommandShortcut>
|
||||||
</CommandItem>
|
</CommandItem>
|
||||||
|
<CommandItem
|
||||||
|
onSelect={() => {
|
||||||
|
navigate(getPagePath($router, "containers"))
|
||||||
|
setOpen(false)
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<ContainerIcon className="me-2 size-4" />
|
||||||
|
<span>
|
||||||
|
<Trans>All Containers</Trans>
|
||||||
|
</span>
|
||||||
|
<CommandShortcut>
|
||||||
|
<Trans>Page</Trans>
|
||||||
|
</CommandShortcut>
|
||||||
|
</CommandItem>
|
||||||
<CommandItem
|
<CommandItem
|
||||||
onSelect={() => {
|
onSelect={() => {
|
||||||
navigate(getPagePath($router, "settings", { name: "general" }))
|
navigate(getPagePath($router, "settings", { name: "general" }))
|
||||||
|
|||||||
@@ -0,0 +1,176 @@
|
|||||||
|
import type { Column, ColumnDef } from "@tanstack/react-table"
|
||||||
|
import { Button } from "@/components/ui/button"
|
||||||
|
import { cn, decimalString, formatBytes, hourWithSeconds } from "@/lib/utils"
|
||||||
|
import type { ContainerRecord } from "@/types"
|
||||||
|
import { ContainerHealth, ContainerHealthLabels } from "@/lib/enums"
|
||||||
|
import {
|
||||||
|
ArrowUpDownIcon,
|
||||||
|
ClockIcon,
|
||||||
|
ContainerIcon,
|
||||||
|
CpuIcon,
|
||||||
|
LayersIcon,
|
||||||
|
MemoryStickIcon,
|
||||||
|
ServerIcon,
|
||||||
|
ShieldCheckIcon,
|
||||||
|
} from "lucide-react"
|
||||||
|
import { EthernetIcon, HourglassIcon } from "../ui/icons"
|
||||||
|
import { Badge } from "../ui/badge"
|
||||||
|
import { t } from "@lingui/core/macro"
|
||||||
|
import { $allSystemsById } from "@/lib/stores"
|
||||||
|
import { useStore } from "@nanostores/react"
|
||||||
|
|
||||||
|
// Unit names and their corresponding number of seconds for converting docker status strings
|
||||||
|
const unitSeconds = [["s", 1], ["mi", 60], ["h", 3600], ["d", 86400], ["w", 604800], ["mo", 2592000]] as const
|
||||||
|
// Convert docker status string to number of seconds ("Up X minutes", "Up X hours", etc.)
|
||||||
|
function getStatusValue(status: string): number {
|
||||||
|
const [_, num, unit] = status.split(" ")
|
||||||
|
const numValue = Number(num)
|
||||||
|
for (const [unitName, value] of unitSeconds) {
|
||||||
|
if (unit.startsWith(unitName)) {
|
||||||
|
return numValue * value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
export const containerChartCols: ColumnDef<ContainerRecord>[] = [
|
||||||
|
{
|
||||||
|
id: "name",
|
||||||
|
sortingFn: (a, b) => a.original.name.localeCompare(b.original.name),
|
||||||
|
accessorFn: (record) => record.name,
|
||||||
|
header: ({ column }) => <HeaderButton column={column} name={t`Name`} Icon={ContainerIcon} />,
|
||||||
|
cell: ({ getValue }) => {
|
||||||
|
return <span className="ms-1.5 xl:w-48 block truncate">{getValue() as string}</span>
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "system",
|
||||||
|
accessorFn: (record) => record.system,
|
||||||
|
sortingFn: (a, b) => {
|
||||||
|
const allSystems = $allSystemsById.get()
|
||||||
|
const systemNameA = allSystems[a.original.system]?.name ?? ""
|
||||||
|
const systemNameB = allSystems[b.original.system]?.name ?? ""
|
||||||
|
return systemNameA.localeCompare(systemNameB)
|
||||||
|
},
|
||||||
|
header: ({ column }) => <HeaderButton column={column} name={t`System`} Icon={ServerIcon} />,
|
||||||
|
cell: ({ getValue }) => {
|
||||||
|
const allSystems = useStore($allSystemsById)
|
||||||
|
return <span className="ms-1.5 xl:w-34 block truncate">{allSystems[getValue() as string]?.name ?? ""}</span>
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// {
|
||||||
|
// id: "id",
|
||||||
|
// accessorFn: (record) => record.id,
|
||||||
|
// sortingFn: (a, b) => a.original.id.localeCompare(b.original.id),
|
||||||
|
// header: ({ column }) => <HeaderButton column={column} name="ID" Icon={HashIcon} />,
|
||||||
|
// cell: ({ getValue }) => {
|
||||||
|
// return <span className="ms-1.5 me-3 font-mono">{getValue() as string}</span>
|
||||||
|
// },
|
||||||
|
// },
|
||||||
|
{
|
||||||
|
id: "cpu",
|
||||||
|
accessorFn: (record) => record.cpu,
|
||||||
|
invertSorting: true,
|
||||||
|
header: ({ column }) => <HeaderButton column={column} name={t`CPU`} Icon={CpuIcon} />,
|
||||||
|
cell: ({ getValue }) => {
|
||||||
|
const val = getValue() as number
|
||||||
|
return <span className="ms-1.5 tabular-nums">{`${decimalString(val, val >= 10 ? 1 : 2)}%`}</span>
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "memory",
|
||||||
|
accessorFn: (record) => record.memory,
|
||||||
|
invertSorting: true,
|
||||||
|
header: ({ column }) => <HeaderButton column={column} name={t`Memory`} Icon={MemoryStickIcon} />,
|
||||||
|
cell: ({ getValue }) => {
|
||||||
|
const val = getValue() as number
|
||||||
|
const formatted = formatBytes(val, false, undefined, true)
|
||||||
|
return (
|
||||||
|
<span className="ms-1.5 tabular-nums">{`${decimalString(formatted.value, formatted.value >= 10 ? 1 : 2)} ${formatted.unit}`}</span>
|
||||||
|
)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "net",
|
||||||
|
accessorFn: (record) => record.net,
|
||||||
|
invertSorting: true,
|
||||||
|
header: ({ column }) => <HeaderButton column={column} name={t`Net`} Icon={EthernetIcon} />,
|
||||||
|
cell: ({ getValue }) => {
|
||||||
|
const val = getValue() as number
|
||||||
|
const formatted = formatBytes(val, true, undefined, true)
|
||||||
|
return (
|
||||||
|
<span className="ms-1.5 tabular-nums">{`${decimalString(formatted.value, formatted.value >= 10 ? 1 : 2)} ${formatted.unit}`}</span>
|
||||||
|
)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "health",
|
||||||
|
invertSorting: true,
|
||||||
|
accessorFn: (record) => record.health,
|
||||||
|
header: ({ column }) => <HeaderButton column={column} name={t`Health`} Icon={ShieldCheckIcon} />,
|
||||||
|
cell: ({ getValue }) => {
|
||||||
|
const healthValue = getValue() as number
|
||||||
|
const healthStatus = ContainerHealthLabels[healthValue] || "Unknown"
|
||||||
|
return (
|
||||||
|
<Badge variant="outline" className="dark:border-white/12">
|
||||||
|
<span className={cn("size-2 me-1.5 rounded-full", {
|
||||||
|
"bg-green-500": healthValue === ContainerHealth.Healthy,
|
||||||
|
"bg-red-500": healthValue === ContainerHealth.Unhealthy,
|
||||||
|
"bg-yellow-500": healthValue === ContainerHealth.Starting,
|
||||||
|
"bg-zinc-500": healthValue === ContainerHealth.None,
|
||||||
|
})}>
|
||||||
|
</span>
|
||||||
|
{healthStatus}
|
||||||
|
</Badge>
|
||||||
|
)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "image",
|
||||||
|
sortingFn: (a, b) => a.original.image.localeCompare(b.original.image),
|
||||||
|
accessorFn: (record) => record.image,
|
||||||
|
header: ({ column }) => <HeaderButton column={column} name={t({ message: "Image", context: "Docker image" })} Icon={LayersIcon} />,
|
||||||
|
cell: ({ getValue }) => {
|
||||||
|
return <span className="ms-1.5 xl:w-40 block truncate">{getValue() as string}</span>
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "status",
|
||||||
|
accessorFn: (record) => record.status,
|
||||||
|
invertSorting: true,
|
||||||
|
sortingFn: (a, b) => getStatusValue(a.original.status) - getStatusValue(b.original.status),
|
||||||
|
header: ({ column }) => <HeaderButton column={column} name={t`Status`} Icon={HourglassIcon} />,
|
||||||
|
cell: ({ getValue }) => {
|
||||||
|
return <span className="ms-1.5 w-25 block truncate">{getValue() as string}</span>
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
id: "updated",
|
||||||
|
invertSorting: true,
|
||||||
|
accessorFn: (record) => record.updated,
|
||||||
|
header: ({ column }) => <HeaderButton column={column} name={t`Updated`} Icon={ClockIcon} />,
|
||||||
|
cell: ({ getValue }) => {
|
||||||
|
const timestamp = getValue() as number
|
||||||
|
return (
|
||||||
|
<span className="ms-1.5 tabular-nums">
|
||||||
|
{hourWithSeconds(new Date(timestamp).toISOString())}
|
||||||
|
</span>
|
||||||
|
)
|
||||||
|
},
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
function HeaderButton({ column, name, Icon }: { column: Column<ContainerRecord>; name: string; Icon: React.ElementType }) {
|
||||||
|
const isSorted = column.getIsSorted()
|
||||||
|
return (
|
||||||
|
<Button
|
||||||
|
className={cn("h-9 px-3 flex items-center gap-2 duration-50", isSorted && "bg-accent/70 light:bg-accent text-accent-foreground/90")}
|
||||||
|
variant="ghost"
|
||||||
|
onClick={() => column.toggleSorting(column.getIsSorted() === "asc")}
|
||||||
|
>
|
||||||
|
{Icon && <Icon className="size-4" />}
|
||||||
|
{name}
|
||||||
|
<ArrowUpDownIcon className="size-4" />
|
||||||
|
</Button>
|
||||||
|
)
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user