Mirror of https://github.com/henrygd/beszel.git (synced 2025-12-17 02:36:17 +01:00)
add one minute chart + refactor rpc
- add one minute charts
- update disk io to use bytes
- update hub and agent connection interfaces / handlers to be more flexible
- change agent cache to use cache time instead of session id
- refactor collection of metrics which require deltas to track separately per cache time
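For orientation before the diff: a rough sketch of the reworked hub-to-agent RPC this commit introduces. The types, actions, and calls (HubRequest, AgentResponse, DataRequestOptions, cbor, gws) match the code added below; the surrounding variables (reqID, conn, reply) are illustrative placeholders, not part of the commit.

  // Hypothetical wiring, for orientation only.
  reqID := uint32(1)
  req := common.HubRequest[any]{
    Id:     &reqID,                                       // new: lets concurrent requests share one connection
    Action: common.GetData,                               // or common.CheckFingerprint
    Data:   common.DataRequestOptions{CacheTimeMs: 1000}, // a 1s cache drives the one-minute realtime chart
  }
  payload, _ := cbor.Marshal(req)
  _ = conn.WriteMessage(gws.OpcodeBinary, payload) // send to the agent over the WebSocket

  // The agent replies with an AgentResponse carrying the same Id, so the hub
  // can route the reply to the matching pending request (see request_manager.go).
  var resp common.AgentResponse
  _ = cbor.Unmarshal(reply, &resp)
  if resp.Error == "" && resp.SystemData != nil {
    // fresh metrics for the subscribed client
  }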
@@ -1,22 +1,33 @@
 package common

-type WebSocketAction = uint8
+import (
+  "github.com/henrygd/beszel/internal/entities/system"
+)

+// Not implemented yet
+// type AgentError = uint8

+type WebSocketAction = uint8

 const (
   // Request system data from agent
   GetData WebSocketAction = iota
   // Check the fingerprint of the agent
   CheckFingerprint
   // Add new actions here...
 )

 // HubRequest defines the structure for requests sent from hub to agent.
 type HubRequest[T any] struct {
   Action WebSocketAction `cbor:"0,keyasint"`
   Data   T               `cbor:"1,keyasint,omitempty,omitzero"`
   // Error AgentError `cbor:"error,omitempty,omitzero"`
+  Id *uint32 `cbor:"2,keyasint,omitempty"`
 }

+// AgentResponse defines the structure for responses sent from agent to hub.
+type AgentResponse struct {
+  Id          *uint32               `cbor:"0,keyasint,omitempty"`
+  SystemData  *system.CombinedData  `cbor:"1,keyasint,omitempty,omitzero"`
+  Fingerprint *FingerprintResponse  `cbor:"2,keyasint,omitempty,omitzero"`
+  Error       string                `cbor:"3,keyasint,omitempty,omitzero"`
+  // RawBytes []byte `cbor:"4,keyasint,omitempty,omitzero"`
+}

 type FingerprintRequest struct {

@@ -30,3 +41,8 @@ type FingerprintResponse struct {
   Hostname string `cbor:"1,keyasint,omitempty,omitzero"`
   Port     string `cbor:"2,keyasint,omitempty,omitzero"`
 }

+type DataRequestOptions struct {
+  CacheTimeMs uint16 `cbor:"0,keyasint"`
+  // ResourceType uint8 `cbor:"1,keyasint,omitempty,omitzero"`
+}
@@ -42,6 +42,8 @@ type Stats struct {
   Battery           [2]uint8             `json:"bat,omitzero" cbor:"29,keyasint,omitzero"` // [percent, charge state, current]
   MaxMem            float64              `json:"mm,omitempty" cbor:"30,keyasint,omitempty"`
   NetworkInterfaces map[string][4]uint64 `json:"ni,omitempty" cbor:"31,keyasint,omitempty"` // [upload bytes, download bytes, total upload, total download]
+  DiskIO            [2]uint64            `json:"dio,omitzero" cbor:"32,keyasint,omitzero"` // [read bytes, write bytes]
+  MaxDiskIO         [2]uint64            `json:"diom,omitzero" cbor:"-"`                   // [max read bytes, max write bytes]
 }

 type GPUData struct {

@@ -68,6 +70,11 @@ type FsStats struct {
   DiskWritePs    float64 `json:"w" cbor:"3,keyasint"`
   MaxDiskReadPS  float64 `json:"rm,omitempty" cbor:"4,keyasint,omitempty"`
   MaxDiskWritePS float64 `json:"wm,omitempty" cbor:"5,keyasint,omitempty"`
+  // TODO: remove DiskReadPs and DiskWritePs in future release in favor of DiskReadBytes and DiskWriteBytes
+  DiskReadBytes     uint64 `json:"rb" cbor:"6,keyasint,omitempty"`
+  DiskWriteBytes    uint64 `json:"wb" cbor:"7,keyasint,omitempty"`
+  MaxDiskReadBytes  uint64 `json:"rbm,omitempty" cbor:"-"`
+  MaxDiskWriteBytes uint64 `json:"wbm,omitempty" cbor:"-"`
 }

 type NetIoStats struct {
@@ -1,6 +1,7 @@
 package hub

 import (
+  "context"
   "errors"
   "net"
   "net/http"

@@ -93,7 +94,7 @@ func (acr *agentConnectRequest) agentConnect() (err error) {
 // verifyWsConn verifies the WebSocket connection using the agent's fingerprint and
 // SSH key signature, then adds the system to the system manager.
 func (acr *agentConnectRequest) verifyWsConn(conn *gws.Conn, fpRecords []ws.FingerprintRecord) (err error) {
-  wsConn := ws.NewWsConnection(conn)
+  wsConn := ws.NewWsConnection(conn, acr.agentSemVer)

   // must set wsConn in connection store before the read loop
   conn.Session().Store("wsConn", wsConn)

@@ -112,7 +113,7 @@ func (acr *agentConnectRequest) verifyWsConn(conn *gws.Conn, fpRecords []ws.Fing
     return err
   }

-  agentFingerprint, err := wsConn.GetFingerprint(acr.token, signer, acr.isUniversalToken)
+  agentFingerprint, err := wsConn.GetFingerprint(context.Background(), acr.token, signer, acr.isUniversalToken)
   if err != nil {
     return err
   }
@@ -10,6 +10,7 @@ import (
   "strings"
   "time"

+  "github.com/henrygd/beszel/internal/common"
   "github.com/henrygd/beszel/internal/hub/ws"

   "github.com/henrygd/beszel/internal/entities/system"

@@ -107,7 +108,7 @@ func (sys *System) update() error {
     sys.handlePaused()
     return nil
   }
-  data, err := sys.fetchDataFromAgent()
+  data, err := sys.fetchDataFromAgent(common.DataRequestOptions{CacheTimeMs: uint16(interval)})
   if err == nil {
     _, err = sys.createRecords(data)
   }

@@ -209,13 +210,13 @@ func (sys *System) getContext() (context.Context, context.CancelFunc) {

 // fetchDataFromAgent attempts to fetch data from the agent,
 // prioritizing WebSocket if available.
-func (sys *System) fetchDataFromAgent() (*system.CombinedData, error) {
+func (sys *System) fetchDataFromAgent(options common.DataRequestOptions) (*system.CombinedData, error) {
   if sys.data == nil {
     sys.data = &system.CombinedData{}
   }

   if sys.WsConn != nil && sys.WsConn.IsConnected() {
-    wsData, err := sys.fetchDataViaWebSocket()
+    wsData, err := sys.fetchDataViaWebSocket(options)
     if err == nil {
       return wsData, nil
     }

@@ -223,18 +224,18 @@ func (sys *System) fetchDataFromAgent() (*system.CombinedData, error) {
     sys.closeWebSocketConnection()
   }

-  sshData, err := sys.fetchDataViaSSH()
+  sshData, err := sys.fetchDataViaSSH(options)
   if err != nil {
     return nil, err
   }
   return sshData, nil
 }

-func (sys *System) fetchDataViaWebSocket() (*system.CombinedData, error) {
+func (sys *System) fetchDataViaWebSocket(options common.DataRequestOptions) (*system.CombinedData, error) {
   if sys.WsConn == nil || !sys.WsConn.IsConnected() {
     return nil, errors.New("no websocket connection")
   }
-  err := sys.WsConn.RequestSystemData(sys.data)
+  err := sys.WsConn.RequestSystemData(context.Background(), sys.data, options)
   if err != nil {
     return nil, err
   }

@@ -244,7 +245,7 @@ func (sys *System) fetchDataViaWebSocket() (*system.CombinedData, error) {
 // fetchDataViaSSH handles fetching data using SSH.
 // This function encapsulates the original SSH logic.
 // It updates sys.data directly upon successful fetch.
-func (sys *System) fetchDataViaSSH() (*system.CombinedData, error) {
+func (sys *System) fetchDataViaSSH(options common.DataRequestOptions) (*system.CombinedData, error) {
   maxRetries := 1
   for attempt := 0; attempt <= maxRetries; attempt++ {
     if sys.client == nil || sys.Status == down {

@@ -269,12 +270,31 @@ func (sys *System) fetchDataViaSSH() (*system.CombinedData, error) {
   if err != nil {
     return nil, err
   }
+  stdin, stdinErr := session.StdinPipe()
   if err := session.Shell(); err != nil {
     return nil, err
   }

   *sys.data = system.CombinedData{}

+  if sys.agentVersion.GTE(beszel.MinVersionAgentResponse) && stdinErr == nil {
+    req := common.HubRequest[any]{Action: common.GetData, Data: options}
+    _ = cbor.NewEncoder(stdin).Encode(req)
+    // Close write side to signal end of request
+    _ = stdin.Close()
+
+    var resp common.AgentResponse
+    if decErr := cbor.NewDecoder(stdout).Decode(&resp); decErr == nil && resp.SystemData != nil {
+      *sys.data = *resp.SystemData
+      // wait for the session to complete
+      if err := session.Wait(); err != nil {
+        return nil, err
+      }
+      return sys.data, nil
+    }
+    // If decoding failed, fall back below
+  }
+
   if sys.agentVersion.GTE(beszel.MinVersionCbor) {
     err = cbor.NewDecoder(stdout).Decode(sys.data)
   } else {

@@ -379,11 +399,11 @@ func extractAgentVersion(versionString string) (semver.Version, error) {
 }

 // getJitter returns a channel that will be triggered after a random delay
-// between 40% and 90% of the interval.
+// between 51% and 95% of the interval.
 // This is used to stagger the initial WebSocket connections to prevent clustering.
 func getJitter() <-chan time.Time {
-  minPercent := 40
-  maxPercent := 90
+  minPercent := 51
+  maxPercent := 95
   jitterRange := maxPercent - minPercent
   msDelay := (interval * minPercent / 100) + rand.Intn(interval*jitterRange/100)
   return time.After(time.Duration(msDelay) * time.Millisecond)

@@ -106,6 +106,8 @@ func (sm *SystemManager) bindEventHooks() {
   sm.hub.OnRecordAfterUpdateSuccess("systems").BindFunc(sm.onRecordAfterUpdateSuccess)
   sm.hub.OnRecordAfterDeleteSuccess("systems").BindFunc(sm.onRecordAfterDeleteSuccess)
   sm.hub.OnRecordAfterUpdateSuccess("fingerprints").BindFunc(sm.onTokenRotated)
+  sm.hub.OnRealtimeSubscribeRequest().BindFunc(sm.onRealtimeSubscribeRequest)
+  sm.hub.OnRealtimeConnectRequest().BindFunc(sm.onRealtimeConnectRequest)
 }

 // onTokenRotated handles fingerprint token rotation events.
internal/hub/systems/system_realtime.go (new file, 187 lines)
@@ -0,0 +1,187 @@
package systems

import (
  "encoding/json"
  "strings"
  "sync"
  "time"

  "github.com/henrygd/beszel/internal/common"
  "github.com/pocketbase/pocketbase/core"
  "github.com/pocketbase/pocketbase/tools/subscriptions"
)

type subscriptionInfo struct {
  subscription     string
  connectedClients uint8
}

var (
  activeSubscriptions = make(map[string]*subscriptionInfo)
  workerRunning       bool
  realtimeTicker      *time.Ticker
  tickerStopChan      chan struct{}
  realtimeMutex       sync.Mutex
)

// onRealtimeConnectRequest handles client connection events for realtime subscriptions.
// It cleans up existing subscriptions when a client connects.
func (sm *SystemManager) onRealtimeConnectRequest(e *core.RealtimeConnectRequestEvent) error {
  // after e.Next() is the client disconnection
  e.Next()
  subscriptions := e.Client.Subscriptions()
  for k := range subscriptions {
    sm.removeRealtimeSubscription(k, subscriptions[k])
  }
  return nil
}

// onRealtimeSubscribeRequest handles client subscription events for realtime metrics.
// It tracks new subscriptions and unsubscriptions to manage the realtime worker lifecycle.
func (sm *SystemManager) onRealtimeSubscribeRequest(e *core.RealtimeSubscribeRequestEvent) error {
  oldSubs := e.Client.Subscriptions()
  // after e.Next() is the result of the subscribe request
  err := e.Next()
  newSubs := e.Client.Subscriptions()

  // handle new subscriptions
  for k, options := range newSubs {
    if _, ok := oldSubs[k]; !ok {
      if strings.HasPrefix(k, "rt_metrics") {
        systemId := options.Query["system"]
        if _, ok := activeSubscriptions[systemId]; !ok {
          activeSubscriptions[systemId] = &subscriptionInfo{
            subscription: k,
          }
        }
        activeSubscriptions[systemId].connectedClients += 1
        sm.onRealtimeSubscriptionAdded()
      }
    }
  }
  // handle unsubscriptions
  for k := range oldSubs {
    if _, ok := newSubs[k]; !ok {
      sm.removeRealtimeSubscription(k, oldSubs[k])
    }
  }

  return err
}

// onRealtimeSubscriptionAdded initializes or starts the realtime worker when the first subscription is added.
// It ensures only one worker runs at a time and creates the ticker for periodic data fetching.
func (sm *SystemManager) onRealtimeSubscriptionAdded() {
  realtimeMutex.Lock()
  defer realtimeMutex.Unlock()

  // Start the worker if it's not already running
  if !workerRunning {
    workerRunning = true
    // Create a new stop channel for this worker instance
    tickerStopChan = make(chan struct{})
    go sm.startRealtimeWorker()
  }

  // If no ticker exists, create one
  if realtimeTicker == nil {
    realtimeTicker = time.NewTicker(1 * time.Second)
  }
}

// checkSubscriptions stops the realtime worker when there are no active subscriptions.
// This prevents unnecessary resource usage when no clients are listening for realtime data.
func (sm *SystemManager) checkSubscriptions() {
  if !workerRunning || len(activeSubscriptions) > 0 {
    return
  }

  realtimeMutex.Lock()
  defer realtimeMutex.Unlock()

  // Signal the worker to stop
  if tickerStopChan != nil {
    select {
    case tickerStopChan <- struct{}{}:
    default:
    }
  }

  if realtimeTicker != nil {
    realtimeTicker.Stop()
    realtimeTicker = nil
  }

  // Mark worker as stopped (will be reset when next subscription comes in)
  workerRunning = false
}

// removeRealtimeSubscription removes a realtime subscription and checks if the worker should be stopped.
// It only processes subscriptions with the "rt_metrics" prefix and triggers cleanup when subscriptions are removed.
func (sm *SystemManager) removeRealtimeSubscription(subscription string, options subscriptions.SubscriptionOptions) {
  if strings.HasPrefix(subscription, "rt_metrics") {
    systemId := options.Query["system"]
    if info, ok := activeSubscriptions[systemId]; ok {
      info.connectedClients -= 1
      if info.connectedClients <= 0 {
        delete(activeSubscriptions, systemId)
      }
    }
    sm.checkSubscriptions()
  }
}

// startRealtimeWorker runs the main loop for fetching realtime data from agents.
// It continuously fetches system data and broadcasts it to subscribed clients via WebSocket.
func (sm *SystemManager) startRealtimeWorker() {
  sm.fetchRealtimeDataAndNotify()

  for {
    select {
    case <-tickerStopChan:
      return
    case <-realtimeTicker.C:
      // Check if ticker is still valid (might have been stopped)
      if realtimeTicker == nil || len(activeSubscriptions) == 0 {
        return
      }
      // slog.Debug("activeSubscriptions", "count", len(activeSubscriptions))
      sm.fetchRealtimeDataAndNotify()
    }
  }
}

// fetchRealtimeDataAndNotify fetches realtime data for all active subscriptions and notifies the clients.
func (sm *SystemManager) fetchRealtimeDataAndNotify() {
  for systemId, info := range activeSubscriptions {
    system, ok := sm.systems.GetOk(systemId)
    if ok {
      go func() {
        data, err := system.fetchDataFromAgent(common.DataRequestOptions{CacheTimeMs: 1000})
        if err != nil {
          return
        }
        bytes, err := json.Marshal(data)
        if err == nil {
          notify(sm.hub, info.subscription, bytes)
        }
      }()
    }
  }
}

// notify broadcasts realtime data to all clients subscribed to a specific subscription.
// It iterates through all connected clients and sends the data only to those with matching subscriptions.
func notify(app core.App, subscription string, data []byte) error {
  message := subscriptions.Message{
    Name: subscription,
    Data: data,
  }
  for _, client := range app.SubscriptionsBroker().Clients() {
    if !client.HasSubscription(subscription) {
      continue
    }
    client.Send(message)
  }
  return nil
}
internal/hub/ws/handlers.go (new file, 107 lines)
@@ -0,0 +1,107 @@
package ws

import (
  "context"
  "errors"

  "github.com/fxamacker/cbor/v2"
  "github.com/henrygd/beszel/internal/common"
  "github.com/henrygd/beszel/internal/entities/system"
  "github.com/lxzan/gws"
  "golang.org/x/crypto/ssh"
)

// ResponseHandler defines interface for handling agent responses
type ResponseHandler interface {
  Handle(agentResponse common.AgentResponse) error
  HandleLegacy(rawData []byte) error
}

// BaseHandler provides a default implementation that can be embedded to make HandleLegacy optional
// type BaseHandler struct{}

// func (h *BaseHandler) HandleLegacy(rawData []byte) error {
// 	return errors.New("legacy format not supported")
// }

////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////

// systemDataHandler implements ResponseHandler for system data requests
type systemDataHandler struct {
  data *system.CombinedData
}

func (h *systemDataHandler) HandleLegacy(rawData []byte) error {
  return cbor.Unmarshal(rawData, h.data)
}

func (h *systemDataHandler) Handle(agentResponse common.AgentResponse) error {
  if agentResponse.SystemData != nil {
    *h.data = *agentResponse.SystemData
  }
  return nil
}

// RequestSystemData requests system metrics from the agent and unmarshals the response.
func (ws *WsConn) RequestSystemData(ctx context.Context, data *system.CombinedData, options common.DataRequestOptions) error {
  if !ws.IsConnected() {
    return gws.ErrConnClosed
  }

  req, err := ws.requestManager.SendRequest(ctx, common.GetData, options)
  if err != nil {
    return err
  }

  handler := &systemDataHandler{data: data}
  return ws.handleAgentRequest(req, handler)
}

////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////

// fingerprintHandler implements ResponseHandler for fingerprint requests
type fingerprintHandler struct {
  result *common.FingerprintResponse
}

func (h *fingerprintHandler) HandleLegacy(rawData []byte) error {
  return cbor.Unmarshal(rawData, h.result)
}

func (h *fingerprintHandler) Handle(agentResponse common.AgentResponse) error {
  if agentResponse.Fingerprint != nil {
    *h.result = *agentResponse.Fingerprint
    return nil
  }
  return errors.New("no fingerprint data in response")
}

// GetFingerprint authenticates with the agent using SSH signature and returns the agent's fingerprint.
func (ws *WsConn) GetFingerprint(ctx context.Context, token string, signer ssh.Signer, needSysInfo bool) (common.FingerprintResponse, error) {
  if !ws.IsConnected() {
    return common.FingerprintResponse{}, gws.ErrConnClosed
  }

  challenge := []byte(token)
  signature, err := signer.Sign(nil, challenge)
  if err != nil {
    return common.FingerprintResponse{}, err
  }

  req, err := ws.requestManager.SendRequest(ctx, common.CheckFingerprint, common.FingerprintRequest{
    Signature:   signature.Blob,
    NeedSysInfo: needSysInfo,
  })
  if err != nil {
    return common.FingerprintResponse{}, err
  }

  var result common.FingerprintResponse
  handler := &fingerprintHandler{result: &result}
  err = ws.handleAgentRequest(req, handler)
  return result, err
}
internal/hub/ws/request_manager.go (new file, 186 lines)
@@ -0,0 +1,186 @@
package ws

import (
  "context"
  "fmt"
  "sync"
  "sync/atomic"
  "time"

  "github.com/fxamacker/cbor/v2"
  "github.com/henrygd/beszel/internal/common"
  "github.com/lxzan/gws"
)

// RequestID uniquely identifies a request
type RequestID uint32

// PendingRequest tracks an in-flight request
type PendingRequest struct {
  ID         RequestID
  ResponseCh chan *gws.Message
  Context    context.Context
  Cancel     context.CancelFunc
  CreatedAt  time.Time
}

// RequestManager handles concurrent requests to an agent
type RequestManager struct {
  sync.RWMutex
  conn        *gws.Conn
  pendingReqs map[RequestID]*PendingRequest
  nextID      atomic.Uint32
}

// NewRequestManager creates a new request manager for a WebSocket connection
func NewRequestManager(conn *gws.Conn) *RequestManager {
  rm := &RequestManager{
    conn:        conn,
    pendingReqs: make(map[RequestID]*PendingRequest),
  }
  return rm
}

// SendRequest sends a request and returns a channel for the response
func (rm *RequestManager) SendRequest(ctx context.Context, action common.WebSocketAction, data any) (*PendingRequest, error) {
  reqID := RequestID(rm.nextID.Add(1))

  reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second)

  req := &PendingRequest{
    ID:         reqID,
    ResponseCh: make(chan *gws.Message, 1),
    Context:    reqCtx,
    Cancel:     cancel,
    CreatedAt:  time.Now(),
  }

  rm.Lock()
  rm.pendingReqs[reqID] = req
  rm.Unlock()

  hubReq := common.HubRequest[any]{
    Id:     (*uint32)(&reqID),
    Action: action,
    Data:   data,
  }

  // Send the request
  if err := rm.sendMessage(hubReq); err != nil {
    rm.cancelRequest(reqID)
    return nil, fmt.Errorf("failed to send request: %w", err)
  }

  // Start cleanup watcher for timeout/cancellation
  go rm.cleanupRequest(req)

  return req, nil
}

// sendMessage encodes and sends a message over WebSocket
func (rm *RequestManager) sendMessage(data any) error {
  if rm.conn == nil {
    return gws.ErrConnClosed
  }

  bytes, err := cbor.Marshal(data)
  if err != nil {
    return fmt.Errorf("failed to marshal request: %w", err)
  }

  return rm.conn.WriteMessage(gws.OpcodeBinary, bytes)
}

// handleResponse processes a single response message
func (rm *RequestManager) handleResponse(message *gws.Message) {
  var response common.AgentResponse
  if err := cbor.Unmarshal(message.Data.Bytes(), &response); err != nil {
    // Legacy response without ID - route to first pending request of any type
    rm.routeLegacyResponse(message)
    return
  }

  reqID := RequestID(*response.Id)

  rm.RLock()
  req, exists := rm.pendingReqs[reqID]
  rm.RUnlock()

  if !exists {
    // Request not found (might have timed out) - close the message
    message.Close()
    return
  }

  select {
  case req.ResponseCh <- message:
    // Message successfully delivered - the receiver will close it
    rm.deleteRequest(reqID)
  case <-req.Context.Done():
    // Request was cancelled/timed out - close the message
    message.Close()
  }
}

// routeLegacyResponse handles responses that don't have request IDs (backwards compatibility)
func (rm *RequestManager) routeLegacyResponse(message *gws.Message) {
  // Snapshot the oldest pending request without holding the lock during send
  rm.RLock()
  var oldestReq *PendingRequest
  for _, req := range rm.pendingReqs {
    if oldestReq == nil || req.CreatedAt.Before(oldestReq.CreatedAt) {
      oldestReq = req
    }
  }
  rm.RUnlock()

  if oldestReq != nil {
    select {
    case oldestReq.ResponseCh <- message:
      // Message successfully delivered - the receiver will close it
      rm.deleteRequest(oldestReq.ID)
    case <-oldestReq.Context.Done():
      // Request was cancelled - close the message
      message.Close()
    }
  } else {
    // No pending requests - close the message
    message.Close()
  }
}

// cleanupRequest handles request timeout and cleanup
func (rm *RequestManager) cleanupRequest(req *PendingRequest) {
  <-req.Context.Done()
  rm.cancelRequest(req.ID)
}

// cancelRequest removes a request and cancels its context
func (rm *RequestManager) cancelRequest(reqID RequestID) {
  rm.Lock()
  defer rm.Unlock()

  if req, exists := rm.pendingReqs[reqID]; exists {
    req.Cancel()
    delete(rm.pendingReqs, reqID)
  }
}

// deleteRequest removes a request from the pending map without cancelling its context.
func (rm *RequestManager) deleteRequest(reqID RequestID) {
  rm.Lock()
  defer rm.Unlock()
  delete(rm.pendingReqs, reqID)
}

// Close shuts down the request manager
func (rm *RequestManager) Close() {
  rm.Lock()
  defer rm.Unlock()

  // Cancel all pending requests
  for _, req := range rm.pendingReqs {
    req.Cancel()
  }
  rm.pendingReqs = make(map[RequestID]*PendingRequest)
}
internal/hub/ws/request_manager_test.go (new file, 81 lines)
@@ -0,0 +1,81 @@
//go:build testing
// +build testing

package ws

import (
  "context"
  "testing"
  "time"

  "github.com/stretchr/testify/assert"
)

// TestRequestManager_BasicFunctionality tests the request manager without mocking gws.Conn
func TestRequestManager_BasicFunctionality(t *testing.T) {
  // We'll test the core logic without mocking the connection
  // since the gws.Conn interface is complex to mock properly

  t.Run("request ID generation", func(t *testing.T) {
    // Test that request IDs are generated sequentially and uniquely
    rm := &RequestManager{}

    // Simulate multiple ID generations
    id1 := rm.nextID.Add(1)
    id2 := rm.nextID.Add(1)
    id3 := rm.nextID.Add(1)

    assert.NotEqual(t, id1, id2)
    assert.NotEqual(t, id2, id3)
    assert.Greater(t, id2, id1)
    assert.Greater(t, id3, id2)
  })

  t.Run("pending request tracking", func(t *testing.T) {
    rm := &RequestManager{
      pendingReqs: make(map[RequestID]*PendingRequest),
    }

    // Initially no pending requests
    assert.Equal(t, 0, rm.GetPendingCount())

    // Add some fake pending requests
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    req1 := &PendingRequest{
      ID:      RequestID(1),
      Context: ctx,
      Cancel:  cancel,
    }
    req2 := &PendingRequest{
      ID:      RequestID(2),
      Context: ctx,
      Cancel:  cancel,
    }

    rm.pendingReqs[req1.ID] = req1
    rm.pendingReqs[req2.ID] = req2

    assert.Equal(t, 2, rm.GetPendingCount())

    // Remove one
    delete(rm.pendingReqs, req1.ID)
    assert.Equal(t, 1, rm.GetPendingCount())

    // Remove all
    delete(rm.pendingReqs, req2.ID)
    assert.Equal(t, 0, rm.GetPendingCount())
  })

  t.Run("context cancellation", func(t *testing.T) {
    ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond)
    defer cancel()

    // Wait for context to timeout
    <-ctx.Done()

    // Verify context was cancelled
    assert.Equal(t, context.DeadlineExceeded, ctx.Err())
  })
}
@@ -5,13 +5,13 @@ import (
   "time"
   "weak"

-  "github.com/henrygd/beszel/internal/entities/system"
+  "github.com/blang/semver"
+  "github.com/henrygd/beszel"

   "github.com/henrygd/beszel/internal/common"

   "github.com/fxamacker/cbor/v2"
   "github.com/lxzan/gws"
   "golang.org/x/crypto/ssh"
 )

 const (

@@ -25,9 +25,10 @@ type Handler struct {

 // WsConn represents a WebSocket connection to an agent.
 type WsConn struct {
-  conn         *gws.Conn
-  responseChan chan *gws.Message
-  DownChan     chan struct{}
+  conn           *gws.Conn
+  requestManager *RequestManager
+  DownChan       chan struct{}
+  agentVersion   semver.Version
 }

 // FingerprintRecord is fingerprints collection record data in the hub

@@ -50,21 +51,22 @@ func GetUpgrader() *gws.Upgrader {
   return upgrader
 }

-// NewWsConnection creates a new WebSocket connection wrapper.
-func NewWsConnection(conn *gws.Conn) *WsConn {
+// NewWsConnection creates a new WebSocket connection wrapper with agent version.
+func NewWsConnection(conn *gws.Conn, agentVersion semver.Version) *WsConn {
   return &WsConn{
-    conn:         conn,
-    responseChan: make(chan *gws.Message, 1),
-    DownChan:     make(chan struct{}, 1),
+    conn:           conn,
+    requestManager: NewRequestManager(conn),
+    DownChan:       make(chan struct{}, 1),
+    agentVersion:   agentVersion,
   }
 }

-// OnOpen sets a deadline for the WebSocket connection.
+// OnOpen sets a deadline for the WebSocket connection and extracts agent version.
 func (h *Handler) OnOpen(conn *gws.Conn) {
   conn.SetDeadline(time.Now().Add(deadline))
 }

-// OnMessage routes incoming WebSocket messages to the response channel.
+// OnMessage routes incoming WebSocket messages to the request manager.
 func (h *Handler) OnMessage(conn *gws.Conn, message *gws.Message) {
   conn.SetDeadline(time.Now().Add(deadline))
   if message.Opcode != gws.OpcodeBinary || message.Data.Len() == 0 {

@@ -75,12 +77,7 @@ func (h *Handler) OnMessage(conn *gws.Conn, message *gws.Message) {
     _ = conn.WriteClose(1000, nil)
     return
   }
-  select {
-  case wsConn.(*WsConn).responseChan <- message:
-  default:
-    // close if the connection is not expecting a response
-    wsConn.(*WsConn).Close(nil)
-  }
+  wsConn.(*WsConn).requestManager.handleResponse(message)
 }

 // OnClose handles WebSocket connection closures and triggers system down status after delay.

@@ -106,6 +103,9 @@ func (ws *WsConn) Close(msg []byte) {
   if ws.IsConnected() {
     ws.conn.WriteClose(1000, msg)
   }
+  if ws.requestManager != nil {
+    ws.requestManager.Close()
+  }
 }

 // Ping sends a ping frame to keep the connection alive.

@@ -115,6 +115,7 @@ func (ws *WsConn) Ping() error {
 }

 // sendMessage encodes data to CBOR and sends it as a binary message to the agent.
+// This is kept for backwards compatibility but new actions should use RequestManager.
 func (ws *WsConn) sendMessage(data common.HubRequest[any]) error {
   if ws.conn == nil {
     return gws.ErrConnClosed

@@ -126,54 +127,34 @@ func (ws *WsConn) sendMessage(data common.HubRequest[any]) error {
   return ws.conn.WriteMessage(gws.OpcodeBinary, bytes)
 }

-// RequestSystemData requests system metrics from the agent and unmarshals the response.
-func (ws *WsConn) RequestSystemData(data *system.CombinedData) error {
-  var message *gws.Message
-
-  ws.sendMessage(common.HubRequest[any]{
-    Action: common.GetData,
-  })
+// handleAgentRequest processes a request to the agent, handling both legacy and new formats.
+func (ws *WsConn) handleAgentRequest(req *PendingRequest, handler ResponseHandler) error {
   // Wait for response
   select {
-  case <-time.After(10 * time.Second):
-    ws.Close(nil)
-    return gws.ErrConnClosed
-  case message = <-ws.responseChan:
+  case message := <-req.ResponseCh:
+    defer message.Close()
+    // Cancel request context to stop timeout watcher promptly
+    defer req.Cancel()
+    data := message.Data.Bytes()
+
+    // Legacy format - unmarshal directly
+    if ws.agentVersion.LT(beszel.MinVersionAgentResponse) {
+      return handler.HandleLegacy(data)
+    }
+
+    // New format with AgentResponse wrapper
+    var agentResponse common.AgentResponse
+    if err := cbor.Unmarshal(data, &agentResponse); err != nil {
+      return err
+    }
+    if agentResponse.Error != "" {
+      return errors.New(agentResponse.Error)
+    }
+    return handler.Handle(agentResponse)
+
+  case <-req.Context.Done():
+    return req.Context.Err()
   }
-  defer message.Close()
-  return cbor.Unmarshal(message.Data.Bytes(), data)
 }

-// GetFingerprint authenticates with the agent using SSH signature and returns the agent's fingerprint.
-func (ws *WsConn) GetFingerprint(token string, signer ssh.Signer, needSysInfo bool) (common.FingerprintResponse, error) {
-  var clientFingerprint common.FingerprintResponse
-  challenge := []byte(token)
-
-  signature, err := signer.Sign(nil, challenge)
-  if err != nil {
-    return clientFingerprint, err
-  }
-
-  err = ws.sendMessage(common.HubRequest[any]{
-    Action: common.CheckFingerprint,
-    Data: common.FingerprintRequest{
-      Signature:   signature.Blob,
-      NeedSysInfo: needSysInfo,
-    },
-  })
-  if err != nil {
-    return clientFingerprint, err
-  }
-
-  var message *gws.Message
-  select {
-  case message = <-ws.responseChan:
-  case <-time.After(10 * time.Second):
-    return clientFingerprint, errors.New("request expired")
-  }
-  defer message.Close()
-
-  err = cbor.Unmarshal(message.Data.Bytes(), &clientFingerprint)
-  return clientFingerprint, err
-}

 // IsConnected returns true if the WebSocket connection is active.
@@ -8,6 +8,7 @@ import (
   "testing"
   "time"

+  "github.com/blang/semver"
   "github.com/henrygd/beszel/internal/common"

   "github.com/fxamacker/cbor/v2"

@@ -36,26 +37,25 @@ func TestGetUpgrader(t *testing.T) {
 // TestNewWsConnection tests WebSocket connection creation
 func TestNewWsConnection(t *testing.T) {
   // We can't easily mock gws.Conn, so we'll pass nil and test the structure
-  wsConn := NewWsConnection(nil)
+  wsConn := NewWsConnection(nil, semver.MustParse("0.12.10"))

   assert.NotNil(t, wsConn, "WebSocket connection should not be nil")
   assert.Nil(t, wsConn.conn, "Connection should be nil as passed")
-  assert.NotNil(t, wsConn.responseChan, "Response channel should be initialized")
+  assert.NotNil(t, wsConn.requestManager, "Request manager should be initialized")
   assert.NotNil(t, wsConn.DownChan, "Down channel should be initialized")
-  assert.Equal(t, 1, cap(wsConn.responseChan), "Response channel should have capacity of 1")
   assert.Equal(t, 1, cap(wsConn.DownChan), "Down channel should have capacity of 1")
 }

 // TestWsConn_IsConnected tests the connection status check
 func TestWsConn_IsConnected(t *testing.T) {
   // Test with nil connection
-  wsConn := NewWsConnection(nil)
+  wsConn := NewWsConnection(nil, semver.MustParse("0.12.10"))
   assert.False(t, wsConn.IsConnected(), "Should not be connected when conn is nil")
 }

 // TestWsConn_Close tests the connection closing with nil connection
 func TestWsConn_Close(t *testing.T) {
-  wsConn := NewWsConnection(nil)
+  wsConn := NewWsConnection(nil, semver.MustParse("0.12.10"))

   // Should handle nil connection gracefully
   assert.NotPanics(t, func() {

@@ -65,7 +65,7 @@ func TestWsConn_Close(t *testing.T) {

 // TestWsConn_SendMessage_CBOR tests CBOR encoding in sendMessage
 func TestWsConn_SendMessage_CBOR(t *testing.T) {
-  wsConn := NewWsConnection(nil)
+  wsConn := NewWsConnection(nil, semver.MustParse("0.12.10"))

   testData := common.HubRequest[any]{
     Action: common.GetData,

@@ -194,7 +194,7 @@ func TestHandler(t *testing.T) {

 // TestWsConnChannelBehavior tests channel behavior without WebSocket connections
 func TestWsConnChannelBehavior(t *testing.T) {
-  wsConn := NewWsConnection(nil)
+  wsConn := NewWsConnection(nil, semver.MustParse("0.12.10"))

   // Test that channels are properly initialized and can be used
   select {

@@ -212,11 +212,6 @@ func TestWsConnChannelBehavior(t *testing.T) {
     t.Error("Should be able to read from DownChan")
   }

-  // Response channel should be empty initially
-  select {
-  case <-wsConn.responseChan:
-    t.Error("Response channel should be empty initially")
-  default:
-    // Expected - channel should be empty
-  }
+  // Request manager should have no pending requests initially
+  assert.Equal(t, 0, wsConn.requestManager.GetPendingCount(), "Should have no pending requests initially")
 }
internal/hub/ws/ws_test_helpers.go (new file, 11 lines)
@@ -0,0 +1,11 @@
//go:build testing
// +build testing

package ws

// GetPendingCount returns the number of pending requests (for monitoring)
func (rm *RequestManager) GetPendingCount() int {
  rm.RLock()
  defer rm.RUnlock()
  return len(rm.pendingReqs)
}
@@ -213,6 +213,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
     sum.LoadAvg[2] += stats.LoadAvg[2]
     sum.Bandwidth[0] += stats.Bandwidth[0]
     sum.Bandwidth[1] += stats.Bandwidth[1]
+    sum.DiskIO[0] += stats.DiskIO[0]
+    sum.DiskIO[1] += stats.DiskIO[1]
     batterySum += int(stats.Battery[0])
     sum.Battery[1] = stats.Battery[1]
     // Set peak values

@@ -224,6 +226,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
     sum.MaxDiskWritePs = max(sum.MaxDiskWritePs, stats.MaxDiskWritePs, stats.DiskWritePs)
     sum.MaxBandwidth[0] = max(sum.MaxBandwidth[0], stats.MaxBandwidth[0], stats.Bandwidth[0])
     sum.MaxBandwidth[1] = max(sum.MaxBandwidth[1], stats.MaxBandwidth[1], stats.Bandwidth[1])
+    sum.MaxDiskIO[0] = max(sum.MaxDiskIO[0], stats.MaxDiskIO[0], stats.DiskIO[0])
+    sum.MaxDiskIO[1] = max(sum.MaxDiskIO[1], stats.MaxDiskIO[1], stats.DiskIO[1])

     // Accumulate network interfaces
     if sum.NetworkInterfaces == nil {

@@ -314,6 +318,8 @@ func (rm *RecordManager) AverageSystemStats(db dbx.Builder, records RecordIds) *
   sum.DiskPct = twoDecimals(sum.DiskPct / count)
   sum.DiskReadPs = twoDecimals(sum.DiskReadPs / count)
   sum.DiskWritePs = twoDecimals(sum.DiskWritePs / count)
+  sum.DiskIO[0] = sum.DiskIO[0] / uint64(count)
+  sum.DiskIO[1] = sum.DiskIO[1] / uint64(count)
   sum.NetworkSent = twoDecimals(sum.NetworkSent / count)
   sum.NetworkRecv = twoDecimals(sum.NetworkRecv / count)
   sum.LoadAvg[0] = twoDecimals(sum.LoadAvg[0] / count)
@@ -1,41 +1,83 @@
 {
   "$schema": "https://biomejs.dev/schemas/2.2.3/schema.json",
   "vcs": {
-    "enabled": false,
+    "enabled": true,
     "clientKind": "git",
-    "useIgnoreFile": false
-  },
-  "files": {
-    "ignoreUnknown": false
+    "useIgnoreFile": true,
+    "defaultBranch": "main"
   },
   "formatter": {
     "enabled": true,
     "indentStyle": "tab",
     "indentWidth": 2,
-    "lineWidth": 120
+    "lineWidth": 120,
+    "formatWithErrors": true
   },
+  "assist": { "actions": { "source": { "organizeImports": "on" } } },
   "linter": {
     "enabled": true,
     "rules": {
       "recommended": true,
+      "complexity": {
+        "noUselessStringConcat": "error",
+        "noUselessUndefinedInitialization": "error",
+        "noVoid": "error",
+        "useDateNow": "error"
+      },
       "correctness": {
-        "useUniqueElementIds": "off"
+        "noConstantMathMinMaxClamp": "error",
+        "noUndeclaredVariables": "error",
+        "noUnusedImports": "error",
+        "noUnusedFunctionParameters": "error",
+        "noUnusedPrivateClassMembers": "error",
+        "useExhaustiveDependencies": {
+          "level": "error",
+          "options": {
+            "reportUnnecessaryDependencies": false
+          }
+        },
+        "noUnusedVariables": "error"
+      },
+      "style": {
+        "noParameterProperties": "error",
+        "noYodaExpression": "error",
+        "useConsistentBuiltinInstantiation": "error",
+        "useFragmentSyntax": "error",
+        "useShorthandAssign": "error",
+        "useArrayLiterals": "error"
+      },
+      "suspicious": {
+        "useAwait": "error",
+        "noEvolvingTypes": "error"
       }
     }
   },
   "javascript": {
     "formatter": {
       "quoteStyle": "double",
-      "semicolons": "asNeeded",
-      "trailingCommas": "es5"
+      "trailingCommas": "es5",
+      "semicolons": "asNeeded"
     }
   },
-  "assist": {
-    "enabled": true,
-    "actions": {
-      "source": {
-        "organizeImports": "on"
-      }
-    }
-  }
+  "overrides": [
+    {
+      "includes": ["**/*.jsx", "**/*.tsx"],
+      "linter": {
+        "rules": {
+          "style": {
+            "noParameterAssign": "error"
+          }
+        }
+      }
+    },
+    {
+      "includes": ["**/*.ts", "**/*.tsx"],
+      "linter": {
+        "rules": {
+          "correctness": {
+            "noUnusedVariables": "off"
+          }
+        }
+      }
+    }
+  ]
 }
@@ -2,12 +2,27 @@ import { useStore } from "@nanostores/react"
 import { HistoryIcon } from "lucide-react"
 import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from "@/components/ui/select"
 import { $chartTime } from "@/lib/stores"
-import { chartTimeData, cn } from "@/lib/utils"
-import type { ChartTimes } from "@/types"
+import { chartTimeData, cn, compareSemVer, parseSemVer } from "@/lib/utils"
+import type { ChartTimes, SemVer } from "@/types"
+import { memo } from "react"

-export default function ChartTimeSelect({ className }: { className?: string }) {
+export default memo(function ChartTimeSelect({
+  className,
+  agentVersion,
+}: {
+  className?: string
+  agentVersion: SemVer
+}) {
   const chartTime = useStore($chartTime)

+  // remove chart times that are not supported by the system agent version
+  const availableChartTimes = Object.entries(chartTimeData).filter(([_, { minVersion }]) => {
+    if (!minVersion) {
+      return true
+    }
+    return compareSemVer(agentVersion, parseSemVer(minVersion)) >= 0
+  })
+
   return (
     <Select defaultValue="1h" value={chartTime} onValueChange={(value: ChartTimes) => $chartTime.set(value)}>
       <SelectTrigger className={cn(className, "relative ps-10 pe-5")}>

@@ -15,7 +30,7 @@ export default function ChartTimeSelect({ className }: { className?: string }) {
         <SelectValue />
       </SelectTrigger>
       <SelectContent>
-        {Object.entries(chartTimeData).map(([value, { label }]) => (
+        {availableChartTimes.map(([value, { label }]) => (
           <SelectItem key={value} value={value}>
             {label()}
           </SelectItem>

@@ -23,4 +38,4 @@ export default function ChartTimeSelect({ className }: { className?: string }) {
       </SelectContent>
     </Select>
   )
-}
+})
@@ -59,8 +59,6 @@ export default memo(function LoadAverageChart({ chartData }: { chartData: ChartD
         <ChartTooltip
           animationEasing="ease-out"
           animationDuration={150}
-          // @ts-expect-error
-          // itemSorter={(a, b) => b.value - a.value}
           content={
             <ChartTooltipContent
               labelFormatter={(_, data) => formatShortDate(data[0].payload.created)}

@@ -70,14 +68,15 @@ export default memo(function LoadAverageChart({ chartData }: { chartData: ChartD
         />
         {keys.map(({ legacy, color, label }, i) => {
           const dataKey = (value: { stats: SystemStats }) => {
-            if (chartData.agentVersion.patch < 1) {
+            const { minor, patch } = chartData.agentVersion
+            if (minor <= 12 && patch < 1) {
               return value.stats?.[legacy]
             }
             return value.stats?.la?.[i] ?? value.stats?.[legacy]
           }
           return (
             <Line
-              key={i}
+              key={label}
               dataKey={dataKey}
               name={label}
               type="monotoneX"
@@ -14,7 +14,7 @@ import {
|
||||
} from "lucide-react"
|
||||
import { subscribeKeys } from "nanostores"
|
||||
import React, { type JSX, memo, useCallback, useEffect, useMemo, useRef, useState } from "react"
|
||||
import AreaChartDefault from "@/components/charts/area-chart"
|
||||
import AreaChartDefault, { type DataPoint } from "@/components/charts/area-chart"
|
||||
import ContainerChart from "@/components/charts/container-chart"
|
||||
import DiskChart from "@/components/charts/disk-chart"
|
||||
import GpuPowerChart from "@/components/charts/gpu-power-chart"
|
||||
@@ -49,7 +49,16 @@ import {
|
||||
toFixedFloat,
|
||||
useBrowserStorage,
|
||||
} from "@/lib/utils"
|
||||
import type { ChartData, ChartTimes, ContainerStatsRecord, GPUData, SystemRecord, SystemStatsRecord } from "@/types"
|
||||
import type {
|
||||
ChartData,
|
||||
ChartTimes,
|
||||
ContainerStatsRecord,
|
||||
GPUData,
|
||||
SystemInfo,
|
||||
SystemRecord,
|
||||
SystemStats,
|
||||
SystemStatsRecord,
|
||||
} from "@/types"
|
||||
import ChartTimeSelect from "../charts/chart-time-select"
|
||||
import { $router, navigate } from "../router"
|
||||
import Spinner from "../spinner"
|
||||
@@ -95,25 +104,28 @@ function getTimeData(chartTime: ChartTimes, lastCreated: number) {
|
||||
}
|
||||
|
||||
// add empty values between records to make gaps if interval is too large
|
||||
function addEmptyValues<T extends SystemStatsRecord | ContainerStatsRecord>(
|
||||
function addEmptyValues<T extends { created: string | number | null }>(
|
||||
prevRecords: T[],
|
||||
newRecords: T[],
|
||||
expectedInterval: number
|
||||
) {
|
||||
): T[] {
|
||||
const modifiedRecords: T[] = []
|
||||
let prevTime = (prevRecords.at(-1)?.created ?? 0) as number
|
||||
for (let i = 0; i < newRecords.length; i++) {
|
||||
const record = newRecords[i]
|
||||
record.created = new Date(record.created).getTime()
|
||||
if (prevTime) {
|
||||
if (record.created !== null) {
|
||||
record.created = new Date(record.created).getTime()
|
||||
}
|
||||
if (prevTime && record.created !== null) {
|
||||
const interval = record.created - prevTime
|
||||
// if interval is too large, add a null record
|
||||
if (interval > expectedInterval / 2 + expectedInterval) {
|
||||
// @ts-expect-error
|
||||
modifiedRecords.push({ created: null, stats: null })
|
||||
modifiedRecords.push({ created: null, ...("stats" in record ? { stats: null } : {}) } as T)
|
||||
}
|
||||
}
|
||||
prevTime = record.created
|
||||
if (record.created !== null) {
|
||||
prevTime = record.created
|
||||
}
|
||||
modifiedRecords.push(record)
|
||||
}
|
||||
return modifiedRecords
|
||||
@@ -137,7 +149,7 @@ async function getStats<T extends SystemStatsRecord | ContainerStatsRecord>(
|
||||
})
|
||||
}
|
||||
|
||||
function dockerOrPodman(str: string, system: SystemRecord) {
|
||||
function dockerOrPodman(str: string, system: SystemRecord): string {
|
||||
if (system.info.p) {
|
||||
return str.replace("docker", "podman").replace("Docker", "Podman")
|
||||
}
|
||||
@@ -156,10 +168,9 @@ export default memo(function SystemDetail({ name }: { name: string }) {
|
||||
const [containerData, setContainerData] = useState([] as ChartData["containerData"])
|
||||
const netCardRef = useRef<HTMLDivElement>(null)
|
||||
const persistChartTime = useRef(false)
|
||||
const [containerFilterBar, setContainerFilterBar] = useState(null as null | JSX.Element)
|
||||
const [bottomSpacing, setBottomSpacing] = useState(0)
|
||||
const [chartLoading, setChartLoading] = useState(true)
|
||||
const isLongerChart = chartTime !== "1h"
|
||||
const isLongerChart = !["1m", "1h"].includes(chartTime) // true if chart time is not 1m or 1h
|
||||
const userSettings = $userSettings.get()
|
||||
const chartWrapRef = useRef<HTMLDivElement>(null)
|
||||
|
||||
@@ -172,7 +183,6 @@ export default memo(function SystemDetail({ name }: { name: string }) {
|
||||
persistChartTime.current = false
|
||||
setSystemStats([])
|
||||
setContainerData([])
|
||||
setContainerFilterBar(null)
|
||||
$containerFilter.set("")
|
||||
}
|
||||
}, [name])
|
||||
@@ -185,6 +195,51 @@ export default memo(function SystemDetail({ name }: { name: string }) {
|
||||
})
|
||||
}, [name])
|
||||
|
||||
// hide 1m chart time if system agent version is less than 0.13.0
|
||||
useEffect(() => {
|
||||
if (parseSemVer(system?.info?.v) < parseSemVer("0.13.0")) {
|
||||
$chartTime.set("1h")
|
||||
}
|
||||
}, [system?.info?.v])
|
||||
|
||||
// subscribe to realtime metrics if chart time is 1m
|
||||
// biome-ignore lint/correctness/useExhaustiveDependencies: not necessary
|
||||
useEffect(() => {
|
||||
let unsub = () => {}
|
||||
if (!system.id || chartTime !== "1m") {
|
||||
return
|
||||
}
|
||||
if (system.status !== SystemStatus.Up || parseSemVer(system?.info?.v).minor < 13) {
|
||||
$chartTime.set("1h")
|
||||
return
|
||||
}
|
||||
pb.realtime
|
||||
.subscribe(
|
||||
`rt_metrics`,
|
||||
(data: { container: ContainerStatsRecord[]; info: SystemInfo; stats: SystemStats }) => {
|
||||
// console.log("received realtime metrics", data)
|
||||
const newContainerData = makeContainerData([
|
||||
{ created: Date.now(), stats: data.container } as unknown as ContainerStatsRecord,
|
||||
])
|
||||
setContainerData((prevData) => addEmptyValues(prevData, prevData.slice(-59).concat(newContainerData), 1000))
|
||||
setSystemStats((prevStats) =>
|
||||
addEmptyValues(
|
||||
prevStats,
|
||||
prevStats.slice(-59).concat({ created: Date.now(), stats: data.stats } as SystemStatsRecord),
|
||||
1000
|
||||
)
|
||||
)
|
||||
},
|
||||
{ query: { system: system.id } }
|
||||
)
|
||||
.then((us) => {
|
||||
unsub = us
|
||||
})
|
||||
return () => {
|
||||
unsub?.()
|
||||
}
|
||||
}, [chartTime, system.id])
|
||||
|
||||
// biome-ignore lint/correctness/useExhaustiveDependencies: not necessary
|
||||
const chartData: ChartData = useMemo(() => {
|
||||
const lastCreated = Math.max(
|
||||
@@ -221,13 +276,13 @@ export default memo(function SystemDetail({ name }: { name: string }) {
|
||||
}
|
||||
containerData.push(containerStats)
|
||||
}
|
||||
setContainerData(containerData)
|
||||
return containerData
|
||||
}, [])
|
||||
|
||||
// get stats
|
||||
// biome-ignore lint/correctness/useExhaustiveDependencies: not necessary
|
||||
useEffect(() => {
|
||||
if (!system.id || !chartTime) {
|
||||
if (!system.id || !chartTime || chartTime === "1m") {
|
||||
return
|
||||
}
|
||||
// loading: true
|
||||
@@ -261,12 +316,7 @@ export default memo(function SystemDetail({ name }: { name: string }) {
|
||||
}
|
||||
cache.set(cs_cache_key, containerData)
|
||||
}
|
||||
if (containerData.length) {
|
||||
!containerFilterBar && setContainerFilterBar(<FilterBar />)
|
||||
} else if (containerFilterBar) {
|
||||
setContainerFilterBar(null)
|
||||
}
|
||||
makeContainerData(containerData)
|
||||
setContainerData(makeContainerData(containerData))
|
||||
})
|
||||
}, [system, chartTime])
|
||||
|
||||
@@ -392,9 +442,10 @@ export default memo(function SystemDetail({ name }: { name: string }) {
|
||||
|
||||
// select field for switching between avg and max values
|
||||
const maxValSelect = isLongerChart ? <SelectAvgMax max={maxValues} /> : null
|
||||
const showMax = chartTime !== "1h" && maxValues
|
||||
const showMax = maxValues && isLongerChart
|
||||
|
||||
const containerFilterBar = containerData.length ? <FilterBar /> : null
|
||||
|
||||
// if no data, show empty message
|
||||
const dataEmpty = !chartLoading && chartData.systemStats.length === 0
|
||||
const lastGpuVals = Object.values(systemStats.at(-1)?.stats.g ?? {})
|
||||
const hasGpuData = lastGpuVals.length > 0
|
||||
@@ -483,7 +534,7 @@ export default memo(function SystemDetail({ name }: { name: string }) {
|
||||
</div>
|
||||
</div>
|
||||
<div className="xl:ms-auto flex items-center gap-2 max-sm:-mb-1">
|
||||
<ChartTimeSelect className="w-full xl:w-40" />
|
||||
<ChartTimeSelect className="w-full xl:w-40" agentVersion={chartData.agentVersion} />
|
||||
<TooltipProvider delayDuration={100}>
|
||||
<Tooltip>
|
||||
<TooltipTrigger asChild>
|
||||
@@ -594,23 +645,33 @@ export default memo(function SystemDetail({ name }: { name: string }) {
|
||||
dataPoints={[
|
||||
{
|
||||
label: t({ message: "Write", comment: "Disk write" }),
|
||||
dataKey: ({ stats }: SystemStatsRecord) => (showMax ? stats?.dwm : stats?.dw),
|
||||
dataKey: ({ stats }: SystemStatsRecord) => {
|
||||
if (showMax) {
|
||||
return stats?.dio?.[1] ?? (stats?.dwm ?? 0) * 1024 * 1024
|
||||
}
|
||||
return stats?.dio?.[1] ?? (stats?.dw ?? 0) * 1024 * 1024
|
||||
},
|
||||
color: 3,
|
||||
opacity: 0.3,
|
||||
},
|
||||
{
|
||||
label: t({ message: "Read", comment: "Disk read" }),
|
||||
dataKey: ({ stats }: SystemStatsRecord) => (showMax ? stats?.drm : stats?.dr),
|
||||
dataKey: ({ stats }: SystemStatsRecord) => {
|
||||
if (showMax) {
|
||||
return stats?.diom?.[0] ?? (stats?.drm ?? 0) * 1024 * 1024
|
||||
}
|
||||
return stats?.dio?.[0] ?? (stats?.dr ?? 0) * 1024 * 1024
|
||||
},
|
||||
color: 1,
|
||||
opacity: 0.3,
|
||||
},
|
||||
]}
|
||||
tickFormatter={(val) => {
|
||||
const { value, unit } = formatBytes(val, true, userSettings.unitDisk, true)
|
||||
const { value, unit } = formatBytes(val, true, userSettings.unitDisk, false)
|
||||
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
|
||||
}}
|
||||
contentFormatter={({ value }) => {
|
||||
const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, true)
|
||||
const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
|
||||
return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
|
||||
}}
|
||||
/>
|
||||
@@ -791,7 +852,7 @@ export default memo(function SystemDetail({ name }: { name: string }) {
|
||||
return (
|
||||
<div key={id} className="contents">
|
||||
<ChartCard
|
||||
className="!col-span-1"
|
||||
className={cn(grid && "!col-span-1")}
|
||||
empty={dataEmpty}
|
||||
grid={grid}
|
||||
title={`${gpu.n} ${t`Usage`}`}
|
||||
@@ -877,24 +938,36 @@ export default memo(function SystemDetail({ name }: { name: string }) {
|
||||
dataPoints={[
|
||||
{
|
||||
label: t`Write`,
|
||||
dataKey: ({ stats }) => stats?.efs?.[extraFsName]?.[showMax ? "wm" : "w"] ?? 0,
|
||||
dataKey: ({ stats }) => {
|
||||
if (showMax) {
|
||||
return stats?.efs?.[extraFsName]?.wb ?? (stats?.efs?.[extraFsName]?.wm ?? 0) * 1024 * 1024
}
return stats?.efs?.[extraFsName]?.wb ?? (stats?.efs?.[extraFsName]?.w ?? 0) * 1024 * 1024
},
color: 3,
opacity: 0.3,
},
{
label: t`Read`,
dataKey: ({ stats }) => stats?.efs?.[extraFsName]?.[showMax ? "rm" : "r"] ?? 0,
dataKey: ({ stats }) => {
if (showMax) {
return (
stats?.efs?.[extraFsName]?.rbm ?? (stats?.efs?.[extraFsName]?.rm ?? 0) * 1024 * 1024
)
}
return stats?.efs?.[extraFsName]?.rb ?? (stats?.efs?.[extraFsName]?.r ?? 0) * 1024 * 1024
},
color: 1,
opacity: 0.3,
},
]}
maxToggled={maxValues}
tickFormatter={(val) => {
const { value, unit } = formatBytes(val, true, userSettings.unitDisk, true)
const { value, unit } = formatBytes(val, true, userSettings.unitDisk, false)
return `${toFixedFloat(value, value >= 10 ? 0 : 1)} ${unit}`
}}
contentFormatter={({ value }) => {
const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, true)
const { value: convertedValue, unit } = formatBytes(value, true, userSettings.unitDisk, false)
return `${decimalString(convertedValue, convertedValue >= 100 ? 1 : 2)} ${unit}`
}}
/>

@@ -913,7 +986,7 @@ export default memo(function SystemDetail({ name }: { name: string }) {
})

function GpuEnginesChart({ chartData }: { chartData: ChartData }) {
const dataPoints = []
const dataPoints: DataPoint[] = []
const engines = Object.keys(chartData.systemStats?.at(-1)?.stats.g?.[0]?.e ?? {}).sort()
for (const engine of engines) {
dataPoints.push({

@@ -53,7 +53,7 @@ export default memo(function NetworkSheet({
</SheetTrigger>
{hasOpened.current && (
<SheetContent aria-describedby={undefined} className="overflow-auto w-200 !max-w-full p-4 sm:p-6">
<ChartTimeSelect className="w-[calc(100%-2em)]" />
<ChartTimeSelect className="w-[calc(100%-2em)]" agentVersion={chartData.agentVersion} />
<ChartCard
empty={dataEmpty}
grid={grid}

@@ -131,7 +131,6 @@ export default function SystemsTable() {
return [Object.values(upSystems).length, Object.values(downSystems).length, Object.values(pausedSystems).length]
}, [upSystems, downSystems, pausedSystems])

// TODO: hiding temp then gpu messes up table headers
const CardHead = useMemo(() => {
return (
<CardHeader className="pb-4.5 px-2 sm:px-6 max-sm:pt-5 max-sm:pb-1">

@@ -26,7 +26,7 @@ export const verifyAuth = () => {
}

/** Logs the user out by clearing the auth store and unsubscribing from realtime updates. */
export async function logOut() {
export function logOut() {
$allSystemsByName.set({})
$alerts.set({})
$userSettings.set({} as UserSettings)

@@ -1,7 +1,7 @@
import { t } from "@lingui/core/macro"
import { type ClassValue, clsx } from "clsx"
import { timeDay, timeHour } from "d3-time"
import { listenKeys } from "nanostores"
import { timeDay, timeHour, timeMinute } from "d3-time"
import { useEffect, useState } from "react"
import { twMerge } from "tailwind-merge"
import { prependBasePath } from "@/components/router"

@@ -54,9 +54,18 @@ const createShortDateFormatter = (hour12?: boolean) =>
hour12,
})

const createHourWithSecondsFormatter = (hour12?: boolean) =>
new Intl.DateTimeFormat(undefined, {
hour: "numeric",
minute: "numeric",
second: "numeric",
hour12,
})

// Initialize formatters with default values
let hourWithMinutesFormatter = createHourWithMinutesFormatter()
let shortDateFormatter = createShortDateFormatter()
let hourWithSecondsFormatter = createHourWithSecondsFormatter()

export const currentHour12 = () => shortDateFormatter.resolvedOptions().hour12

@@ -68,6 +77,10 @@ export const formatShortDate = (timestamp: string) => {
return shortDateFormatter.format(new Date(timestamp))
}

export const hourWithSeconds = (timestamp: string) => {
return hourWithSecondsFormatter.format(new Date(timestamp))
}

// Update the time formatters if user changes hourFormat
listenKeys($userSettings, ["hourFormat"], ({ hourFormat }) => {
if (!hourFormat) return
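
The new hourWithSeconds helper exists because the 1-minute chart needs second-resolution tick labels, and it is rebuilt together with the other formatters whenever the user flips between 12- and 24-hour time. A self-contained sketch of the same idea using plain Intl.DateTimeFormat (independent of the store wiring above; output depends on locale and timezone):

// Sketch only: second-resolution time labels with a 12h/24h toggle.
const makeSecondsFormatter = (hour12?: boolean) =>
	new Intl.DateTimeFormat(undefined, { hour: "numeric", minute: "numeric", second: "numeric", hour12 })

let fmt = makeSecondsFormatter() // locale default, e.g. "3:07:42 PM"
console.log(fmt.format(new Date("2025-01-01T15:07:42Z")))

// When the hour format changes, recreate the formatter rather than mutating it.
fmt = makeSecondsFormatter(false) // force 24-hour clock, e.g. "15:07:42"
console.log(fmt.format(new Date("2025-01-01T15:07:42Z")))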

@@ -75,6 +88,7 @@ listenKeys($userSettings, ["hourFormat"], ({ hourFormat }) => {
if (currentHour12() !== newHour12) {
hourWithMinutesFormatter = createHourWithMinutesFormatter(newHour12)
shortDateFormatter = createShortDateFormatter(newHour12)
hourWithSecondsFormatter = createHourWithSecondsFormatter(newHour12)
}
})

@@ -91,6 +105,15 @@ export const updateFavicon = (newIcon: string) => {
}

export const chartTimeData: ChartTimeData = {
"1m": {
type: "1m",
expectedInterval: 1000,
label: () => t`1 minute`,
format: (timestamp: string) => hourWithSeconds(timestamp),
ticks: 3,
getOffset: (endTime: Date) => timeMinute.offset(endTime, -1),
minVersion: "0.13.0",
},
"1h": {
type: "1m",
expectedInterval: 60_000,
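
Each chartTimeData entry is self-describing: getOffset yields the window start relative to "now", expectedInterval is the expected gap between points, and minVersion lets the UI hide ranges an older agent cannot serve. A rough sketch of how a consumer could derive the visible window from the "1m" entry (the chartWindow helper is illustrative, not code from this commit):

import { timeMinute } from "d3-time"

// Mirrors the "1m" entry above: a one-minute window with one point per second.
const oneMinute = {
	expectedInterval: 1000,
	getOffset: (endTime: Date) => timeMinute.offset(endTime, -1),
}

/** Derive the chart's visible time window and roughly how many samples to expect. */
function chartWindow(entry: typeof oneMinute, now = new Date()) {
	const start = entry.getOffset(now)
	const expectedPoints = Math.round((now.getTime() - start.getTime()) / entry.expectedInterval)
	return { start, end: now, expectedPoints } // ~60 points for the 1-minute view
}

console.log(chartWindow(oneMinute))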

@@ -278,7 +301,7 @@ export const generateToken = () => {
}

/** Get the hub URL from the global BESZEL object */
export const getHubURL = () => BESZEL?.HUB_URL || window.location.origin
export const getHubURL = () => globalThis.BESZEL?.HUB_URL || window.location.origin

/** Map of system IDs to their corresponding tokens (used to avoid fetching in add-system dialog) */
export const tokenMap = new Map<SystemRecord["id"], FingerprintRecord["token"]>()

@@ -333,6 +356,17 @@ export const parseSemVer = (semVer = ""): SemVer => {
return { major: parts?.[0] ?? 0, minor: parts?.[1] ?? 0, patch: parts?.[2] ?? 0 }
}

/** Compare two SemVer values. Returns a negative number if a is less than b, 0 if they are equal, and a positive number if a is greater than b. */
export function compareSemVer(a: SemVer, b: SemVer) {
if (a.major !== b.major) {
return a.major - b.major
}
if (a.minor !== b.minor) {
return a.minor - b.minor
}
return a.patch - b.patch
}

/** Get meter state from 0-100 value. Used for color coding meters. */
export function getMeterState(value: number): MeterState {
const { colorWarn = 65, colorCrit = 90 } = $userSettings.get()
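
The minVersion field on the new "1m" entry works together with parseSemVer/compareSemVer: the time selector can compare the system's reported agent version against an option's minimum and hide anything the agent cannot serve. A standalone sketch of such a gate (isOptionSupported and the inline SemVer type are illustrative, not the selector's actual code):

type SemVer = { major: number; minor: number; patch: number }

// Same ordering logic as compareSemVer above: the sign of the result orders two versions.
function compareSemVer(a: SemVer, b: SemVer) {
	if (a.major !== b.major) return a.major - b.major
	if (a.minor !== b.minor) return a.minor - b.minor
	return a.patch - b.patch
}

/** True when the reporting agent is new enough for a chart option gated by minVersion. */
function isOptionSupported(agentVersion: SemVer, minVersion?: SemVer): boolean {
	if (!minVersion) return true // ungated options are always available
	return compareSemVer(agentVersion, minVersion) >= 0
}

// The "1m" option requires agents >= 0.13.0.
console.log(isOptionSupported({ major: 0, minor: 13, patch: 1 }, { major: 0, minor: 13, patch: 0 })) // true
console.log(isOptionSupported({ major: 0, minor: 12, patch: 9 }, { major: 0, minor: 13, patch: 0 })) // false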

internal/site/src/types.d.ts

@@ -123,6 +123,10 @@ export interface SystemStats {
drm?: number
/** max disk write (mb) */
dwm?: number
/** disk I/O bytes [read, write] */
dio?: [number, number]
/** max disk I/O bytes [read, write] */
diom?: [number, number]
/** network sent (mb) */
ns: number
/** network received (mb) */
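
Note the index convention on the new tuples: read is index 0, write is index 1, both in raw bytes. A tiny illustration with made-up values (the DiskIoFields alias is just a trimmed stand-in for SystemStats):

// Hypothetical values: 12 MiB/s read, 3 MiB/s write, with higher observed maxima.
type DiskIoFields = { dio?: [number, number]; diom?: [number, number] }

const stats: DiskIoFields = {
	dio: [12 * 1024 * 1024, 3 * 1024 * 1024], // [read bytes, write bytes]
	diom: [48 * 1024 * 1024, 20 * 1024 * 1024], // [max read bytes, max write bytes]
}

const [readBytes, writeBytes] = stats.dio ?? [0, 0]
console.log(readBytes, writeBytes) // 12582912 3145728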

@@ -177,6 +181,14 @@ export interface ExtraFsStats {
rm: number
/** max write (mb) */
wm: number
/** read per second (bytes) */
rb: number
/** write per second (bytes) */
wb: number
/** max read per second (bytes) */
rbm: number
/** max write per second (bytes) */
wbm: number
}

export interface ContainerStatsRecord extends RecordModel {

@@ -224,7 +236,7 @@ export interface AlertsHistoryRecord extends RecordModel {
resolved?: string | null
}

export type ChartTimes = "1h" | "12h" | "24h" | "1w" | "30d"
export type ChartTimes = "1m" | "1h" | "12h" | "24h" | "1w" | "30d"

export interface ChartTimeData {
[key: string]: {

@@ -234,6 +246,7 @@ export interface ChartTimeData {
ticks?: number
format: (timestamp: string) => string
getOffset: (endTime: Date) => Date
minVersion?: string
}
}