mirror of
https://github.com/henrygd/beszel.git
synced 2026-04-03 03:21:50 +02:00
Compare commits
231 Commits
56807dc5e4
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
6b5e6ffa9a | ||
|
|
d656036d3b | ||
|
|
80b73c7faf | ||
|
|
afe9eb7a70 | ||
|
|
7f565a3086 | ||
|
|
77862d4cb1 | ||
|
|
e158a9001b | ||
|
|
f670e868e4 | ||
|
|
0fff699bf6 | ||
|
|
ba10da1b9f | ||
|
|
7f4f14b505 | ||
|
|
2fda4ff264 | ||
|
|
20b0b40ec8 | ||
|
|
d548a012b4 | ||
|
|
ce5d1217dd | ||
|
|
cef09d7cb1 | ||
|
|
f6440acb43 | ||
|
|
5463a38f0f | ||
|
|
80135fdad3 | ||
|
|
5db4eb4346 | ||
|
|
f6c5e2928a | ||
|
|
6a207c33fa | ||
|
|
9f19afccde | ||
|
|
f25f2469e3 | ||
|
|
5bd43ed461 | ||
|
|
afdc3f7779 | ||
|
|
a227c77526 | ||
|
|
8202d746af | ||
|
|
9840b99327 | ||
|
|
f7b5a505e8 | ||
|
|
3cb32ac046 | ||
|
|
e610d9bfc8 | ||
|
|
b53fdbe0ef | ||
|
|
c7261b56f1 | ||
|
|
3f4c3d51b6 | ||
|
|
ad21cab457 | ||
|
|
f04684b30a | ||
|
|
4d4e4fba9b | ||
|
|
62587919f4 | ||
|
|
35528332fd | ||
|
|
e3e453140e | ||
|
|
7a64da9f65 | ||
|
|
8e71c8ad97 | ||
|
|
97f3b8c61f | ||
|
|
0b0b5d16d7 | ||
|
|
b2fd50211e | ||
|
|
c159eaacd1 | ||
|
|
441bdd2ec5 | ||
|
|
ff36138229 | ||
|
|
be70840609 | ||
|
|
565162ef5f | ||
|
|
adbfe7cfb7 | ||
|
|
1ff7762c80 | ||
|
|
0ab8a606e0 | ||
|
|
e4e0affbc1 | ||
|
|
c3a0e645ee | ||
|
|
c6c3950fb0 | ||
|
|
48ddc96a0d | ||
|
|
704cb86de8 | ||
|
|
2854ce882f | ||
|
|
ed50367f70 | ||
|
|
4ebe869591 | ||
|
|
c9bbbe91f2 | ||
|
|
5bfe4f6970 | ||
|
|
380d2b1091 | ||
|
|
a7f99e7a8c | ||
|
|
bd94a9d142 | ||
|
|
8e2316f845 | ||
|
|
0d3dfcb207 | ||
|
|
b386ce5190 | ||
|
|
e527534016 | ||
|
|
ec7ad632a9 | ||
|
|
963fce5a33 | ||
|
|
d38c0da06d | ||
|
|
cae6ac4626 | ||
|
|
6b1ff264f2 | ||
|
|
35d0e792ad | ||
|
|
654cd06b19 | ||
|
|
5e1b028130 | ||
|
|
638e7dc12a | ||
|
|
73c262455d | ||
|
|
0c4d2edd45 | ||
|
|
8f23fff1c9 | ||
|
|
02c1a0c13d | ||
|
|
69fdcb36ab | ||
|
|
b91eb6de40 | ||
|
|
ec69f6c6e0 | ||
|
|
a86cb91e07 | ||
|
|
004841717a | ||
|
|
096296ba7b | ||
|
|
b012df5669 | ||
|
|
12545b4b6d | ||
|
|
9e2296452b | ||
|
|
ac79860d4a | ||
|
|
e13a99fdac | ||
|
|
4cfb2a86ad | ||
|
|
191f25f6e0 | ||
|
|
aa8b3711d7 | ||
|
|
1fb0b25988 | ||
|
|
04600d83cc | ||
|
|
5d8906c9b2 | ||
|
|
daac287b9d | ||
|
|
d526ea61a9 | ||
|
|
79616e1662 | ||
|
|
01e8bdf040 | ||
|
|
1e3a44e05d | ||
|
|
311095cfdd | ||
|
|
4869c834bb | ||
|
|
e1c1e97f0a | ||
|
|
f6b2824ccc | ||
|
|
f17ffc21b8 | ||
|
|
f792f9b102 | ||
|
|
1def7d8d3a | ||
|
|
ef92b254bf | ||
|
|
10d853c004 | ||
|
|
cdfd116da0 | ||
|
|
283fa9d5c2 | ||
|
|
7d6c0caafc | ||
|
|
04d54a3efc | ||
|
|
14ecb1b069 | ||
|
|
1f1a448aef | ||
|
|
e816ea143a | ||
|
|
2230097dc7 | ||
|
|
25c77c5664 | ||
|
|
dba3519b2c | ||
|
|
48c35aa54d | ||
|
|
6b7845b03e | ||
|
|
221be1da58 | ||
|
|
8347afd68e | ||
|
|
2a3885a52e | ||
|
|
5452e50080 | ||
|
|
028f7bafb2 | ||
|
|
0f6142e27e | ||
|
|
8c37b93a4b | ||
|
|
201d16af05 | ||
|
|
db007176fd | ||
|
|
83fb67132b | ||
|
|
a04837f4d5 | ||
|
|
3d8db53e52 | ||
|
|
5797f8a6ad | ||
|
|
79ca31d770 | ||
|
|
41f3705b6b | ||
|
|
20324763d2 | ||
|
|
70f85f9590 | ||
|
|
c7f7f51c99 | ||
|
|
6723ec8ea4 | ||
|
|
afc19ebd3b | ||
|
|
c83d00ccaa | ||
|
|
425c8d2bdf | ||
|
|
42da1e5a52 | ||
|
|
afcae025ae | ||
|
|
1de36625a4 | ||
|
|
a2b6c7f5e6 | ||
|
|
799c7b077a | ||
|
|
cb5f944de6 | ||
|
|
23c4958145 | ||
|
|
edb2edc12c | ||
|
|
648a979a81 | ||
|
|
988de6de7b | ||
|
|
031abbfcb3 | ||
|
|
b59fcc26e5 | ||
|
|
acaa9381fe | ||
|
|
8d9e9260e6 | ||
|
|
0fc4a6daed | ||
|
|
af0c1d3af7 | ||
|
|
9ad3cd0ab9 | ||
|
|
00def272b0 | ||
|
|
383913505f | ||
|
|
ca8cb78c29 | ||
|
|
8821fb5dd0 | ||
|
|
3279a6ca53 | ||
|
|
6a1a98d73f | ||
|
|
1f067aad5b | ||
|
|
1388711105 | ||
|
|
618e5b4cc1 | ||
|
|
42c3ca5db5 | ||
|
|
534791776b | ||
|
|
0c6c53fc7d | ||
|
|
0dfd5ce07d | ||
|
|
2cd6d46f7c | ||
|
|
c333a9fadd | ||
|
|
ba3d1c66f0 | ||
|
|
7276e533ce | ||
|
|
8b84231042 | ||
|
|
77da744008 | ||
|
|
5da7a21119 | ||
|
|
78d742c712 | ||
|
|
1c97ea3e2c | ||
|
|
3d970defe9 | ||
|
|
6282794004 | ||
|
|
475c53a55d | ||
|
|
4547ff7b5d | ||
|
|
e7b4be3dc5 | ||
|
|
2bd85e04fc | ||
|
|
f6ab5f2af1 | ||
|
|
7d943633a3 | ||
|
|
7fff3c999a | ||
|
|
a9068a11a9 | ||
|
|
d3d102516c | ||
|
|
32131439f9 | ||
|
|
d17685c540 | ||
|
|
e59f8eee36 | ||
|
|
35329abcbd | ||
|
|
ee7741c3ab | ||
|
|
ab0803b2da | ||
|
|
96196a353c | ||
|
|
2a8796c38d | ||
|
|
c8d4f7427d | ||
|
|
8d41a797d3 | ||
|
|
570e1cbf40 | ||
|
|
4c9b00a066 | ||
|
|
7d1f8bb180 | ||
|
|
3a6caeb06e | ||
|
|
9e67245e60 | ||
|
|
b7a95d5d76 | ||
|
|
fe550c5901 | ||
|
|
8aac0a571a | ||
|
|
c506b8b0ad | ||
|
|
a6e84c207e | ||
|
|
249402eaed | ||
|
|
394c476f2a | ||
|
|
86e8a141ea | ||
|
|
53a7e06dcf | ||
|
|
11edabd09f | ||
|
|
41a3d9359f | ||
|
|
5dfc5f247f | ||
|
|
9804c8a31a | ||
|
|
4d05bfdff0 | ||
|
|
0388401a9e | ||
|
|
162c548010 | ||
|
|
888b4a57e5 |
2
.github/CODEOWNERS
vendored
Normal file
2
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
# Everything needs to be reviewed by Hank
|
||||||
|
* @henrygd
|
||||||
19
.github/DISCUSSION_TEMPLATE/ideas.yml
vendored
Normal file
19
.github/DISCUSSION_TEMPLATE/ideas.yml
vendored
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
body:
|
||||||
|
- type: dropdown
|
||||||
|
id: component
|
||||||
|
attributes:
|
||||||
|
label: Component
|
||||||
|
description: Which part of Beszel is this about?
|
||||||
|
options:
|
||||||
|
- Hub
|
||||||
|
- Agent
|
||||||
|
- Hub & Agent
|
||||||
|
default: 0
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
- type: textarea
|
||||||
|
attributes:
|
||||||
|
label: Description
|
||||||
|
description: Please describe in detail what you want to share.
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
68
.github/DISCUSSION_TEMPLATE/support.yml
vendored
68
.github/DISCUSSION_TEMPLATE/support.yml
vendored
@@ -1,19 +1,54 @@
|
|||||||
body:
|
body:
|
||||||
- type: markdown
|
- type: checkboxes
|
||||||
|
id: terms
|
||||||
attributes:
|
attributes:
|
||||||
value: |
|
label: Welcome!
|
||||||
### Before opening a discussion:
|
description: |
|
||||||
|
Thank you for reaching out to the Beszel community for support! To help us assist you better, please make sure to review the following points before submitting your request:
|
||||||
|
|
||||||
- Check the [common issues guide](https://beszel.dev/guide/common-issues).
|
Please note:
|
||||||
- Search existing [issues](https://github.com/henrygd/beszel/issues) and [discussions](https://github.com/henrygd/beszel/discussions) (including closed).
|
- For translation-related issues or requests, please use the [Crowdin project](https://crowdin.com/project/beszel).
|
||||||
|
**- Please do not submit support reqeusts that are specific to ZFS. We plan to add integration with ZFS utilities in the near future.**
|
||||||
|
|
||||||
|
options:
|
||||||
|
- label: I have read the [Documentation](https://beszel.dev/guide/getting-started)
|
||||||
|
required: true
|
||||||
|
- label: I have checked the [Common Issues Guide](https://beszel.dev/guide/common-issues) and my problem was not mentioned there.
|
||||||
|
required: true
|
||||||
|
- label: I have searched open and closed issues and discussions and my problem was not mentioned before.
|
||||||
|
required: true
|
||||||
|
- label: I have verified I am using the latest version available. You can check the latest release [here](https://github.com/henrygd/beszel/releases).
|
||||||
|
required: true
|
||||||
|
|
||||||
|
- type: dropdown
|
||||||
|
id: component
|
||||||
|
attributes:
|
||||||
|
label: Component
|
||||||
|
description: Which part of Beszel is this about?
|
||||||
|
options:
|
||||||
|
- Hub
|
||||||
|
- Agent
|
||||||
|
- Hub & Agent
|
||||||
|
default: 0
|
||||||
|
validations:
|
||||||
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: description
|
id: description
|
||||||
attributes:
|
attributes:
|
||||||
label: Description
|
label: Problem Description
|
||||||
description: A clear and concise description of the issue or question. If applicable, add screenshots to help explain your problem.
|
description: |
|
||||||
|
How to write a good bug report?
|
||||||
|
|
||||||
|
- Respect the issue template as much as possible.
|
||||||
|
- The title should be short and descriptive.
|
||||||
|
- Explain the conditions which led you to report this issue: the context.
|
||||||
|
- The context should lead to something, a problem that you’re facing.
|
||||||
|
- Remain clear and concise.
|
||||||
|
- Format your messages to help the reader focus on what matters and understand the structure of your message, use [Markdown syntax](https://help.github.com/articles/github-flavored-markdown)
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
- type: input
|
- type: input
|
||||||
id: system
|
id: system
|
||||||
attributes:
|
attributes:
|
||||||
@@ -21,13 +56,15 @@ body:
|
|||||||
placeholder: linux/amd64 (agent), freebsd/arm64 (hub)
|
placeholder: linux/amd64 (agent), freebsd/arm64 (hub)
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: input
|
|
||||||
id: version
|
# - type: input
|
||||||
attributes:
|
# id: version
|
||||||
label: Beszel version
|
# attributes:
|
||||||
placeholder: 0.9.1
|
# label: Beszel version
|
||||||
validations:
|
# placeholder: 0.9.1
|
||||||
required: true
|
# validations:
|
||||||
|
# required: true
|
||||||
|
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
id: install-method
|
id: install-method
|
||||||
attributes:
|
attributes:
|
||||||
@@ -41,18 +78,21 @@ body:
|
|||||||
- Other (please describe above)
|
- Other (please describe above)
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: config
|
id: config
|
||||||
attributes:
|
attributes:
|
||||||
label: Configuration
|
label: Configuration
|
||||||
description: Please provide any relevant service configuration
|
description: Please provide any relevant service configuration
|
||||||
render: yaml
|
render: yaml
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: hub-logs
|
id: hub-logs
|
||||||
attributes:
|
attributes:
|
||||||
label: Hub Logs
|
label: Hub Logs
|
||||||
description: Check the logs page in PocketBase (`/_/#/logs`) for relevant errors (copy JSON).
|
description: Check the logs page in PocketBase (`/_/#/logs`) for relevant errors (copy JSON).
|
||||||
render: json
|
render: json
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: agent-logs
|
id: agent-logs
|
||||||
attributes:
|
attributes:
|
||||||
|
|||||||
103
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
103
.github/ISSUE_TEMPLATE/bug_report.yml
vendored
@@ -1,8 +1,30 @@
|
|||||||
name: 🐛 Bug report
|
name: 🐛 Bug report
|
||||||
description: Report a new bug or issue.
|
description: Use this template to report a bug or issue.
|
||||||
title: '[Bug]: '
|
title: '[Bug]: '
|
||||||
labels: ['bug', "needs confirmation"]
|
labels: ['bug']
|
||||||
body:
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
attributes:
|
||||||
|
label: Welcome!
|
||||||
|
description: |
|
||||||
|
The issue tracker is for reporting bugs and feature requests only. For end-user related support questions, please use the **[GitHub Discussions](https://github.com/henrygd/beszel/discussions/new?category=support)** instead
|
||||||
|
|
||||||
|
Please note:
|
||||||
|
- For translation-related issues or requests, please use the [Crowdin project](https://crowdin.com/project/beszel).
|
||||||
|
- To request a change or feature, use the [feature request form](https://github.com/henrygd/beszel/issues/new?template=feature_request.yml).
|
||||||
|
- Any issues that can be resolved by consulting the documentation or by reviewing existing open or closed issues will be closed.
|
||||||
|
**- Please do not submit bugs that are specific to ZFS. We plan to add integration with ZFS utilities in the near future.**
|
||||||
|
|
||||||
|
options:
|
||||||
|
- label: I have read the [Documentation](https://beszel.dev/guide/getting-started)
|
||||||
|
required: true
|
||||||
|
- label: I have checked the [Common Issues Guide](https://beszel.dev/guide/common-issues) and my problem was not mentioned there.
|
||||||
|
required: true
|
||||||
|
- label: I have searched open and closed issues and my problem was not mentioned before.
|
||||||
|
required: true
|
||||||
|
- label: I have verified I am using the latest version available. You can check the latest release [here](https://github.com/henrygd/beszel/releases).
|
||||||
|
required: true
|
||||||
|
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
id: component
|
id: component
|
||||||
attributes:
|
attributes:
|
||||||
@@ -12,81 +34,53 @@ body:
|
|||||||
- Hub
|
- Hub
|
||||||
- Agent
|
- Agent
|
||||||
- Hub & Agent
|
- Hub & Agent
|
||||||
|
default: 0
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: markdown
|
|
||||||
attributes:
|
|
||||||
value: |
|
|
||||||
### Thanks for taking the time to fill out this bug report!
|
|
||||||
|
|
||||||
- For more general support, please [start a support thread](https://github.com/henrygd/beszel/discussions/new?category=support).
|
|
||||||
- To request a change or feature, use the [feature request form](https://github.com/henrygd/beszel/issues/new?template=feature_request.yml).
|
|
||||||
- Please do not submit bugs that are specific to ZFS. We plan to add integration with ZFS utilities in the near future.
|
|
||||||
|
|
||||||
### Before submitting a bug report:
|
|
||||||
|
|
||||||
- Check the [common issues guide](https://beszel.dev/guide/common-issues).
|
|
||||||
- Search existing [issues](https://github.com/henrygd/beszel/issues) and [discussions](https://github.com/henrygd/beszel/discussions) (including closed).
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: description
|
id: description
|
||||||
attributes:
|
attributes:
|
||||||
label: Description
|
label: Problem Description
|
||||||
description: Explain the issue you experienced clearly and concisely.
|
description: |
|
||||||
placeholder: I went to the coffee pot and it was empty.
|
How to write a good bug report?
|
||||||
|
|
||||||
|
- Respect the issue template as much as possible.
|
||||||
|
- The title should be short and descriptive.
|
||||||
|
- Explain the conditions which led you to report this issue: the context.
|
||||||
|
- The context should lead to something, a problem that you’re facing.
|
||||||
|
- Remain clear and concise.
|
||||||
|
- Format your messages to help the reader focus on what matters and understand the structure of your message, use [Markdown syntax](https://help.github.com/articles/github-flavored-markdown)
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: expected-behavior
|
id: expected-behavior
|
||||||
attributes:
|
attributes:
|
||||||
label: Expected Behavior
|
label: Expected Behavior
|
||||||
description: In a perfect world, what should have happened?
|
description: |
|
||||||
|
In a perfect world, what should have happened?
|
||||||
|
**Important:** Be specific. Vague descriptions like "it should work" are not helpful.
|
||||||
placeholder: When I got to the coffee pot, it should have been full.
|
placeholder: When I got to the coffee pot, it should have been full.
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: steps-to-reproduce
|
id: steps-to-reproduce
|
||||||
attributes:
|
attributes:
|
||||||
label: Steps to Reproduce
|
label: Steps to Reproduce
|
||||||
description: Describe how to reproduce the issue in repeatable steps.
|
description: |
|
||||||
|
Provide detailed, numbered steps that someone else can follow to reproduce the issue.
|
||||||
|
**Important:** Vague descriptions like "it doesn't work" or "it's broken" will result in the issue being closed.
|
||||||
|
Include specific actions, URLs, button clicks, and any relevant data or configuration.
|
||||||
placeholder: |
|
placeholder: |
|
||||||
1. Go to the coffee pot.
|
1. Go to the coffee pot.
|
||||||
2. Make more coffee.
|
2. Make more coffee.
|
||||||
3. Pour it into a cup.
|
3. Pour it into a cup.
|
||||||
|
4. Observe that the cup is empty instead of full.
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: dropdown
|
|
||||||
id: category
|
|
||||||
attributes:
|
|
||||||
label: Category
|
|
||||||
description: Which category does this relate to most?
|
|
||||||
options:
|
|
||||||
- Metrics
|
|
||||||
- Charts & Visualization
|
|
||||||
- Settings & Configuration
|
|
||||||
- Notifications & Alerts
|
|
||||||
- Authentication
|
|
||||||
- Installation
|
|
||||||
- Performance
|
|
||||||
- UI / UX
|
|
||||||
- Other
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: dropdown
|
|
||||||
id: metrics
|
|
||||||
attributes:
|
|
||||||
label: Affected Metrics
|
|
||||||
description: If applicable, which specific metric does this relate to most?
|
|
||||||
options:
|
|
||||||
- CPU
|
|
||||||
- Memory
|
|
||||||
- Storage
|
|
||||||
- Network
|
|
||||||
- Containers
|
|
||||||
- GPU
|
|
||||||
- Sensors
|
|
||||||
- Other
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: input
|
- type: input
|
||||||
id: system
|
id: system
|
||||||
attributes:
|
attributes:
|
||||||
@@ -94,6 +88,7 @@ body:
|
|||||||
placeholder: linux/amd64 (agent), freebsd/arm64 (hub)
|
placeholder: linux/amd64 (agent), freebsd/arm64 (hub)
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
- type: input
|
- type: input
|
||||||
id: version
|
id: version
|
||||||
attributes:
|
attributes:
|
||||||
@@ -101,6 +96,7 @@ body:
|
|||||||
placeholder: 0.9.1
|
placeholder: 0.9.1
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
id: install-method
|
id: install-method
|
||||||
attributes:
|
attributes:
|
||||||
@@ -114,18 +110,21 @@ body:
|
|||||||
- Other (please describe above)
|
- Other (please describe above)
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: config
|
id: config
|
||||||
attributes:
|
attributes:
|
||||||
label: Configuration
|
label: Configuration
|
||||||
description: Please provide any relevant service configuration
|
description: Please provide any relevant service configuration
|
||||||
render: yaml
|
render: yaml
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: hub-logs
|
id: hub-logs
|
||||||
attributes:
|
attributes:
|
||||||
label: Hub Logs
|
label: Hub Logs
|
||||||
description: Check the logs page in PocketBase (`/_/#/logs`) for relevant errors (copy JSON).
|
description: Check the logs page in PocketBase (`/_/#/logs`) for relevant errors (copy JSON).
|
||||||
render: json
|
render: json
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: agent-logs
|
id: agent-logs
|
||||||
attributes:
|
attributes:
|
||||||
|
|||||||
3
.github/ISSUE_TEMPLATE/config.yml
vendored
3
.github/ISSUE_TEMPLATE/config.yml
vendored
@@ -1,5 +1,8 @@
|
|||||||
blank_issues_enabled: false
|
blank_issues_enabled: false
|
||||||
contact_links:
|
contact_links:
|
||||||
|
- name: 🗣️ Translations
|
||||||
|
url: https://crowdin.com/project/beszel
|
||||||
|
about: Please report translation issues and request new translations here.
|
||||||
- name: 💬 Support and questions
|
- name: 💬 Support and questions
|
||||||
url: https://github.com/henrygd/beszel/discussions
|
url: https://github.com/henrygd/beszel/discussions
|
||||||
about: Ask and answer questions here.
|
about: Ask and answer questions here.
|
||||||
|
|||||||
81
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
81
.github/ISSUE_TEMPLATE/feature_request.yml
vendored
@@ -1,8 +1,25 @@
|
|||||||
name: 🚀 Feature request
|
name: 🚀 Feature request
|
||||||
description: Request a new feature or change.
|
description: Request a new feature or change.
|
||||||
title: "[Feature]: "
|
title: "[Feature]: "
|
||||||
labels: ["enhancement", "needs review"]
|
labels: ["enhancement"]
|
||||||
body:
|
body:
|
||||||
|
- type: checkboxes
|
||||||
|
attributes:
|
||||||
|
label: Welcome!
|
||||||
|
description: |
|
||||||
|
The issue tracker is for reporting bugs and feature requests only. For end-user related support questions, please use the **[GitHub Discussions](https://github.com/henrygd/beszel/discussions)** instead
|
||||||
|
|
||||||
|
Please note:
|
||||||
|
- For **Bug reports**, use the [Bug Form](https://github.com/henrygd/beszel/issues/new?template=bug_report.yml).
|
||||||
|
- Any requests for new translations should be requested within the [crowdin project](https://crowdin.com/project/beszel).
|
||||||
|
- Create one issue per feature request. This helps us keep track of requests and prioritize them accordingly.
|
||||||
|
|
||||||
|
options:
|
||||||
|
- label: I have searched open and closed feature requests to make sure this or similar feature request does not already exist.
|
||||||
|
required: true
|
||||||
|
- label: This is a feature request, not a bug report or support question.
|
||||||
|
required: true
|
||||||
|
|
||||||
- type: dropdown
|
- type: dropdown
|
||||||
id: component
|
id: component
|
||||||
attributes:
|
attributes:
|
||||||
@@ -12,16 +29,25 @@ body:
|
|||||||
- Hub
|
- Hub
|
||||||
- Agent
|
- Agent
|
||||||
- Hub & Agent
|
- Hub & Agent
|
||||||
|
default: 0
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: markdown
|
|
||||||
attributes:
|
|
||||||
value: Before submitting, please search existing [issues](https://github.com/henrygd/beszel/issues) and [discussions](https://github.com/henrygd/beszel/discussions) (including closed).
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
|
id: description
|
||||||
attributes:
|
attributes:
|
||||||
label: Describe the feature you would like to see
|
label: Description
|
||||||
|
description: |
|
||||||
|
Describe the solution or feature you'd like. Explain what problem this solves or what value it adds.
|
||||||
|
**Important:** Be specific and detailed. Vague requests like "make it better" will be closed.
|
||||||
|
placeholder: |
|
||||||
|
Example:
|
||||||
|
- What is the feature?
|
||||||
|
- What problem does it solve?
|
||||||
|
- How should it work?
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|
||||||
- type: textarea
|
- type: textarea
|
||||||
id: motivation
|
id: motivation
|
||||||
attributes:
|
attributes:
|
||||||
@@ -29,48 +55,3 @@ body:
|
|||||||
description: Why do you want this feature? What problem does it solve?
|
description: Why do you want this feature? What problem does it solve?
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
- type: textarea
|
|
||||||
attributes:
|
|
||||||
label: Describe how you would like to see this feature implemented
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: textarea
|
|
||||||
id: logs
|
|
||||||
attributes:
|
|
||||||
label: Screenshots
|
|
||||||
description: Please attach any relevant screenshots, such as images from your current solution or similar implementations.
|
|
||||||
validations:
|
|
||||||
required: false
|
|
||||||
- type: dropdown
|
|
||||||
id: category
|
|
||||||
attributes:
|
|
||||||
label: Category
|
|
||||||
description: Which category does this relate to most?
|
|
||||||
options:
|
|
||||||
- Metrics
|
|
||||||
- Charts & Visualization
|
|
||||||
- Settings & Configuration
|
|
||||||
- Notifications & Alerts
|
|
||||||
- Authentication
|
|
||||||
- Installation
|
|
||||||
- Performance
|
|
||||||
- UI / UX
|
|
||||||
- Other
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
- type: dropdown
|
|
||||||
id: metrics
|
|
||||||
attributes:
|
|
||||||
label: Affected Metrics
|
|
||||||
description: If applicable, which specific metric does this relate to most?
|
|
||||||
options:
|
|
||||||
- CPU
|
|
||||||
- Memory
|
|
||||||
- Storage
|
|
||||||
- Network
|
|
||||||
- Containers
|
|
||||||
- GPU
|
|
||||||
- Sensors
|
|
||||||
- Other
|
|
||||||
validations:
|
|
||||||
required: true
|
|
||||||
8
.github/workflows/inactivity-actions.yml
vendored
8
.github/workflows/inactivity-actions.yml
vendored
@@ -6,6 +6,7 @@ on:
|
|||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
|
actions: write
|
||||||
issues: write
|
issues: write
|
||||||
pull-requests: write
|
pull-requests: write
|
||||||
|
|
||||||
@@ -48,11 +49,16 @@ jobs:
|
|||||||
# Action can not skip PRs, set it to 100 years to cover it.
|
# Action can not skip PRs, set it to 100 years to cover it.
|
||||||
days-before-pr-stale: 36524
|
days-before-pr-stale: 36524
|
||||||
|
|
||||||
|
# Max issues to process before early exit. Next run resumes from cache. GH API limit: 5000.
|
||||||
|
operations-per-run: 1500
|
||||||
|
|
||||||
# Labels
|
# Labels
|
||||||
stale-issue-label: 'stale'
|
stale-issue-label: 'stale'
|
||||||
remove-stale-when-updated: true
|
remove-stale-when-updated: true
|
||||||
only-issue-labels: 'awaiting-requester'
|
any-of-labels: 'awaiting-requester'
|
||||||
|
exempt-issue-labels: 'enhancement'
|
||||||
|
|
||||||
# Exemptions
|
# Exemptions
|
||||||
exempt-assignees: true
|
exempt-assignees: true
|
||||||
|
|
||||||
exempt-milestones: true
|
exempt-milestones: true
|
||||||
82
.github/workflows/label-from-dropdown.yml
vendored
82
.github/workflows/label-from-dropdown.yml
vendored
@@ -1,82 +0,0 @@
|
|||||||
name: Label issues from dropdowns
|
|
||||||
|
|
||||||
on:
|
|
||||||
issues:
|
|
||||||
types: [opened]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
label_from_dropdown:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions:
|
|
||||||
issues: write
|
|
||||||
steps:
|
|
||||||
- name: Apply labels based on dropdown choices
|
|
||||||
uses: actions/github-script@v7
|
|
||||||
with:
|
|
||||||
script: |
|
|
||||||
|
|
||||||
const issueNumber = context.issue.number;
|
|
||||||
const owner = context.repo.owner;
|
|
||||||
const repo = context.repo.repo;
|
|
||||||
|
|
||||||
// Get the issue body
|
|
||||||
const body = context.payload.issue.body;
|
|
||||||
|
|
||||||
// Helper to find dropdown value in the body (assuming markdown format)
|
|
||||||
function extractSectionValue(heading) {
|
|
||||||
const regex = new RegExp(`### ${heading}\\s+([\\s\\S]*?)(?:\\n###|$)`, 'i');
|
|
||||||
const match = body.match(regex);
|
|
||||||
if (match) {
|
|
||||||
// Get the first non-empty line after the heading
|
|
||||||
const lines = match[1].split('\n').map(l => l.trim()).filter(Boolean);
|
|
||||||
return lines[0] || null;
|
|
||||||
}
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extract dropdown selections
|
|
||||||
const category = extractSectionValue('Category');
|
|
||||||
const metrics = extractSectionValue('Affected Metrics');
|
|
||||||
const component = extractSectionValue('Component');
|
|
||||||
|
|
||||||
// Build labels to add
|
|
||||||
let labelsToAdd = [];
|
|
||||||
if (category) labelsToAdd.push(category);
|
|
||||||
if (metrics) labelsToAdd.push(metrics);
|
|
||||||
if (component) labelsToAdd.push(component);
|
|
||||||
|
|
||||||
// Get existing labels in the repo
|
|
||||||
const { data: existingLabels } = await github.rest.issues.listLabelsForRepo({
|
|
||||||
owner,
|
|
||||||
repo,
|
|
||||||
per_page: 100
|
|
||||||
});
|
|
||||||
const existingLabelNames = existingLabels.map(l => l.name);
|
|
||||||
|
|
||||||
// Find labels that need to be created
|
|
||||||
const labelsToCreate = labelsToAdd.filter(label => !existingLabelNames.includes(label));
|
|
||||||
|
|
||||||
// Create missing labels (with a default color)
|
|
||||||
for (const label of labelsToCreate) {
|
|
||||||
try {
|
|
||||||
await github.rest.issues.createLabel({
|
|
||||||
owner,
|
|
||||||
repo,
|
|
||||||
name: label,
|
|
||||||
color: 'ededed' // light gray, you can pick any hex color
|
|
||||||
});
|
|
||||||
} catch (e) {
|
|
||||||
// Ignore if label already exists (race condition), otherwise rethrow
|
|
||||||
if (!e || e.status !== 422) throw e;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now apply all labels (they all exist now)
|
|
||||||
if (labelsToAdd.length > 0) {
|
|
||||||
await github.rest.issues.addLabels({
|
|
||||||
owner,
|
|
||||||
repo,
|
|
||||||
issue_number: issueNumber,
|
|
||||||
labels: labelsToAdd
|
|
||||||
});
|
|
||||||
}
|
|
||||||
6
.github/workflows/vulncheck.yml
vendored
6
.github/workflows/vulncheck.yml
vendored
@@ -19,11 +19,11 @@ jobs:
|
|||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Check out code into the Go module directory
|
- name: Check out code into the Go module directory
|
||||||
uses: actions/checkout@v4
|
uses: actions/checkout@v6
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v5
|
uses: actions/setup-go@v6
|
||||||
with:
|
with:
|
||||||
go-version: 1.25.x
|
go-version: 1.26.x
|
||||||
# cached: false
|
# cached: false
|
||||||
- name: Get official govulncheck
|
- name: Get official govulncheck
|
||||||
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
run: go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
|||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -10,6 +10,7 @@ dist
|
|||||||
*.exe
|
*.exe
|
||||||
internal/cmd/hub/hub
|
internal/cmd/hub/hub
|
||||||
internal/cmd/agent/agent
|
internal/cmd/agent/agent
|
||||||
|
agent.test
|
||||||
node_modules
|
node_modules
|
||||||
build
|
build
|
||||||
*timestamp*
|
*timestamp*
|
||||||
|
|||||||
@@ -16,10 +16,21 @@ builds:
|
|||||||
goos:
|
goos:
|
||||||
- linux
|
- linux
|
||||||
- darwin
|
- darwin
|
||||||
|
- windows
|
||||||
|
- freebsd
|
||||||
goarch:
|
goarch:
|
||||||
- amd64
|
- amd64
|
||||||
- arm64
|
- arm64
|
||||||
- arm
|
- arm
|
||||||
|
ignore:
|
||||||
|
- goos: windows
|
||||||
|
goarch: arm64
|
||||||
|
- goos: windows
|
||||||
|
goarch: arm
|
||||||
|
- goos: freebsd
|
||||||
|
goarch: arm64
|
||||||
|
- goos: freebsd
|
||||||
|
goarch: arm
|
||||||
|
|
||||||
- id: beszel-agent
|
- id: beszel-agent
|
||||||
binary: beszel-agent
|
binary: beszel-agent
|
||||||
@@ -65,6 +76,18 @@ builds:
|
|||||||
- goos: windows
|
- goos: windows
|
||||||
goarch: riscv64
|
goarch: riscv64
|
||||||
|
|
||||||
|
- id: beszel-agent-linux-amd64-glibc
|
||||||
|
binary: beszel-agent
|
||||||
|
main: internal/cmd/agent/agent.go
|
||||||
|
env:
|
||||||
|
- CGO_ENABLED=0
|
||||||
|
flags:
|
||||||
|
- -tags=glibc
|
||||||
|
goos:
|
||||||
|
- linux
|
||||||
|
goarch:
|
||||||
|
- amd64
|
||||||
|
|
||||||
archives:
|
archives:
|
||||||
- id: beszel-agent
|
- id: beszel-agent
|
||||||
formats: [tar.gz]
|
formats: [tar.gz]
|
||||||
@@ -78,6 +101,15 @@ archives:
|
|||||||
- goos: windows
|
- goos: windows
|
||||||
formats: [zip]
|
formats: [zip]
|
||||||
|
|
||||||
|
- id: beszel-agent-linux-amd64-glibc
|
||||||
|
formats: [tar.gz]
|
||||||
|
ids:
|
||||||
|
- beszel-agent-linux-amd64-glibc
|
||||||
|
name_template: >-
|
||||||
|
{{ .Binary }}_
|
||||||
|
{{- .Os }}_
|
||||||
|
{{- .Arch }}_glibc
|
||||||
|
|
||||||
- id: beszel
|
- id: beszel
|
||||||
formats: [tar.gz]
|
formats: [tar.gz]
|
||||||
ids:
|
ids:
|
||||||
@@ -86,6 +118,9 @@ archives:
|
|||||||
{{ .Binary }}_
|
{{ .Binary }}_
|
||||||
{{- .Os }}_
|
{{- .Os }}_
|
||||||
{{- .Arch }}
|
{{- .Arch }}
|
||||||
|
format_overrides:
|
||||||
|
- goos: windows
|
||||||
|
formats: [zip]
|
||||||
|
|
||||||
nfpms:
|
nfpms:
|
||||||
- id: beszel-agent
|
- id: beszel-agent
|
||||||
@@ -123,9 +158,7 @@ nfpms:
|
|||||||
- debconf
|
- debconf
|
||||||
scripts:
|
scripts:
|
||||||
templates: ./supplemental/debian/templates
|
templates: ./supplemental/debian/templates
|
||||||
# Currently broken due to a bug in goreleaser
|
config: ./supplemental/debian/config.sh
|
||||||
# https://github.com/goreleaser/goreleaser/issues/5487
|
|
||||||
#config: ./supplemental/debian/config.sh
|
|
||||||
|
|
||||||
scoops:
|
scoops:
|
||||||
- ids: [beszel-agent]
|
- ids: [beszel-agent]
|
||||||
|
|||||||
41
Makefile
41
Makefile
@@ -3,6 +3,40 @@ OS ?= $(shell go env GOOS)
|
|||||||
ARCH ?= $(shell go env GOARCH)
|
ARCH ?= $(shell go env GOARCH)
|
||||||
# Skip building the web UI if true
|
# Skip building the web UI if true
|
||||||
SKIP_WEB ?= false
|
SKIP_WEB ?= false
|
||||||
|
# Controls NVML/glibc agent build tag behavior:
|
||||||
|
# - auto (default): enable on linux/amd64 glibc hosts
|
||||||
|
# - true: always enable
|
||||||
|
# - false: always disable
|
||||||
|
NVML ?= auto
|
||||||
|
|
||||||
|
# Detect glibc host for local linux/amd64 builds.
|
||||||
|
HOST_GLIBC := $(shell \
|
||||||
|
if [ "$(OS)" = "linux" ] && [ "$(ARCH)" = "amd64" ]; then \
|
||||||
|
for p in /lib64/ld-linux-x86-64.so.2 /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 /lib/ld-linux-x86-64.so.2; do \
|
||||||
|
[ -e "$$p" ] && { echo true; exit 0; }; \
|
||||||
|
done; \
|
||||||
|
if command -v ldd >/dev/null 2>&1; then \
|
||||||
|
if ldd --version 2>&1 | tr '[:upper:]' '[:lower:]' | awk '/gnu libc|glibc/{found=1} END{exit !found}'; then \
|
||||||
|
echo true; \
|
||||||
|
else \
|
||||||
|
echo false; \
|
||||||
|
fi; \
|
||||||
|
else \
|
||||||
|
echo false; \
|
||||||
|
fi; \
|
||||||
|
else \
|
||||||
|
echo false; \
|
||||||
|
fi)
|
||||||
|
|
||||||
|
# Enable glibc build tag for NVML on supported Linux builds.
|
||||||
|
AGENT_GO_TAGS :=
|
||||||
|
ifeq ($(NVML),true)
|
||||||
|
AGENT_GO_TAGS := -tags glibc
|
||||||
|
else ifeq ($(NVML),auto)
|
||||||
|
ifeq ($(HOST_GLIBC),true)
|
||||||
|
AGENT_GO_TAGS := -tags glibc
|
||||||
|
endif
|
||||||
|
endif
|
||||||
|
|
||||||
# Set executable extension based on target OS
|
# Set executable extension based on target OS
|
||||||
EXE_EXT := $(if $(filter windows,$(OS)),.exe,)
|
EXE_EXT := $(if $(filter windows,$(OS)),.exe,)
|
||||||
@@ -17,7 +51,6 @@ clean:
|
|||||||
lint:
|
lint:
|
||||||
golangci-lint run
|
golangci-lint run
|
||||||
|
|
||||||
test: export GOEXPERIMENT=synctest
|
|
||||||
test:
|
test:
|
||||||
go test -tags=testing ./...
|
go test -tags=testing ./...
|
||||||
|
|
||||||
@@ -54,7 +87,7 @@ fetch-smartctl-conditional:
|
|||||||
|
|
||||||
# Update build-agent to include conditional .NET build
|
# Update build-agent to include conditional .NET build
|
||||||
build-agent: tidy build-dotnet-conditional fetch-smartctl-conditional
|
build-agent: tidy build-dotnet-conditional fetch-smartctl-conditional
|
||||||
GOOS=$(OS) GOARCH=$(ARCH) go build -o ./build/beszel-agent_$(OS)_$(ARCH)$(EXE_EXT) -ldflags "-w -s" ./internal/cmd/agent
|
GOOS=$(OS) GOARCH=$(ARCH) go build $(AGENT_GO_TAGS) -o ./build/beszel-agent_$(OS)_$(ARCH)$(EXE_EXT) -ldflags "-w -s" ./internal/cmd/agent
|
||||||
|
|
||||||
build-hub: tidy $(if $(filter false,$(SKIP_WEB)),build-web-ui)
|
build-hub: tidy $(if $(filter false,$(SKIP_WEB)),build-web-ui)
|
||||||
GOOS=$(OS) GOARCH=$(ARCH) go build -o ./build/beszel_$(OS)_$(ARCH)$(EXE_EXT) -ldflags "-w -s" ./internal/cmd/hub
|
GOOS=$(OS) GOARCH=$(ARCH) go build -o ./build/beszel_$(OS)_$(ARCH)$(EXE_EXT) -ldflags "-w -s" ./internal/cmd/hub
|
||||||
@@ -90,9 +123,9 @@ dev-hub:
|
|||||||
|
|
||||||
dev-agent:
|
dev-agent:
|
||||||
@if command -v entr >/dev/null 2>&1; then \
|
@if command -v entr >/dev/null 2>&1; then \
|
||||||
find ./internal/cmd/agent/*.go ./agent/*.go | entr -r go run github.com/henrygd/beszel/internal/cmd/agent; \
|
find ./internal/cmd/agent/*.go ./agent/*.go | entr -r go run $(AGENT_GO_TAGS) github.com/henrygd/beszel/internal/cmd/agent; \
|
||||||
else \
|
else \
|
||||||
go run github.com/henrygd/beszel/internal/cmd/agent; \
|
go run $(AGENT_GO_TAGS) github.com/henrygd/beszel/internal/cmd/agent; \
|
||||||
fi
|
fi
|
||||||
|
|
||||||
build-dotnet:
|
build-dotnet:
|
||||||
|
|||||||
102
agent/agent.go
102
agent/agent.go
@@ -5,22 +5,22 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/gliderlabs/ssh"
|
"github.com/gliderlabs/ssh"
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
"github.com/henrygd/beszel/agent/deltatracker"
|
"github.com/henrygd/beszel/agent/deltatracker"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
"github.com/shirou/gopsutil/v4/host"
|
|
||||||
gossh "golang.org/x/crypto/ssh"
|
gossh "golang.org/x/crypto/ssh"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const defaultDataCacheTimeMs uint16 = 60_000
|
||||||
|
|
||||||
type Agent struct {
|
type Agent struct {
|
||||||
sync.Mutex // Used to lock agent while collecting data
|
sync.Mutex // Used to lock agent while collecting data
|
||||||
debug bool // true if LOG_LEVEL is set to debug
|
debug bool // true if LOG_LEVEL is set to debug
|
||||||
@@ -29,12 +29,16 @@ type Agent struct {
|
|||||||
fsNames []string // List of filesystem device names being monitored
|
fsNames []string // List of filesystem device names being monitored
|
||||||
fsStats map[string]*system.FsStats // Keeps track of disk stats for each filesystem
|
fsStats map[string]*system.FsStats // Keeps track of disk stats for each filesystem
|
||||||
diskPrev map[uint16]map[string]prevDisk // Previous disk I/O counters per cache interval
|
diskPrev map[uint16]map[string]prevDisk // Previous disk I/O counters per cache interval
|
||||||
|
diskUsageCacheDuration time.Duration // How long to cache disk usage (to avoid waking sleeping disks)
|
||||||
|
lastDiskUsageUpdate time.Time // Last time disk usage was collected
|
||||||
netInterfaces map[string]struct{} // Stores all valid network interfaces
|
netInterfaces map[string]struct{} // Stores all valid network interfaces
|
||||||
netIoStats map[uint16]system.NetIoStats // Keeps track of bandwidth usage per cache interval
|
netIoStats map[uint16]system.NetIoStats // Keeps track of bandwidth usage per cache interval
|
||||||
netInterfaceDeltaTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64] // Per-cache-time NIC delta trackers
|
netInterfaceDeltaTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64] // Per-cache-time NIC delta trackers
|
||||||
dockerManager *dockerManager // Manages Docker API requests
|
dockerManager *dockerManager // Manages Docker API requests
|
||||||
sensorConfig *SensorConfig // Sensors config
|
sensorConfig *SensorConfig // Sensors config
|
||||||
systemInfo system.Info // Host system info
|
systemInfo system.Info // Host system info (dynamic)
|
||||||
|
systemDetails system.Details // Host system details (static, once-per-connection)
|
||||||
|
detailsDirty bool // Whether system details have changed and need to be resent
|
||||||
gpuManager *GPUManager // Manages GPU data
|
gpuManager *GPUManager // Manages GPU data
|
||||||
cache *systemDataCache // Cache for system stats based on cache time
|
cache *systemDataCache // Cache for system stats based on cache time
|
||||||
connectionManager *ConnectionManager // Channel to signal connection events
|
connectionManager *ConnectionManager // Channel to signal connection events
|
||||||
@@ -60,17 +64,28 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
agent.netIoStats = make(map[uint16]system.NetIoStats)
|
agent.netIoStats = make(map[uint16]system.NetIoStats)
|
||||||
agent.netInterfaceDeltaTrackers = make(map[uint16]*deltatracker.DeltaTracker[string, uint64])
|
agent.netInterfaceDeltaTrackers = make(map[uint16]*deltatracker.DeltaTracker[string, uint64])
|
||||||
|
|
||||||
agent.dataDir, err = getDataDir(dataDir...)
|
agent.dataDir, err = GetDataDir(dataDir...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("Data directory not found")
|
slog.Warn("Data directory not found")
|
||||||
} else {
|
} else {
|
||||||
slog.Info("Data directory", "path", agent.dataDir)
|
slog.Info("Data directory", "path", agent.dataDir)
|
||||||
}
|
}
|
||||||
|
|
||||||
agent.memCalc, _ = GetEnv("MEM_CALC")
|
agent.memCalc, _ = utils.GetEnv("MEM_CALC")
|
||||||
agent.sensorConfig = agent.newSensorConfig()
|
agent.sensorConfig = agent.newSensorConfig()
|
||||||
|
|
||||||
|
// Parse disk usage cache duration (e.g., "15m", "1h") to avoid waking sleeping disks
|
||||||
|
if diskUsageCache, exists := utils.GetEnv("DISK_USAGE_CACHE"); exists {
|
||||||
|
if duration, err := time.ParseDuration(diskUsageCache); err == nil {
|
||||||
|
agent.diskUsageCacheDuration = duration
|
||||||
|
slog.Info("DISK_USAGE_CACHE", "duration", duration)
|
||||||
|
} else {
|
||||||
|
slog.Warn("Invalid DISK_USAGE_CACHE", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Set up slog with a log level determined by the LOG_LEVEL env var
|
// Set up slog with a log level determined by the LOG_LEVEL env var
|
||||||
if logLevelStr, exists := GetEnv("LOG_LEVEL"); exists {
|
if logLevelStr, exists := utils.GetEnv("LOG_LEVEL"); exists {
|
||||||
switch strings.ToLower(logLevelStr) {
|
switch strings.ToLower(logLevelStr) {
|
||||||
case "debug":
|
case "debug":
|
||||||
agent.debug = true
|
agent.debug = true
|
||||||
@@ -84,8 +99,21 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
|
|
||||||
slog.Debug(beszel.Version)
|
slog.Debug(beszel.Version)
|
||||||
|
|
||||||
|
// initialize docker manager
|
||||||
|
agent.dockerManager = newDockerManager(agent)
|
||||||
|
|
||||||
// initialize system info
|
// initialize system info
|
||||||
agent.initializeSystemInfo()
|
agent.refreshSystemDetails()
|
||||||
|
|
||||||
|
// SMART_INTERVAL env var to update smart data at this interval
|
||||||
|
if smartIntervalEnv, exists := utils.GetEnv("SMART_INTERVAL"); exists {
|
||||||
|
if duration, err := time.ParseDuration(smartIntervalEnv); err == nil && duration > 0 {
|
||||||
|
agent.systemDetails.SmartInterval = duration
|
||||||
|
slog.Info("SMART_INTERVAL", "duration", duration)
|
||||||
|
} else {
|
||||||
|
slog.Warn("Invalid SMART_INTERVAL", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// initialize connection manager
|
// initialize connection manager
|
||||||
agent.connectionManager = newConnectionManager(agent)
|
agent.connectionManager = newConnectionManager(agent)
|
||||||
@@ -99,9 +127,6 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
// initialize net io stats
|
// initialize net io stats
|
||||||
agent.initializeNetIoStats()
|
agent.initializeNetIoStats()
|
||||||
|
|
||||||
// initialize docker manager
|
|
||||||
agent.dockerManager = newDockerManager(agent)
|
|
||||||
|
|
||||||
agent.systemdManager, err = newSystemdManager()
|
agent.systemdManager, err = newSystemdManager()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Debug("Systemd", "err", err)
|
slog.Debug("Systemd", "err", err)
|
||||||
@@ -120,25 +145,17 @@ func NewAgent(dataDir ...string) (agent *Agent, err error) {
|
|||||||
|
|
||||||
// if debugging, print stats
|
// if debugging, print stats
|
||||||
if agent.debug {
|
if agent.debug {
|
||||||
slog.Debug("Stats", "data", agent.gatherStats(0))
|
slog.Debug("Stats", "data", agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs, IncludeDetails: true}))
|
||||||
}
|
}
|
||||||
|
|
||||||
return agent, nil
|
return agent, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetEnv retrieves an environment variable with a "BESZEL_AGENT_" prefix, or falls back to the unprefixed key.
|
func (a *Agent) gatherStats(options common.DataRequestOptions) *system.CombinedData {
|
||||||
func GetEnv(key string) (value string, exists bool) {
|
|
||||||
if value, exists = os.LookupEnv("BESZEL_AGENT_" + key); exists {
|
|
||||||
return value, exists
|
|
||||||
}
|
|
||||||
// Fallback to the old unprefixed key
|
|
||||||
return os.LookupEnv(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *Agent) gatherStats(cacheTimeMs uint16) *system.CombinedData {
|
|
||||||
a.Lock()
|
a.Lock()
|
||||||
defer a.Unlock()
|
defer a.Unlock()
|
||||||
|
|
||||||
|
cacheTimeMs := options.CacheTimeMs
|
||||||
data, isCached := a.cache.Get(cacheTimeMs)
|
data, isCached := a.cache.Get(cacheTimeMs)
|
||||||
if isCached {
|
if isCached {
|
||||||
slog.Debug("Cached data", "cacheTimeMs", cacheTimeMs)
|
slog.Debug("Cached data", "cacheTimeMs", cacheTimeMs)
|
||||||
@@ -149,6 +166,7 @@ func (a *Agent) gatherStats(cacheTimeMs uint16) *system.CombinedData {
|
|||||||
Stats: a.getSystemStats(cacheTimeMs),
|
Stats: a.getSystemStats(cacheTimeMs),
|
||||||
Info: a.systemInfo,
|
Info: a.systemInfo,
|
||||||
}
|
}
|
||||||
|
|
||||||
// slog.Info("System data", "data", data, "cacheTimeMs", cacheTimeMs)
|
// slog.Info("System data", "data", data, "cacheTimeMs", cacheTimeMs)
|
||||||
|
|
||||||
if a.dockerManager != nil {
|
if a.dockerManager != nil {
|
||||||
@@ -161,7 +179,7 @@ func (a *Agent) gatherStats(cacheTimeMs uint16) *system.CombinedData {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// skip updating systemd services if cache time is not the default 60sec interval
|
// skip updating systemd services if cache time is not the default 60sec interval
|
||||||
if a.systemdManager != nil && cacheTimeMs == 60_000 {
|
if a.systemdManager != nil && cacheTimeMs == defaultDataCacheTimeMs {
|
||||||
totalCount := uint16(a.systemdManager.getServiceStatsCount())
|
totalCount := uint16(a.systemdManager.getServiceStatsCount())
|
||||||
if totalCount > 0 {
|
if totalCount > 0 {
|
||||||
numFailed := a.systemdManager.getFailedServiceCount()
|
numFailed := a.systemdManager.getFailedServiceCount()
|
||||||
@@ -184,7 +202,7 @@ func (a *Agent) gatherStats(cacheTimeMs uint16) *system.CombinedData {
|
|||||||
data.Stats.ExtraFs[key] = stats
|
data.Stats.ExtraFs[key] = stats
|
||||||
// Add percentages to Info struct for dashboard
|
// Add percentages to Info struct for dashboard
|
||||||
if stats.DiskTotal > 0 {
|
if stats.DiskTotal > 0 {
|
||||||
pct := twoDecimals((stats.DiskUsed / stats.DiskTotal) * 100)
|
pct := utils.TwoDecimals((stats.DiskUsed / stats.DiskTotal) * 100)
|
||||||
data.Info.ExtraFsPct[key] = pct
|
data.Info.ExtraFsPct[key] = pct
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -192,40 +210,16 @@ func (a *Agent) gatherStats(cacheTimeMs uint16) *system.CombinedData {
|
|||||||
slog.Debug("Extra FS", "data", data.Stats.ExtraFs)
|
slog.Debug("Extra FS", "data", data.Stats.ExtraFs)
|
||||||
|
|
||||||
a.cache.Set(data, cacheTimeMs)
|
a.cache.Set(data, cacheTimeMs)
|
||||||
return data
|
|
||||||
|
return a.attachSystemDetails(data, cacheTimeMs, options.IncludeDetails)
|
||||||
}
|
}
|
||||||
|
|
||||||
// StartAgent initializes and starts the agent with optional WebSocket connection
|
// Start initializes and starts the agent with optional WebSocket connection
|
||||||
func (a *Agent) Start(serverOptions ServerOptions) error {
|
func (a *Agent) Start(serverOptions ServerOptions) error {
|
||||||
a.keys = serverOptions.Keys
|
a.keys = serverOptions.Keys
|
||||||
return a.connectionManager.Start(serverOptions)
|
return a.connectionManager.Start(serverOptions)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Agent) getFingerprint() string {
|
func (a *Agent) getFingerprint() string {
|
||||||
// first look for a fingerprint in the data directory
|
return GetFingerprint(a.dataDir, a.systemDetails.Hostname, a.systemDetails.CpuModel)
|
||||||
if a.dataDir != "" {
|
|
||||||
if fp, err := os.ReadFile(filepath.Join(a.dataDir, "fingerprint")); err == nil {
|
|
||||||
return string(fp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// if no fingerprint is found, generate one
|
|
||||||
fingerprint, err := host.HostID()
|
|
||||||
if err != nil || fingerprint == "" {
|
|
||||||
fingerprint = a.systemInfo.Hostname + a.systemInfo.CpuModel
|
|
||||||
}
|
|
||||||
|
|
||||||
// hash fingerprint
|
|
||||||
sum := sha256.Sum256([]byte(fingerprint))
|
|
||||||
fingerprint = hex.EncodeToString(sum[:24])
|
|
||||||
|
|
||||||
// save fingerprint to data directory
|
|
||||||
if a.dataDir != "" {
|
|
||||||
err = os.WriteFile(filepath.Join(a.dataDir, "fingerprint"), []byte(fingerprint), 0644)
|
|
||||||
if err != nil {
|
|
||||||
slog.Warn("Failed to save fingerprint", "err", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return fingerprint
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -22,7 +21,7 @@ func createTestCacheData() *system.CombinedData {
|
|||||||
DiskTotal: 100000,
|
DiskTotal: 100000,
|
||||||
},
|
},
|
||||||
Info: system.Info{
|
Info: system.Info{
|
||||||
Hostname: "test-host",
|
AgentVersion: "0.12.0",
|
||||||
},
|
},
|
||||||
Containers: []*container.Stats{
|
Containers: []*container.Stats{
|
||||||
{
|
{
|
||||||
@@ -128,7 +127,7 @@ func TestCacheMultipleIntervals(t *testing.T) {
|
|||||||
Mem: 16384,
|
Mem: 16384,
|
||||||
},
|
},
|
||||||
Info: system.Info{
|
Info: system.Info{
|
||||||
Hostname: "test-host-2",
|
AgentVersion: "0.12.0",
|
||||||
},
|
},
|
||||||
Containers: []*container.Stats{},
|
Containers: []*container.Stats{},
|
||||||
}
|
}
|
||||||
@@ -171,7 +170,7 @@ func TestCacheOverwrite(t *testing.T) {
|
|||||||
Mem: 32768,
|
Mem: 32768,
|
||||||
},
|
},
|
||||||
Info: system.Info{
|
Info: system.Info{
|
||||||
Hostname: "updated-host",
|
AgentVersion: "0.12.0",
|
||||||
},
|
},
|
||||||
Containers: []*container.Stats{},
|
Containers: []*container.Stats{},
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
|
|||||||
@@ -1,84 +1,11 @@
|
|||||||
//go:build !freebsd
|
// Package battery provides functions to check if the system has a battery and return the charge state and percentage.
|
||||||
|
|
||||||
// Package battery provides functions to check if the system has a battery and to get the battery stats.
|
|
||||||
package battery
|
package battery
|
||||||
|
|
||||||
import (
|
const (
|
||||||
"errors"
|
stateUnknown uint8 = iota
|
||||||
"log/slog"
|
stateEmpty
|
||||||
"math"
|
stateFull
|
||||||
|
stateCharging
|
||||||
"github.com/distatus/battery"
|
stateDischarging
|
||||||
|
stateIdle
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
systemHasBattery = false
|
|
||||||
haveCheckedBattery = false
|
|
||||||
)
|
|
||||||
|
|
||||||
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
|
||||||
func HasReadableBattery() bool {
|
|
||||||
if haveCheckedBattery {
|
|
||||||
return systemHasBattery
|
|
||||||
}
|
|
||||||
haveCheckedBattery = true
|
|
||||||
batteries, err := battery.GetAll()
|
|
||||||
for _, bat := range batteries {
|
|
||||||
if bat != nil && (bat.Full > 0 || bat.Design > 0) {
|
|
||||||
systemHasBattery = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !systemHasBattery {
|
|
||||||
slog.Debug("No battery found", "err", err)
|
|
||||||
}
|
|
||||||
return systemHasBattery
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBatteryStats returns the current battery percent and charge state
|
|
||||||
// percent = (current charge of all batteries) / (sum of designed/full capacity of all batteries)
|
|
||||||
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
|
||||||
if !HasReadableBattery() {
|
|
||||||
return batteryPercent, batteryState, errors.ErrUnsupported
|
|
||||||
}
|
|
||||||
batteries, err := battery.GetAll()
|
|
||||||
// we'll handle errors later by skipping batteries with errors, rather
|
|
||||||
// than skipping everything because of the presence of some errors.
|
|
||||||
if len(batteries) == 0 {
|
|
||||||
return batteryPercent, batteryState, errors.New("no batteries")
|
|
||||||
}
|
|
||||||
|
|
||||||
totalCapacity := float64(0)
|
|
||||||
totalCharge := float64(0)
|
|
||||||
errs, partialErrs := err.(battery.Errors)
|
|
||||||
|
|
||||||
batteryState = math.MaxUint8
|
|
||||||
|
|
||||||
for i, bat := range batteries {
|
|
||||||
if partialErrs && errs[i] != nil {
|
|
||||||
// if there were some errors, like missing data, skip it
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if bat == nil || bat.Full == 0 {
|
|
||||||
// skip batteries with no capacity. Charge is unlikely to ever be zero, but
|
|
||||||
// we can't guarantee that, so don't skip based on charge.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
totalCapacity += bat.Full
|
|
||||||
totalCharge += bat.Current
|
|
||||||
if bat.State.Raw >= 0 {
|
|
||||||
batteryState = uint8(bat.State.Raw)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
|
||||||
// for macs there's sometimes a ghost battery with 0 capacity
|
|
||||||
// https://github.com/distatus/battery/issues/34
|
|
||||||
// Instead of skipping over those batteries, we'll check for total 0 capacity
|
|
||||||
// and return an error. This also prevents a divide by zero.
|
|
||||||
return batteryPercent, batteryState, errors.New("no battery capacity")
|
|
||||||
}
|
|
||||||
|
|
||||||
batteryPercent = uint8(totalCharge / totalCapacity * 100)
|
|
||||||
return batteryPercent, batteryState, nil
|
|
||||||
}
|
|
||||||
|
|||||||
96
agent/battery/battery_darwin.go
Normal file
96
agent/battery/battery_darwin.go
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
//go:build darwin
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
"os/exec"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"howett.net/plist"
|
||||||
|
)
|
||||||
|
|
||||||
|
type macBattery struct {
|
||||||
|
CurrentCapacity int `plist:"CurrentCapacity"`
|
||||||
|
MaxCapacity int `plist:"MaxCapacity"`
|
||||||
|
FullyCharged bool `plist:"FullyCharged"`
|
||||||
|
IsCharging bool `plist:"IsCharging"`
|
||||||
|
ExternalConnected bool `plist:"ExternalConnected"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func readMacBatteries() ([]macBattery, error) {
|
||||||
|
out, err := exec.Command("ioreg", "-n", "AppleSmartBattery", "-r", "-a").Output()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(out) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
var batteries []macBattery
|
||||||
|
if _, err := plist.Unmarshal(out, &batteries); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return batteries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||||
|
var HasReadableBattery = sync.OnceValue(func() bool {
|
||||||
|
systemHasBattery := false
|
||||||
|
batteries, err := readMacBatteries()
|
||||||
|
slog.Debug("Batteries", "batteries", batteries, "err", err)
|
||||||
|
for _, bat := range batteries {
|
||||||
|
if bat.MaxCapacity > 0 {
|
||||||
|
systemHasBattery = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return systemHasBattery
|
||||||
|
})
|
||||||
|
|
||||||
|
// GetBatteryStats returns the current battery percent and charge state.
|
||||||
|
// Uses CurrentCapacity/MaxCapacity to match the value macOS displays.
|
||||||
|
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||||
|
if !HasReadableBattery() {
|
||||||
|
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
batteries, err := readMacBatteries()
|
||||||
|
if len(batteries) == 0 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no batteries")
|
||||||
|
}
|
||||||
|
|
||||||
|
totalCapacity := 0
|
||||||
|
totalCharge := 0
|
||||||
|
batteryState = math.MaxUint8
|
||||||
|
|
||||||
|
for _, bat := range batteries {
|
||||||
|
if bat.MaxCapacity == 0 {
|
||||||
|
// skip ghost batteries with 0 capacity
|
||||||
|
// https://github.com/distatus/battery/issues/34
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalCapacity += bat.MaxCapacity
|
||||||
|
totalCharge += min(bat.CurrentCapacity, bat.MaxCapacity)
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case !bat.ExternalConnected:
|
||||||
|
batteryState = stateDischarging
|
||||||
|
case bat.IsCharging:
|
||||||
|
batteryState = stateCharging
|
||||||
|
case bat.CurrentCapacity == 0:
|
||||||
|
batteryState = stateEmpty
|
||||||
|
case !bat.FullyCharged:
|
||||||
|
batteryState = stateIdle
|
||||||
|
default:
|
||||||
|
batteryState = stateFull
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if totalCapacity == 0 || batteryState == math.MaxUint8 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryPercent = uint8(float64(totalCharge) / float64(totalCapacity) * 100)
|
||||||
|
return batteryPercent, batteryState, nil
|
||||||
|
}
|
||||||
117
agent/battery/battery_linux.go
Normal file
117
agent/battery/battery_linux.go
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// getBatteryPaths returns the paths of all batteries in /sys/class/power_supply
|
||||||
|
var getBatteryPaths func() ([]string, error)
|
||||||
|
|
||||||
|
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||||
|
var HasReadableBattery func() bool
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
resetBatteryState("/sys/class/power_supply")
|
||||||
|
}
|
||||||
|
|
||||||
|
// resetBatteryState resets the sync.Once functions to a fresh state.
|
||||||
|
// Tests call this after swapping sysfsPowerSupply so the new path is picked up.
|
||||||
|
func resetBatteryState(sysfsPowerSupplyPath string) {
|
||||||
|
getBatteryPaths = sync.OnceValues(func() ([]string, error) {
|
||||||
|
entries, err := os.ReadDir(sysfsPowerSupplyPath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var paths []string
|
||||||
|
for _, e := range entries {
|
||||||
|
path := filepath.Join(sysfsPowerSupplyPath, e.Name())
|
||||||
|
if utils.ReadStringFile(filepath.Join(path, "type")) == "Battery" {
|
||||||
|
paths = append(paths, path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return paths, nil
|
||||||
|
})
|
||||||
|
HasReadableBattery = sync.OnceValue(func() bool {
|
||||||
|
systemHasBattery := false
|
||||||
|
paths, err := getBatteryPaths()
|
||||||
|
for _, path := range paths {
|
||||||
|
if _, ok := utils.ReadStringFileOK(filepath.Join(path, "capacity")); ok {
|
||||||
|
systemHasBattery = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !systemHasBattery {
|
||||||
|
slog.Debug("No battery found", "err", err)
|
||||||
|
}
|
||||||
|
return systemHasBattery
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseSysfsState(status string) uint8 {
|
||||||
|
switch status {
|
||||||
|
case "Empty":
|
||||||
|
return stateEmpty
|
||||||
|
case "Full":
|
||||||
|
return stateFull
|
||||||
|
case "Charging":
|
||||||
|
return stateCharging
|
||||||
|
case "Discharging":
|
||||||
|
return stateDischarging
|
||||||
|
case "Not charging":
|
||||||
|
return stateIdle
|
||||||
|
default:
|
||||||
|
return stateUnknown
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBatteryStats returns the current battery percent and charge state.
|
||||||
|
// Reads /sys/class/power_supply/*/capacity directly so the kernel-reported
|
||||||
|
// value is used, which is always 0-100 and matches what the OS displays.
|
||||||
|
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||||
|
if !HasReadableBattery() {
|
||||||
|
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
paths, err := getBatteryPaths()
|
||||||
|
if len(paths) == 0 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no batteries")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryState = math.MaxUint8
|
||||||
|
totalPercent := 0
|
||||||
|
count := 0
|
||||||
|
|
||||||
|
for _, path := range paths {
|
||||||
|
capStr, ok := utils.ReadStringFileOK(filepath.Join(path, "capacity"))
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
cap, parseErr := strconv.Atoi(capStr)
|
||||||
|
if parseErr != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalPercent += cap
|
||||||
|
count++
|
||||||
|
|
||||||
|
state := parseSysfsState(utils.ReadStringFile(filepath.Join(path, "status")))
|
||||||
|
if state != stateUnknown {
|
||||||
|
batteryState = state
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if count == 0 || batteryState == math.MaxUint8 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryPercent = uint8(totalPercent / count)
|
||||||
|
return batteryPercent, batteryState, nil
|
||||||
|
}
|
||||||
201
agent/battery/battery_linux_test.go
Normal file
201
agent/battery/battery_linux_test.go
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
//go:build testing && linux
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// setupFakeSysfs creates a temporary sysfs-like tree under t.TempDir(),
|
||||||
|
// swaps sysfsPowerSupply, resets the sync.Once caches, and restores
|
||||||
|
// everything on cleanup. Returns a helper to create battery directories.
|
||||||
|
func setupFakeSysfs(t *testing.T) (tmpDir string, addBattery func(name, capacity, status string)) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
tmp := t.TempDir()
|
||||||
|
resetBatteryState(tmp)
|
||||||
|
|
||||||
|
write := func(path, content string) {
|
||||||
|
t.Helper()
|
||||||
|
dir := filepath.Dir(path)
|
||||||
|
if err := os.MkdirAll(dir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(path, []byte(content), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
addBattery = func(name, capacity, status string) {
|
||||||
|
t.Helper()
|
||||||
|
batDir := filepath.Join(tmp, name)
|
||||||
|
write(filepath.Join(batDir, "type"), "Battery")
|
||||||
|
write(filepath.Join(batDir, "capacity"), capacity)
|
||||||
|
write(filepath.Join(batDir, "status"), status)
|
||||||
|
}
|
||||||
|
|
||||||
|
return tmp, addBattery
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseSysfsState(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
input string
|
||||||
|
want uint8
|
||||||
|
}{
|
||||||
|
{"Empty", stateEmpty},
|
||||||
|
{"Full", stateFull},
|
||||||
|
{"Charging", stateCharging},
|
||||||
|
{"Discharging", stateDischarging},
|
||||||
|
{"Not charging", stateIdle},
|
||||||
|
{"", stateUnknown},
|
||||||
|
{"SomethingElse", stateUnknown},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
assert.Equal(t, tt.want, parseSysfsState(tt.input), "parseSysfsState(%q)", tt.input)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_SingleBattery(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "72", "Discharging")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(72), pct)
|
||||||
|
assert.Equal(t, stateDischarging, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_MultipleBatteries(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "80", "Charging")
|
||||||
|
addBattery("BAT1", "40", "Charging")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
// average of 80 and 40 = 60
|
||||||
|
assert.EqualValues(t, 60, pct)
|
||||||
|
assert.Equal(t, stateCharging, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_FullBattery(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "100", "Full")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(100), pct)
|
||||||
|
assert.Equal(t, stateFull, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_EmptyBattery(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "0", "Empty")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(0), pct)
|
||||||
|
assert.Equal(t, stateEmpty, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_NotCharging(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "80", "Not charging")
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(80), pct)
|
||||||
|
assert.Equal(t, stateIdle, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_NoBatteries(t *testing.T) {
|
||||||
|
setupFakeSysfs(t) // empty directory, no batteries
|
||||||
|
|
||||||
|
_, _, err := GetBatteryStats()
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_NonBatterySupplyIgnored(t *testing.T) {
|
||||||
|
tmp, addBattery := setupFakeSysfs(t)
|
||||||
|
|
||||||
|
// Add a real battery
|
||||||
|
addBattery("BAT0", "55", "Charging")
|
||||||
|
|
||||||
|
// Add an AC adapter (type != Battery) - should be ignored
|
||||||
|
acDir := filepath.Join(tmp, "AC0")
|
||||||
|
if err := os.MkdirAll(acDir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(acDir, "type"), []byte("Mains"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pct, state, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, uint8(55), pct)
|
||||||
|
assert.Equal(t, stateCharging, state)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_InvalidCapacitySkipped(t *testing.T) {
|
||||||
|
tmp, addBattery := setupFakeSysfs(t)
|
||||||
|
|
||||||
|
// One battery with valid capacity
|
||||||
|
addBattery("BAT0", "90", "Discharging")
|
||||||
|
|
||||||
|
// Another with invalid capacity text
|
||||||
|
badDir := filepath.Join(tmp, "BAT1")
|
||||||
|
if err := os.MkdirAll(badDir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(badDir, "type"), []byte("Battery"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(badDir, "capacity"), []byte("not-a-number"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(filepath.Join(badDir, "status"), []byte("Discharging"), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
pct, _, err := GetBatteryStats()
|
||||||
|
assert.NoError(t, err)
|
||||||
|
// Only BAT0 counted
|
||||||
|
assert.Equal(t, uint8(90), pct)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetBatteryStats_UnknownStatusOnly(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "50", "SomethingWeird")
|
||||||
|
|
||||||
|
_, _, err := GetBatteryStats()
|
||||||
|
assert.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasReadableBattery_True(t *testing.T) {
|
||||||
|
_, addBattery := setupFakeSysfs(t)
|
||||||
|
addBattery("BAT0", "50", "Charging")
|
||||||
|
|
||||||
|
assert.True(t, HasReadableBattery())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasReadableBattery_False(t *testing.T) {
|
||||||
|
setupFakeSysfs(t) // no batteries
|
||||||
|
|
||||||
|
assert.False(t, HasReadableBattery())
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasReadableBattery_NoCapacityFile(t *testing.T) {
|
||||||
|
tmp, _ := setupFakeSysfs(t)
|
||||||
|
|
||||||
|
// Battery dir with type file but no capacity file
|
||||||
|
batDir := filepath.Join(tmp, "BAT0")
|
||||||
|
err := os.MkdirAll(batDir, 0o755)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
err = os.WriteFile(filepath.Join(batDir, "type"), []byte("Battery"), 0o644)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
assert.False(t, HasReadableBattery())
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build freebsd
|
//go:build !darwin && !linux && !windows
|
||||||
|
|
||||||
package battery
|
package battery
|
||||||
|
|
||||||
298
agent/battery/battery_windows.go
Normal file
298
agent/battery/battery_windows.go
Normal file
@@ -0,0 +1,298 @@
|
|||||||
|
//go:build windows
|
||||||
|
|
||||||
|
// Most of the Windows battery code is based on
|
||||||
|
// distatus/battery by Karol 'Kenji Takahashi' Woźniak
|
||||||
|
|
||||||
|
package battery
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log/slog"
|
||||||
|
"math"
|
||||||
|
"sync"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
// batteryQueryInformation mirrors the Win32 BATTERY_QUERY_INFORMATION
// structure (input buffer of IOCTL_BATTERY_QUERY_INFORMATION). Field order
// and sizes must match the C layout exactly.
type batteryQueryInformation struct {
	BatteryTag       uint32 // tag obtained via IOCTL_BATTERY_QUERY_TAG
	InformationLevel int32  // BATTERY_QUERY_INFORMATION_LEVEL selector
	AtRate           int32
}
|
||||||
|
|
||||||
|
// batteryInformation mirrors the Win32 BATTERY_INFORMATION structure
// (output of IOCTL_BATTERY_QUERY_INFORMATION). Field order and sizes must
// match the C layout exactly.
type batteryInformation struct {
	Capabilities        uint32
	Technology          uint8
	Reserved            [3]uint8
	Chemistry           [4]uint8
	DesignedCapacity    uint32
	FullChargedCapacity uint32 // treated as the "full" capacity in percent math
	DefaultAlert1       uint32
	DefaultAlert2       uint32
	CriticalBias        uint32
	CycleCount          uint32
}
|
||||||
|
|
||||||
|
// batteryWaitStatus mirrors the Win32 BATTERY_WAIT_STATUS structure
// (input buffer of IOCTL_BATTERY_QUERY_STATUS). Only BatteryTag is
// populated by this package; the remaining fields stay zero.
type batteryWaitStatus struct {
	BatteryTag   uint32
	Timeout      uint32
	PowerState   uint32
	LowCapacity  uint32
	HighCapacity uint32
}
|
||||||
|
|
||||||
|
// batteryStatus mirrors the Win32 BATTERY_STATUS structure
// (output of IOCTL_BATTERY_QUERY_STATUS).
type batteryStatus struct {
	PowerState uint32 // bitmask of charging/discharging/online/critical flags
	Capacity   uint32 // current capacity; 0xffffffff means unknown
	Voltage    uint32
	Rate       int32
}
|
||||||
|
|
||||||
|
// winGUID matches the memory layout of the Win32 GUID struct so it can be
// passed by pointer to the SetupDi* APIs.
type winGUID struct {
	Data1 uint32
	Data2 uint16
	Data3 uint16
	Data4 [8]byte
}
|
||||||
|
|
||||||
|
// spDeviceInterfaceData mirrors the Win32 SP_DEVICE_INTERFACE_DATA structure
// used by SetupDiEnumDeviceInterfaces. Reserved is declared ULONG_PTR in C,
// hence the platform-width uint here.
type spDeviceInterfaceData struct {
	cbSize             uint32 // must be set to unsafe.Sizeof before use
	InterfaceClassGuid winGUID
	Flags              uint32
	Reserved           uint
}
|
||||||
|
|
||||||
|
// guidDeviceBattery is GUID_DEVICE_BATTERY
// ({72631E54-78A4-11D0-BCF7-00AA00B7B32A}), the device interface class GUID
// used to enumerate battery devices.
var guidDeviceBattery = winGUID{
	0x72631e54,
	0x78A4,
	0x11d0,
	[8]byte{0xbc, 0xf7, 0x00, 0xaa, 0x00, 0xb7, 0xb3, 0x2a},
}
|
||||||
|
|
||||||
|
// Lazily-loaded setupapi.dll procedures used to enumerate battery device
// interfaces. System: true restricts loading to the system directory.
var (
	setupapi                         = &windows.LazyDLL{Name: "setupapi.dll", System: true}
	setupDiGetClassDevsW             = setupapi.NewProc("SetupDiGetClassDevsW")
	setupDiEnumDeviceInterfaces      = setupapi.NewProc("SetupDiEnumDeviceInterfaces")
	setupDiGetDeviceInterfaceDetailW = setupapi.NewProc("SetupDiGetDeviceInterfaceDetailW")
	setupDiDestroyDeviceInfoList     = setupapi.NewProc("SetupDiDestroyDeviceInfoList")
)

// errNotFound is returned by winBatteryGet when there is no battery at the
// requested index, i.e. enumeration is exhausted.
var errNotFound = errors.New("no more batteries")
|
||||||
|
|
||||||
|
func setupDiSetup(proc *windows.LazyProc, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, error) {
|
||||||
|
_ = nargs
|
||||||
|
r1, _, errno := syscall.SyscallN(proc.Addr(), a1, a2, a3, a4, a5, a6)
|
||||||
|
if windows.Handle(r1) == windows.InvalidHandle {
|
||||||
|
if errno != 0 {
|
||||||
|
return 0, error(errno)
|
||||||
|
}
|
||||||
|
return 0, syscall.EINVAL
|
||||||
|
}
|
||||||
|
return r1, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setupDiCall(proc *windows.LazyProc, nargs, a1, a2, a3, a4, a5, a6 uintptr) syscall.Errno {
|
||||||
|
_ = nargs
|
||||||
|
r1, _, errno := syscall.SyscallN(proc.Addr(), a1, a2, a3, a4, a5, a6)
|
||||||
|
if r1 == 0 {
|
||||||
|
if errno != 0 {
|
||||||
|
return errno
|
||||||
|
}
|
||||||
|
return syscall.EINVAL
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func readWinBatteryState(powerState uint32) uint8 {
|
||||||
|
switch {
|
||||||
|
case powerState&0x00000004 != 0:
|
||||||
|
return stateCharging
|
||||||
|
case powerState&0x00000008 != 0:
|
||||||
|
return stateEmpty
|
||||||
|
case powerState&0x00000002 != 0:
|
||||||
|
return stateDischarging
|
||||||
|
case powerState&0x00000001 != 0:
|
||||||
|
return stateFull
|
||||||
|
default:
|
||||||
|
return stateUnknown
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// winBatteryGet reads one battery by enumeration index via the SetupDi
// device-interface APIs plus the battery IOCTLs. It returns
// (fullChargedCapacity, currentCapacity, state, error); err == errNotFound
// means there is no battery at idx (end of enumeration).
func winBatteryGet(idx int) (full, current uint32, state uint8, err error) {
	// Open the set of present devices exposing the battery device interface.
	hdev, err := setupDiSetup(
		setupDiGetClassDevsW,
		4,
		uintptr(unsafe.Pointer(&guidDeviceBattery)),
		0, 0,
		2|16, // DIGCF_PRESENT|DIGCF_DEVICEINTERFACE
		0, 0,
	)
	if err != nil {
		return 0, 0, stateUnknown, err
	}
	defer syscall.SyscallN(setupDiDestroyDeviceInfoList.Addr(), hdev)

	// Select the idx-th battery interface in the set.
	var did spDeviceInterfaceData
	did.cbSize = uint32(unsafe.Sizeof(did))
	errno := setupDiCall(
		setupDiEnumDeviceInterfaces,
		5,
		hdev, 0,
		uintptr(unsafe.Pointer(&guidDeviceBattery)),
		uintptr(idx),
		uintptr(unsafe.Pointer(&did)),
		0,
	)
	if errno == 259 { // ERROR_NO_MORE_ITEMS
		return 0, 0, stateUnknown, errNotFound
	}
	if errno != 0 {
		return 0, 0, stateUnknown, errno
	}

	// First detail call with a nil buffer only reports the required size.
	var cbRequired uint32
	errno = setupDiCall(
		setupDiGetDeviceInterfaceDetailW,
		6,
		hdev,
		uintptr(unsafe.Pointer(&did)),
		0, 0,
		uintptr(unsafe.Pointer(&cbRequired)),
		0,
	)
	if errno != 0 && errno != 122 { // ERROR_INSUFFICIENT_BUFFER
		return 0, 0, stateUnknown, errno
	}
	// SP_DEVICE_INTERFACE_DETAIL_DATA_W is a cbSize header followed by the
	// device path in UTF-16. cbSize is 8 on 64-bit, 6 on 32-bit (packing).
	didd := make([]uint16, cbRequired/2)
	cbSize := (*uint32)(unsafe.Pointer(&didd[0]))
	if unsafe.Sizeof(uint(0)) == 8 {
		*cbSize = 8
	} else {
		*cbSize = 6
	}
	errno = setupDiCall(
		setupDiGetDeviceInterfaceDetailW,
		6,
		hdev,
		uintptr(unsafe.Pointer(&did)),
		uintptr(unsafe.Pointer(&didd[0])),
		uintptr(cbRequired),
		uintptr(unsafe.Pointer(&cbRequired)),
		0,
	)
	if errno != 0 {
		return 0, 0, stateUnknown, errno
	}
	// The NUL-terminated device path starts after the 4-byte cbSize header
	// (two uint16 elements).
	devicePath := &didd[2:][0]

	handle, err := windows.CreateFile(
		devicePath,
		windows.GENERIC_READ|windows.GENERIC_WRITE,
		windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE,
		nil,
		windows.OPEN_EXISTING,
		windows.FILE_ATTRIBUTE_NORMAL,
		0,
	)
	if err != nil {
		return 0, 0, stateUnknown, err
	}
	defer windows.CloseHandle(handle)

	// A battery tag is required before any other battery IOCTL will answer.
	var dwOut uint32
	var dwWait uint32
	var bqi batteryQueryInformation
	err = windows.DeviceIoControl(
		handle,
		2703424, // IOCTL_BATTERY_QUERY_TAG
		(*byte)(unsafe.Pointer(&dwWait)),
		uint32(unsafe.Sizeof(dwWait)),
		(*byte)(unsafe.Pointer(&bqi.BatteryTag)),
		uint32(unsafe.Sizeof(bqi.BatteryTag)),
		&dwOut, nil,
	)
	if err != nil || bqi.BatteryTag == 0 {
		return 0, 0, stateUnknown, errors.New("battery tag not returned")
	}

	// Static battery info: we only consume FullChargedCapacity.
	var bi batteryInformation
	if err = windows.DeviceIoControl(
		handle,
		2703428, // IOCTL_BATTERY_QUERY_INFORMATION
		(*byte)(unsafe.Pointer(&bqi)),
		uint32(unsafe.Sizeof(bqi)),
		(*byte)(unsafe.Pointer(&bi)),
		uint32(unsafe.Sizeof(bi)),
		&dwOut, nil,
	); err != nil {
		return 0, 0, stateUnknown, err
	}

	// Live status: current capacity and power-state flags.
	bws := batteryWaitStatus{BatteryTag: bqi.BatteryTag}
	var bs batteryStatus
	if err = windows.DeviceIoControl(
		handle,
		2703436, // IOCTL_BATTERY_QUERY_STATUS
		(*byte)(unsafe.Pointer(&bws)),
		uint32(unsafe.Sizeof(bws)),
		(*byte)(unsafe.Pointer(&bs)),
		uint32(unsafe.Sizeof(bs)),
		&dwOut, nil,
	); err != nil {
		return 0, 0, stateUnknown, err
	}

	if bs.Capacity == 0xffffffff { // BATTERY_UNKNOWN_CAPACITY
		return 0, 0, stateUnknown, errors.New("battery capacity unknown")
	}

	return bi.FullChargedCapacity, bs.Capacity, readWinBatteryState(bs.PowerState), nil
}
|
||||||
|
|
||||||
|
// HasReadableBattery checks if the system has a battery and returns true if it does.
|
||||||
|
var HasReadableBattery = sync.OnceValue(func() bool {
|
||||||
|
systemHasBattery := false
|
||||||
|
full, _, _, err := winBatteryGet(0)
|
||||||
|
if err == nil && full > 0 {
|
||||||
|
systemHasBattery = true
|
||||||
|
}
|
||||||
|
if !systemHasBattery {
|
||||||
|
slog.Debug("No battery found", "err", err)
|
||||||
|
}
|
||||||
|
return systemHasBattery
|
||||||
|
})
|
||||||
|
|
||||||
|
// GetBatteryStats returns the current battery percent and charge state.
|
||||||
|
func GetBatteryStats() (batteryPercent uint8, batteryState uint8, err error) {
|
||||||
|
if !HasReadableBattery() {
|
||||||
|
return batteryPercent, batteryState, errors.ErrUnsupported
|
||||||
|
}
|
||||||
|
|
||||||
|
totalFull := uint32(0)
|
||||||
|
totalCurrent := uint32(0)
|
||||||
|
batteryState = math.MaxUint8
|
||||||
|
|
||||||
|
for i := 0; ; i++ {
|
||||||
|
full, current, state, bErr := winBatteryGet(i)
|
||||||
|
if errors.Is(bErr, errNotFound) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if bErr != nil || full == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
totalFull += full
|
||||||
|
totalCurrent += min(current, full)
|
||||||
|
batteryState = state
|
||||||
|
}
|
||||||
|
|
||||||
|
if totalFull == 0 || batteryState == math.MaxUint8 {
|
||||||
|
return batteryPercent, batteryState, errors.New("no battery capacity")
|
||||||
|
}
|
||||||
|
|
||||||
|
batteryPercent = uint8(float64(totalCurrent) / float64(totalFull) * 100)
|
||||||
|
return batteryPercent, batteryState, nil
|
||||||
|
}
|
||||||
@@ -14,10 +14,8 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
"github.com/henrygd/beszel/internal/entities/smart"
|
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
|
||||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
|
||||||
|
|
||||||
"github.com/fxamacker/cbor/v2"
|
"github.com/fxamacker/cbor/v2"
|
||||||
"github.com/lxzan/gws"
|
"github.com/lxzan/gws"
|
||||||
@@ -46,7 +44,7 @@ type WebSocketClient struct {
|
|||||||
// newWebSocketClient creates a new WebSocket client for the given agent.
|
// newWebSocketClient creates a new WebSocket client for the given agent.
|
||||||
// It reads configuration from environment variables and validates the hub URL.
|
// It reads configuration from environment variables and validates the hub URL.
|
||||||
func newWebSocketClient(agent *Agent) (client *WebSocketClient, err error) {
|
func newWebSocketClient(agent *Agent) (client *WebSocketClient, err error) {
|
||||||
hubURLStr, exists := GetEnv("HUB_URL")
|
hubURLStr, exists := utils.GetEnv("HUB_URL")
|
||||||
if !exists {
|
if !exists {
|
||||||
return nil, errors.New("HUB_URL environment variable not set")
|
return nil, errors.New("HUB_URL environment variable not set")
|
||||||
}
|
}
|
||||||
@@ -75,12 +73,12 @@ func newWebSocketClient(agent *Agent) (client *WebSocketClient, err error) {
|
|||||||
// If neither is set, it returns an error.
|
// If neither is set, it returns an error.
|
||||||
func getToken() (string, error) {
|
func getToken() (string, error) {
|
||||||
// get token from env var
|
// get token from env var
|
||||||
token, _ := GetEnv("TOKEN")
|
token, _ := utils.GetEnv("TOKEN")
|
||||||
if token != "" {
|
if token != "" {
|
||||||
return token, nil
|
return token, nil
|
||||||
}
|
}
|
||||||
// get token from file
|
// get token from file
|
||||||
tokenFile, _ := GetEnv("TOKEN_FILE")
|
tokenFile, _ := utils.GetEnv("TOKEN_FILE")
|
||||||
if tokenFile == "" {
|
if tokenFile == "" {
|
||||||
return "", errors.New("must set TOKEN or TOKEN_FILE")
|
return "", errors.New("must set TOKEN or TOKEN_FILE")
|
||||||
}
|
}
|
||||||
@@ -200,8 +198,8 @@ func (client *WebSocketClient) handleAuthChallenge(msg *common.HubRequest[cbor.R
|
|||||||
}
|
}
|
||||||
|
|
||||||
if authRequest.NeedSysInfo {
|
if authRequest.NeedSysInfo {
|
||||||
response.Name, _ = GetEnv("SYSTEM_NAME")
|
response.Name, _ = utils.GetEnv("SYSTEM_NAME")
|
||||||
response.Hostname = client.agent.systemInfo.Hostname
|
response.Hostname = client.agent.systemDetails.Hostname
|
||||||
serverAddr := client.agent.connectionManager.serverOptions.Addr
|
serverAddr := client.agent.connectionManager.serverOptions.Addr
|
||||||
_, response.Port, _ = net.SplitHostPort(serverAddr)
|
_, response.Port, _ = net.SplitHostPort(serverAddr)
|
||||||
}
|
}
|
||||||
@@ -259,41 +257,17 @@ func (client *WebSocketClient) sendMessage(data any) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// sendResponse sends a response with optional request ID for the new protocol
|
// sendResponse sends a response with optional request ID.
|
||||||
|
// For ID-based requests, we must populate legacy typed fields for backward
|
||||||
|
// compatibility with older hubs (<= 0.17) that don't read the generic Data field.
|
||||||
func (client *WebSocketClient) sendResponse(data any, requestID *uint32) error {
|
func (client *WebSocketClient) sendResponse(data any, requestID *uint32) error {
|
||||||
if requestID != nil {
|
if requestID != nil {
|
||||||
// New format with ID - use typed fields
|
response := newAgentResponse(data, requestID)
|
||||||
response := common.AgentResponse{
|
|
||||||
Id: requestID,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the appropriate typed field based on data type
|
|
||||||
switch v := data.(type) {
|
|
||||||
case *system.CombinedData:
|
|
||||||
response.SystemData = v
|
|
||||||
case *common.FingerprintResponse:
|
|
||||||
response.Fingerprint = v
|
|
||||||
case string:
|
|
||||||
response.String = &v
|
|
||||||
case map[string]smart.SmartData:
|
|
||||||
response.SmartData = v
|
|
||||||
case systemd.ServiceDetails:
|
|
||||||
response.ServiceInfo = v
|
|
||||||
// case []byte:
|
|
||||||
// response.RawBytes = v
|
|
||||||
// case string:
|
|
||||||
// response.RawBytes = []byte(v)
|
|
||||||
default:
|
|
||||||
// For any other type, convert to error
|
|
||||||
response.Error = fmt.Sprintf("unsupported response type: %T", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
return client.sendMessage(response)
|
return client.sendMessage(response)
|
||||||
} else {
|
}
|
||||||
// Legacy format - send data directly
|
// Legacy format - send data directly
|
||||||
return client.sendMessage(data)
|
return client.sendMessage(data)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
// getUserAgent returns one of two User-Agent strings based on current time.
|
// getUserAgent returns one of two User-Agent strings based on current time.
|
||||||
// This is used to avoid being blocked by Cloudflare or other anti-bot measures.
|
// This is used to avoid being blocked by Cloudflare or other anti-bot measures.
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -71,19 +70,11 @@ func TestNewWebSocketClient(t *testing.T) {
|
|||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
// Set up environment
|
// Set up environment
|
||||||
if tc.hubURL != "" {
|
if tc.hubURL != "" {
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", tc.hubURL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", tc.hubURL)
|
||||||
} else {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
}
|
}
|
||||||
if tc.token != "" {
|
if tc.token != "" {
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", tc.token)
|
t.Setenv("BESZEL_AGENT_TOKEN", tc.token)
|
||||||
} else {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}
|
}
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
|
|
||||||
@@ -139,12 +130,8 @@ func TestWebSocketClient_GetOptions(t *testing.T) {
|
|||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
// Set up environment
|
// Set up environment
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", tc.inputURL)
|
t.Setenv("BESZEL_AGENT_HUB_URL", tc.inputURL)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -186,12 +173,8 @@ func TestWebSocketClient_VerifySignature(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
// Set up environment
|
// Set up environment
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -259,12 +242,8 @@ func TestWebSocketClient_HandleHubRequest(t *testing.T) {
|
|||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
|
|
||||||
// Set up environment
|
// Set up environment
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -351,13 +330,8 @@ func TestGetUserAgent(t *testing.T) {
|
|||||||
func TestWebSocketClient_Close(t *testing.T) {
|
func TestWebSocketClient_Close(t *testing.T) {
|
||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
|
|
||||||
// Set up environment
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -372,13 +346,8 @@ func TestWebSocketClient_Close(t *testing.T) {
|
|||||||
func TestWebSocketClient_ConnectRateLimit(t *testing.T) {
|
func TestWebSocketClient_ConnectRateLimit(t *testing.T) {
|
||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
|
|
||||||
// Set up environment
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
client, err := newWebSocketClient(agent)
|
client, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -394,20 +363,10 @@ func TestWebSocketClient_ConnectRateLimit(t *testing.T) {
|
|||||||
|
|
||||||
// TestGetToken tests the getToken function with various scenarios
|
// TestGetToken tests the getToken function with various scenarios
|
||||||
func TestGetToken(t *testing.T) {
|
func TestGetToken(t *testing.T) {
|
||||||
unsetEnvVars := func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
os.Unsetenv("TOKEN")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN_FILE")
|
|
||||||
os.Unsetenv("TOKEN_FILE")
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Run("token from TOKEN environment variable", func(t *testing.T) {
|
t.Run("token from TOKEN environment variable", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Set TOKEN env var
|
// Set TOKEN env var
|
||||||
expectedToken := "test-token-from-env"
|
expectedToken := "test-token-from-env"
|
||||||
os.Setenv("TOKEN", expectedToken)
|
t.Setenv("TOKEN", expectedToken)
|
||||||
defer os.Unsetenv("TOKEN")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -415,12 +374,9 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("token from BESZEL_AGENT_TOKEN environment variable", func(t *testing.T) {
|
t.Run("token from BESZEL_AGENT_TOKEN environment variable", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Set BESZEL_AGENT_TOKEN env var (should take precedence)
|
// Set BESZEL_AGENT_TOKEN env var (should take precedence)
|
||||||
expectedToken := "test-token-from-beszel-env"
|
expectedToken := "test-token-from-beszel-env"
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", expectedToken)
|
t.Setenv("BESZEL_AGENT_TOKEN", expectedToken)
|
||||||
defer os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -428,8 +384,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("token from TOKEN_FILE", func(t *testing.T) {
|
t.Run("token from TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create a temporary token file
|
// Create a temporary token file
|
||||||
expectedToken := "test-token-from-file"
|
expectedToken := "test-token-from-file"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -441,8 +395,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
// Set TOKEN_FILE env var
|
// Set TOKEN_FILE env var
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -450,8 +403,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("token from BESZEL_AGENT_TOKEN_FILE", func(t *testing.T) {
|
t.Run("token from BESZEL_AGENT_TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create a temporary token file
|
// Create a temporary token file
|
||||||
expectedToken := "test-token-from-beszel-file"
|
expectedToken := "test-token-from-beszel-file"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -463,8 +414,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
// Set BESZEL_AGENT_TOKEN_FILE env var (should take precedence)
|
// Set BESZEL_AGENT_TOKEN_FILE env var (should take precedence)
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN_FILE", tokenFile.Name())
|
t.Setenv("BESZEL_AGENT_TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("BESZEL_AGENT_TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -472,8 +422,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("TOKEN takes precedence over TOKEN_FILE", func(t *testing.T) {
|
t.Run("TOKEN takes precedence over TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create a temporary token file
|
// Create a temporary token file
|
||||||
fileToken := "token-from-file"
|
fileToken := "token-from-file"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -486,12 +434,8 @@ func TestGetToken(t *testing.T) {
|
|||||||
|
|
||||||
// Set both TOKEN and TOKEN_FILE
|
// Set both TOKEN and TOKEN_FILE
|
||||||
envToken := "token-from-env"
|
envToken := "token-from-env"
|
||||||
os.Setenv("TOKEN", envToken)
|
t.Setenv("TOKEN", envToken)
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("TOKEN")
|
|
||||||
os.Unsetenv("TOKEN_FILE")
|
|
||||||
}()
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -499,7 +443,10 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("error when neither TOKEN nor TOKEN_FILE is set", func(t *testing.T) {
|
t.Run("error when neither TOKEN nor TOKEN_FILE is set", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
t.Setenv("BESZEL_AGENT_TOKEN", "")
|
||||||
|
t.Setenv("TOKEN", "")
|
||||||
|
t.Setenv("BESZEL_AGENT_TOKEN_FILE", "")
|
||||||
|
t.Setenv("TOKEN_FILE", "")
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
@@ -508,11 +455,8 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("error when TOKEN_FILE points to non-existent file", func(t *testing.T) {
|
t.Run("error when TOKEN_FILE points to non-existent file", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Set TOKEN_FILE to a non-existent file
|
// Set TOKEN_FILE to a non-existent file
|
||||||
os.Setenv("TOKEN_FILE", "/non/existent/file.txt")
|
t.Setenv("TOKEN_FILE", "/non/existent/file.txt")
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
@@ -521,8 +465,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("handles empty token file", func(t *testing.T) {
|
t.Run("handles empty token file", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
// Create an empty token file
|
// Create an empty token file
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
@@ -530,8 +472,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
// Set TOKEN_FILE env var
|
// Set TOKEN_FILE env var
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
@@ -539,8 +480,6 @@ func TestGetToken(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("strips whitespace from TOKEN_FILE", func(t *testing.T) {
|
t.Run("strips whitespace from TOKEN_FILE", func(t *testing.T) {
|
||||||
unsetEnvVars()
|
|
||||||
|
|
||||||
tokenWithWhitespace := " test-token-with-whitespace \n\t"
|
tokenWithWhitespace := " test-token-with-whitespace \n\t"
|
||||||
expectedToken := "test-token-with-whitespace"
|
expectedToken := "test-token-with-whitespace"
|
||||||
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
tokenFile, err := os.CreateTemp("", "token-test-*.txt")
|
||||||
@@ -551,8 +490,7 @@ func TestGetToken(t *testing.T) {
|
|||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
tokenFile.Close()
|
tokenFile.Close()
|
||||||
|
|
||||||
os.Setenv("TOKEN_FILE", tokenFile.Name())
|
t.Setenv("TOKEN_FILE", tokenFile.Name())
|
||||||
defer os.Unsetenv("TOKEN_FILE")
|
|
||||||
|
|
||||||
token, err := getToken()
|
token, err := getToken()
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
@@ -91,8 +91,8 @@ func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
|||||||
c.eventChan = make(chan ConnectionEvent, 1)
|
c.eventChan = make(chan ConnectionEvent, 1)
|
||||||
|
|
||||||
// signal handling for shutdown
|
// signal handling for shutdown
|
||||||
sigChan := make(chan os.Signal, 1)
|
sigCtx, stopSignals := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||||
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
|
defer stopSignals()
|
||||||
|
|
||||||
c.startWsTicker()
|
c.startWsTicker()
|
||||||
c.connect()
|
c.connect()
|
||||||
@@ -109,8 +109,8 @@ func (c *ConnectionManager) Start(serverOptions ServerOptions) error {
|
|||||||
_ = c.startWebSocketConnection()
|
_ = c.startWebSocketConnection()
|
||||||
case <-healthTicker:
|
case <-healthTicker:
|
||||||
_ = health.Update()
|
_ = health.Update()
|
||||||
case <-sigChan:
|
case <-sigCtx.Done():
|
||||||
slog.Info("Shutting down")
|
slog.Info("Shutting down", "cause", context.Cause(sigCtx))
|
||||||
_ = c.agent.StopServer()
|
_ = c.agent.StopServer()
|
||||||
c.closeWebSocket()
|
c.closeWebSocket()
|
||||||
return health.CleanUp()
|
return health.CleanUp()
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -8,7 +7,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -184,10 +182,6 @@ func TestConnectionManager_TickerManagement(t *testing.T) {
|
|||||||
|
|
||||||
// TestConnectionManager_WebSocketConnectionFlow tests WebSocket connection logic
|
// TestConnectionManager_WebSocketConnectionFlow tests WebSocket connection logic
|
||||||
func TestConnectionManager_WebSocketConnectionFlow(t *testing.T) {
|
func TestConnectionManager_WebSocketConnectionFlow(t *testing.T) {
|
||||||
if testing.Short() {
|
|
||||||
t.Skip("Skipping WebSocket connection test in short mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
agent := createTestAgent(t)
|
agent := createTestAgent(t)
|
||||||
cm := agent.connectionManager
|
cm := agent.connectionManager
|
||||||
|
|
||||||
@@ -197,19 +191,18 @@ func TestConnectionManager_WebSocketConnectionFlow(t *testing.T) {
|
|||||||
assert.Equal(t, Disconnected, cm.State, "State should remain Disconnected after failed connection")
|
assert.Equal(t, Disconnected, cm.State, "State should remain Disconnected after failed connection")
|
||||||
|
|
||||||
// Test with invalid URL
|
// Test with invalid URL
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "invalid-url")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "1,33%")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Test with missing token
|
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
|
|
||||||
_, err2 := newWebSocketClient(agent)
|
_, err2 := newWebSocketClient(agent)
|
||||||
assert.Error(t, err2, "WebSocket client creation should fail without token")
|
assert.Error(t, err2, "WebSocket client creation should fail with invalid URL")
|
||||||
|
|
||||||
|
// Test with missing token
|
||||||
|
t.Setenv("BESZEL_AGENT_HUB_URL", "http://localhost:8080")
|
||||||
|
t.Setenv("BESZEL_AGENT_TOKEN", "")
|
||||||
|
|
||||||
|
_, err3 := newWebSocketClient(agent)
|
||||||
|
assert.Error(t, err3, "WebSocket client creation should fail without token")
|
||||||
}
|
}
|
||||||
|
|
||||||
// TestConnectionManager_ReconnectionLogic tests reconnection prevention logic
|
// TestConnectionManager_ReconnectionLogic tests reconnection prevention logic
|
||||||
@@ -235,12 +228,8 @@ func TestConnectionManager_ConnectWithRateLimit(t *testing.T) {
|
|||||||
cm := agent.connectionManager
|
cm := agent.connectionManager
|
||||||
|
|
||||||
// Set up environment for WebSocket client creation
|
// Set up environment for WebSocket client creation
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Create WebSocket client
|
// Create WebSocket client
|
||||||
wsClient, err := newWebSocketClient(agent)
|
wsClient, err := newWebSocketClient(agent)
|
||||||
@@ -286,12 +275,8 @@ func TestConnectionManager_CloseWebSocket(t *testing.T) {
|
|||||||
}, "Should not panic when closing nil WebSocket client")
|
}, "Should not panic when closing nil WebSocket client")
|
||||||
|
|
||||||
// Set up environment and create WebSocket client
|
// Set up environment and create WebSocket client
|
||||||
os.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
t.Setenv("BESZEL_AGENT_HUB_URL", "ws://localhost:8080")
|
||||||
os.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
t.Setenv("BESZEL_AGENT_TOKEN", "test-token")
|
||||||
defer func() {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_HUB_URL")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_TOKEN")
|
|
||||||
}()
|
|
||||||
|
|
||||||
wsClient, err := newWebSocketClient(agent)
|
wsClient, err := newWebSocketClient(agent)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|||||||
@@ -14,10 +14,10 @@ var lastPerCoreCpuTimes = make(map[uint16][]cpu.TimesStat)
|
|||||||
// init initializes the CPU monitoring by storing the initial CPU times
|
// init initializes the CPU monitoring by storing the initial CPU times
|
||||||
// for the default 60-second cache interval.
|
// for the default 60-second cache interval.
|
||||||
func init() {
|
func init() {
|
||||||
if times, err := cpu.Times(false); err == nil {
|
if times, err := cpu.Times(false); err == nil && len(times) > 0 {
|
||||||
lastCpuTimes[60000] = times[0]
|
lastCpuTimes[60000] = times[0]
|
||||||
}
|
}
|
||||||
if perCoreTimes, err := cpu.Times(true); err == nil {
|
if perCoreTimes, err := cpu.Times(true); err == nil && len(perCoreTimes) > 0 {
|
||||||
lastPerCoreCpuTimes[60000] = perCoreTimes
|
lastPerCoreCpuTimes[60000] = perCoreTimes
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -89,10 +89,7 @@ func getPerCoreCpuUsage(cacheTimeMs uint16) (system.Uint8Slice, error) {
|
|||||||
lastTimes := lastPerCoreCpuTimes[cacheTimeMs]
|
lastTimes := lastPerCoreCpuTimes[cacheTimeMs]
|
||||||
|
|
||||||
// Limit to the number of cores available in both samples
|
// Limit to the number of cores available in both samples
|
||||||
length := len(perCoreTimes)
|
length := min(len(lastTimes), len(perCoreTimes))
|
||||||
if len(lastTimes) < length {
|
|
||||||
length = len(lastTimes)
|
|
||||||
}
|
|
||||||
|
|
||||||
usage := make([]uint8, length)
|
usage := make([]uint8, length)
|
||||||
for i := 0; i < length; i++ {
|
for i := 0; i < length; i++ {
|
||||||
|
|||||||
@@ -6,17 +6,19 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// getDataDir returns the path to the data directory for the agent and an error
|
// GetDataDir returns the path to the data directory for the agent and an error
|
||||||
// if the directory is not valid. Attempts to find the optimal data directory if
|
// if the directory is not valid. Attempts to find the optimal data directory if
|
||||||
// no data directories are provided.
|
// no data directories are provided.
|
||||||
func getDataDir(dataDirs ...string) (string, error) {
|
func GetDataDir(dataDirs ...string) (string, error) {
|
||||||
if len(dataDirs) > 0 {
|
if len(dataDirs) > 0 {
|
||||||
return testDataDirs(dataDirs)
|
return testDataDirs(dataDirs)
|
||||||
}
|
}
|
||||||
|
|
||||||
dataDir, _ := GetEnv("DATA_DIR")
|
dataDir, _ := utils.GetEnv("DATA_DIR")
|
||||||
if dataDir != "" {
|
if dataDir != "" {
|
||||||
dataDirs = append(dataDirs, dataDir)
|
dataDirs = append(dataDirs, dataDir)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -17,7 +16,7 @@ func TestGetDataDir(t *testing.T) {
|
|||||||
// Test with explicit dataDir parameter
|
// Test with explicit dataDir parameter
|
||||||
t.Run("explicit data dir", func(t *testing.T) {
|
t.Run("explicit data dir", func(t *testing.T) {
|
||||||
tempDir := t.TempDir()
|
tempDir := t.TempDir()
|
||||||
result, err := getDataDir(tempDir)
|
result, err := GetDataDir(tempDir)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, tempDir, result)
|
assert.Equal(t, tempDir, result)
|
||||||
})
|
})
|
||||||
@@ -26,7 +25,7 @@ func TestGetDataDir(t *testing.T) {
|
|||||||
t.Run("explicit data dir - create new", func(t *testing.T) {
|
t.Run("explicit data dir - create new", func(t *testing.T) {
|
||||||
tempDir := t.TempDir()
|
tempDir := t.TempDir()
|
||||||
newDir := filepath.Join(tempDir, "new-data-dir")
|
newDir := filepath.Join(tempDir, "new-data-dir")
|
||||||
result, err := getDataDir(newDir)
|
result, err := GetDataDir(newDir)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, newDir, result)
|
assert.Equal(t, newDir, result)
|
||||||
|
|
||||||
@@ -40,19 +39,9 @@ func TestGetDataDir(t *testing.T) {
|
|||||||
t.Run("DATA_DIR environment variable", func(t *testing.T) {
|
t.Run("DATA_DIR environment variable", func(t *testing.T) {
|
||||||
tempDir := t.TempDir()
|
tempDir := t.TempDir()
|
||||||
|
|
||||||
// Set environment variable
|
t.Setenv("BESZEL_AGENT_DATA_DIR", tempDir)
|
||||||
oldValue := os.Getenv("DATA_DIR")
|
|
||||||
defer func() {
|
|
||||||
if oldValue == "" {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_DATA_DIR")
|
|
||||||
} else {
|
|
||||||
os.Setenv("BESZEL_AGENT_DATA_DIR", oldValue)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
os.Setenv("BESZEL_AGENT_DATA_DIR", tempDir)
|
result, err := GetDataDir()
|
||||||
|
|
||||||
result, err := getDataDir()
|
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
assert.Equal(t, tempDir, result)
|
assert.Equal(t, tempDir, result)
|
||||||
})
|
})
|
||||||
@@ -60,26 +49,15 @@ func TestGetDataDir(t *testing.T) {
|
|||||||
// Test with invalid explicit dataDir
|
// Test with invalid explicit dataDir
|
||||||
t.Run("invalid explicit data dir", func(t *testing.T) {
|
t.Run("invalid explicit data dir", func(t *testing.T) {
|
||||||
invalidPath := "/invalid/path/that/cannot/be/created"
|
invalidPath := "/invalid/path/that/cannot/be/created"
|
||||||
_, err := getDataDir(invalidPath)
|
_, err := GetDataDir(invalidPath)
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
})
|
})
|
||||||
|
|
||||||
// Test fallback behavior (empty dataDir, no env var)
|
// Test fallback behavior (empty dataDir, no env var)
|
||||||
t.Run("fallback to default directories", func(t *testing.T) {
|
t.Run("fallback to default directories", func(t *testing.T) {
|
||||||
// Clear DATA_DIR environment variable
|
|
||||||
oldValue := os.Getenv("DATA_DIR")
|
|
||||||
defer func() {
|
|
||||||
if oldValue == "" {
|
|
||||||
os.Unsetenv("DATA_DIR")
|
|
||||||
} else {
|
|
||||||
os.Setenv("DATA_DIR", oldValue)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
os.Unsetenv("DATA_DIR")
|
|
||||||
|
|
||||||
// This will try platform-specific defaults, which may or may not work
|
// This will try platform-specific defaults, which may or may not work
|
||||||
// We're mainly testing that it doesn't panic and returns some result
|
// We're mainly testing that it doesn't panic and returns some result
|
||||||
result, err := getDataDir()
|
result, err := GetDataDir()
|
||||||
// We don't assert success/failure here since it depends on system permissions
|
// We don't assert success/failure here since it depends on system permissions
|
||||||
// Just verify we get a string result if no error
|
// Just verify we get a string result if no error
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
|||||||
587
agent/disk.go
587
agent/disk.go
@@ -1,6 +1,7 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -8,11 +9,31 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/shirou/gopsutil/v4/disk"
|
"github.com/shirou/gopsutil/v4/disk"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// fsRegistrationContext holds the shared lookup state needed to resolve a
|
||||||
|
// filesystem into the tracked fsStats key and metadata.
|
||||||
|
type fsRegistrationContext struct {
|
||||||
|
filesystem string // value of optional FILESYSTEM env var
|
||||||
|
isWindows bool
|
||||||
|
efPath string // path to extra filesystems (default "/extra-filesystems")
|
||||||
|
diskIoCounters map[string]disk.IOCountersStat
|
||||||
|
}
|
||||||
|
|
||||||
|
// diskDiscovery groups the transient state for a single initializeDiskInfo run so
|
||||||
|
// helper methods can share the same partitions, mount paths, and lookup functions
|
||||||
|
type diskDiscovery struct {
|
||||||
|
agent *Agent
|
||||||
|
rootMountPoint string
|
||||||
|
partitions []disk.PartitionStat
|
||||||
|
usageFn func(string) (*disk.UsageStat, error)
|
||||||
|
ctx fsRegistrationContext
|
||||||
|
}
|
||||||
|
|
||||||
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
// parseFilesystemEntry parses a filesystem entry in the format "device__customname"
|
||||||
// Returns the device/filesystem part and the custom name part
|
// Returns the device/filesystem part and the custom name part
|
||||||
func parseFilesystemEntry(entry string) (device, customName string) {
|
func parseFilesystemEntry(entry string) (device, customName string) {
|
||||||
@@ -26,14 +47,236 @@ func parseFilesystemEntry(entry string) (device, customName string) {
|
|||||||
return device, customName
|
return device, customName
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// extraFilesystemPartitionInfo derives the I/O device and optional display name
|
||||||
|
// for a mounted /extra-filesystems partition. Prefer the partition device reported
|
||||||
|
// by the system and only use the folder name for custom naming metadata.
|
||||||
|
func extraFilesystemPartitionInfo(p disk.PartitionStat) (device, customName string) {
|
||||||
|
device = strings.TrimSpace(p.Device)
|
||||||
|
folderDevice, customName := parseFilesystemEntry(filepath.Base(p.Mountpoint))
|
||||||
|
if device == "" {
|
||||||
|
device = folderDevice
|
||||||
|
}
|
||||||
|
return device, customName
|
||||||
|
}
|
||||||
|
|
||||||
|
func isDockerSpecialMountpoint(mountpoint string) bool {
|
||||||
|
switch mountpoint {
|
||||||
|
case "/etc/hosts", "/etc/resolv.conf", "/etc/hostname":
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// registerFilesystemStats resolves the tracked key and stats payload for a
|
||||||
|
// filesystem before it is inserted into fsStats.
|
||||||
|
func registerFilesystemStats(existing map[string]*system.FsStats, device, mountpoint string, root bool, customName string, ctx fsRegistrationContext) (string, *system.FsStats, bool) {
|
||||||
|
key := device
|
||||||
|
if !ctx.isWindows {
|
||||||
|
key = filepath.Base(device)
|
||||||
|
}
|
||||||
|
|
||||||
|
if root {
|
||||||
|
// Try to map root device to a diskIoCounters entry. First checks for an
|
||||||
|
// exact key match, then uses findIoDevice for normalized / prefix-based
|
||||||
|
// matching (e.g. nda0p2 -> nda0), and finally falls back to FILESYSTEM.
|
||||||
|
if _, ioMatch := ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
if matchedKey, match := findIoDevice(key, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
} else if ctx.filesystem != "" {
|
||||||
|
if matchedKey, match := findIoDevice(ctx.filesystem, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ioMatch = ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
slog.Warn("Root I/O unmapped; set FILESYSTEM", "device", device, "mountpoint", mountpoint)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Check if non-root has diskstats and prefer the folder device for
|
||||||
|
// /extra-filesystems mounts when the discovered partition device is a
|
||||||
|
// mapper path (e.g. luks UUID) that obscures the underlying block device.
|
||||||
|
if _, ioMatch := ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
if strings.HasPrefix(mountpoint, ctx.efPath) {
|
||||||
|
folderDevice, _ := parseFilesystemEntry(filepath.Base(mountpoint))
|
||||||
|
if folderDevice != "" {
|
||||||
|
if matchedKey, match := findIoDevice(folderDevice, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ioMatch = ctx.diskIoCounters[key]; !ioMatch {
|
||||||
|
if matchedKey, match := findIoDevice(key, ctx.diskIoCounters); match {
|
||||||
|
key = matchedKey
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, exists := existing[key]; exists {
|
||||||
|
return "", nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
fsStats := &system.FsStats{Root: root, Mountpoint: mountpoint}
|
||||||
|
if customName != "" {
|
||||||
|
fsStats.Name = customName
|
||||||
|
}
|
||||||
|
return key, fsStats, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// addFsStat inserts a discovered filesystem if it resolves to a new tracking
|
||||||
|
// key. The key selection itself lives in buildFsStatRegistration so that logic
|
||||||
|
// can stay directly unit-tested.
|
||||||
|
func (d *diskDiscovery) addFsStat(device, mountpoint string, root bool, customName string) {
|
||||||
|
key, fsStats, ok := registerFilesystemStats(d.agent.fsStats, device, mountpoint, root, customName, d.ctx)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
d.agent.fsStats[key] = fsStats
|
||||||
|
name := key
|
||||||
|
if customName != "" {
|
||||||
|
name = customName
|
||||||
|
}
|
||||||
|
slog.Info("Detected disk", "name", name, "device", device, "mount", mountpoint, "io", key, "root", root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// addConfiguredRootFs resolves FILESYSTEM against partitions first, then falls
|
||||||
|
// back to direct diskstats matching for setups like ZFS where partitions do not
|
||||||
|
// expose the physical device name.
|
||||||
|
func (d *diskDiscovery) addConfiguredRootFs() bool {
|
||||||
|
if d.ctx.filesystem == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range d.partitions {
|
||||||
|
if filesystemMatchesPartitionSetting(d.ctx.filesystem, p) {
|
||||||
|
d.addFsStat(p.Device, p.Mountpoint, true, "")
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FILESYSTEM may name a physical disk absent from partitions (e.g. ZFS lists
|
||||||
|
// dataset paths like zroot/ROOT/default, not block devices).
|
||||||
|
if ioKey, match := findIoDevice(d.ctx.filesystem, d.ctx.diskIoCounters); match {
|
||||||
|
d.agent.fsStats[ioKey] = &system.FsStats{Root: true, Mountpoint: d.rootMountPoint}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Warn("Partition details not found", "filesystem", d.ctx.filesystem)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isRootFallbackPartition(p disk.PartitionStat, rootMountPoint string) bool {
|
||||||
|
return p.Mountpoint == rootMountPoint ||
|
||||||
|
(isDockerSpecialMountpoint(p.Mountpoint) && strings.HasPrefix(p.Device, "/dev"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// addPartitionRootFs handles the non-configured root fallback path when a
|
||||||
|
// partition looks like the active root mount but still needs translating to an
|
||||||
|
// I/O device key.
|
||||||
|
func (d *diskDiscovery) addPartitionRootFs(device, mountpoint string) bool {
|
||||||
|
fs, match := findIoDevice(filepath.Base(device), d.ctx.diskIoCounters)
|
||||||
|
if !match {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// The resolved I/O device is already known here, so use it directly to avoid
|
||||||
|
// a second fallback search inside buildFsStatRegistration.
|
||||||
|
d.addFsStat(fs, mountpoint, true, "")
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// addLastResortRootFs is only used when neither FILESYSTEM nor partition-based
|
||||||
|
// heuristics can identify root, so it picks the busiest I/O device as a final
|
||||||
|
// fallback and preserves the root mountpoint for usage collection.
|
||||||
|
func (d *diskDiscovery) addLastResortRootFs() {
|
||||||
|
rootKey := mostActiveIoDevice(d.ctx.diskIoCounters)
|
||||||
|
if rootKey != "" {
|
||||||
|
slog.Warn("Using most active device for root I/O; set FILESYSTEM to override", "device", rootKey)
|
||||||
|
} else {
|
||||||
|
rootKey = filepath.Base(d.rootMountPoint)
|
||||||
|
if _, exists := d.agent.fsStats[rootKey]; exists {
|
||||||
|
rootKey = "root"
|
||||||
|
}
|
||||||
|
slog.Warn("Root I/O device not detected; set FILESYSTEM to override")
|
||||||
|
}
|
||||||
|
d.agent.fsStats[rootKey] = &system.FsStats{Root: true, Mountpoint: d.rootMountPoint}
|
||||||
|
}
|
||||||
|
|
||||||
|
// findPartitionByFilesystemSetting matches an EXTRA_FILESYSTEMS entry against a
|
||||||
|
// discovered partition either by mountpoint or by device suffix.
|
||||||
|
func findPartitionByFilesystemSetting(filesystem string, partitions []disk.PartitionStat) (disk.PartitionStat, bool) {
|
||||||
|
for _, p := range partitions {
|
||||||
|
if strings.HasSuffix(p.Device, filesystem) || p.Mountpoint == filesystem {
|
||||||
|
return p, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return disk.PartitionStat{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// addConfiguredExtraFsEntry resolves one EXTRA_FILESYSTEMS entry, preferring a
|
||||||
|
// discovered partition and falling back to any path that disk.Usage accepts.
|
||||||
|
func (d *diskDiscovery) addConfiguredExtraFsEntry(filesystem, customName string) {
|
||||||
|
if p, found := findPartitionByFilesystemSetting(filesystem, d.partitions); found {
|
||||||
|
d.addFsStat(p.Device, p.Mountpoint, false, customName)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := d.usageFn(filesystem); err == nil {
|
||||||
|
d.addFsStat(filepath.Base(filesystem), filesystem, false, customName)
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
slog.Error("Invalid filesystem", "name", filesystem, "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addConfiguredExtraFilesystems parses and registers the comma-separated
|
||||||
|
// EXTRA_FILESYSTEMS env var entries.
|
||||||
|
func (d *diskDiscovery) addConfiguredExtraFilesystems(extraFilesystems string) {
|
||||||
|
for fsEntry := range strings.SplitSeq(extraFilesystems, ",") {
|
||||||
|
filesystem, customName := parseFilesystemEntry(fsEntry)
|
||||||
|
d.addConfiguredExtraFsEntry(filesystem, customName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// addPartitionExtraFs registers partitions mounted under /extra-filesystems so
|
||||||
|
// their display names can come from the folder name while their I/O keys still
|
||||||
|
// prefer the underlying partition device. Only direct children are matched to
|
||||||
|
// avoid registering nested virtual mounts (e.g. /proc, /sys) that are returned by
|
||||||
|
// disk.Partitions(true) when the host root is bind-mounted in /extra-filesystems.
|
||||||
|
func (d *diskDiscovery) addPartitionExtraFs(p disk.PartitionStat) {
|
||||||
|
if filepath.Dir(p.Mountpoint) != d.ctx.efPath {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
device, customName := extraFilesystemPartitionInfo(p)
|
||||||
|
d.addFsStat(device, p.Mountpoint, false, customName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// addExtraFilesystemFolders handles bare directories under /extra-filesystems
|
||||||
|
// that may not appear in partition discovery, while skipping mountpoints that
|
||||||
|
// were already registered from higher-fidelity sources.
|
||||||
|
func (d *diskDiscovery) addExtraFilesystemFolders(folderNames []string) {
|
||||||
|
existingMountpoints := make(map[string]bool, len(d.agent.fsStats))
|
||||||
|
for _, stats := range d.agent.fsStats {
|
||||||
|
existingMountpoints[stats.Mountpoint] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, folderName := range folderNames {
|
||||||
|
mountpoint := filepath.Join(d.ctx.efPath, folderName)
|
||||||
|
slog.Debug("/extra-filesystems", "mountpoint", mountpoint)
|
||||||
|
if existingMountpoints[mountpoint] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
device, customName := parseFilesystemEntry(folderName)
|
||||||
|
d.addFsStat(device, mountpoint, false, customName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Sets up the filesystems to monitor for disk usage and I/O.
|
// Sets up the filesystems to monitor for disk usage and I/O.
|
||||||
func (a *Agent) initializeDiskInfo() {
|
func (a *Agent) initializeDiskInfo() {
|
||||||
filesystem, _ := GetEnv("FILESYSTEM")
|
filesystem, _ := utils.GetEnv("FILESYSTEM")
|
||||||
efPath := "/extra-filesystems"
|
|
||||||
hasRoot := false
|
hasRoot := false
|
||||||
isWindows := runtime.GOOS == "windows"
|
isWindows := runtime.GOOS == "windows"
|
||||||
|
|
||||||
partitions, err := disk.Partitions(false)
|
partitions, err := disk.PartitionsWithContext(context.Background(), true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error("Error getting disk partitions", "err", err)
|
slog.Error("Error getting disk partitions", "err", err)
|
||||||
}
|
}
|
||||||
@@ -46,167 +289,223 @@ func (a *Agent) initializeDiskInfo() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ioContext := context.WithValue(a.sensorsContext,
|
|
||||||
// common.EnvKey, common.EnvMap{common.HostProcEnvKey: "/tmp/testproc"},
|
|
||||||
// )
|
|
||||||
// diskIoCounters, err := disk.IOCountersWithContext(ioContext)
|
|
||||||
|
|
||||||
diskIoCounters, err := disk.IOCounters()
|
diskIoCounters, err := disk.IOCounters()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error("Error getting diskstats", "err", err)
|
slog.Error("Error getting diskstats", "err", err)
|
||||||
}
|
}
|
||||||
slog.Debug("Disk I/O", "diskstats", diskIoCounters)
|
slog.Debug("Disk I/O", "diskstats", diskIoCounters)
|
||||||
|
ctx := fsRegistrationContext{
|
||||||
// Helper function to add a filesystem to fsStats if it doesn't exist
|
filesystem: filesystem,
|
||||||
addFsStat := func(device, mountpoint string, root bool, customName ...string) {
|
isWindows: isWindows,
|
||||||
var key string
|
diskIoCounters: diskIoCounters,
|
||||||
if isWindows {
|
efPath: "/extra-filesystems",
|
||||||
key = device
|
|
||||||
} else {
|
|
||||||
key = filepath.Base(device)
|
|
||||||
}
|
|
||||||
var ioMatch bool
|
|
||||||
if _, exists := a.fsStats[key]; !exists {
|
|
||||||
if root {
|
|
||||||
slog.Info("Detected root device", "name", key)
|
|
||||||
// Check if root device is in /proc/diskstats, use fallback if not
|
|
||||||
if _, ioMatch = diskIoCounters[key]; !ioMatch {
|
|
||||||
key, ioMatch = findIoDevice(filesystem, diskIoCounters, a.fsStats)
|
|
||||||
if !ioMatch {
|
|
||||||
slog.Info("Using I/O fallback", "device", device, "mountpoint", mountpoint, "fallback", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Check if non-root has diskstats and fall back to folder name if not
|
|
||||||
// Scenario: device is encrypted and named luks-2bcb02be-999d-4417-8d18-5c61e660fb6e - not in /proc/diskstats.
|
|
||||||
// However, the device can be specified by mounting folder from luks device at /extra-filesystems/sda1
|
|
||||||
if _, ioMatch = diskIoCounters[key]; !ioMatch {
|
|
||||||
efBase := filepath.Base(mountpoint)
|
|
||||||
if _, ioMatch = diskIoCounters[efBase]; ioMatch {
|
|
||||||
key = efBase
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fsStats := &system.FsStats{Root: root, Mountpoint: mountpoint}
|
|
||||||
if len(customName) > 0 && customName[0] != "" {
|
|
||||||
fsStats.Name = customName[0]
|
|
||||||
}
|
|
||||||
a.fsStats[key] = fsStats
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the appropriate root mount point for this system
|
// Get the appropriate root mount point for this system
|
||||||
rootMountPoint := a.getRootMountPoint()
|
discovery := diskDiscovery{
|
||||||
|
agent: a,
|
||||||
|
rootMountPoint: a.getRootMountPoint(),
|
||||||
|
partitions: partitions,
|
||||||
|
usageFn: disk.Usage,
|
||||||
|
ctx: ctx,
|
||||||
|
}
|
||||||
|
|
||||||
// Use FILESYSTEM env var to find root filesystem
|
hasRoot = discovery.addConfiguredRootFs()
|
||||||
if filesystem != "" {
|
|
||||||
for _, p := range partitions {
|
|
||||||
if strings.HasSuffix(p.Device, filesystem) || p.Mountpoint == filesystem {
|
|
||||||
addFsStat(p.Device, p.Mountpoint, true)
|
|
||||||
hasRoot = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !hasRoot {
|
|
||||||
slog.Warn("Partition details not found", "filesystem", filesystem)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add EXTRA_FILESYSTEMS env var values to fsStats
|
// Add EXTRA_FILESYSTEMS env var values to fsStats
|
||||||
if extraFilesystems, exists := GetEnv("EXTRA_FILESYSTEMS"); exists {
|
if extraFilesystems, exists := utils.GetEnv("EXTRA_FILESYSTEMS"); exists {
|
||||||
for _, fsEntry := range strings.Split(extraFilesystems, ",") {
|
discovery.addConfiguredExtraFilesystems(extraFilesystems)
|
||||||
// Parse custom name from format: device__customname
|
|
||||||
fs, customName := parseFilesystemEntry(fsEntry)
|
|
||||||
|
|
||||||
found := false
|
|
||||||
for _, p := range partitions {
|
|
||||||
if strings.HasSuffix(p.Device, fs) || p.Mountpoint == fs {
|
|
||||||
addFsStat(p.Device, p.Mountpoint, false, customName)
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// if not in partitions, test if we can get disk usage
|
|
||||||
if !found {
|
|
||||||
if _, err := disk.Usage(fs); err == nil {
|
|
||||||
addFsStat(filepath.Base(fs), fs, false, customName)
|
|
||||||
} else {
|
|
||||||
slog.Error("Invalid filesystem", "name", fs, "err", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Process partitions for various mount points
|
// Process partitions for various mount points
|
||||||
for _, p := range partitions {
|
for _, p := range partitions {
|
||||||
// fmt.Println(p.Device, p.Mountpoint)
|
if !hasRoot && isRootFallbackPartition(p, discovery.rootMountPoint) {
|
||||||
// Binary root fallback or docker root fallback
|
hasRoot = discovery.addPartitionRootFs(p.Device, p.Mountpoint)
|
||||||
if !hasRoot && (p.Mountpoint == rootMountPoint || (p.Mountpoint == "/etc/hosts" && strings.HasPrefix(p.Device, "/dev"))) {
|
|
||||||
fs, match := findIoDevice(filepath.Base(p.Device), diskIoCounters, a.fsStats)
|
|
||||||
if match {
|
|
||||||
addFsStat(fs, p.Mountpoint, true)
|
|
||||||
hasRoot = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if device is in /extra-filesystems
|
|
||||||
if strings.HasPrefix(p.Mountpoint, efPath) {
|
|
||||||
device, customName := parseFilesystemEntry(p.Mountpoint)
|
|
||||||
addFsStat(device, p.Mountpoint, false, customName)
|
|
||||||
}
|
}
|
||||||
|
discovery.addPartitionExtraFs(p)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check all folders in /extra-filesystems and add them if not already present
|
// Check all folders in /extra-filesystems and add them if not already present
|
||||||
if folders, err := os.ReadDir(efPath); err == nil {
|
if folders, err := os.ReadDir(discovery.ctx.efPath); err == nil {
|
||||||
existingMountpoints := make(map[string]bool)
|
folderNames := make([]string, 0, len(folders))
|
||||||
for _, stats := range a.fsStats {
|
|
||||||
existingMountpoints[stats.Mountpoint] = true
|
|
||||||
}
|
|
||||||
for _, folder := range folders {
|
for _, folder := range folders {
|
||||||
if folder.IsDir() {
|
if folder.IsDir() {
|
||||||
mountpoint := filepath.Join(efPath, folder.Name())
|
folderNames = append(folderNames, folder.Name())
|
||||||
slog.Debug("/extra-filesystems", "mountpoint", mountpoint)
|
|
||||||
if !existingMountpoints[mountpoint] {
|
|
||||||
device, customName := parseFilesystemEntry(folder.Name())
|
|
||||||
addFsStat(device, mountpoint, false, customName)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
discovery.addExtraFilesystemFolders(folderNames)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If no root filesystem set, use fallback
|
// If no root filesystem set, try the most active I/O device as a last
|
||||||
|
// resort (e.g. ZFS where dataset names are unrelated to disk names).
|
||||||
if !hasRoot {
|
if !hasRoot {
|
||||||
rootDevice, _ := findIoDevice(filepath.Base(filesystem), diskIoCounters, a.fsStats)
|
discovery.addLastResortRootFs()
|
||||||
slog.Info("Root disk", "mountpoint", rootMountPoint, "io", rootDevice)
|
|
||||||
a.fsStats[rootDevice] = &system.FsStats{Root: true, Mountpoint: rootMountPoint}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
a.pruneDuplicateRootExtraFilesystems()
|
||||||
a.initializeDiskIoStats(diskIoCounters)
|
a.initializeDiskIoStats(diskIoCounters)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns matching device from /proc/diskstats,
|
// Removes extra filesystems that mirror root usage (https://github.com/henrygd/beszel/issues/1428).
|
||||||
// or the device with the most reads if no match is found.
|
func (a *Agent) pruneDuplicateRootExtraFilesystems() {
|
||||||
// bool is true if a match was found.
|
var rootMountpoint string
|
||||||
func findIoDevice(filesystem string, diskIoCounters map[string]disk.IOCountersStat, fsStats map[string]*system.FsStats) (string, bool) {
|
for _, stats := range a.fsStats {
|
||||||
var maxReadBytes uint64
|
if stats != nil && stats.Root {
|
||||||
maxReadDevice := "/"
|
rootMountpoint = stats.Mountpoint
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if rootMountpoint == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
rootUsage, err := disk.Usage(rootMountpoint)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for name, stats := range a.fsStats {
|
||||||
|
if stats == nil || stats.Root {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
extraUsage, err := disk.Usage(stats.Mountpoint)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if hasSameDiskUsage(rootUsage, extraUsage) {
|
||||||
|
slog.Info("Ignoring duplicate FS", "name", name, "mount", stats.Mountpoint)
|
||||||
|
delete(a.fsStats, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasSameDiskUsage compares root/extra usage with a small byte tolerance.
|
||||||
|
func hasSameDiskUsage(a, b *disk.UsageStat) bool {
|
||||||
|
if a == nil || b == nil || a.Total == 0 || b.Total == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Allow minor drift between sequential disk usage calls.
|
||||||
|
const toleranceBytes uint64 = 16 * 1024 * 1024
|
||||||
|
return withinUsageTolerance(a.Total, b.Total, toleranceBytes) &&
|
||||||
|
withinUsageTolerance(a.Used, b.Used, toleranceBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// withinUsageTolerance reports whether two byte values differ by at most tolerance.
|
||||||
|
func withinUsageTolerance(a, b, tolerance uint64) bool {
|
||||||
|
if a >= b {
|
||||||
|
return a-b <= tolerance
|
||||||
|
}
|
||||||
|
return b-a <= tolerance
|
||||||
|
}
|
||||||
|
|
||||||
|
type ioMatchCandidate struct {
|
||||||
|
name string
|
||||||
|
bytes uint64
|
||||||
|
ops uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// findIoDevice prefers exact device/label matches, then falls back to a
|
||||||
|
// prefix-related candidate with the highest recent activity.
|
||||||
|
func findIoDevice(filesystem string, diskIoCounters map[string]disk.IOCountersStat) (string, bool) {
|
||||||
|
filesystem = normalizeDeviceName(filesystem)
|
||||||
|
if filesystem == "" {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
|
||||||
|
candidates := []ioMatchCandidate{}
|
||||||
|
|
||||||
for _, d := range diskIoCounters {
|
for _, d := range diskIoCounters {
|
||||||
if d.Name == filesystem || (d.Label != "" && d.Label == filesystem) {
|
if normalizeDeviceName(d.Name) == filesystem || (d.Label != "" && normalizeDeviceName(d.Label) == filesystem) {
|
||||||
return d.Name, true
|
return d.Name, true
|
||||||
}
|
}
|
||||||
if d.ReadBytes > maxReadBytes {
|
if prefixRelated(normalizeDeviceName(d.Name), filesystem) ||
|
||||||
// don't use if device already exists in fsStats
|
(d.Label != "" && prefixRelated(normalizeDeviceName(d.Label), filesystem)) {
|
||||||
if _, exists := fsStats[d.Name]; !exists {
|
candidates = append(candidates, ioMatchCandidate{
|
||||||
maxReadBytes = d.ReadBytes
|
name: d.Name,
|
||||||
maxReadDevice = d.Name
|
bytes: d.ReadBytes + d.WriteBytes,
|
||||||
|
ops: d.ReadCount + d.WriteCount,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(candidates) == 0 {
|
||||||
|
return "", false
|
||||||
}
|
}
|
||||||
return maxReadDevice, false
|
|
||||||
|
best := candidates[0]
|
||||||
|
for _, c := range candidates[1:] {
|
||||||
|
if c.bytes > best.bytes ||
|
||||||
|
(c.bytes == best.bytes && c.ops > best.ops) ||
|
||||||
|
(c.bytes == best.bytes && c.ops == best.ops && c.name < best.name) {
|
||||||
|
best = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Info("Using disk I/O fallback", "requested", filesystem, "selected", best.name)
|
||||||
|
return best.name, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// mostActiveIoDevice returns the device with the highest I/O activity,
|
||||||
|
// or "" if diskIoCounters is empty.
|
||||||
|
func mostActiveIoDevice(diskIoCounters map[string]disk.IOCountersStat) string {
|
||||||
|
var best ioMatchCandidate
|
||||||
|
for _, d := range diskIoCounters {
|
||||||
|
c := ioMatchCandidate{
|
||||||
|
name: d.Name,
|
||||||
|
bytes: d.ReadBytes + d.WriteBytes,
|
||||||
|
ops: d.ReadCount + d.WriteCount,
|
||||||
|
}
|
||||||
|
if best.name == "" || c.bytes > best.bytes ||
|
||||||
|
(c.bytes == best.bytes && c.ops > best.ops) ||
|
||||||
|
(c.bytes == best.bytes && c.ops == best.ops && c.name < best.name) {
|
||||||
|
best = c
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return best.name
|
||||||
|
}
|
||||||
|
|
||||||
|
// prefixRelated reports whether either identifier is a prefix of the other.
|
||||||
|
func prefixRelated(a, b string) bool {
|
||||||
|
if a == "" || b == "" || a == b {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return strings.HasPrefix(a, b) || strings.HasPrefix(b, a)
|
||||||
|
}
|
||||||
|
|
||||||
|
// filesystemMatchesPartitionSetting checks whether a FILESYSTEM env var value
|
||||||
|
// matches a partition by mountpoint, exact device name, or prefix relationship
|
||||||
|
// (e.g. FILESYSTEM=ada0 matches partition /dev/ada0p2).
|
||||||
|
func filesystemMatchesPartitionSetting(filesystem string, p disk.PartitionStat) bool {
|
||||||
|
filesystem = strings.TrimSpace(filesystem)
|
||||||
|
if filesystem == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if p.Mountpoint == filesystem {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
fsName := normalizeDeviceName(filesystem)
|
||||||
|
partName := normalizeDeviceName(p.Device)
|
||||||
|
if fsName == "" || partName == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if fsName == partName {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return prefixRelated(partName, fsName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeDeviceName canonicalizes device strings for comparisons.
|
||||||
|
func normalizeDeviceName(value string) string {
|
||||||
|
name := filepath.Base(strings.TrimSpace(value))
|
||||||
|
if name == "." {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return name
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sets start values for disk I/O stats.
|
// Sets start values for disk I/O stats.
|
||||||
func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersStat) {
|
func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersStat) {
|
||||||
|
a.fsNames = a.fsNames[:0]
|
||||||
|
now := time.Now()
|
||||||
for device, stats := range a.fsStats {
|
for device, stats := range a.fsStats {
|
||||||
// skip if not in diskIoCounters
|
// skip if not in diskIoCounters
|
||||||
d, exists := diskIoCounters[device]
|
d, exists := diskIoCounters[device]
|
||||||
@@ -215,7 +514,7 @@ func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersS
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// populate initial values
|
// populate initial values
|
||||||
stats.Time = time.Now()
|
stats.Time = now
|
||||||
stats.TotalRead = d.ReadBytes
|
stats.TotalRead = d.ReadBytes
|
||||||
stats.TotalWrite = d.WriteBytes
|
stats.TotalWrite = d.WriteBytes
|
||||||
// add to list of valid io device names
|
// add to list of valid io device names
|
||||||
@@ -225,15 +524,26 @@ func (a *Agent) initializeDiskIoStats(diskIoCounters map[string]disk.IOCountersS
|
|||||||
|
|
||||||
// Updates disk usage statistics for all monitored filesystems
|
// Updates disk usage statistics for all monitored filesystems
|
||||||
func (a *Agent) updateDiskUsage(systemStats *system.Stats) {
|
func (a *Agent) updateDiskUsage(systemStats *system.Stats) {
|
||||||
|
// Check if we should skip extra filesystem collection to avoid waking sleeping disks.
|
||||||
|
// Root filesystem is always updated since it can't be sleeping while the agent runs.
|
||||||
|
// Always collect on first call (lastDiskUsageUpdate is zero) or if caching is disabled.
|
||||||
|
cacheExtraFs := a.diskUsageCacheDuration > 0 &&
|
||||||
|
!a.lastDiskUsageUpdate.IsZero() &&
|
||||||
|
time.Since(a.lastDiskUsageUpdate) < a.diskUsageCacheDuration
|
||||||
|
|
||||||
// disk usage
|
// disk usage
|
||||||
for _, stats := range a.fsStats {
|
for _, stats := range a.fsStats {
|
||||||
|
// Skip non-root filesystems if caching is active
|
||||||
|
if cacheExtraFs && !stats.Root {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if d, err := disk.Usage(stats.Mountpoint); err == nil {
|
if d, err := disk.Usage(stats.Mountpoint); err == nil {
|
||||||
stats.DiskTotal = bytesToGigabytes(d.Total)
|
stats.DiskTotal = utils.BytesToGigabytes(d.Total)
|
||||||
stats.DiskUsed = bytesToGigabytes(d.Used)
|
stats.DiskUsed = utils.BytesToGigabytes(d.Used)
|
||||||
if stats.Root {
|
if stats.Root {
|
||||||
systemStats.DiskTotal = bytesToGigabytes(d.Total)
|
systemStats.DiskTotal = utils.BytesToGigabytes(d.Total)
|
||||||
systemStats.DiskUsed = bytesToGigabytes(d.Used)
|
systemStats.DiskUsed = utils.BytesToGigabytes(d.Used)
|
||||||
systemStats.DiskPct = twoDecimals(d.UsedPercent)
|
systemStats.DiskPct = utils.TwoDecimals(d.UsedPercent)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// reset stats if error (likely unmounted)
|
// reset stats if error (likely unmounted)
|
||||||
@@ -244,6 +554,11 @@ func (a *Agent) updateDiskUsage(systemStats *system.Stats) {
|
|||||||
stats.TotalWrite = 0
|
stats.TotalWrite = 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Update the last disk usage update time when we've collected extra filesystems
|
||||||
|
if !cacheExtraFs {
|
||||||
|
a.lastDiskUsageUpdate = time.Now()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Updates disk I/O statistics for all monitored filesystems
|
// Updates disk I/O statistics for all monitored filesystems
|
||||||
@@ -281,8 +596,8 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
|||||||
|
|
||||||
diskIORead := (d.ReadBytes - prev.readBytes) * 1000 / msElapsed
|
diskIORead := (d.ReadBytes - prev.readBytes) * 1000 / msElapsed
|
||||||
diskIOWrite := (d.WriteBytes - prev.writeBytes) * 1000 / msElapsed
|
diskIOWrite := (d.WriteBytes - prev.writeBytes) * 1000 / msElapsed
|
||||||
readMbPerSecond := bytesToMegabytes(float64(diskIORead))
|
readMbPerSecond := utils.BytesToMegabytes(float64(diskIORead))
|
||||||
writeMbPerSecond := bytesToMegabytes(float64(diskIOWrite))
|
writeMbPerSecond := utils.BytesToMegabytes(float64(diskIOWrite))
|
||||||
|
|
||||||
// validate values
|
// validate values
|
||||||
if readMbPerSecond > 50_000 || writeMbPerSecond > 50_000 {
|
if readMbPerSecond > 50_000 || writeMbPerSecond > 50_000 {
|
||||||
@@ -316,9 +631,17 @@ func (a *Agent) updateDiskIo(cacheTimeMs uint16, systemStats *system.Stats) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// getRootMountPoint returns the appropriate root mount point for the system
|
// getRootMountPoint returns the appropriate root mount point for the system.
|
||||||
|
// On Windows it returns the system drive (e.g. "C:").
|
||||||
// For immutable systems like Fedora Silverblue, it returns /sysroot instead of /
|
// For immutable systems like Fedora Silverblue, it returns /sysroot instead of /
|
||||||
func (a *Agent) getRootMountPoint() string {
|
func (a *Agent) getRootMountPoint() string {
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
if sd := os.Getenv("SystemDrive"); sd != "" {
|
||||||
|
return sd
|
||||||
|
}
|
||||||
|
return "C:"
|
||||||
|
}
|
||||||
|
|
||||||
// 1. Check if /etc/os-release contains indicators of an immutable system
|
// 1. Check if /etc/os-release contains indicators of an immutable system
|
||||||
if osReleaseContent, err := os.ReadFile("/etc/os-release"); err == nil {
|
if osReleaseContent, err := os.ReadFile("/etc/os-release"); err == nil {
|
||||||
content := string(osReleaseContent)
|
content := string(osReleaseContent)
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -7,6 +6,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
"github.com/shirou/gopsutil/v4/disk"
|
"github.com/shirou/gopsutil/v4/disk"
|
||||||
@@ -93,19 +93,683 @@ func TestParseFilesystemEntry(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
func TestExtraFilesystemPartitionInfo(t *testing.T) {
|
||||||
// Set up environment variables
|
t.Run("uses partition device for label-only mountpoint", func(t *testing.T) {
|
||||||
oldEnv := os.Getenv("EXTRA_FILESYSTEMS")
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
defer func() {
|
Device: "/dev/sdc",
|
||||||
if oldEnv != "" {
|
Mountpoint: "/extra-filesystems/Share",
|
||||||
os.Setenv("EXTRA_FILESYSTEMS", oldEnv)
|
})
|
||||||
} else {
|
|
||||||
os.Unsetenv("EXTRA_FILESYSTEMS")
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
|
assert.Equal(t, "/dev/sdc", device)
|
||||||
|
assert.Equal(t, "", customName)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses custom name from mountpoint suffix", func(t *testing.T) {
|
||||||
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
|
Device: "/dev/sdc",
|
||||||
|
Mountpoint: "/extra-filesystems/sdc__Share",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, "/dev/sdc", device)
|
||||||
|
assert.Equal(t, "Share", customName)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to folder device when partition device is unavailable", func(t *testing.T) {
|
||||||
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
|
Mountpoint: "/extra-filesystems/sdc__Share",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, "sdc", device)
|
||||||
|
assert.Equal(t, "Share", customName)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("supports custom name without folder device prefix", func(t *testing.T) {
|
||||||
|
device, customName := extraFilesystemPartitionInfo(disk.PartitionStat{
|
||||||
|
Device: "/dev/sdc",
|
||||||
|
Mountpoint: "/extra-filesystems/__Share",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, "/dev/sdc", device)
|
||||||
|
assert.Equal(t, "Share", customName)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBuildFsStatRegistration(t *testing.T) {
|
||||||
|
t.Run("uses basename for non-windows exact io match", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/sda1",
|
||||||
|
"/mnt/data",
|
||||||
|
false,
|
||||||
|
"archive",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda1": {Name: "sda1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "sda1", key)
|
||||||
|
assert.Equal(t, "/mnt/data", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "archive", stats.Name)
|
||||||
|
assert.False(t, stats.Root)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("maps root partition to io device by prefix", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/ada0p2",
|
||||||
|
"/",
|
||||||
|
true,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"ada0": {Name: "ada0", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "ada0", key)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses filesystem setting as root fallback", func(t *testing.T) {
|
||||||
|
key, _, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"overlay",
|
||||||
|
"/",
|
||||||
|
true,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
filesystem: "nvme0n1p2",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nvme0n1": {Name: "nvme0n1", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "nvme0n1", key)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("prefers parsed extra-filesystems device over mapper device", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/mapper/luks-2bcb02be-999d-4417-8d18-5c61e660fb6e",
|
||||||
|
"/extra-filesystems/nvme0n1p2__Archive",
|
||||||
|
false,
|
||||||
|
"Archive",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"dm-1": {Name: "dm-1", Label: "luks-2bcb02be-999d-4417-8d18-5c61e660fb6e"},
|
||||||
|
"nvme0n1p2": {Name: "nvme0n1p2"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "nvme0n1p2", key)
|
||||||
|
assert.Equal(t, "Archive", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to mapper io device when folder device cannot be resolved", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
"/dev/mapper/luks-2bcb02be-999d-4417-8d18-5c61e660fb6e",
|
||||||
|
"/extra-filesystems/Archive",
|
||||||
|
false,
|
||||||
|
"Archive",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"dm-1": {Name: "dm-1", Label: "luks-2bcb02be-999d-4417-8d18-5c61e660fb6e"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "dm-1", key)
|
||||||
|
assert.Equal(t, "Archive", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses full device name on windows", func(t *testing.T) {
|
||||||
|
key, _, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{},
|
||||||
|
`C:`,
|
||||||
|
`C:\\`,
|
||||||
|
false,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: true,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
`C:`: {Name: `C:`},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, `C:`, key)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("skips existing key", func(t *testing.T) {
|
||||||
|
key, stats, ok := registerFilesystemStats(
|
||||||
|
map[string]*system.FsStats{"sda1": {Mountpoint: "/existing"}},
|
||||||
|
"/dev/sda1",
|
||||||
|
"/mnt/data",
|
||||||
|
false,
|
||||||
|
"",
|
||||||
|
fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda1": {Name: "sda1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Empty(t, key)
|
||||||
|
assert.Nil(t, stats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddConfiguredRootFs(t *testing.T) {
|
||||||
|
t.Run("adds root from matching partition", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
rootMountPoint: "/",
|
||||||
|
partitions: []disk.PartitionStat{{Device: "/dev/ada0p2", Mountpoint: "/"}},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
filesystem: "/dev/ada0p2",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"ada0": {Name: "ada0", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addConfiguredRootFs()
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
stats, exists := agent.fsStats["ada0"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("adds root from io device when partition is missing", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
rootMountPoint: "/sysroot",
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
filesystem: "zroot",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nda0": {Name: "nda0", Label: "zroot", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addConfiguredRootFs()
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
stats, exists := agent.fsStats["nda0"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/sysroot", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns false when filesystem cannot be resolved", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
rootMountPoint: "/",
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
filesystem: "missing-disk",
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addConfiguredRootFs()
|
||||||
|
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddPartitionRootFs(t *testing.T) {
|
||||||
|
t.Run("adds root from fallback partition candidate", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nvme0n1": {Name: "nvme0n1", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
ok := discovery.addPartitionRootFs("/dev/nvme0n1p2", "/")
|
||||||
|
|
||||||
|
assert.True(t, ok)
|
||||||
|
stats, exists := agent.fsStats["nvme0n1"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns false when no io device matches", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{agent: agent, ctx: fsRegistrationContext{diskIoCounters: map[string]disk.IOCountersStat{}}}
|
||||||
|
|
||||||
|
ok := discovery.addPartitionRootFs("/dev/mapper/root", "/")
|
||||||
|
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddLastResortRootFs(t *testing.T) {
|
||||||
|
t.Run("uses most active io device when available", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{agent: agent, rootMountPoint: "/", ctx: fsRegistrationContext{diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda", ReadBytes: 5000, WriteBytes: 5000},
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
}}}
|
||||||
|
|
||||||
|
discovery.addLastResortRootFs()
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["sda"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to root key when mountpoint basename collides", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: map[string]*system.FsStats{
|
||||||
|
"sysroot": {Mountpoint: "/extra-filesystems/sysroot"},
|
||||||
|
}}
|
||||||
|
discovery := diskDiscovery{agent: agent, rootMountPoint: "/sysroot", ctx: fsRegistrationContext{diskIoCounters: map[string]disk.IOCountersStat{}}}
|
||||||
|
|
||||||
|
discovery.addLastResortRootFs()
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["root"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.True(t, stats.Root)
|
||||||
|
assert.Equal(t, "/sysroot", stats.Mountpoint)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddConfiguredExtraFsEntry(t *testing.T) {
|
||||||
|
t.Run("uses matching partition when present", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
partitions: []disk.PartitionStat{{Device: "/dev/sdb1", Mountpoint: "/mnt/backup"}},
|
||||||
|
usageFn: func(string) (*disk.UsageStat, error) {
|
||||||
|
t.Fatal("usage fallback should not be called when partition matches")
|
||||||
|
return nil, nil
|
||||||
|
},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sdb1": {Name: "sdb1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFsEntry("sdb1", "backup")
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["sdb1"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/mnt/backup", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "backup", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("falls back to usage-validated path", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
usageFn: func(path string) (*disk.UsageStat, error) {
|
||||||
|
assert.Equal(t, "/srv/archive", path)
|
||||||
|
return &disk.UsageStat{}, nil
|
||||||
|
},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"archive": {Name: "archive"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFsEntry("/srv/archive", "archive")
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["archive"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/srv/archive", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "archive", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ignores invalid filesystem entry", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
usageFn: func(string) (*disk.UsageStat, error) {
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFsEntry("/missing/archive", "")
|
||||||
|
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddConfiguredExtraFilesystems(t *testing.T) {
|
||||||
|
t.Run("parses and registers multiple configured filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
partitions: []disk.PartitionStat{{Device: "/dev/sda1", Mountpoint: "/mnt/fast"}},
|
||||||
|
usageFn: func(path string) (*disk.UsageStat, error) {
|
||||||
|
if path == "/srv/archive" {
|
||||||
|
return &disk.UsageStat{}, nil
|
||||||
|
}
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
},
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"sda1": {Name: "sda1"},
|
||||||
|
"archive": {Name: "archive"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addConfiguredExtraFilesystems("sda1__fast,/srv/archive__cold")
|
||||||
|
|
||||||
|
assert.Contains(t, agent.fsStats, "sda1")
|
||||||
|
assert.Equal(t, "fast", agent.fsStats["sda1"].Name)
|
||||||
|
assert.Contains(t, agent.fsStats, "archive")
|
||||||
|
assert.Equal(t, "cold", agent.fsStats["archive"].Name)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddExtraFilesystemFolders(t *testing.T) {
|
||||||
|
t.Run("adds missing folders and skips existing mountpoints", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: map[string]*system.FsStats{
|
||||||
|
"existing": {Mountpoint: "/extra-filesystems/existing"},
|
||||||
|
}}
|
||||||
|
discovery := diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
efPath: "/extra-filesystems",
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"newdisk": {Name: "newdisk"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
discovery.addExtraFilesystemFolders([]string{"existing", "newdisk__Archive"})
|
||||||
|
|
||||||
|
assert.Len(t, agent.fsStats, 2)
|
||||||
|
stats, exists := agent.fsStats["newdisk"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/extra-filesystems/newdisk__Archive", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "Archive", stats.Name)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAddPartitionExtraFs(t *testing.T) {
|
||||||
|
makeDiscovery := func(agent *Agent) diskDiscovery {
|
||||||
|
return diskDiscovery{
|
||||||
|
agent: agent,
|
||||||
|
ctx: fsRegistrationContext{
|
||||||
|
isWindows: false,
|
||||||
|
efPath: "/extra-filesystems",
|
||||||
|
diskIoCounters: map[string]disk.IOCountersStat{
|
||||||
|
"nvme0n1p1": {Name: "nvme0n1p1"},
|
||||||
|
"nvme1n1": {Name: "nvme1n1"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("registers direct child of extra-filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
d.addPartitionExtraFs(disk.PartitionStat{
|
||||||
|
Device: "/dev/nvme0n1p1",
|
||||||
|
Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root",
|
||||||
|
})
|
||||||
|
|
||||||
|
stats, exists := agent.fsStats["nvme0n1p1"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "/extra-filesystems/nvme0n1p1__caddy1-root", stats.Mountpoint)
|
||||||
|
assert.Equal(t, "caddy1-root", stats.Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("skips nested mount under extra-filesystem bind mount", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
// These simulate the virtual mounts that appear when host / is bind-mounted
|
||||||
|
// with disk.Partitions(all=true) — e.g. /proc, /sys, /dev visible under the mount.
|
||||||
|
for _, nested := range []string{
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/proc",
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/sys",
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/dev",
|
||||||
|
"/extra-filesystems/nvme0n1p1__caddy1-root/run",
|
||||||
|
} {
|
||||||
|
d.addPartitionExtraFs(disk.PartitionStat{Device: "tmpfs", Mountpoint: nested})
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("registers both direct children, skips their nested mounts", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
partitions := []disk.PartitionStat{
|
||||||
|
{Device: "/dev/nvme0n1p1", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root"},
|
||||||
|
{Device: "/dev/nvme1n1", Mountpoint: "/extra-filesystems/nvme1n1__caddy1-docker"},
|
||||||
|
{Device: "proc", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/proc"},
|
||||||
|
{Device: "sysfs", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/sys"},
|
||||||
|
{Device: "overlay", Mountpoint: "/extra-filesystems/nvme0n1p1__caddy1-root/var/lib/docker"},
|
||||||
|
}
|
||||||
|
for _, p := range partitions {
|
||||||
|
d.addPartitionExtraFs(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Len(t, agent.fsStats, 2)
|
||||||
|
assert.Equal(t, "caddy1-root", agent.fsStats["nvme0n1p1"].Name)
|
||||||
|
assert.Equal(t, "caddy1-docker", agent.fsStats["nvme1n1"].Name)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("skips partition not under extra-filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{fsStats: make(map[string]*system.FsStats)}
|
||||||
|
d := makeDiscovery(agent)
|
||||||
|
|
||||||
|
d.addPartitionExtraFs(disk.PartitionStat{
|
||||||
|
Device: "/dev/nvme0n1p1",
|
||||||
|
Mountpoint: "/",
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Empty(t, agent.fsStats)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFindIoDevice(t *testing.T) {
|
||||||
|
t.Run("matches by device name", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda"},
|
||||||
|
"sdb": {Name: "sdb"},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("sdb", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "sdb", device)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("matches by device label", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda", Label: "rootfs"},
|
||||||
|
"sdb": {Name: "sdb"},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("rootfs", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "sda", device)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns no match when not found", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda"},
|
||||||
|
"sdb": {Name: "sdb"},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("nvme0n1p1", ioCounters)
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Equal(t, "", device)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses uncertain unique prefix fallback", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"nvme0n1": {Name: "nvme0n1"},
|
||||||
|
"sda": {Name: "sda"},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("nvme0n1p2", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "nvme0n1", device)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses dominant activity when prefix matches are ambiguous", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda", ReadBytes: 5000, WriteBytes: 5000, ReadCount: 100, WriteCount: 100},
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 1000, WriteBytes: 1000, ReadCount: 50, WriteCount: 50},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("sd", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "sda", device)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses highest activity when ambiguous without dominance", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda", ReadBytes: 3000, WriteBytes: 3000, ReadCount: 50, WriteCount: 50},
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 2500, WriteBytes: 2500, ReadCount: 40, WriteCount: 40},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("sd", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "sda", device)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("matches /dev/-prefixed partition to parent disk", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"nda0": {Name: "nda0", ReadBytes: 1000, WriteBytes: 1000},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("/dev/nda0p2", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "nda0", device)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses deterministic name tie-breaker", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 2000, WriteBytes: 2000, ReadCount: 10, WriteCount: 10},
|
||||||
|
"sda": {Name: "sda", ReadBytes: 2000, WriteBytes: 2000, ReadCount: 10, WriteCount: 10},
|
||||||
|
}
|
||||||
|
|
||||||
|
device, ok := findIoDevice("sd", ioCounters)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, "sda", device)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFilesystemMatchesPartitionSetting(t *testing.T) {
|
||||||
|
p := disk.PartitionStat{Device: "/dev/ada0p2", Mountpoint: "/"}
|
||||||
|
|
||||||
|
t.Run("matches mountpoint setting", func(t *testing.T) {
|
||||||
|
assert.True(t, filesystemMatchesPartitionSetting("/", p))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("matches exact partition setting", func(t *testing.T) {
|
||||||
|
assert.True(t, filesystemMatchesPartitionSetting("ada0p2", p))
|
||||||
|
assert.True(t, filesystemMatchesPartitionSetting("/dev/ada0p2", p))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("matches prefix-style parent setting", func(t *testing.T) {
|
||||||
|
assert.True(t, filesystemMatchesPartitionSetting("ada0", p))
|
||||||
|
assert.True(t, filesystemMatchesPartitionSetting("/dev/ada0", p))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("does not match unrelated device", func(t *testing.T) {
|
||||||
|
assert.False(t, filesystemMatchesPartitionSetting("sda", p))
|
||||||
|
assert.False(t, filesystemMatchesPartitionSetting("nvme0n1", p))
|
||||||
|
assert.False(t, filesystemMatchesPartitionSetting("", p))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMostActiveIoDevice(t *testing.T) {
|
||||||
|
t.Run("returns most active device", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"nda0": {Name: "nda0", ReadBytes: 5000, WriteBytes: 5000, ReadCount: 100, WriteCount: 100},
|
||||||
|
"nda1": {Name: "nda1", ReadBytes: 1000, WriteBytes: 1000, ReadCount: 50, WriteCount: 50},
|
||||||
|
}
|
||||||
|
assert.Equal(t, "nda0", mostActiveIoDevice(ioCounters))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("uses deterministic tie-breaker", func(t *testing.T) {
|
||||||
|
ioCounters := map[string]disk.IOCountersStat{
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 1000, WriteBytes: 1000, ReadCount: 10, WriteCount: 10},
|
||||||
|
"sda": {Name: "sda", ReadBytes: 1000, WriteBytes: 1000, ReadCount: 10, WriteCount: 10},
|
||||||
|
}
|
||||||
|
assert.Equal(t, "sda", mostActiveIoDevice(ioCounters))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns empty for empty map", func(t *testing.T) {
|
||||||
|
assert.Equal(t, "", mostActiveIoDevice(map[string]disk.IOCountersStat{}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsDockerSpecialMountpoint(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
mountpoint string
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
{name: "hosts", mountpoint: "/etc/hosts", expected: true},
|
||||||
|
{name: "resolv", mountpoint: "/etc/resolv.conf", expected: true},
|
||||||
|
{name: "hostname", mountpoint: "/etc/hostname", expected: true},
|
||||||
|
{name: "root", mountpoint: "/", expected: false},
|
||||||
|
{name: "passwd", mountpoint: "/etc/passwd", expected: false},
|
||||||
|
{name: "extra-filesystem", mountpoint: "/extra-filesystems/sda1", expected: false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
assert.Equal(t, tc.expected, isDockerSpecialMountpoint(tc.mountpoint))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
||||||
// Test with custom names
|
// Test with custom names
|
||||||
os.Setenv("EXTRA_FILESYSTEMS", "sda1__my-storage,/dev/sdb1__backup-drive,nvme0n1p2")
|
t.Setenv("EXTRA_FILESYSTEMS", "sda1__my-storage,/dev/sdb1__backup-drive,nvme0n1p2")
|
||||||
|
|
||||||
// Mock disk partitions (we'll just test the parsing logic)
|
// Mock disk partitions (we'll just test the parsing logic)
|
||||||
// Since the actual disk operations are system-dependent, we'll focus on the parsing
|
// Since the actual disk operations are system-dependent, we'll focus on the parsing
|
||||||
@@ -133,7 +797,7 @@ func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
|||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
t.Run("env_"+tc.envValue, func(t *testing.T) {
|
t.Run("env_"+tc.envValue, func(t *testing.T) {
|
||||||
os.Setenv("EXTRA_FILESYSTEMS", tc.envValue)
|
t.Setenv("EXTRA_FILESYSTEMS", tc.envValue)
|
||||||
|
|
||||||
// Create mock partitions that would match our test cases
|
// Create mock partitions that would match our test cases
|
||||||
partitions := []disk.PartitionStat{}
|
partitions := []disk.PartitionStat{}
|
||||||
@@ -154,7 +818,7 @@ func TestInitializeDiskInfoWithCustomNames(t *testing.T) {
|
|||||||
// Test the parsing logic by calling the relevant part
|
// Test the parsing logic by calling the relevant part
|
||||||
// We'll create a simplified version to test just the parsing
|
// We'll create a simplified version to test just the parsing
|
||||||
extraFilesystems := tc.envValue
|
extraFilesystems := tc.envValue
|
||||||
for _, fsEntry := range strings.Split(extraFilesystems, ",") {
|
for fsEntry := range strings.SplitSeq(extraFilesystems, ",") {
|
||||||
// Parse the entry
|
// Parse the entry
|
||||||
fsEntry = strings.TrimSpace(fsEntry)
|
fsEntry = strings.TrimSpace(fsEntry)
|
||||||
var fs, customName string
|
var fs, customName string
|
||||||
@@ -233,3 +897,150 @@ func TestExtraFsKeyGeneration(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestDiskUsageCaching(t *testing.T) {
|
||||||
|
t.Run("caching disabled updates all filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
fsStats: map[string]*system.FsStats{
|
||||||
|
"sda": {Root: true, Mountpoint: "/"},
|
||||||
|
"sdb": {Root: false, Mountpoint: "/mnt/storage"},
|
||||||
|
},
|
||||||
|
diskUsageCacheDuration: 0, // caching disabled
|
||||||
|
}
|
||||||
|
|
||||||
|
var stats system.Stats
|
||||||
|
agent.updateDiskUsage(&stats)
|
||||||
|
|
||||||
|
// Both should be updated (non-zero values from disk.Usage)
|
||||||
|
// Root stats should be populated in systemStats
|
||||||
|
assert.True(t, agent.lastDiskUsageUpdate.IsZero() || !agent.lastDiskUsageUpdate.IsZero(),
|
||||||
|
"lastDiskUsageUpdate should be set when caching is disabled")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("caching enabled always updates root filesystem", func(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
fsStats: map[string]*system.FsStats{
|
||||||
|
"sda": {Root: true, Mountpoint: "/", DiskTotal: 100, DiskUsed: 50},
|
||||||
|
"sdb": {Root: false, Mountpoint: "/mnt/storage", DiskTotal: 200, DiskUsed: 100},
|
||||||
|
},
|
||||||
|
diskUsageCacheDuration: 1 * time.Hour,
|
||||||
|
lastDiskUsageUpdate: time.Now(), // cache is fresh
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store original extra fs values
|
||||||
|
originalExtraTotal := agent.fsStats["sdb"].DiskTotal
|
||||||
|
originalExtraUsed := agent.fsStats["sdb"].DiskUsed
|
||||||
|
|
||||||
|
var stats system.Stats
|
||||||
|
agent.updateDiskUsage(&stats)
|
||||||
|
|
||||||
|
// Root should be updated (systemStats populated from disk.Usage call)
|
||||||
|
// We can't easily check if disk.Usage was called, but we verify the flow works
|
||||||
|
|
||||||
|
// Extra filesystem should retain cached values (not reset)
|
||||||
|
assert.Equal(t, originalExtraTotal, agent.fsStats["sdb"].DiskTotal,
|
||||||
|
"extra filesystem DiskTotal should be unchanged when cached")
|
||||||
|
assert.Equal(t, originalExtraUsed, agent.fsStats["sdb"].DiskUsed,
|
||||||
|
"extra filesystem DiskUsed should be unchanged when cached")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("first call always updates all filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
fsStats: map[string]*system.FsStats{
|
||||||
|
"sda": {Root: true, Mountpoint: "/"},
|
||||||
|
"sdb": {Root: false, Mountpoint: "/mnt/storage"},
|
||||||
|
},
|
||||||
|
diskUsageCacheDuration: 1 * time.Hour,
|
||||||
|
// lastDiskUsageUpdate is zero (first call)
|
||||||
|
}
|
||||||
|
|
||||||
|
var stats system.Stats
|
||||||
|
agent.updateDiskUsage(&stats)
|
||||||
|
|
||||||
|
// After first call, lastDiskUsageUpdate should be set
|
||||||
|
assert.False(t, agent.lastDiskUsageUpdate.IsZero(),
|
||||||
|
"lastDiskUsageUpdate should be set after first call")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("expired cache updates extra filesystems", func(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
fsStats: map[string]*system.FsStats{
|
||||||
|
"sda": {Root: true, Mountpoint: "/"},
|
||||||
|
"sdb": {Root: false, Mountpoint: "/mnt/storage"},
|
||||||
|
},
|
||||||
|
diskUsageCacheDuration: 1 * time.Millisecond,
|
||||||
|
lastDiskUsageUpdate: time.Now().Add(-1 * time.Second), // cache expired
|
||||||
|
}
|
||||||
|
|
||||||
|
var stats system.Stats
|
||||||
|
agent.updateDiskUsage(&stats)
|
||||||
|
|
||||||
|
// lastDiskUsageUpdate should be refreshed since cache expired
|
||||||
|
assert.True(t, time.Since(agent.lastDiskUsageUpdate) < time.Second,
|
||||||
|
"lastDiskUsageUpdate should be refreshed when cache expires")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHasSameDiskUsage(t *testing.T) {
|
||||||
|
const toleranceBytes uint64 = 16 * 1024 * 1024
|
||||||
|
|
||||||
|
t.Run("returns true when totals and usage are equal", func(t *testing.T) {
|
||||||
|
a := &disk.UsageStat{Total: 100 * 1024 * 1024 * 1024, Used: 42 * 1024 * 1024 * 1024}
|
||||||
|
b := &disk.UsageStat{Total: 100 * 1024 * 1024 * 1024, Used: 42 * 1024 * 1024 * 1024}
|
||||||
|
assert.True(t, hasSameDiskUsage(a, b))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns true within tolerance", func(t *testing.T) {
|
||||||
|
a := &disk.UsageStat{Total: 100 * 1024 * 1024 * 1024, Used: 42 * 1024 * 1024 * 1024}
|
||||||
|
b := &disk.UsageStat{
|
||||||
|
Total: a.Total + toleranceBytes - 1,
|
||||||
|
Used: a.Used - toleranceBytes + 1,
|
||||||
|
}
|
||||||
|
assert.True(t, hasSameDiskUsage(a, b))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns false when total exceeds tolerance", func(t *testing.T) {
|
||||||
|
a := &disk.UsageStat{Total: 100 * 1024 * 1024 * 1024, Used: 42 * 1024 * 1024 * 1024}
|
||||||
|
b := &disk.UsageStat{
|
||||||
|
Total: a.Total + toleranceBytes + 1,
|
||||||
|
Used: a.Used,
|
||||||
|
}
|
||||||
|
assert.False(t, hasSameDiskUsage(a, b))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns false for nil or zero total", func(t *testing.T) {
|
||||||
|
assert.False(t, hasSameDiskUsage(nil, &disk.UsageStat{Total: 1, Used: 1}))
|
||||||
|
assert.False(t, hasSameDiskUsage(&disk.UsageStat{Total: 1, Used: 1}, nil))
|
||||||
|
assert.False(t, hasSameDiskUsage(&disk.UsageStat{Total: 0, Used: 0}, &disk.UsageStat{Total: 1, Used: 1}))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestInitializeDiskIoStatsResetsTrackedDevices(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
fsStats: map[string]*system.FsStats{
|
||||||
|
"sda": {},
|
||||||
|
"sdb": {},
|
||||||
|
},
|
||||||
|
fsNames: []string{"stale", "sda"},
|
||||||
|
}
|
||||||
|
|
||||||
|
agent.initializeDiskIoStats(map[string]disk.IOCountersStat{
|
||||||
|
"sda": {Name: "sda", ReadBytes: 10, WriteBytes: 20},
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 30, WriteBytes: 40},
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.ElementsMatch(t, []string{"sda", "sdb"}, agent.fsNames)
|
||||||
|
assert.Len(t, agent.fsNames, 2)
|
||||||
|
assert.Equal(t, uint64(10), agent.fsStats["sda"].TotalRead)
|
||||||
|
assert.Equal(t, uint64(20), agent.fsStats["sda"].TotalWrite)
|
||||||
|
assert.False(t, agent.fsStats["sda"].Time.IsZero())
|
||||||
|
assert.False(t, agent.fsStats["sdb"].Time.IsZero())
|
||||||
|
|
||||||
|
agent.initializeDiskIoStats(map[string]disk.IOCountersStat{
|
||||||
|
"sdb": {Name: "sdb", ReadBytes: 50, WriteBytes: 60},
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Equal(t, []string{"sdb"}, agent.fsNames)
|
||||||
|
assert.Equal(t, uint64(50), agent.fsStats["sdb"].TotalRead)
|
||||||
|
assert.Equal(t, uint64(60), agent.fsStats["sdb"].TotalWrite)
|
||||||
|
}
|
||||||
|
|||||||
398
agent/docker.go
398
agent/docker.go
@@ -1,6 +1,7 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bufio"
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
@@ -14,16 +15,26 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
"regexp"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/agent/deltatracker"
|
"github.com/henrygd/beszel/agent/deltatracker"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/container"
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/blang/semver"
|
"github.com/blang/semver"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ansiEscapePattern matches ANSI escape sequences (colors, cursor movement, etc.)
|
||||||
|
// This includes CSI sequences like \x1b[...m and simple escapes like \x1b[K
|
||||||
|
var ansiEscapePattern = regexp.MustCompile(`\x1b\[[0-9;]*[a-zA-Z]|\x1b\][^\x07]*\x07|\x1b[@-Z\\-_]`)
|
||||||
|
var dockerContainerIDPattern = regexp.MustCompile(`^[a-fA-F0-9]{12,64}$`)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// Docker API timeout in milliseconds
|
// Docker API timeout in milliseconds
|
||||||
dockerTimeoutMs = 2100
|
dockerTimeoutMs = 2100
|
||||||
@@ -42,6 +53,7 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type dockerManager struct {
|
type dockerManager struct {
|
||||||
|
agent *Agent // Used to propagate system detail changes back to the agent
|
||||||
client *http.Client // Client to query Docker API
|
client *http.Client // Client to query Docker API
|
||||||
wg sync.WaitGroup // WaitGroup to wait for all goroutines to finish
|
wg sync.WaitGroup // WaitGroup to wait for all goroutines to finish
|
||||||
sem chan struct{} // Semaphore to limit concurrent container requests
|
sem chan struct{} // Semaphore to limit concurrent container requests
|
||||||
@@ -50,11 +62,13 @@ type dockerManager struct {
|
|||||||
containerStatsMap map[string]*container.Stats // Keeps track of container stats
|
containerStatsMap map[string]*container.Stats // Keeps track of container stats
|
||||||
validIds map[string]struct{} // Map of valid container ids, used to prune invalid containers from containerStatsMap
|
validIds map[string]struct{} // Map of valid container ids, used to prune invalid containers from containerStatsMap
|
||||||
goodDockerVersion bool // Whether docker version is at least 25.0.0 (one-shot works correctly)
|
goodDockerVersion bool // Whether docker version is at least 25.0.0 (one-shot works correctly)
|
||||||
|
dockerVersionChecked bool // Whether a version probe has completed successfully
|
||||||
isWindows bool // Whether the Docker Engine API is running on Windows
|
isWindows bool // Whether the Docker Engine API is running on Windows
|
||||||
buf *bytes.Buffer // Buffer to store and read response bodies
|
buf *bytes.Buffer // Buffer to store and read response bodies
|
||||||
decoder *json.Decoder // Reusable JSON decoder that reads from buf
|
decoder *json.Decoder // Reusable JSON decoder that reads from buf
|
||||||
apiStats *container.ApiStats // Reusable API stats object
|
apiStats *container.ApiStats // Reusable API stats object
|
||||||
excludeContainers []string // Patterns to exclude containers by name
|
excludeContainers []string // Patterns to exclude containers by name
|
||||||
|
usingPodman bool // Whether the Docker Engine API is running on Podman
|
||||||
|
|
||||||
// Cache-time-aware tracking for CPU stats (similar to cpu.go)
|
// Cache-time-aware tracking for CPU stats (similar to cpu.go)
|
||||||
// Maps cache time intervals to container-specific CPU usage tracking
|
// Maps cache time intervals to container-specific CPU usage tracking
|
||||||
@@ -66,6 +80,7 @@ type dockerManager struct {
|
|||||||
// cacheTimeMs -> DeltaTracker for network bytes sent/received
|
// cacheTimeMs -> DeltaTracker for network bytes sent/received
|
||||||
networkSentTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
networkSentTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||||
networkRecvTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
networkRecvTrackers map[uint16]*deltatracker.DeltaTracker[string, uint64]
|
||||||
|
lastNetworkReadTime map[uint16]map[string]time.Time // cacheTimeMs -> containerId -> last network read time
|
||||||
}
|
}
|
||||||
|
|
||||||
// userAgentRoundTripper is a custom http.RoundTripper that adds a User-Agent header to all requests
|
// userAgentRoundTripper is a custom http.RoundTripper that adds a User-Agent header to all requests
|
||||||
@@ -74,6 +89,14 @@ type userAgentRoundTripper struct {
|
|||||||
userAgent string
|
userAgent string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// dockerVersionResponse contains the /version fields used for engine checks.
|
||||||
|
type dockerVersionResponse struct {
|
||||||
|
Version string `json:"Version"`
|
||||||
|
Components []struct {
|
||||||
|
Name string `json:"Name"`
|
||||||
|
} `json:"Components"`
|
||||||
|
}
|
||||||
|
|
||||||
// RoundTrip implements the http.RoundTripper interface
|
// RoundTrip implements the http.RoundTripper interface
|
||||||
func (u *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
func (u *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||||
req.Header.Set("User-Agent", u.userAgent)
|
req.Header.Set("User-Agent", u.userAgent)
|
||||||
@@ -121,7 +144,14 @@ func (dm *dockerManager) getDockerStats(cacheTimeMs uint16) ([]*container.Stats,
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
dm.isWindows = strings.Contains(resp.Header.Get("Server"), "windows")
|
// Detect Podman and Windows from Server header
|
||||||
|
serverHeader := resp.Header.Get("Server")
|
||||||
|
if !dm.usingPodman && detectPodmanFromHeader(serverHeader) {
|
||||||
|
dm.setIsPodman()
|
||||||
|
}
|
||||||
|
dm.isWindows = strings.Contains(serverHeader, "windows")
|
||||||
|
|
||||||
|
dm.ensureDockerVersionChecked()
|
||||||
|
|
||||||
containersLength := len(dm.apiContainerList)
|
containersLength := len(dm.apiContainerList)
|
||||||
|
|
||||||
@@ -273,7 +303,7 @@ func (dm *dockerManager) cycleNetworkDeltasForCacheTime(cacheTimeMs uint16) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// calculateNetworkStats calculates network sent/receive deltas using DeltaTracker
|
// calculateNetworkStats calculates network sent/receive deltas using DeltaTracker
|
||||||
func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats *container.ApiStats, stats *container.Stats, initialized bool, name string, cacheTimeMs uint16) (uint64, uint64) {
|
func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats *container.ApiStats, name string, cacheTimeMs uint16) (uint64, uint64) {
|
||||||
var total_sent, total_recv uint64
|
var total_sent, total_recv uint64
|
||||||
for _, v := range apiStats.Networks {
|
for _, v := range apiStats.Networks {
|
||||||
total_sent += v.TxBytes
|
total_sent += v.TxBytes
|
||||||
@@ -292,10 +322,11 @@ func (dm *dockerManager) calculateNetworkStats(ctr *container.ApiInfo, apiStats
|
|||||||
sent_delta_raw := sentTracker.Delta(ctr.IdShort)
|
sent_delta_raw := sentTracker.Delta(ctr.IdShort)
|
||||||
recv_delta_raw := recvTracker.Delta(ctr.IdShort)
|
recv_delta_raw := recvTracker.Delta(ctr.IdShort)
|
||||||
|
|
||||||
// Calculate bytes per second independently for Tx and Rx if we have previous data
|
// Calculate bytes per second using per-cache-time read time to avoid
|
||||||
|
// interference between different cache intervals (e.g. 1000ms vs 60000ms)
|
||||||
var sent_delta, recv_delta uint64
|
var sent_delta, recv_delta uint64
|
||||||
if initialized {
|
if prevReadTime, ok := dm.lastNetworkReadTime[cacheTimeMs][ctr.IdShort]; ok {
|
||||||
millisecondsElapsed := uint64(time.Since(stats.PrevReadTime).Milliseconds())
|
millisecondsElapsed := uint64(time.Since(prevReadTime).Milliseconds())
|
||||||
if millisecondsElapsed > 0 {
|
if millisecondsElapsed > 0 {
|
||||||
if sent_delta_raw > 0 {
|
if sent_delta_raw > 0 {
|
||||||
sent_delta = sent_delta_raw * 1000 / millisecondsElapsed
|
sent_delta = sent_delta_raw * 1000 / millisecondsElapsed
|
||||||
@@ -327,13 +358,48 @@ func validateCpuPercentage(cpuPct float64, containerName string) error {
|
|||||||
|
|
||||||
// updateContainerStatsValues updates the final stats values
|
// updateContainerStatsValues updates the final stats values
|
||||||
func updateContainerStatsValues(stats *container.Stats, cpuPct float64, usedMemory uint64, sent_delta, recv_delta uint64, readTime time.Time) {
|
func updateContainerStatsValues(stats *container.Stats, cpuPct float64, usedMemory uint64, sent_delta, recv_delta uint64, readTime time.Time) {
|
||||||
stats.Cpu = twoDecimals(cpuPct)
|
stats.Cpu = utils.TwoDecimals(cpuPct)
|
||||||
stats.Mem = bytesToMegabytes(float64(usedMemory))
|
stats.Mem = utils.BytesToMegabytes(float64(usedMemory))
|
||||||
stats.NetworkSent = bytesToMegabytes(float64(sent_delta))
|
stats.Bandwidth = [2]uint64{sent_delta, recv_delta}
|
||||||
stats.NetworkRecv = bytesToMegabytes(float64(recv_delta))
|
// TODO(0.19+): stop populating NetworkSent/NetworkRecv (deprecated in 0.18.3)
|
||||||
|
stats.NetworkSent = utils.BytesToMegabytes(float64(sent_delta))
|
||||||
|
stats.NetworkRecv = utils.BytesToMegabytes(float64(recv_delta))
|
||||||
stats.PrevReadTime = readTime
|
stats.PrevReadTime = readTime
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// convertContainerPortsToString formats the ports of a container into a sorted, deduplicated string.
|
||||||
|
// ctr.Ports is nilled out after processing so the slice is not accidentally reused.
|
||||||
|
func convertContainerPortsToString(ctr *container.ApiInfo) string {
|
||||||
|
if len(ctr.Ports) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
sort.Slice(ctr.Ports, func(i, j int) bool {
|
||||||
|
return ctr.Ports[i].PublicPort < ctr.Ports[j].PublicPort
|
||||||
|
})
|
||||||
|
var builder strings.Builder
|
||||||
|
seenPorts := make(map[uint16]struct{})
|
||||||
|
for _, p := range ctr.Ports {
|
||||||
|
_, ok := seenPorts[p.PublicPort]
|
||||||
|
if p.PublicPort == 0 || ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seenPorts[p.PublicPort] = struct{}{}
|
||||||
|
if builder.Len() > 0 {
|
||||||
|
builder.WriteString(", ")
|
||||||
|
}
|
||||||
|
switch p.IP {
|
||||||
|
case "0.0.0.0", "::":
|
||||||
|
default:
|
||||||
|
builder.WriteString(p.IP)
|
||||||
|
builder.WriteByte(':')
|
||||||
|
}
|
||||||
|
builder.WriteString(strconv.Itoa(int(p.PublicPort)))
|
||||||
|
}
|
||||||
|
// clear ports slice so it doesn't get reused and blend into next response
|
||||||
|
ctr.Ports = nil
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
func parseDockerStatus(status string) (string, container.DockerHealth) {
|
func parseDockerStatus(status string) (string, container.DockerHealth) {
|
||||||
trimmed := strings.TrimSpace(status)
|
trimmed := strings.TrimSpace(status)
|
||||||
if trimmed == "" {
|
if trimmed == "" {
|
||||||
@@ -353,22 +419,60 @@ func parseDockerStatus(status string) (string, container.DockerHealth) {
|
|||||||
statusText = trimmed
|
statusText = trimmed
|
||||||
}
|
}
|
||||||
|
|
||||||
healthText := strings.ToLower(strings.TrimSpace(strings.TrimSuffix(trimmed[openIdx+1:], ")")))
|
healthText := strings.TrimSpace(strings.TrimSuffix(trimmed[openIdx+1:], ")"))
|
||||||
// Some Docker statuses include a "health:" prefix inside the parentheses.
|
// Some Docker statuses include a "health:" prefix inside the parentheses.
|
||||||
// Strip it so it maps correctly to the known health states.
|
// Strip it so it maps correctly to the known health states.
|
||||||
if colonIdx := strings.IndexRune(healthText, ':'); colonIdx != -1 {
|
if colonIdx := strings.IndexRune(healthText, ':'); colonIdx != -1 {
|
||||||
prefix := strings.TrimSpace(healthText[:colonIdx])
|
prefix := strings.ToLower(strings.TrimSpace(healthText[:colonIdx]))
|
||||||
if prefix == "health" || prefix == "health status" {
|
if prefix == "health" || prefix == "health status" {
|
||||||
healthText = strings.TrimSpace(healthText[colonIdx+1:])
|
healthText = strings.TrimSpace(healthText[colonIdx+1:])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if health, ok := container.DockerHealthStrings[healthText]; ok {
|
if health, ok := parseDockerHealthStatus(healthText); ok {
|
||||||
return statusText, health
|
return statusText, health
|
||||||
}
|
}
|
||||||
|
|
||||||
return trimmed, container.DockerHealthNone
|
return trimmed, container.DockerHealthNone
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// parseDockerHealthStatus maps Docker health status strings to container.DockerHealth values
|
||||||
|
func parseDockerHealthStatus(status string) (container.DockerHealth, bool) {
|
||||||
|
health, ok := container.DockerHealthStrings[strings.ToLower(strings.TrimSpace(status))]
|
||||||
|
return health, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// getPodmanContainerHealth fetches container health status from the container inspect endpoint.
|
||||||
|
// Used for Podman which doesn't provide health status in the /containers/json endpoint as of March 2026.
|
||||||
|
// https://github.com/containers/podman/issues/27786
|
||||||
|
func (dm *dockerManager) getPodmanContainerHealth(containerID string) (container.DockerHealth, error) {
|
||||||
|
resp, err := dm.client.Get(fmt.Sprintf("http://localhost/containers/%s/json", url.PathEscape(containerID)))
|
||||||
|
if err != nil {
|
||||||
|
return container.DockerHealthNone, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
return container.DockerHealthNone, fmt.Errorf("container inspect request failed: %s", resp.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var inspectInfo struct {
|
||||||
|
State struct {
|
||||||
|
Health struct {
|
||||||
|
Status string
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&inspectInfo); err != nil {
|
||||||
|
return container.DockerHealthNone, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if health, ok := parseDockerHealthStatus(inspectInfo.State.Health.Status); ok {
|
||||||
|
return health, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return container.DockerHealthNone, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Updates stats for individual container with cache-time-aware delta tracking
|
// Updates stats for individual container with cache-time-aware delta tracking
|
||||||
func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeMs uint16) error {
|
func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeMs uint16) error {
|
||||||
name := ctr.Names[0][1:]
|
name := ctr.Names[0][1:]
|
||||||
@@ -378,6 +482,21 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeM
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
statusText, health := parseDockerStatus(ctr.Status)
|
||||||
|
|
||||||
|
// Docker exposes Health.Status on /containers/json in API 1.52+.
|
||||||
|
// Podman currently requires falling back to the inspect endpoint as of March 2026.
|
||||||
|
// https://github.com/containers/podman/issues/27786
|
||||||
|
if ctr.Health.Status != "" {
|
||||||
|
if h, ok := parseDockerHealthStatus(ctr.Health.Status); ok {
|
||||||
|
health = h
|
||||||
|
}
|
||||||
|
} else if dm.usingPodman {
|
||||||
|
if podmanHealth, err := dm.getPodmanContainerHealth(ctr.IdShort); err == nil {
|
||||||
|
health = podmanHealth
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
dm.containerStatsMutex.Lock()
|
dm.containerStatsMutex.Lock()
|
||||||
defer dm.containerStatsMutex.Unlock()
|
defer dm.containerStatsMutex.Unlock()
|
||||||
|
|
||||||
@@ -389,14 +508,18 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeM
|
|||||||
}
|
}
|
||||||
|
|
||||||
stats.Id = ctr.IdShort
|
stats.Id = ctr.IdShort
|
||||||
|
|
||||||
statusText, health := parseDockerStatus(ctr.Status)
|
|
||||||
stats.Status = statusText
|
stats.Status = statusText
|
||||||
stats.Health = health
|
stats.Health = health
|
||||||
|
|
||||||
|
if len(ctr.Ports) > 0 {
|
||||||
|
stats.Ports = convertContainerPortsToString(ctr)
|
||||||
|
}
|
||||||
|
|
||||||
// reset current stats
|
// reset current stats
|
||||||
stats.Cpu = 0
|
stats.Cpu = 0
|
||||||
stats.Mem = 0
|
stats.Mem = 0
|
||||||
|
stats.Bandwidth = [2]uint64{0, 0}
|
||||||
|
// TODO(0.19+): stop populating NetworkSent/NetworkRecv (deprecated in 0.18.3)
|
||||||
stats.NetworkSent = 0
|
stats.NetworkSent = 0
|
||||||
stats.NetworkRecv = 0
|
stats.NetworkRecv = 0
|
||||||
|
|
||||||
@@ -438,7 +561,13 @@ func (dm *dockerManager) updateContainerStats(ctr *container.ApiInfo, cacheTimeM
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Calculate network stats using DeltaTracker
|
// Calculate network stats using DeltaTracker
|
||||||
sent_delta, recv_delta := dm.calculateNetworkStats(ctr, res, stats, initialized, name, cacheTimeMs)
|
sent_delta, recv_delta := dm.calculateNetworkStats(ctr, res, name, cacheTimeMs)
|
||||||
|
|
||||||
|
// Store per-cache-time network read time for next rate calculation
|
||||||
|
if dm.lastNetworkReadTime[cacheTimeMs] == nil {
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs] = make(map[string]time.Time)
|
||||||
|
}
|
||||||
|
dm.lastNetworkReadTime[cacheTimeMs][ctr.IdShort] = time.Now()
|
||||||
|
|
||||||
// Store current network values for legacy compatibility
|
// Store current network values for legacy compatibility
|
||||||
var total_sent, total_recv uint64
|
var total_sent, total_recv uint64
|
||||||
@@ -470,11 +599,14 @@ func (dm *dockerManager) deleteContainerStatsSync(id string) {
|
|||||||
for ct := range dm.lastCpuReadTime {
|
for ct := range dm.lastCpuReadTime {
|
||||||
delete(dm.lastCpuReadTime[ct], id)
|
delete(dm.lastCpuReadTime[ct], id)
|
||||||
}
|
}
|
||||||
|
for ct := range dm.lastNetworkReadTime {
|
||||||
|
delete(dm.lastNetworkReadTime[ct], id)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Creates a new http client for Docker or Podman API
|
// Creates a new http client for Docker or Podman API
|
||||||
func newDockerManager(a *Agent) *dockerManager {
|
func newDockerManager(agent *Agent) *dockerManager {
|
||||||
dockerHost, exists := GetEnv("DOCKER_HOST")
|
dockerHost, exists := utils.GetEnv("DOCKER_HOST")
|
||||||
if exists {
|
if exists {
|
||||||
// return nil if set to empty string
|
// return nil if set to empty string
|
||||||
if dockerHost == "" {
|
if dockerHost == "" {
|
||||||
@@ -510,7 +642,7 @@ func newDockerManager(a *Agent) *dockerManager {
|
|||||||
|
|
||||||
// configurable timeout
|
// configurable timeout
|
||||||
timeout := time.Millisecond * time.Duration(dockerTimeoutMs)
|
timeout := time.Millisecond * time.Duration(dockerTimeoutMs)
|
||||||
if t, set := GetEnv("DOCKER_TIMEOUT"); set {
|
if t, set := utils.GetEnv("DOCKER_TIMEOUT"); set {
|
||||||
timeout, err = time.ParseDuration(t)
|
timeout, err = time.ParseDuration(t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Error(err.Error())
|
slog.Error(err.Error())
|
||||||
@@ -527,7 +659,7 @@ func newDockerManager(a *Agent) *dockerManager {
|
|||||||
|
|
||||||
// Read container exclusion patterns from environment variable
|
// Read container exclusion patterns from environment variable
|
||||||
var excludeContainers []string
|
var excludeContainers []string
|
||||||
if excludeStr, set := GetEnv("EXCLUDE_CONTAINERS"); set && excludeStr != "" {
|
if excludeStr, set := utils.GetEnv("EXCLUDE_CONTAINERS"); set && excludeStr != "" {
|
||||||
parts := strings.SplitSeq(excludeStr, ",")
|
parts := strings.SplitSeq(excludeStr, ",")
|
||||||
for part := range parts {
|
for part := range parts {
|
||||||
trimmed := strings.TrimSpace(part)
|
trimmed := strings.TrimSpace(part)
|
||||||
@@ -539,6 +671,7 @@ func newDockerManager(a *Agent) *dockerManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
manager := &dockerManager{
|
manager := &dockerManager{
|
||||||
|
agent: agent,
|
||||||
client: &http.Client{
|
client: &http.Client{
|
||||||
Timeout: timeout,
|
Timeout: timeout,
|
||||||
Transport: userAgentTransport,
|
Transport: userAgentTransport,
|
||||||
@@ -555,50 +688,55 @@ func newDockerManager(a *Agent) *dockerManager {
|
|||||||
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
lastCpuReadTime: make(map[uint16]map[string]time.Time),
|
||||||
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkSentTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
networkRecvTrackers: make(map[uint16]*deltatracker.DeltaTracker[string, uint64]),
|
||||||
|
lastNetworkReadTime: make(map[uint16]map[string]time.Time),
|
||||||
}
|
}
|
||||||
|
|
||||||
// If using podman, return client
|
// Best-effort startup probe. If the engine is not ready yet, getDockerStats will
|
||||||
if strings.Contains(dockerHost, "podman") {
|
// retry after the first successful /containers/json request.
|
||||||
a.systemInfo.Podman = true
|
_, _ = manager.checkDockerVersion()
|
||||||
manager.goodDockerVersion = true
|
|
||||||
return manager
|
|
||||||
}
|
|
||||||
|
|
||||||
// this can take up to 5 seconds with retry, so run in goroutine
|
|
||||||
go manager.checkDockerVersion()
|
|
||||||
|
|
||||||
// give version check a chance to complete before returning
|
|
||||||
time.Sleep(50 * time.Millisecond)
|
|
||||||
|
|
||||||
return manager
|
return manager
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkDockerVersion checks Docker version and sets goodDockerVersion if at least 25.0.0.
|
// checkDockerVersion checks Docker version and sets goodDockerVersion if at least 25.0.0.
|
||||||
// Versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch.
|
// Versions before 25.0.0 have a bug with one-shot which requires all requests to be made in one batch.
|
||||||
func (dm *dockerManager) checkDockerVersion() {
|
func (dm *dockerManager) checkDockerVersion() (bool, error) {
|
||||||
var err error
|
resp, err := dm.client.Get("http://localhost/version")
|
||||||
var resp *http.Response
|
|
||||||
var versionInfo struct {
|
|
||||||
Version string `json:"Version"`
|
|
||||||
}
|
|
||||||
const versionMaxTries = 2
|
|
||||||
for i := 1; i <= versionMaxTries; i++ {
|
|
||||||
resp, err = dm.client.Get("http://localhost/version")
|
|
||||||
if err == nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if resp != nil {
|
|
||||||
resp.Body.Close()
|
|
||||||
}
|
|
||||||
if i < versionMaxTries {
|
|
||||||
slog.Debug("Failed to get Docker version; retrying", "attempt", i, "error", err)
|
|
||||||
time.Sleep(5 * time.Second)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
status := resp.Status
|
||||||
|
resp.Body.Close()
|
||||||
|
return false, fmt.Errorf("docker version request failed: %s", status)
|
||||||
|
}
|
||||||
|
|
||||||
|
var versionInfo dockerVersionResponse
|
||||||
|
serverHeader := resp.Header.Get("Server")
|
||||||
|
if err := dm.decode(resp, &versionInfo); err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
dm.applyDockerVersionInfo(serverHeader, &versionInfo)
|
||||||
|
dm.dockerVersionChecked = true
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensureDockerVersionChecked retries the version probe after a successful
|
||||||
|
// container list request.
|
||||||
|
func (dm *dockerManager) ensureDockerVersionChecked() {
|
||||||
|
if dm.dockerVersionChecked {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := dm.decode(resp, &versionInfo); err != nil {
|
if _, err := dm.checkDockerVersion(); err != nil {
|
||||||
|
slog.Debug("Failed to get Docker version", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// applyDockerVersionInfo updates version-dependent behavior from engine metadata.
|
||||||
|
func (dm *dockerManager) applyDockerVersionInfo(serverHeader string, versionInfo *dockerVersionResponse) {
|
||||||
|
if detectPodmanEngine(serverHeader, versionInfo) {
|
||||||
|
dm.setIsPodman()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// if version > 24, one-shot works correctly and we can limit concurrent operations
|
// if version > 24, one-shot works correctly and we can limit concurrent operations
|
||||||
@@ -637,9 +775,34 @@ func getDockerHost() string {
|
|||||||
return scheme + socks[0]
|
return scheme + socks[0]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func validateContainerID(containerID string) error {
|
||||||
|
if !dockerContainerIDPattern.MatchString(containerID) {
|
||||||
|
return fmt.Errorf("invalid container id")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildDockerContainerEndpoint(containerID, action string, query url.Values) (string, error) {
|
||||||
|
if err := validateContainerID(containerID); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
u := &url.URL{
|
||||||
|
Scheme: "http",
|
||||||
|
Host: "localhost",
|
||||||
|
Path: fmt.Sprintf("/containers/%s/%s", url.PathEscape(containerID), action),
|
||||||
|
}
|
||||||
|
if len(query) > 0 {
|
||||||
|
u.RawQuery = query.Encode()
|
||||||
|
}
|
||||||
|
return u.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
// getContainerInfo fetches the inspection data for a container
|
// getContainerInfo fetches the inspection data for a container
|
||||||
func (dm *dockerManager) getContainerInfo(ctx context.Context, containerID string) ([]byte, error) {
|
func (dm *dockerManager) getContainerInfo(ctx context.Context, containerID string) ([]byte, error) {
|
||||||
endpoint := fmt.Sprintf("http://localhost/containers/%s/json", containerID)
|
endpoint, err := buildDockerContainerEndpoint(containerID, "json", nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -670,7 +833,15 @@ func (dm *dockerManager) getContainerInfo(ctx context.Context, containerID strin
|
|||||||
|
|
||||||
// getLogs fetches the logs for a container
|
// getLogs fetches the logs for a container
|
||||||
func (dm *dockerManager) getLogs(ctx context.Context, containerID string) (string, error) {
|
func (dm *dockerManager) getLogs(ctx context.Context, containerID string) (string, error) {
|
||||||
endpoint := fmt.Sprintf("http://localhost/containers/%s/logs?stdout=1&stderr=1&tail=%d", containerID, dockerLogsTail)
|
query := url.Values{
|
||||||
|
"stdout": []string{"1"},
|
||||||
|
"stderr": []string{"1"},
|
||||||
|
"tail": []string{fmt.Sprintf("%d", dockerLogsTail)},
|
||||||
|
}
|
||||||
|
endpoint, err := buildDockerContainerEndpoint(containerID, "logs", query)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
@@ -688,17 +859,52 @@ func (dm *dockerManager) getLogs(ctx context.Context, containerID string) (strin
|
|||||||
}
|
}
|
||||||
|
|
||||||
var builder strings.Builder
|
var builder strings.Builder
|
||||||
if err := decodeDockerLogStream(resp.Body, &builder); err != nil {
|
contentType := resp.Header.Get("Content-Type")
|
||||||
|
multiplexed := strings.HasSuffix(contentType, "multiplexed-stream")
|
||||||
|
logReader := io.Reader(resp.Body)
|
||||||
|
if !multiplexed {
|
||||||
|
// Podman may return multiplexed logs without Content-Type. Sniff the first frame header
|
||||||
|
// with a small buffered reader only when the header check fails.
|
||||||
|
bufferedReader := bufio.NewReaderSize(resp.Body, 8)
|
||||||
|
multiplexed = detectDockerMultiplexedStream(bufferedReader)
|
||||||
|
logReader = bufferedReader
|
||||||
|
}
|
||||||
|
if err := decodeDockerLogStream(logReader, &builder, multiplexed); err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
return builder.String(), nil
|
// Strip ANSI escape sequences from logs for clean display in web UI
|
||||||
|
logs := builder.String()
|
||||||
|
if strings.Contains(logs, "\x1b") {
|
||||||
|
logs = ansiEscapePattern.ReplaceAllString(logs, "")
|
||||||
|
}
|
||||||
|
return logs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func decodeDockerLogStream(reader io.Reader, builder *strings.Builder) error {
|
func detectDockerMultiplexedStream(reader *bufio.Reader) bool {
|
||||||
|
const headerSize = 8
|
||||||
|
header, err := reader.Peek(headerSize)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if header[0] != 0x01 && header[0] != 0x02 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Docker's stream framing header reserves bytes 1-3 as zero.
|
||||||
|
if header[1] != 0 || header[2] != 0 || header[3] != 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
frameLen := binary.BigEndian.Uint32(header[4:])
|
||||||
|
return frameLen <= maxLogFrameSize
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeDockerLogStream(reader io.Reader, builder *strings.Builder, multiplexed bool) error {
|
||||||
|
if !multiplexed {
|
||||||
|
_, err := io.Copy(builder, io.LimitReader(reader, maxTotalLogSize))
|
||||||
|
return err
|
||||||
|
}
|
||||||
const headerSize = 8
|
const headerSize = 8
|
||||||
var header [headerSize]byte
|
var header [headerSize]byte
|
||||||
buf := make([]byte, 0, dockerLogsTail*200)
|
|
||||||
totalBytesRead := 0
|
totalBytesRead := 0
|
||||||
|
|
||||||
for {
|
for {
|
||||||
@@ -722,36 +928,80 @@ func decodeDockerLogStream(reader io.Reader, builder *strings.Builder) error {
|
|||||||
// Check if reading this frame would exceed total log size limit
|
// Check if reading this frame would exceed total log size limit
|
||||||
if totalBytesRead+int(frameLen) > maxTotalLogSize {
|
if totalBytesRead+int(frameLen) > maxTotalLogSize {
|
||||||
// Read and discard remaining data to avoid blocking
|
// Read and discard remaining data to avoid blocking
|
||||||
_, _ = io.Copy(io.Discard, io.LimitReader(reader, int64(frameLen)))
|
_, _ = io.CopyN(io.Discard, reader, int64(frameLen))
|
||||||
slog.Debug("Truncating logs: limit reached", "read", totalBytesRead, "limit", maxTotalLogSize)
|
slog.Debug("Truncating logs: limit reached", "read", totalBytesRead, "limit", maxTotalLogSize)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
buf = allocateBuffer(buf, int(frameLen))
|
n, err := io.CopyN(builder, reader, int64(frameLen))
|
||||||
if _, err := io.ReadFull(reader, buf[:frameLen]); err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
|
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
|
||||||
if len(buf) > 0 {
|
|
||||||
builder.Write(buf[:min(int(frameLen), len(buf))])
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
builder.Write(buf[:frameLen])
|
totalBytesRead += int(n)
|
||||||
totalBytesRead += int(frameLen)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func allocateBuffer(current []byte, needed int) []byte {
|
// GetHostInfo fetches the system info from Docker
|
||||||
if cap(current) >= needed {
|
func (dm *dockerManager) GetHostInfo() (info container.HostInfo, err error) {
|
||||||
return current[:needed]
|
resp, err := dm.client.Get("http://localhost/info")
|
||||||
|
if err != nil {
|
||||||
|
return info, err
|
||||||
}
|
}
|
||||||
return make([]byte, needed)
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
|
||||||
|
return info, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func min(a, b int) int {
|
return info, nil
|
||||||
if a < b {
|
|
||||||
return a
|
|
||||||
}
|
}
|
||||||
return b
|
|
||||||
|
func (dm *dockerManager) IsPodman() bool {
|
||||||
|
return dm.usingPodman
|
||||||
|
}
|
||||||
|
|
||||||
|
// setIsPodman sets the manager to Podman mode and updates system details accordingly.
|
||||||
|
func (dm *dockerManager) setIsPodman() {
|
||||||
|
if dm.usingPodman {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
dm.usingPodman = true
|
||||||
|
dm.goodDockerVersion = true
|
||||||
|
dm.dockerVersionChecked = true
|
||||||
|
// keep system details updated - this may be detected late if server isn't ready when
|
||||||
|
// agent starts, so make sure we notify the hub if this happens later.
|
||||||
|
if dm.agent != nil {
|
||||||
|
dm.agent.updateSystemDetails(func(details *system.Details) {
|
||||||
|
details.Podman = true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectPodmanFromHeader identifies Podman from the Docker API server header.
|
||||||
|
func detectPodmanFromHeader(server string) bool {
|
||||||
|
return strings.HasPrefix(server, "Libpod")
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectPodmanFromVersion identifies Podman from the version payload.
|
||||||
|
func detectPodmanFromVersion(versionInfo *dockerVersionResponse) bool {
|
||||||
|
if versionInfo == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, component := range versionInfo.Components {
|
||||||
|
if strings.HasPrefix(component.Name, "Podman") {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// detectPodmanEngine checks both header and version metadata for Podman.
|
||||||
|
func detectPodmanEngine(serverHeader string, versionInfo *dockerVersionResponse) bool {
|
||||||
|
if detectPodmanFromHeader(serverHeader) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return detectPodmanFromVersion(versionInfo)
|
||||||
}
|
}
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
95
agent/emmc_common.go
Normal file
95
agent/emmc_common.go
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func isEmmcBlockName(name string) bool {
|
||||||
|
if !strings.HasPrefix(name, "mmcblk") {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
suffix := strings.TrimPrefix(name, "mmcblk")
|
||||||
|
if suffix == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, c := range suffix {
|
||||||
|
if c < '0' || c > '9' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseHexOrDecByte(s string) (uint8, bool) {
|
||||||
|
s = strings.TrimSpace(s)
|
||||||
|
if s == "" {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
base := 10
|
||||||
|
if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") {
|
||||||
|
base = 16
|
||||||
|
s = s[2:]
|
||||||
|
}
|
||||||
|
parsed, err := strconv.ParseUint(s, base, 8)
|
||||||
|
if err != nil {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
return uint8(parsed), true
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseHexBytePair(s string) (uint8, uint8, bool) {
|
||||||
|
fields := strings.Fields(s)
|
||||||
|
if len(fields) < 2 {
|
||||||
|
return 0, 0, false
|
||||||
|
}
|
||||||
|
a, okA := parseHexOrDecByte(fields[0])
|
||||||
|
b, okB := parseHexOrDecByte(fields[1])
|
||||||
|
if !okA && !okB {
|
||||||
|
return 0, 0, false
|
||||||
|
}
|
||||||
|
return a, b, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func emmcSmartStatus(preEOL uint8) string {
|
||||||
|
switch preEOL {
|
||||||
|
case 0x01:
|
||||||
|
return "PASSED"
|
||||||
|
case 0x02:
|
||||||
|
return "WARNING"
|
||||||
|
case 0x03:
|
||||||
|
return "FAILED"
|
||||||
|
default:
|
||||||
|
return "UNKNOWN"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func emmcPreEOLString(preEOL uint8) string {
|
||||||
|
switch preEOL {
|
||||||
|
case 0x01:
|
||||||
|
return "0x01 (normal)"
|
||||||
|
case 0x02:
|
||||||
|
return "0x02 (warning)"
|
||||||
|
case 0x03:
|
||||||
|
return "0x03 (urgent)"
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("0x%02x", preEOL)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func emmcLifeTimeString(v uint8) string {
|
||||||
|
// JEDEC eMMC: 0x01..0x0A => 0-100% used in 10% steps, 0x0B => exceeded.
|
||||||
|
switch {
|
||||||
|
case v == 0:
|
||||||
|
return "0x00 (not reported)"
|
||||||
|
case v >= 0x01 && v <= 0x0A:
|
||||||
|
low := int(v-1) * 10
|
||||||
|
high := int(v) * 10
|
||||||
|
return fmt.Sprintf("0x%02x (%d-%d%% used)", v, low, high)
|
||||||
|
case v == 0x0B:
|
||||||
|
return "0x0b (>100% used)"
|
||||||
|
default:
|
||||||
|
return fmt.Sprintf("0x%02x", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
78
agent/emmc_common_test.go
Normal file
78
agent/emmc_common_test.go
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
package agent
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestParseHexOrDecByte(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
in string
|
||||||
|
want uint8
|
||||||
|
ok bool
|
||||||
|
}{
|
||||||
|
{"0x01", 1, true},
|
||||||
|
{"0X0b", 11, true},
|
||||||
|
{"01", 1, true},
|
||||||
|
{" 3 ", 3, true},
|
||||||
|
{"", 0, false},
|
||||||
|
{"0x", 0, false},
|
||||||
|
{"nope", 0, false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
got, ok := parseHexOrDecByte(tt.in)
|
||||||
|
if ok != tt.ok || got != tt.want {
|
||||||
|
t.Fatalf("parseHexOrDecByte(%q) = (%d,%v), want (%d,%v)", tt.in, got, ok, tt.want, tt.ok)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseHexBytePair(t *testing.T) {
|
||||||
|
a, b, ok := parseHexBytePair("0x01 0x02\n")
|
||||||
|
if !ok || a != 1 || b != 2 {
|
||||||
|
t.Fatalf("parseHexBytePair hex = (%d,%d,%v), want (1,2,true)", a, b, ok)
|
||||||
|
}
|
||||||
|
|
||||||
|
a, b, ok = parseHexBytePair("01 02")
|
||||||
|
if !ok || a != 1 || b != 2 {
|
||||||
|
t.Fatalf("parseHexBytePair dec = (%d,%d,%v), want (1,2,true)", a, b, ok)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, ok = parseHexBytePair("0x01")
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("parseHexBytePair short input ok=true, want false")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestEmmcSmartStatus(t *testing.T) {
|
||||||
|
if got := emmcSmartStatus(0x01); got != "PASSED" {
|
||||||
|
t.Fatalf("emmcSmartStatus(0x01) = %q, want PASSED", got)
|
||||||
|
}
|
||||||
|
if got := emmcSmartStatus(0x02); got != "WARNING" {
|
||||||
|
t.Fatalf("emmcSmartStatus(0x02) = %q, want WARNING", got)
|
||||||
|
}
|
||||||
|
if got := emmcSmartStatus(0x03); got != "FAILED" {
|
||||||
|
t.Fatalf("emmcSmartStatus(0x03) = %q, want FAILED", got)
|
||||||
|
}
|
||||||
|
if got := emmcSmartStatus(0x00); got != "UNKNOWN" {
|
||||||
|
t.Fatalf("emmcSmartStatus(0x00) = %q, want UNKNOWN", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsEmmcBlockName(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
ok bool
|
||||||
|
}{
|
||||||
|
{"mmcblk0", true},
|
||||||
|
{"mmcblk1", true},
|
||||||
|
{"mmcblk10", true},
|
||||||
|
{"mmcblk0p1", false},
|
||||||
|
{"sda", false},
|
||||||
|
{"mmcblk", false},
|
||||||
|
{"mmcblkA", false},
|
||||||
|
}
|
||||||
|
for _, c := range cases {
|
||||||
|
if got := isEmmcBlockName(c.name); got != c.ok {
|
||||||
|
t.Fatalf("isEmmcBlockName(%q) = %v, want %v", c.name, got, c.ok)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
215
agent/emmc_linux.go
Normal file
215
agent/emmc_linux.go
Normal file
@@ -0,0 +1,215 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
)
|
||||||
|
|
||||||
|
// emmcSysfsRoot is a test hook; production value is "/sys".
|
||||||
|
var emmcSysfsRoot = "/sys"
|
||||||
|
|
||||||
|
type emmcHealth struct {
|
||||||
|
model string
|
||||||
|
serial string
|
||||||
|
revision string
|
||||||
|
capacity uint64
|
||||||
|
preEOL uint8
|
||||||
|
lifeA uint8
|
||||||
|
lifeB uint8
|
||||||
|
}
|
||||||
|
|
||||||
|
func scanEmmcDevices() []*DeviceInfo {
|
||||||
|
blockDir := filepath.Join(emmcSysfsRoot, "class", "block")
|
||||||
|
entries, err := os.ReadDir(blockDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
devices := make([]*DeviceInfo, 0, 2)
|
||||||
|
for _, ent := range entries {
|
||||||
|
name := ent.Name()
|
||||||
|
if !isEmmcBlockName(name) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
deviceDir := filepath.Join(blockDir, name, "device")
|
||||||
|
if !hasEmmcHealthFiles(deviceDir) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
devPath := filepath.Join("/dev", name)
|
||||||
|
devices = append(devices, &DeviceInfo{
|
||||||
|
Name: devPath,
|
||||||
|
Type: "emmc",
|
||||||
|
InfoName: devPath + " [eMMC]",
|
||||||
|
Protocol: "MMC",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return devices
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *SmartManager) collectEmmcHealth(deviceInfo *DeviceInfo) (bool, error) {
|
||||||
|
if deviceInfo == nil || deviceInfo.Name == "" {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
base := filepath.Base(deviceInfo.Name)
|
||||||
|
if !isEmmcBlockName(base) && !strings.EqualFold(deviceInfo.Type, "emmc") && !strings.EqualFold(deviceInfo.Type, "mmc") {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
health, ok := readEmmcHealth(base)
|
||||||
|
if !ok {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normalize the device type to keep pruning logic stable across refreshes.
|
||||||
|
deviceInfo.Type = "emmc"
|
||||||
|
|
||||||
|
key := health.serial
|
||||||
|
if key == "" {
|
||||||
|
key = filepath.Join("/dev", base)
|
||||||
|
}
|
||||||
|
|
||||||
|
status := emmcSmartStatus(health.preEOL)
|
||||||
|
|
||||||
|
attrs := []*smart.SmartAttribute{
|
||||||
|
{
|
||||||
|
Name: "PreEOLInfo",
|
||||||
|
RawValue: uint64(health.preEOL),
|
||||||
|
RawString: emmcPreEOLString(health.preEOL),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DeviceLifeTimeEstA",
|
||||||
|
RawValue: uint64(health.lifeA),
|
||||||
|
RawString: emmcLifeTimeString(health.lifeA),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "DeviceLifeTimeEstB",
|
||||||
|
RawValue: uint64(health.lifeB),
|
||||||
|
RawString: emmcLifeTimeString(health.lifeB),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
sm.Lock()
|
||||||
|
defer sm.Unlock()
|
||||||
|
|
||||||
|
if _, exists := sm.SmartDataMap[key]; !exists {
|
||||||
|
sm.SmartDataMap[key] = &smart.SmartData{}
|
||||||
|
}
|
||||||
|
|
||||||
|
data := sm.SmartDataMap[key]
|
||||||
|
data.ModelName = health.model
|
||||||
|
data.SerialNumber = health.serial
|
||||||
|
data.FirmwareVersion = health.revision
|
||||||
|
data.Capacity = health.capacity
|
||||||
|
data.Temperature = 0
|
||||||
|
data.SmartStatus = status
|
||||||
|
data.DiskName = filepath.Join("/dev", base)
|
||||||
|
data.DiskType = "emmc"
|
||||||
|
data.Attributes = attrs
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func readEmmcHealth(blockName string) (emmcHealth, bool) {
|
||||||
|
var out emmcHealth
|
||||||
|
|
||||||
|
if !isEmmcBlockName(blockName) {
|
||||||
|
return out, false
|
||||||
|
}
|
||||||
|
|
||||||
|
deviceDir := filepath.Join(emmcSysfsRoot, "class", "block", blockName, "device")
|
||||||
|
preEOL, okPre := readHexByteFile(filepath.Join(deviceDir, "pre_eol_info"))
|
||||||
|
|
||||||
|
// Some kernels expose EXT_CSD lifetime via "life_time" (two bytes), others as
|
||||||
|
// separate files. Support both.
|
||||||
|
lifeA, lifeB, okLife := readLifeTime(deviceDir)
|
||||||
|
|
||||||
|
if !okPre && !okLife {
|
||||||
|
return out, false
|
||||||
|
}
|
||||||
|
|
||||||
|
out.preEOL = preEOL
|
||||||
|
out.lifeA = lifeA
|
||||||
|
out.lifeB = lifeB
|
||||||
|
|
||||||
|
out.model = utils.ReadStringFile(filepath.Join(deviceDir, "name"))
|
||||||
|
out.serial = utils.ReadStringFile(filepath.Join(deviceDir, "serial"))
|
||||||
|
out.revision = utils.ReadStringFile(filepath.Join(deviceDir, "prv"))
|
||||||
|
|
||||||
|
if capBytes, ok := readBlockCapacityBytes(blockName); ok {
|
||||||
|
out.capacity = capBytes
|
||||||
|
}
|
||||||
|
|
||||||
|
return out, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func readLifeTime(deviceDir string) (uint8, uint8, bool) {
|
||||||
|
if content, ok := utils.ReadStringFileOK(filepath.Join(deviceDir, "life_time")); ok {
|
||||||
|
a, b, ok := parseHexBytePair(content)
|
||||||
|
return a, b, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
a, okA := readHexByteFile(filepath.Join(deviceDir, "device_life_time_est_typ_a"))
|
||||||
|
b, okB := readHexByteFile(filepath.Join(deviceDir, "device_life_time_est_typ_b"))
|
||||||
|
if okA || okB {
|
||||||
|
return a, b, true
|
||||||
|
}
|
||||||
|
return 0, 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func readBlockCapacityBytes(blockName string) (uint64, bool) {
|
||||||
|
sizePath := filepath.Join(emmcSysfsRoot, "class", "block", blockName, "size")
|
||||||
|
lbsPath := filepath.Join(emmcSysfsRoot, "class", "block", blockName, "queue", "logical_block_size")
|
||||||
|
|
||||||
|
sizeStr, ok := utils.ReadStringFileOK(sizePath)
|
||||||
|
if !ok {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
sectors, err := strconv.ParseUint(sizeStr, 10, 64)
|
||||||
|
if err != nil || sectors == 0 {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
lbsStr, ok := utils.ReadStringFileOK(lbsPath)
|
||||||
|
logicalBlockSize := uint64(512)
|
||||||
|
if ok {
|
||||||
|
if parsed, err := strconv.ParseUint(lbsStr, 10, 64); err == nil && parsed > 0 {
|
||||||
|
logicalBlockSize = parsed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return sectors * logicalBlockSize, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func readHexByteFile(path string) (uint8, bool) {
|
||||||
|
content, ok := utils.ReadStringFileOK(path)
|
||||||
|
if !ok {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
b, ok := parseHexOrDecByte(content)
|
||||||
|
return b, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasEmmcHealthFiles(deviceDir string) bool {
|
||||||
|
entries, err := os.ReadDir(deviceDir)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, ent := range entries {
|
||||||
|
switch ent.Name() {
|
||||||
|
case "pre_eol_info", "life_time", "device_life_time_est_typ_a", "device_life_time_est_typ_b":
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
80
agent/emmc_linux_test.go
Normal file
80
agent/emmc_linux_test.go
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestEmmcMockSysfsScanAndCollect(t *testing.T) {
|
||||||
|
tmp := t.TempDir()
|
||||||
|
prev := emmcSysfsRoot
|
||||||
|
emmcSysfsRoot = tmp
|
||||||
|
t.Cleanup(func() { emmcSysfsRoot = prev })
|
||||||
|
|
||||||
|
// Fake: /sys/class/block/mmcblk0
|
||||||
|
mmcDeviceDir := filepath.Join(tmp, "class", "block", "mmcblk0", "device")
|
||||||
|
mmcQueueDir := filepath.Join(tmp, "class", "block", "mmcblk0", "queue")
|
||||||
|
if err := os.MkdirAll(mmcDeviceDir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.MkdirAll(mmcQueueDir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
write := func(path, content string) {
|
||||||
|
t.Helper()
|
||||||
|
if err := os.WriteFile(path, []byte(content), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
write(filepath.Join(mmcDeviceDir, "pre_eol_info"), "0x02\n")
|
||||||
|
write(filepath.Join(mmcDeviceDir, "life_time"), "0x04 0x05\n")
|
||||||
|
write(filepath.Join(mmcDeviceDir, "name"), "H26M52103FMR\n")
|
||||||
|
write(filepath.Join(mmcDeviceDir, "serial"), "01234567\n")
|
||||||
|
write(filepath.Join(mmcDeviceDir, "prv"), "0x08\n")
|
||||||
|
write(filepath.Join(mmcQueueDir, "logical_block_size"), "512\n")
|
||||||
|
write(filepath.Join(tmp, "class", "block", "mmcblk0", "size"), "1024\n") // sectors
|
||||||
|
|
||||||
|
devs := scanEmmcDevices()
|
||||||
|
if len(devs) != 1 {
|
||||||
|
t.Fatalf("scanEmmcDevices() = %d devices, want 1", len(devs))
|
||||||
|
}
|
||||||
|
if devs[0].Name != "/dev/mmcblk0" || devs[0].Type != "emmc" {
|
||||||
|
t.Fatalf("scanEmmcDevices()[0] = %+v, want Name=/dev/mmcblk0 Type=emmc", devs[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{SmartDataMap: map[string]*smart.SmartData{}}
|
||||||
|
ok, err := sm.collectEmmcHealth(devs[0])
|
||||||
|
if err != nil || !ok {
|
||||||
|
t.Fatalf("collectEmmcHealth() = (ok=%v, err=%v), want (true,nil)", ok, err)
|
||||||
|
}
|
||||||
|
if len(sm.SmartDataMap) != 1 {
|
||||||
|
t.Fatalf("SmartDataMap len=%d, want 1", len(sm.SmartDataMap))
|
||||||
|
}
|
||||||
|
var got *smart.SmartData
|
||||||
|
for _, v := range sm.SmartDataMap {
|
||||||
|
got = v
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if got == nil {
|
||||||
|
t.Fatalf("SmartDataMap value nil")
|
||||||
|
}
|
||||||
|
if got.DiskType != "emmc" || got.DiskName != "/dev/mmcblk0" {
|
||||||
|
t.Fatalf("disk fields = (type=%q name=%q), want (emmc,/dev/mmcblk0)", got.DiskType, got.DiskName)
|
||||||
|
}
|
||||||
|
if got.SmartStatus != "WARNING" {
|
||||||
|
t.Fatalf("SmartStatus=%q, want WARNING", got.SmartStatus)
|
||||||
|
}
|
||||||
|
if got.SerialNumber != "01234567" || got.ModelName == "" || got.Capacity == 0 {
|
||||||
|
t.Fatalf("identity fields = (model=%q serial=%q cap=%d), want non-empty model, serial 01234567, cap>0", got.ModelName, got.SerialNumber, got.Capacity)
|
||||||
|
}
|
||||||
|
if len(got.Attributes) < 3 {
|
||||||
|
t.Fatalf("attributes len=%d, want >= 3", len(got.Attributes))
|
||||||
|
}
|
||||||
|
}
|
||||||
14
agent/emmc_stub.go
Normal file
14
agent/emmc_stub.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
//go:build !linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
// Non-Linux builds: eMMC health via sysfs is not available.
|
||||||
|
|
||||||
|
func scanEmmcDevices() []*DeviceInfo {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *SmartManager) collectEmmcHealth(deviceInfo *DeviceInfo) (bool, error) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
87
agent/fingerprint.go
Normal file
87
agent/fingerprint.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"errors"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/shirou/gopsutil/v4/cpu"
|
||||||
|
"github.com/shirou/gopsutil/v4/host"
|
||||||
|
)
|
||||||
|
|
||||||
|
const fingerprintFileName = "fingerprint"
|
||||||
|
|
||||||
|
// knownBadUUID is a commonly known "product_uuid" that is not unique across systems.
|
||||||
|
const knownBadUUID = "03000200-0400-0500-0006-000700080009"
|
||||||
|
|
||||||
|
// GetFingerprint returns the agent fingerprint. It first tries to read a saved
|
||||||
|
// fingerprint from the data directory. If not found (or dataDir is empty), it
|
||||||
|
// generates one from system properties. The hostname and cpuModel parameters are
|
||||||
|
// used as fallback material if host.HostID() fails. If either is empty, they
|
||||||
|
// are fetched from the system automatically.
|
||||||
|
//
|
||||||
|
// If a new fingerprint is generated and a dataDir is provided, it is saved.
|
||||||
|
func GetFingerprint(dataDir, hostname, cpuModel string) string {
|
||||||
|
if dataDir != "" {
|
||||||
|
if fp, err := readFingerprint(dataDir); err == nil {
|
||||||
|
return fp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fp := generateFingerprint(hostname, cpuModel)
|
||||||
|
if dataDir != "" {
|
||||||
|
_ = SaveFingerprint(dataDir, fp)
|
||||||
|
}
|
||||||
|
return fp
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateFingerprint creates a fingerprint from system properties.
|
||||||
|
// It tries host.HostID() first, falling back to hostname + cpuModel.
|
||||||
|
// If hostname or cpuModel are empty, they are fetched from the system.
|
||||||
|
func generateFingerprint(hostname, cpuModel string) string {
|
||||||
|
fingerprint, err := host.HostID()
|
||||||
|
if err != nil || fingerprint == "" || fingerprint == knownBadUUID {
|
||||||
|
if hostname == "" {
|
||||||
|
hostname, _ = os.Hostname()
|
||||||
|
}
|
||||||
|
if cpuModel == "" {
|
||||||
|
if info, err := cpu.Info(); err == nil && len(info) > 0 {
|
||||||
|
cpuModel = info[0].ModelName
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fingerprint = hostname + cpuModel
|
||||||
|
}
|
||||||
|
|
||||||
|
sum := sha256.Sum256([]byte(fingerprint))
|
||||||
|
return hex.EncodeToString(sum[:24])
|
||||||
|
}
|
||||||
|
|
||||||
|
// readFingerprint reads the saved fingerprint from the data directory.
|
||||||
|
func readFingerprint(dataDir string) (string, error) {
|
||||||
|
fp, err := os.ReadFile(filepath.Join(dataDir, fingerprintFileName))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
s := strings.TrimSpace(string(fp))
|
||||||
|
if s == "" {
|
||||||
|
return "", errors.New("fingerprint file is empty")
|
||||||
|
}
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveFingerprint writes the fingerprint to the data directory.
|
||||||
|
func SaveFingerprint(dataDir, fingerprint string) error {
|
||||||
|
return os.WriteFile(filepath.Join(dataDir, fingerprintFileName), []byte(fingerprint), 0o644)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFingerprint removes the saved fingerprint file from the data directory.
|
||||||
|
// Returns nil if the file does not exist (idempotent).
|
||||||
|
func DeleteFingerprint(dataDir string) error {
|
||||||
|
err := os.Remove(filepath.Join(dataDir, fingerprintFileName))
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
102
agent/fingerprint_test.go
Normal file
102
agent/fingerprint_test.go
Normal file
@@ -0,0 +1,102 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGetFingerprint(t *testing.T) {
|
||||||
|
t.Run("reads existing fingerprint from file", func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
expected := "abc123def456"
|
||||||
|
err := os.WriteFile(filepath.Join(dir, fingerprintFileName), []byte(expected), 0644)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
fp := GetFingerprint(dir, "", "")
|
||||||
|
assert.Equal(t, expected, fp)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("trims whitespace from file", func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
err := os.WriteFile(filepath.Join(dir, fingerprintFileName), []byte(" abc123 \n"), 0644)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
fp := GetFingerprint(dir, "", "")
|
||||||
|
assert.Equal(t, "abc123", fp)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("generates fingerprint when file does not exist", func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
fp := GetFingerprint(dir, "", "")
|
||||||
|
assert.NotEmpty(t, fp)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("generates fingerprint when dataDir is empty", func(t *testing.T) {
|
||||||
|
fp := GetFingerprint("", "", "")
|
||||||
|
assert.NotEmpty(t, fp)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("generates consistent fingerprint for same inputs", func(t *testing.T) {
|
||||||
|
fp1 := GetFingerprint("", "myhost", "mycpu")
|
||||||
|
fp2 := GetFingerprint("", "myhost", "mycpu")
|
||||||
|
assert.Equal(t, fp1, fp2)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("prefers saved fingerprint over generated", func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
require.NoError(t, SaveFingerprint(dir, "saved-fp"))
|
||||||
|
|
||||||
|
fp := GetFingerprint(dir, "anyhost", "anycpu")
|
||||||
|
assert.Equal(t, "saved-fp", fp)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSaveFingerprint(t *testing.T) {
|
||||||
|
t.Run("saves fingerprint to file", func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
err := SaveFingerprint(dir, "abc123")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
content, err := os.ReadFile(filepath.Join(dir, fingerprintFileName))
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, "abc123", string(content))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("overwrites existing fingerprint", func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
require.NoError(t, SaveFingerprint(dir, "old"))
|
||||||
|
require.NoError(t, SaveFingerprint(dir, "new"))
|
||||||
|
|
||||||
|
content, err := os.ReadFile(filepath.Join(dir, fingerprintFileName))
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, "new", string(content))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDeleteFingerprint(t *testing.T) {
|
||||||
|
t.Run("deletes existing fingerprint", func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
fp := filepath.Join(dir, fingerprintFileName)
|
||||||
|
err := os.WriteFile(fp, []byte("abc123"), 0644)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = DeleteFingerprint(dir)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify file is gone
|
||||||
|
_, err = os.Stat(fp)
|
||||||
|
assert.True(t, os.IsNotExist(err))
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("no error when file does not exist", func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
err := DeleteFingerprint(dir)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
})
|
||||||
|
}
|
||||||
423
agent/gpu.go
423
agent/gpu.go
@@ -5,17 +5,18 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"maps"
|
"maps"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"regexp"
|
"regexp"
|
||||||
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"golang.org/x/exp/slog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -23,11 +24,11 @@ const (
|
|||||||
nvidiaSmiCmd string = "nvidia-smi"
|
nvidiaSmiCmd string = "nvidia-smi"
|
||||||
rocmSmiCmd string = "rocm-smi"
|
rocmSmiCmd string = "rocm-smi"
|
||||||
tegraStatsCmd string = "tegrastats"
|
tegraStatsCmd string = "tegrastats"
|
||||||
|
nvtopCmd string = "nvtop"
|
||||||
|
powermetricsCmd string = "powermetrics"
|
||||||
|
macmonCmd string = "macmon"
|
||||||
|
noGPUFoundMsg string = "no GPU found - see https://beszel.dev/guide/gpu"
|
||||||
|
|
||||||
// Polling intervals
|
|
||||||
nvidiaSmiInterval string = "4" // in seconds
|
|
||||||
tegraStatsInterval string = "3700" // in milliseconds
|
|
||||||
rocmSmiInterval time.Duration = 4300 * time.Millisecond
|
|
||||||
// Command retry and timeout constants
|
// Command retry and timeout constants
|
||||||
retryWaitTime time.Duration = 5 * time.Second
|
retryWaitTime time.Duration = 5 * time.Second
|
||||||
maxFailureRetries int = 5
|
maxFailureRetries int = 5
|
||||||
@@ -40,10 +41,6 @@ const (
|
|||||||
// GPUManager manages data collection for GPUs (either Nvidia or AMD)
|
// GPUManager manages data collection for GPUs (either Nvidia or AMD)
|
||||||
type GPUManager struct {
|
type GPUManager struct {
|
||||||
sync.Mutex
|
sync.Mutex
|
||||||
nvidiaSmi bool
|
|
||||||
rocmSmi bool
|
|
||||||
tegrastats bool
|
|
||||||
intelGpuStats bool
|
|
||||||
GpuDataMap map[string]*system.GPUData
|
GpuDataMap map[string]*system.GPUData
|
||||||
// lastAvgData stores the last calculated averages for each GPU
|
// lastAvgData stores the last calculated averages for each GPU
|
||||||
// Used when a collection happens before new data arrives (Count == 0)
|
// Used when a collection happens before new data arrives (Count == 0)
|
||||||
@@ -85,6 +82,58 @@ type gpuCollector struct {
|
|||||||
|
|
||||||
var errNoValidData = fmt.Errorf("no valid GPU data found") // Error for missing data
|
var errNoValidData = fmt.Errorf("no valid GPU data found") // Error for missing data
|
||||||
|
|
||||||
|
// collectorSource identifies a selectable GPU collector in GPU_COLLECTOR.
|
||||||
|
type collectorSource string
|
||||||
|
|
||||||
|
const (
|
||||||
|
collectorSourceNVTop collectorSource = collectorSource(nvtopCmd)
|
||||||
|
collectorSourceNVML collectorSource = "nvml"
|
||||||
|
collectorSourceNvidiaSMI collectorSource = collectorSource(nvidiaSmiCmd)
|
||||||
|
collectorSourceIntelGpuTop collectorSource = collectorSource(intelGpuStatsCmd)
|
||||||
|
collectorSourceAmdSysfs collectorSource = "amd_sysfs"
|
||||||
|
collectorSourceRocmSMI collectorSource = collectorSource(rocmSmiCmd)
|
||||||
|
collectorSourceMacmon collectorSource = collectorSource(macmonCmd)
|
||||||
|
collectorSourcePowermetrics collectorSource = collectorSource(powermetricsCmd)
|
||||||
|
collectorGroupNvidia string = "nvidia"
|
||||||
|
collectorGroupIntel string = "intel"
|
||||||
|
collectorGroupAmd string = "amd"
|
||||||
|
collectorGroupApple string = "apple"
|
||||||
|
)
|
||||||
|
|
||||||
|
func isValidCollectorSource(source collectorSource) bool {
|
||||||
|
switch source {
|
||||||
|
case collectorSourceNVTop,
|
||||||
|
collectorSourceNVML,
|
||||||
|
collectorSourceNvidiaSMI,
|
||||||
|
collectorSourceIntelGpuTop,
|
||||||
|
collectorSourceAmdSysfs,
|
||||||
|
collectorSourceRocmSMI,
|
||||||
|
collectorSourceMacmon,
|
||||||
|
collectorSourcePowermetrics:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// gpuCapabilities describes detected GPU tooling and sysfs support on the host.
|
||||||
|
type gpuCapabilities struct {
|
||||||
|
hasNvidiaSmi bool
|
||||||
|
hasRocmSmi bool
|
||||||
|
hasAmdSysfs bool
|
||||||
|
hasTegrastats bool
|
||||||
|
hasIntelGpuTop bool
|
||||||
|
hasNvtop bool
|
||||||
|
hasMacmon bool
|
||||||
|
hasPowermetrics bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type collectorDefinition struct {
|
||||||
|
group string
|
||||||
|
available bool
|
||||||
|
start func(onFailure func()) bool
|
||||||
|
deprecationWarning string
|
||||||
|
}
|
||||||
|
|
||||||
// starts and manages the ongoing collection of GPU data for the specified GPU management utility
|
// starts and manages the ongoing collection of GPU data for the specified GPU management utility
|
||||||
func (c *gpuCollector) start() {
|
func (c *gpuCollector) start() {
|
||||||
for {
|
for {
|
||||||
@@ -136,10 +185,10 @@ func (gm *GPUManager) getJetsonParser() func(output []byte) bool {
|
|||||||
// use closure to avoid recompiling the regex
|
// use closure to avoid recompiling the regex
|
||||||
ramPattern := regexp.MustCompile(`RAM (\d+)/(\d+)MB`)
|
ramPattern := regexp.MustCompile(`RAM (\d+)/(\d+)MB`)
|
||||||
gr3dPattern := regexp.MustCompile(`GR3D_FREQ (\d+)%`)
|
gr3dPattern := regexp.MustCompile(`GR3D_FREQ (\d+)%`)
|
||||||
tempPattern := regexp.MustCompile(`tj@(\d+\.?\d*)C`)
|
tempPattern := regexp.MustCompile(`(?:tj|GPU)@(\d+\.?\d*)C`)
|
||||||
// Orin Nano / NX do not have GPU specific power monitor
|
// Orin Nano / NX do not have GPU specific power monitor
|
||||||
// TODO: Maybe use VDD_IN for Nano / NX and add a total system power chart
|
// TODO: Maybe use VDD_IN for Nano / NX and add a total system power chart
|
||||||
powerPattern := regexp.MustCompile(`(GPU_SOC|CPU_GPU_CV) (\d+)mW`)
|
powerPattern := regexp.MustCompile(`(GPU_SOC|CPU_GPU_CV)\s+(\d+)mW|VDD_SYS_GPU\s+(\d+)/\d+`)
|
||||||
|
|
||||||
// jetson devices have only one gpu so we'll just initialize here
|
// jetson devices have only one gpu so we'll just initialize here
|
||||||
gpuData := &system.GPUData{Name: "GPU"}
|
gpuData := &system.GPUData{Name: "GPU"}
|
||||||
@@ -168,7 +217,13 @@ func (gm *GPUManager) getJetsonParser() func(output []byte) bool {
|
|||||||
// Parse power usage
|
// Parse power usage
|
||||||
powerMatches := powerPattern.FindSubmatch(output)
|
powerMatches := powerPattern.FindSubmatch(output)
|
||||||
if powerMatches != nil {
|
if powerMatches != nil {
|
||||||
power, _ := strconv.ParseFloat(string(powerMatches[2]), 64)
|
// powerMatches[2] is the "(GPU_SOC|CPU_GPU_CV) <N>mW" capture
|
||||||
|
// powerMatches[3] is the "VDD_SYS_GPU <N>/<N>" capture
|
||||||
|
powerStr := string(powerMatches[2])
|
||||||
|
if powerStr == "" {
|
||||||
|
powerStr = string(powerMatches[3])
|
||||||
|
}
|
||||||
|
power, _ := strconv.ParseFloat(powerStr, 64)
|
||||||
gpuData.Power += power / milliwattsInAWatt
|
gpuData.Power += power / milliwattsInAWatt
|
||||||
}
|
}
|
||||||
gpuData.Count++
|
gpuData.Count++
|
||||||
@@ -231,13 +286,14 @@ func (gm *GPUManager) parseAmdData(output []byte) bool {
|
|||||||
totalMemory, _ := strconv.ParseFloat(v.MemoryTotal, 64)
|
totalMemory, _ := strconv.ParseFloat(v.MemoryTotal, 64)
|
||||||
usage, _ := strconv.ParseFloat(v.Usage, 64)
|
usage, _ := strconv.ParseFloat(v.Usage, 64)
|
||||||
|
|
||||||
if _, ok := gm.GpuDataMap[v.ID]; !ok {
|
id := v.ID
|
||||||
gm.GpuDataMap[v.ID] = &system.GPUData{Name: v.Name}
|
if _, ok := gm.GpuDataMap[id]; !ok {
|
||||||
|
gm.GpuDataMap[id] = &system.GPUData{Name: v.Name}
|
||||||
}
|
}
|
||||||
gpu := gm.GpuDataMap[v.ID]
|
gpu := gm.GpuDataMap[id]
|
||||||
gpu.Temperature, _ = strconv.ParseFloat(v.Temperature, 64)
|
gpu.Temperature, _ = strconv.ParseFloat(v.Temperature, 64)
|
||||||
gpu.MemoryUsed = bytesToMegabytes(memoryUsage)
|
gpu.MemoryUsed = utils.BytesToMegabytes(memoryUsage)
|
||||||
gpu.MemoryTotal = bytesToMegabytes(totalMemory)
|
gpu.MemoryTotal = utils.BytesToMegabytes(totalMemory)
|
||||||
gpu.Usage += usage
|
gpu.Usage += usage
|
||||||
gpu.Power += power
|
gpu.Power += power
|
||||||
gpu.Count++
|
gpu.Count++
|
||||||
@@ -297,8 +353,13 @@ func (gm *GPUManager) calculateGPUAverage(id string, gpu *system.GPUData, cacheK
|
|||||||
currentCount := uint32(gpu.Count)
|
currentCount := uint32(gpu.Count)
|
||||||
deltaCount := gm.calculateDeltaCount(currentCount, lastSnapshot)
|
deltaCount := gm.calculateDeltaCount(currentCount, lastSnapshot)
|
||||||
|
|
||||||
// If no new data arrived, use last known average
|
// If no new data arrived
|
||||||
if deltaCount == 0 {
|
if deltaCount == 0 {
|
||||||
|
// If GPU appears suspended (instantaneous values are 0), return zero values
|
||||||
|
// Otherwise return last known average for temporary collection gaps
|
||||||
|
if gpu.Temperature == 0 && gpu.MemoryUsed == 0 {
|
||||||
|
return system.GPUData{Name: gpu.Name}
|
||||||
|
}
|
||||||
return gm.lastAvgData[id] // zero value if not found
|
return gm.lastAvgData[id] // zero value if not found
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -306,16 +367,16 @@ func (gm *GPUManager) calculateGPUAverage(id string, gpu *system.GPUData, cacheK
|
|||||||
gpuAvg := *gpu
|
gpuAvg := *gpu
|
||||||
deltaUsage, deltaPower, deltaPowerPkg := gm.calculateDeltas(gpu, lastSnapshot)
|
deltaUsage, deltaPower, deltaPowerPkg := gm.calculateDeltas(gpu, lastSnapshot)
|
||||||
|
|
||||||
gpuAvg.Power = twoDecimals(deltaPower / float64(deltaCount))
|
gpuAvg.Power = utils.TwoDecimals(deltaPower / float64(deltaCount))
|
||||||
|
|
||||||
if gpu.Engines != nil {
|
if gpu.Engines != nil {
|
||||||
// make fresh map for averaged engine metrics to avoid mutating
|
// make fresh map for averaged engine metrics to avoid mutating
|
||||||
// the accumulator map stored in gm.GpuDataMap
|
// the accumulator map stored in gm.GpuDataMap
|
||||||
gpuAvg.Engines = make(map[string]float64, len(gpu.Engines))
|
gpuAvg.Engines = make(map[string]float64, len(gpu.Engines))
|
||||||
gpuAvg.Usage = gm.calculateIntelGPUUsage(&gpuAvg, gpu, lastSnapshot, deltaCount)
|
gpuAvg.Usage = gm.calculateIntelGPUUsage(&gpuAvg, gpu, lastSnapshot, deltaCount)
|
||||||
gpuAvg.PowerPkg = twoDecimals(deltaPowerPkg / float64(deltaCount))
|
gpuAvg.PowerPkg = utils.TwoDecimals(deltaPowerPkg / float64(deltaCount))
|
||||||
} else {
|
} else {
|
||||||
gpuAvg.Usage = twoDecimals(deltaUsage / float64(deltaCount))
|
gpuAvg.Usage = utils.TwoDecimals(deltaUsage / float64(deltaCount))
|
||||||
}
|
}
|
||||||
|
|
||||||
gm.lastAvgData[id] = gpuAvg
|
gm.lastAvgData[id] = gpuAvg
|
||||||
@@ -350,17 +411,17 @@ func (gm *GPUManager) calculateIntelGPUUsage(gpuAvg, gpu *system.GPUData, lastSn
|
|||||||
} else {
|
} else {
|
||||||
deltaEngine = engine
|
deltaEngine = engine
|
||||||
}
|
}
|
||||||
gpuAvg.Engines[name] = twoDecimals(deltaEngine / float64(deltaCount))
|
gpuAvg.Engines[name] = utils.TwoDecimals(deltaEngine / float64(deltaCount))
|
||||||
maxEngineUsage = max(maxEngineUsage, deltaEngine/float64(deltaCount))
|
maxEngineUsage = max(maxEngineUsage, deltaEngine/float64(deltaCount))
|
||||||
}
|
}
|
||||||
return twoDecimals(maxEngineUsage)
|
return utils.TwoDecimals(maxEngineUsage)
|
||||||
}
|
}
|
||||||
|
|
||||||
// updateInstantaneousValues updates values that should reflect current state, not averages
|
// updateInstantaneousValues updates values that should reflect current state, not averages
|
||||||
func (gm *GPUManager) updateInstantaneousValues(gpuAvg *system.GPUData, gpu *system.GPUData) {
|
func (gm *GPUManager) updateInstantaneousValues(gpuAvg *system.GPUData, gpu *system.GPUData) {
|
||||||
gpuAvg.Temperature = twoDecimals(gpu.Temperature)
|
gpuAvg.Temperature = utils.TwoDecimals(gpu.Temperature)
|
||||||
gpuAvg.MemoryUsed = twoDecimals(gpu.MemoryUsed)
|
gpuAvg.MemoryUsed = utils.TwoDecimals(gpu.MemoryUsed)
|
||||||
gpuAvg.MemoryTotal = twoDecimals(gpu.MemoryTotal)
|
gpuAvg.MemoryTotal = utils.TwoDecimals(gpu.MemoryTotal)
|
||||||
}
|
}
|
||||||
|
|
||||||
// storeSnapshot saves the current GPU state for this cache key
|
// storeSnapshot saves the current GPU state for this cache key
|
||||||
@@ -378,38 +439,43 @@ func (gm *GPUManager) storeSnapshot(id string, gpu *system.GPUData, cacheKey uin
|
|||||||
gm.lastSnapshots[cacheKey][id] = snapshot
|
gm.lastSnapshots[cacheKey][id] = snapshot
|
||||||
}
|
}
|
||||||
|
|
||||||
// detectGPUs checks for the presence of GPU management tools (nvidia-smi, rocm-smi, tegrastats)
|
// discoverGpuCapabilities checks for available GPU tooling and sysfs support.
|
||||||
// in the system path. It sets the corresponding flags in the GPUManager struct if any of these
|
// It only reports capability presence and does not apply policy decisions.
|
||||||
// tools are found. If none of the tools are found, it returns an error indicating that no GPU
|
func (gm *GPUManager) discoverGpuCapabilities() gpuCapabilities {
|
||||||
// management tools are available.
|
caps := gpuCapabilities{
|
||||||
func (gm *GPUManager) detectGPUs() error {
|
hasAmdSysfs: gm.hasAmdSysfs(),
|
||||||
|
}
|
||||||
if _, err := exec.LookPath(nvidiaSmiCmd); err == nil {
|
if _, err := exec.LookPath(nvidiaSmiCmd); err == nil {
|
||||||
gm.nvidiaSmi = true
|
caps.hasNvidiaSmi = true
|
||||||
}
|
}
|
||||||
if _, err := exec.LookPath(rocmSmiCmd); err == nil {
|
if _, err := exec.LookPath(rocmSmiCmd); err == nil {
|
||||||
gm.rocmSmi = true
|
caps.hasRocmSmi = true
|
||||||
}
|
}
|
||||||
if _, err := exec.LookPath(tegraStatsCmd); err == nil {
|
if _, err := exec.LookPath(tegraStatsCmd); err == nil {
|
||||||
gm.tegrastats = true
|
caps.hasTegrastats = true
|
||||||
gm.nvidiaSmi = false
|
|
||||||
}
|
}
|
||||||
if _, err := exec.LookPath(intelGpuStatsCmd); err == nil {
|
if _, err := exec.LookPath(intelGpuStatsCmd); err == nil {
|
||||||
gm.intelGpuStats = true
|
caps.hasIntelGpuTop = true
|
||||||
}
|
}
|
||||||
if gm.nvidiaSmi || gm.rocmSmi || gm.tegrastats || gm.intelGpuStats {
|
if _, err := exec.LookPath(nvtopCmd); err == nil {
|
||||||
return nil
|
caps.hasNvtop = true
|
||||||
}
|
}
|
||||||
return fmt.Errorf("no GPU found - install nvidia-smi, rocm-smi, tegrastats, or intel_gpu_top")
|
if runtime.GOOS == "darwin" {
|
||||||
|
if _, err := utils.LookPathHomebrew(macmonCmd); err == nil {
|
||||||
|
caps.hasMacmon = true
|
||||||
|
}
|
||||||
|
if _, err := exec.LookPath(powermetricsCmd); err == nil {
|
||||||
|
caps.hasPowermetrics = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return caps
|
||||||
}
|
}
|
||||||
|
|
||||||
// startCollector starts the appropriate GPU data collector based on the command
|
func hasAnyGpuCollector(caps gpuCapabilities) bool {
|
||||||
func (gm *GPUManager) startCollector(command string) {
|
return caps.hasNvidiaSmi || caps.hasRocmSmi || caps.hasAmdSysfs || caps.hasTegrastats || caps.hasIntelGpuTop || caps.hasNvtop || caps.hasMacmon || caps.hasPowermetrics
|
||||||
collector := gpuCollector{
|
|
||||||
name: command,
|
|
||||||
bufSize: 10 * 1024,
|
|
||||||
}
|
}
|
||||||
switch command {
|
|
||||||
case intelGpuStatsCmd:
|
func (gm *GPUManager) startIntelCollector() {
|
||||||
go func() {
|
go func() {
|
||||||
failures := 0
|
failures := 0
|
||||||
for {
|
for {
|
||||||
@@ -424,21 +490,39 @@ func (gm *GPUManager) startCollector(command string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
case nvidiaSmiCmd:
|
}
|
||||||
collector.cmdArgs = []string{
|
|
||||||
"-l", nvidiaSmiInterval,
|
func (gm *GPUManager) startNvidiaSmiCollector(intervalSeconds string) {
|
||||||
|
collector := gpuCollector{
|
||||||
|
name: nvidiaSmiCmd,
|
||||||
|
bufSize: 10 * 1024,
|
||||||
|
cmdArgs: []string{
|
||||||
|
"-l", intervalSeconds,
|
||||||
"--query-gpu=index,name,temperature.gpu,memory.used,memory.total,utilization.gpu,power.draw",
|
"--query-gpu=index,name,temperature.gpu,memory.used,memory.total,utilization.gpu,power.draw",
|
||||||
"--format=csv,noheader,nounits",
|
"--format=csv,noheader,nounits",
|
||||||
|
},
|
||||||
|
parse: gm.parseNvidiaData,
|
||||||
}
|
}
|
||||||
collector.parse = gm.parseNvidiaData
|
|
||||||
go collector.start()
|
go collector.start()
|
||||||
case tegraStatsCmd:
|
}
|
||||||
collector.cmdArgs = []string{"--interval", tegraStatsInterval}
|
|
||||||
collector.parse = gm.getJetsonParser()
|
func (gm *GPUManager) startTegraStatsCollector(intervalMilliseconds string) {
|
||||||
|
collector := gpuCollector{
|
||||||
|
name: tegraStatsCmd,
|
||||||
|
bufSize: 10 * 1024,
|
||||||
|
cmdArgs: []string{"--interval", intervalMilliseconds},
|
||||||
|
parse: gm.getJetsonParser(),
|
||||||
|
}
|
||||||
go collector.start()
|
go collector.start()
|
||||||
case rocmSmiCmd:
|
}
|
||||||
collector.cmdArgs = []string{"--showid", "--showtemp", "--showuse", "--showpower", "--showproductname", "--showmeminfo", "vram", "--json"}
|
|
||||||
collector.parse = gm.parseAmdData
|
func (gm *GPUManager) startRocmSmiCollector(pollInterval time.Duration) {
|
||||||
|
collector := gpuCollector{
|
||||||
|
name: rocmSmiCmd,
|
||||||
|
bufSize: 10 * 1024,
|
||||||
|
cmdArgs: []string{"--showid", "--showtemp", "--showuse", "--showpower", "--showproductname", "--showmeminfo", "vram", "--json"},
|
||||||
|
parse: gm.parseAmdData,
|
||||||
|
}
|
||||||
go func() {
|
go func() {
|
||||||
failures := 0
|
failures := 0
|
||||||
for {
|
for {
|
||||||
@@ -447,36 +531,233 @@ func (gm *GPUManager) startCollector(command string) {
|
|||||||
if failures > maxFailureRetries {
|
if failures > maxFailureRetries {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
slog.Warn("Error collecting AMD GPU data", "err", err)
|
slog.Warn("Error collecting AMD GPU data via rocm-smi", "err", err)
|
||||||
}
|
}
|
||||||
time.Sleep(rocmSmiInterval)
|
time.Sleep(pollInterval)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (gm *GPUManager) collectorDefinitions(caps gpuCapabilities) map[collectorSource]collectorDefinition {
|
||||||
|
return map[collectorSource]collectorDefinition{
|
||||||
|
collectorSourceNVML: {
|
||||||
|
group: collectorGroupNvidia,
|
||||||
|
available: true,
|
||||||
|
start: func(_ func()) bool {
|
||||||
|
return gm.startNvmlCollector()
|
||||||
|
},
|
||||||
|
},
|
||||||
|
collectorSourceNvidiaSMI: {
|
||||||
|
group: collectorGroupNvidia,
|
||||||
|
available: caps.hasNvidiaSmi,
|
||||||
|
start: func(_ func()) bool {
|
||||||
|
gm.startNvidiaSmiCollector("4") // seconds
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
},
|
||||||
|
collectorSourceIntelGpuTop: {
|
||||||
|
group: collectorGroupIntel,
|
||||||
|
available: caps.hasIntelGpuTop,
|
||||||
|
start: func(_ func()) bool {
|
||||||
|
gm.startIntelCollector()
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
},
|
||||||
|
collectorSourceAmdSysfs: {
|
||||||
|
group: collectorGroupAmd,
|
||||||
|
available: caps.hasAmdSysfs,
|
||||||
|
start: func(_ func()) bool {
|
||||||
|
return gm.startAmdSysfsCollector()
|
||||||
|
},
|
||||||
|
},
|
||||||
|
collectorSourceRocmSMI: {
|
||||||
|
group: collectorGroupAmd,
|
||||||
|
available: caps.hasRocmSmi,
|
||||||
|
deprecationWarning: "rocm-smi is deprecated and may be removed in a future release",
|
||||||
|
start: func(_ func()) bool {
|
||||||
|
gm.startRocmSmiCollector(4300 * time.Millisecond)
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
},
|
||||||
|
collectorSourceNVTop: {
|
||||||
|
available: caps.hasNvtop,
|
||||||
|
start: func(onFailure func()) bool {
|
||||||
|
gm.startNvtopCollector("30", onFailure) // tens of milliseconds
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
},
|
||||||
|
collectorSourceMacmon: {
|
||||||
|
group: collectorGroupApple,
|
||||||
|
available: caps.hasMacmon,
|
||||||
|
start: func(_ func()) bool {
|
||||||
|
gm.startMacmonCollector()
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
},
|
||||||
|
collectorSourcePowermetrics: {
|
||||||
|
group: collectorGroupApple,
|
||||||
|
available: caps.hasPowermetrics,
|
||||||
|
start: func(_ func()) bool {
|
||||||
|
gm.startPowermetricsCollector()
|
||||||
|
return true
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseCollectorPriority parses GPU_COLLECTOR and returns valid ordered entries.
|
||||||
|
func parseCollectorPriority(value string) []collectorSource {
|
||||||
|
parts := strings.Split(value, ",")
|
||||||
|
priorities := make([]collectorSource, 0, len(parts))
|
||||||
|
for _, raw := range parts {
|
||||||
|
name := collectorSource(strings.TrimSpace(strings.ToLower(raw)))
|
||||||
|
if !isValidCollectorSource(name) {
|
||||||
|
if name != "" {
|
||||||
|
slog.Warn("Ignoring unknown GPU collector", "collector", name)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
priorities = append(priorities, name)
|
||||||
|
}
|
||||||
|
return priorities
|
||||||
|
}
|
||||||
|
|
||||||
|
// startNvmlCollector initializes NVML and starts its polling loop.
|
||||||
|
func (gm *GPUManager) startNvmlCollector() bool {
|
||||||
|
collector := &nvmlCollector{gm: gm}
|
||||||
|
if err := collector.init(); err != nil {
|
||||||
|
slog.Warn("Failed to initialize NVML", "err", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
go collector.start()
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// startAmdSysfsCollector starts AMD GPU collection via sysfs.
|
||||||
|
func (gm *GPUManager) startAmdSysfsCollector() bool {
|
||||||
|
go func() {
|
||||||
|
if err := gm.collectAmdStats(); err != nil {
|
||||||
|
slog.Warn("Error collecting AMD GPU data via sysfs", "err", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// startCollectorsByPriority starts collectors in order with one source per vendor group.
|
||||||
|
func (gm *GPUManager) startCollectorsByPriority(priorities []collectorSource, caps gpuCapabilities) int {
|
||||||
|
definitions := gm.collectorDefinitions(caps)
|
||||||
|
selectedGroups := make(map[string]bool, 3)
|
||||||
|
started := 0
|
||||||
|
for i, source := range priorities {
|
||||||
|
definition, ok := definitions[source]
|
||||||
|
if !ok || !definition.available {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// nvtop is not a vendor-specific collector, so should only be used if no other collectors are selected or it is first in GPU_COLLECTOR.
|
||||||
|
if source == collectorSourceNVTop {
|
||||||
|
if len(selectedGroups) > 0 {
|
||||||
|
slog.Warn("Skipping nvtop because other collectors are selected")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// if nvtop fails, fall back to remaining collectors.
|
||||||
|
remaining := append([]collectorSource(nil), priorities[i+1:]...)
|
||||||
|
if definition.start(func() {
|
||||||
|
gm.startCollectorsByPriority(remaining, caps)
|
||||||
|
}) {
|
||||||
|
started++
|
||||||
|
return started
|
||||||
|
}
|
||||||
|
}
|
||||||
|
group := definition.group
|
||||||
|
if group == "" || selectedGroups[group] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if definition.deprecationWarning != "" {
|
||||||
|
slog.Warn(definition.deprecationWarning)
|
||||||
|
}
|
||||||
|
if definition.start(nil) {
|
||||||
|
selectedGroups[group] = true
|
||||||
|
started++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return started
|
||||||
|
}
|
||||||
|
|
||||||
|
// resolveLegacyCollectorPriority builds the default collector order when GPU_COLLECTOR is unset.
|
||||||
|
func (gm *GPUManager) resolveLegacyCollectorPriority(caps gpuCapabilities) []collectorSource {
|
||||||
|
priorities := make([]collectorSource, 0, 4)
|
||||||
|
|
||||||
|
if caps.hasNvidiaSmi && !caps.hasTegrastats {
|
||||||
|
if nvml, _ := utils.GetEnv("NVML"); nvml == "true" {
|
||||||
|
priorities = append(priorities, collectorSourceNVML, collectorSourceNvidiaSMI)
|
||||||
|
} else {
|
||||||
|
priorities = append(priorities, collectorSourceNvidiaSMI)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if caps.hasRocmSmi {
|
||||||
|
if val, _ := utils.GetEnv("AMD_SYSFS"); val == "true" {
|
||||||
|
priorities = append(priorities, collectorSourceAmdSysfs)
|
||||||
|
} else {
|
||||||
|
priorities = append(priorities, collectorSourceRocmSMI)
|
||||||
|
}
|
||||||
|
} else if caps.hasAmdSysfs {
|
||||||
|
priorities = append(priorities, collectorSourceAmdSysfs)
|
||||||
|
}
|
||||||
|
|
||||||
|
if caps.hasIntelGpuTop {
|
||||||
|
priorities = append(priorities, collectorSourceIntelGpuTop)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apple collectors are currently opt-in only for testing.
|
||||||
|
// Enable them with GPU_COLLECTOR=macmon or GPU_COLLECTOR=powermetrics.
|
||||||
|
// TODO: uncomment below when Apple collectors are confirmed to be working.
|
||||||
|
//
|
||||||
|
// Prefer macmon on macOS (no sudo). Fall back to powermetrics if present.
|
||||||
|
// if caps.hasMacmon {
|
||||||
|
// priorities = append(priorities, collectorSourceMacmon)
|
||||||
|
// } else if caps.hasPowermetrics {
|
||||||
|
// priorities = append(priorities, collectorSourcePowermetrics)
|
||||||
|
// }
|
||||||
|
|
||||||
|
// Keep nvtop as a last resort only when no vendor collector exists.
|
||||||
|
if len(priorities) == 0 && caps.hasNvtop {
|
||||||
|
priorities = append(priorities, collectorSourceNVTop)
|
||||||
|
}
|
||||||
|
return priorities
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewGPUManager creates and initializes a new GPUManager
|
// NewGPUManager creates and initializes a new GPUManager
|
||||||
func NewGPUManager() (*GPUManager, error) {
|
func NewGPUManager() (*GPUManager, error) {
|
||||||
if skipGPU, _ := GetEnv("SKIP_GPU"); skipGPU == "true" {
|
if skipGPU, _ := utils.GetEnv("SKIP_GPU"); skipGPU == "true" {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
var gm GPUManager
|
var gm GPUManager
|
||||||
if err := gm.detectGPUs(); err != nil {
|
caps := gm.discoverGpuCapabilities()
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
gm.GpuDataMap = make(map[string]*system.GPUData)
|
gm.GpuDataMap = make(map[string]*system.GPUData)
|
||||||
|
|
||||||
if gm.nvidiaSmi {
|
// Jetson devices should always use tegrastats (ignore GPU_COLLECTOR).
|
||||||
gm.startCollector(nvidiaSmiCmd)
|
if caps.hasTegrastats {
|
||||||
|
gm.startTegraStatsCollector("3700")
|
||||||
|
return &gm, nil
|
||||||
}
|
}
|
||||||
if gm.rocmSmi {
|
|
||||||
gm.startCollector(rocmSmiCmd)
|
// Respect explicit collector selection before capability auto-detection.
|
||||||
|
if collectorConfig, ok := utils.GetEnv("GPU_COLLECTOR"); ok && strings.TrimSpace(collectorConfig) != "" {
|
||||||
|
priorities := parseCollectorPriority(collectorConfig)
|
||||||
|
if gm.startCollectorsByPriority(priorities, caps) == 0 {
|
||||||
|
return nil, fmt.Errorf("no configured GPU collectors are available")
|
||||||
}
|
}
|
||||||
if gm.tegrastats {
|
return &gm, nil
|
||||||
gm.startCollector(tegraStatsCmd)
|
|
||||||
}
|
}
|
||||||
if gm.intelGpuStats {
|
|
||||||
gm.startCollector(intelGpuStatsCmd)
|
if !hasAnyGpuCollector(caps) {
|
||||||
|
return nil, fmt.Errorf(noGPUFoundMsg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// auto-detect and start collectors when GPU_COLLECTOR is unset.
|
||||||
|
if gm.startCollectorsByPriority(gm.resolveLegacyCollectorPriority(caps), caps) == 0 {
|
||||||
|
return nil, fmt.Errorf(noGPUFoundMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &gm, nil
|
return &gm, nil
|
||||||
|
|||||||
302
agent/gpu_amd_linux.go
Normal file
302
agent/gpu_amd_linux.go
Normal file
@@ -0,0 +1,302 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
var amdgpuNameCache = struct {
|
||||||
|
sync.RWMutex
|
||||||
|
hits map[string]string
|
||||||
|
misses map[string]struct{}
|
||||||
|
}{
|
||||||
|
hits: make(map[string]string),
|
||||||
|
misses: make(map[string]struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasAmdSysfs returns true if any AMD GPU sysfs nodes are found
|
||||||
|
func (gm *GPUManager) hasAmdSysfs() bool {
|
||||||
|
cards, err := filepath.Glob("/sys/class/drm/card*/device/vendor")
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, vendorPath := range cards {
|
||||||
|
vendor, err := utils.ReadStringFileLimited(vendorPath, 64)
|
||||||
|
if err == nil && vendor == "0x1002" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// collectAmdStats collects AMD GPU metrics directly from sysfs to avoid the overhead of rocm-smi
|
||||||
|
func (gm *GPUManager) collectAmdStats() error {
|
||||||
|
sysfsPollInterval := 3000 * time.Millisecond
|
||||||
|
cards, err := filepath.Glob("/sys/class/drm/card*")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var amdGpuPaths []string
|
||||||
|
for _, card := range cards {
|
||||||
|
// Ignore symbolic links and non-main card directories
|
||||||
|
if strings.Contains(filepath.Base(card), "-") || !isAmdGpu(card) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
amdGpuPaths = append(amdGpuPaths, card)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(amdGpuPaths) == 0 {
|
||||||
|
return errNoValidData
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Debug("Using sysfs for AMD GPU data collection")
|
||||||
|
|
||||||
|
failures := 0
|
||||||
|
for {
|
||||||
|
hasData := false
|
||||||
|
for _, cardPath := range amdGpuPaths {
|
||||||
|
if gm.updateAmdGpuData(cardPath) {
|
||||||
|
hasData = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !hasData {
|
||||||
|
failures++
|
||||||
|
if failures > maxFailureRetries {
|
||||||
|
return errNoValidData
|
||||||
|
}
|
||||||
|
slog.Warn("No AMD GPU data from sysfs", "failures", failures)
|
||||||
|
time.Sleep(retryWaitTime)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
failures = 0
|
||||||
|
time.Sleep(sysfsPollInterval)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// isAmdGpu checks whether a DRM card path belongs to AMD vendor ID 0x1002.
|
||||||
|
func isAmdGpu(cardPath string) bool {
|
||||||
|
vendor, err := utils.ReadStringFileLimited(filepath.Join(cardPath, "device/vendor"), 64)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return vendor == "0x1002"
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateAmdGpuData reads GPU metrics from sysfs and updates the GPU data map.
|
||||||
|
// Returns true if at least some data was successfully read.
|
||||||
|
func (gm *GPUManager) updateAmdGpuData(cardPath string) bool {
|
||||||
|
devicePath := filepath.Join(cardPath, "device")
|
||||||
|
id := filepath.Base(cardPath)
|
||||||
|
|
||||||
|
// Read all sysfs values first (no lock needed - these can be slow)
|
||||||
|
usage, usageErr := readSysfsFloat(filepath.Join(devicePath, "gpu_busy_percent"))
|
||||||
|
memUsed, memUsedErr := readSysfsFloat(filepath.Join(devicePath, "mem_info_vram_used"))
|
||||||
|
memTotal, _ := readSysfsFloat(filepath.Join(devicePath, "mem_info_vram_total"))
|
||||||
|
// if gtt is present, add it to the memory used and total (https://github.com/henrygd/beszel/issues/1569#issuecomment-3837640484)
|
||||||
|
if gttUsed, err := readSysfsFloat(filepath.Join(devicePath, "mem_info_gtt_used")); err == nil && gttUsed > 0 {
|
||||||
|
if gttTotal, err := readSysfsFloat(filepath.Join(devicePath, "mem_info_gtt_total")); err == nil {
|
||||||
|
memUsed += gttUsed
|
||||||
|
memTotal += gttTotal
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var temp, power float64
|
||||||
|
hwmons, _ := filepath.Glob(filepath.Join(devicePath, "hwmon/hwmon*"))
|
||||||
|
for _, hwmonDir := range hwmons {
|
||||||
|
if t, err := readSysfsFloat(filepath.Join(hwmonDir, "temp1_input")); err == nil {
|
||||||
|
temp = t / 1000.0
|
||||||
|
}
|
||||||
|
if p, err := readSysfsFloat(filepath.Join(hwmonDir, "power1_average")); err == nil {
|
||||||
|
power += p / 1000000.0
|
||||||
|
} else if p, err := readSysfsFloat(filepath.Join(hwmonDir, "power1_input")); err == nil {
|
||||||
|
power += p / 1000000.0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we got any meaningful data
|
||||||
|
if usageErr != nil && memUsedErr != nil && temp == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Single lock to update all values atomically
|
||||||
|
gm.Lock()
|
||||||
|
defer gm.Unlock()
|
||||||
|
|
||||||
|
gpu, ok := gm.GpuDataMap[id]
|
||||||
|
if !ok {
|
||||||
|
gpu = &system.GPUData{Name: getAmdGpuName(devicePath)}
|
||||||
|
gm.GpuDataMap[id] = gpu
|
||||||
|
}
|
||||||
|
|
||||||
|
if usageErr == nil {
|
||||||
|
gpu.Usage += usage
|
||||||
|
}
|
||||||
|
gpu.MemoryUsed = utils.BytesToMegabytes(memUsed)
|
||||||
|
gpu.MemoryTotal = utils.BytesToMegabytes(memTotal)
|
||||||
|
gpu.Temperature = temp
|
||||||
|
gpu.Power += power
|
||||||
|
gpu.Count++
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// readSysfsFloat reads and parses a numeric value from a sysfs file.
|
||||||
|
func readSysfsFloat(path string) (float64, error) {
|
||||||
|
val, err := utils.ReadStringFileLimited(path, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return strconv.ParseFloat(val, 64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeHexID normalizes hex IDs by trimming spaces, lowercasing, and dropping 0x.
|
||||||
|
func normalizeHexID(id string) string {
|
||||||
|
return strings.TrimPrefix(strings.ToLower(strings.TrimSpace(id)), "0x")
|
||||||
|
}
|
||||||
|
|
||||||
|
// cacheKeyForAmdgpu builds the cache key for a device and optional revision.
|
||||||
|
func cacheKeyForAmdgpu(deviceID, revisionID string) string {
|
||||||
|
if revisionID != "" {
|
||||||
|
return deviceID + ":" + revisionID
|
||||||
|
}
|
||||||
|
return deviceID
|
||||||
|
}
|
||||||
|
|
||||||
|
// lookupAmdgpuNameInFile resolves an AMDGPU name from amdgpu.ids by device/revision.
|
||||||
|
func lookupAmdgpuNameInFile(deviceID, revisionID, filePath string) (name string, exact bool, found bool) {
|
||||||
|
file, err := os.Open(filePath)
|
||||||
|
if err != nil {
|
||||||
|
return "", false, false
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
var byDevice string
|
||||||
|
scanner := bufio.NewScanner(file)
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := strings.TrimSpace(scanner.Text())
|
||||||
|
if line == "" || strings.HasPrefix(line, "#") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
parts := strings.SplitN(line, ",", 3)
|
||||||
|
if len(parts) != 3 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
dev := normalizeHexID(parts[0])
|
||||||
|
rev := normalizeHexID(parts[1])
|
||||||
|
productName := strings.TrimSpace(parts[2])
|
||||||
|
if dev == "" || productName == "" || dev != deviceID {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if byDevice == "" {
|
||||||
|
byDevice = productName
|
||||||
|
}
|
||||||
|
if revisionID != "" && rev == revisionID {
|
||||||
|
return productName, true, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if byDevice != "" {
|
||||||
|
return byDevice, false, true
|
||||||
|
}
|
||||||
|
return "", false, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// getCachedAmdgpuName returns cached hit/miss status for the given device/revision.
|
||||||
|
func getCachedAmdgpuName(deviceID, revisionID string) (name string, found bool, done bool) {
|
||||||
|
// Build the list of cache keys to check. We always look up the exact device+revision key.
|
||||||
|
// When revisionID is set, we also look up deviceID alone, since the cache may store a
|
||||||
|
// device-only fallback when we couldn't resolve the exact revision.
|
||||||
|
keys := []string{cacheKeyForAmdgpu(deviceID, revisionID)}
|
||||||
|
if revisionID != "" {
|
||||||
|
keys = append(keys, deviceID)
|
||||||
|
}
|
||||||
|
|
||||||
|
knownMisses := 0
|
||||||
|
amdgpuNameCache.RLock()
|
||||||
|
defer amdgpuNameCache.RUnlock()
|
||||||
|
for _, key := range keys {
|
||||||
|
if name, ok := amdgpuNameCache.hits[key]; ok {
|
||||||
|
return name, true, true
|
||||||
|
}
|
||||||
|
if _, ok := amdgpuNameCache.misses[key]; ok {
|
||||||
|
knownMisses++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// done=true means "don't bother doing slow lookup": we either found a name (above) or
|
||||||
|
// every key we checked was already a known miss, so we've tried before and failed.
|
||||||
|
return "", false, knownMisses == len(keys)
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeAmdgpuName trims standard suffixes from AMDGPU product names.
|
||||||
|
func normalizeAmdgpuName(name string) string {
|
||||||
|
for _, suffix := range []string{" Graphics", " Series"} {
|
||||||
|
name = strings.TrimSuffix(name, suffix)
|
||||||
|
}
|
||||||
|
return name
|
||||||
|
}
|
||||||
|
|
||||||
|
// cacheAmdgpuName stores a resolved AMDGPU name in the lookup cache.
|
||||||
|
func cacheAmdgpuName(deviceID, revisionID, name string, exact bool) {
|
||||||
|
name = normalizeAmdgpuName(name)
|
||||||
|
amdgpuNameCache.Lock()
|
||||||
|
defer amdgpuNameCache.Unlock()
|
||||||
|
if exact && revisionID != "" {
|
||||||
|
amdgpuNameCache.hits[cacheKeyForAmdgpu(deviceID, revisionID)] = name
|
||||||
|
}
|
||||||
|
amdgpuNameCache.hits[deviceID] = name
|
||||||
|
}
|
||||||
|
|
||||||
|
// cacheMissingAmdgpuName records unresolved device/revision lookups.
|
||||||
|
func cacheMissingAmdgpuName(deviceID, revisionID string) {
|
||||||
|
amdgpuNameCache.Lock()
|
||||||
|
defer amdgpuNameCache.Unlock()
|
||||||
|
amdgpuNameCache.misses[deviceID] = struct{}{}
|
||||||
|
if revisionID != "" {
|
||||||
|
amdgpuNameCache.misses[cacheKeyForAmdgpu(deviceID, revisionID)] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAmdGpuName attempts to get a descriptive GPU name.
|
||||||
|
// First tries product_name (rarely available), then looks up the PCI device ID.
|
||||||
|
// Falls back to showing the raw device ID if not found in the lookup table.
|
||||||
|
func getAmdGpuName(devicePath string) string {
|
||||||
|
// Try product_name first (works for some enterprise GPUs)
|
||||||
|
if prod, err := utils.ReadStringFileLimited(filepath.Join(devicePath, "product_name"), 128); err == nil {
|
||||||
|
return prod
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read PCI device ID and look it up
|
||||||
|
if deviceID, err := utils.ReadStringFileLimited(filepath.Join(devicePath, "device"), 64); err == nil {
|
||||||
|
id := normalizeHexID(deviceID)
|
||||||
|
revision := ""
|
||||||
|
if rev, revErr := utils.ReadStringFileLimited(filepath.Join(devicePath, "revision"), 64); revErr == nil {
|
||||||
|
revision = normalizeHexID(rev)
|
||||||
|
}
|
||||||
|
|
||||||
|
if name, found, done := getCachedAmdgpuName(id, revision); found {
|
||||||
|
return name
|
||||||
|
} else if !done {
|
||||||
|
if name, exact, ok := lookupAmdgpuNameInFile(id, revision, "/usr/share/libdrm/amdgpu.ids"); ok {
|
||||||
|
cacheAmdgpuName(id, revision, name, exact)
|
||||||
|
return normalizeAmdgpuName(name)
|
||||||
|
}
|
||||||
|
cacheMissingAmdgpuName(id, revision)
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf("AMD GPU (%s)", id)
|
||||||
|
}
|
||||||
|
|
||||||
|
return "AMD GPU"
|
||||||
|
}
|
||||||
265
agent/gpu_amd_linux_test.go
Normal file
265
agent/gpu_amd_linux_test.go
Normal file
@@ -0,0 +1,265 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNormalizeHexID(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
in string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"0x1002", "1002"},
|
||||||
|
{"C2", "c2"},
|
||||||
|
{" 15BF ", "15bf"},
|
||||||
|
{"0x15bf", "15bf"},
|
||||||
|
{"", ""},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
subName := tt.in
|
||||||
|
if subName == "" {
|
||||||
|
subName = "empty_string"
|
||||||
|
}
|
||||||
|
t.Run(subName, func(t *testing.T) {
|
||||||
|
got := normalizeHexID(tt.in)
|
||||||
|
assert.Equal(t, tt.want, got)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCacheKeyForAmdgpu(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
deviceID string
|
||||||
|
revisionID string
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{"1114", "c2", "1114:c2"},
|
||||||
|
{"15bf", "", "15bf"},
|
||||||
|
{"1506", "c1", "1506:c1"},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
got := cacheKeyForAmdgpu(tt.deviceID, tt.revisionID)
|
||||||
|
assert.Equal(t, tt.want, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadSysfsFloat(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
|
||||||
|
validPath := filepath.Join(dir, "val")
|
||||||
|
require.NoError(t, os.WriteFile(validPath, []byte(" 42.5 \n"), 0o644))
|
||||||
|
got, err := readSysfsFloat(validPath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, 42.5, got)
|
||||||
|
|
||||||
|
// Integer and scientific
|
||||||
|
sciPath := filepath.Join(dir, "sci")
|
||||||
|
require.NoError(t, os.WriteFile(sciPath, []byte("1e2"), 0o644))
|
||||||
|
got, err = readSysfsFloat(sciPath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.Equal(t, 100.0, got)
|
||||||
|
|
||||||
|
// Missing file
|
||||||
|
_, err = readSysfsFloat(filepath.Join(dir, "missing"))
|
||||||
|
require.Error(t, err)
|
||||||
|
|
||||||
|
// Invalid content
|
||||||
|
badPath := filepath.Join(dir, "bad")
|
||||||
|
require.NoError(t, os.WriteFile(badPath, []byte("not a number"), 0o644))
|
||||||
|
_, err = readSysfsFloat(badPath)
|
||||||
|
require.Error(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIsAmdGpu(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
deviceDir := filepath.Join(dir, "device")
|
||||||
|
require.NoError(t, os.MkdirAll(deviceDir, 0o755))
|
||||||
|
|
||||||
|
// AMD vendor 0x1002 -> true
|
||||||
|
require.NoError(t, os.WriteFile(filepath.Join(deviceDir, "vendor"), []byte("0x1002\n"), 0o644))
|
||||||
|
assert.True(t, isAmdGpu(dir), "vendor 0x1002 should be AMD")
|
||||||
|
|
||||||
|
// Non-AMD vendor -> false
|
||||||
|
require.NoError(t, os.WriteFile(filepath.Join(deviceDir, "vendor"), []byte("0x10de\n"), 0o644))
|
||||||
|
assert.False(t, isAmdGpu(dir), "vendor 0x10de should not be AMD")
|
||||||
|
|
||||||
|
// Missing vendor file -> false
|
||||||
|
require.NoError(t, os.Remove(filepath.Join(deviceDir, "vendor")))
|
||||||
|
assert.False(t, isAmdGpu(dir), "missing vendor file should be false")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAmdgpuNameCacheRoundTrip(t *testing.T) {
|
||||||
|
// Cache a name and retrieve it (unique key to avoid affecting other tests)
|
||||||
|
deviceID, revisionID := "cachedev99", "00"
|
||||||
|
cacheAmdgpuName(deviceID, revisionID, "AMD Test GPU 99 Graphics", true)
|
||||||
|
|
||||||
|
name, found, done := getCachedAmdgpuName(deviceID, revisionID)
|
||||||
|
assert.True(t, found)
|
||||||
|
assert.True(t, done)
|
||||||
|
assert.Equal(t, "AMD Test GPU 99", name)
|
||||||
|
|
||||||
|
// Device-only key also stored
|
||||||
|
name2, found2, _ := getCachedAmdgpuName(deviceID, "")
|
||||||
|
assert.True(t, found2)
|
||||||
|
assert.Equal(t, "AMD Test GPU 99", name2)
|
||||||
|
|
||||||
|
// Cache a miss
|
||||||
|
cacheMissingAmdgpuName("missedev99", "ab")
|
||||||
|
_, found3, done3 := getCachedAmdgpuName("missedev99", "ab")
|
||||||
|
assert.False(t, found3)
|
||||||
|
assert.True(t, done3, "done should be true so caller skips file lookup")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateAmdGpuDataWithFakeSysfs(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
writeGTT bool
|
||||||
|
wantMemoryUsed float64
|
||||||
|
wantMemoryTotal float64
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "sums vram and gtt when gtt is present",
|
||||||
|
writeGTT: true,
|
||||||
|
wantMemoryUsed: utils.BytesToMegabytes(1073741824 + 536870912),
|
||||||
|
wantMemoryTotal: utils.BytesToMegabytes(2147483648 + 4294967296),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "falls back to vram when gtt is missing",
|
||||||
|
writeGTT: false,
|
||||||
|
wantMemoryUsed: utils.BytesToMegabytes(1073741824),
|
||||||
|
wantMemoryTotal: utils.BytesToMegabytes(2147483648),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
cardPath := filepath.Join(dir, "card0")
|
||||||
|
devicePath := filepath.Join(cardPath, "device")
|
||||||
|
hwmonPath := filepath.Join(devicePath, "hwmon", "hwmon0")
|
||||||
|
require.NoError(t, os.MkdirAll(hwmonPath, 0o755))
|
||||||
|
|
||||||
|
write := func(name, content string) {
|
||||||
|
require.NoError(t, os.WriteFile(filepath.Join(devicePath, name), []byte(content), 0o644))
|
||||||
|
}
|
||||||
|
write("vendor", "0x1002")
|
||||||
|
write("device", "0x1506")
|
||||||
|
write("revision", "0xc1")
|
||||||
|
write("gpu_busy_percent", "25")
|
||||||
|
write("mem_info_vram_used", "1073741824")
|
||||||
|
write("mem_info_vram_total", "2147483648")
|
||||||
|
if tt.writeGTT {
|
||||||
|
write("mem_info_gtt_used", "536870912")
|
||||||
|
write("mem_info_gtt_total", "4294967296")
|
||||||
|
}
|
||||||
|
require.NoError(t, os.WriteFile(filepath.Join(hwmonPath, "temp1_input"), []byte("45000"), 0o644))
|
||||||
|
require.NoError(t, os.WriteFile(filepath.Join(hwmonPath, "power1_input"), []byte("20000000"), 0o644))
|
||||||
|
|
||||||
|
// Pre-cache name so getAmdGpuName returns a known value (it uses system amdgpu.ids path)
|
||||||
|
cacheAmdgpuName("1506", "c1", "AMD Radeon 610M Graphics", true)
|
||||||
|
|
||||||
|
gm := &GPUManager{GpuDataMap: make(map[string]*system.GPUData)}
|
||||||
|
ok := gm.updateAmdGpuData(cardPath)
|
||||||
|
require.True(t, ok)
|
||||||
|
|
||||||
|
gpu, ok := gm.GpuDataMap["card0"]
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, "AMD Radeon 610M", gpu.Name)
|
||||||
|
assert.Equal(t, 25.0, gpu.Usage)
|
||||||
|
assert.Equal(t, tt.wantMemoryUsed, gpu.MemoryUsed)
|
||||||
|
assert.Equal(t, tt.wantMemoryTotal, gpu.MemoryTotal)
|
||||||
|
assert.Equal(t, 45.0, gpu.Temperature)
|
||||||
|
assert.Equal(t, 20.0, gpu.Power)
|
||||||
|
assert.Equal(t, 1.0, gpu.Count)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLookupAmdgpuNameInFile(t *testing.T) {
|
||||||
|
idsPath := filepath.Join("test-data", "amdgpu.ids")
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
deviceID string
|
||||||
|
revisionID string
|
||||||
|
wantName string
|
||||||
|
wantExact bool
|
||||||
|
wantFound bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "exact device and revision match",
|
||||||
|
deviceID: "1114",
|
||||||
|
revisionID: "c2",
|
||||||
|
wantName: "AMD Radeon 860M Graphics",
|
||||||
|
wantExact: true,
|
||||||
|
wantFound: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "exact match 15BF revision 01 returns 760M",
|
||||||
|
deviceID: "15bf",
|
||||||
|
revisionID: "01",
|
||||||
|
wantName: "AMD Radeon 760M Graphics",
|
||||||
|
wantExact: true,
|
||||||
|
wantFound: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "exact match 15BF revision 00 returns 780M",
|
||||||
|
deviceID: "15bf",
|
||||||
|
revisionID: "00",
|
||||||
|
wantName: "AMD Radeon 780M Graphics",
|
||||||
|
wantExact: true,
|
||||||
|
wantFound: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "device-only match returns first entry for device",
|
||||||
|
deviceID: "1506",
|
||||||
|
revisionID: "",
|
||||||
|
wantName: "AMD Radeon 610M",
|
||||||
|
wantExact: false,
|
||||||
|
wantFound: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "unknown device not found",
|
||||||
|
deviceID: "dead",
|
||||||
|
revisionID: "00",
|
||||||
|
wantName: "",
|
||||||
|
wantExact: false,
|
||||||
|
wantFound: false,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
gotName, gotExact, gotFound := lookupAmdgpuNameInFile(tt.deviceID, tt.revisionID, idsPath)
|
||||||
|
assert.Equal(t, tt.wantName, gotName, "name")
|
||||||
|
assert.Equal(t, tt.wantExact, gotExact, "exact")
|
||||||
|
assert.Equal(t, tt.wantFound, gotFound, "found")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetAmdGpuNameFromIdsFile(t *testing.T) {
|
||||||
|
// Test that getAmdGpuName resolves a name when we can't inject the ids path.
|
||||||
|
// We only verify behavior when product_name is missing and device/revision
|
||||||
|
// would be read from sysfs; the actual lookup uses /usr/share/libdrm/amdgpu.ids.
|
||||||
|
// So this test focuses on normalizeAmdgpuName and that lookupAmdgpuNameInFile
|
||||||
|
// returns the expected name for our test-data file.
|
||||||
|
idsPath := filepath.Join("test-data", "amdgpu.ids")
|
||||||
|
name, exact, found := lookupAmdgpuNameInFile("1435", "ae", idsPath)
|
||||||
|
require.True(t, found)
|
||||||
|
require.True(t, exact)
|
||||||
|
assert.Equal(t, "AMD Custom GPU 0932", name)
|
||||||
|
assert.Equal(t, "AMD Custom GPU 0932", normalizeAmdgpuName(name))
|
||||||
|
|
||||||
|
// " Graphics" suffix is trimmed by normalizeAmdgpuName
|
||||||
|
name2 := "AMD Radeon 860M Graphics"
|
||||||
|
assert.Equal(t, "AMD Radeon 860M", normalizeAmdgpuName(name2))
|
||||||
|
}
|
||||||
15
agent/gpu_amd_unsupported.go
Normal file
15
agent/gpu_amd_unsupported.go
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
//go:build !linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// hasAmdSysfs reports whether AMD GPU sysfs data is available.
// Always false here: this is the !linux stub (amdgpu sysfs is Linux-only).
func (gm *GPUManager) hasAmdSysfs() bool {
	return false
}
|
||||||
|
|
||||||
|
// collectAmdStats is the !linux stub; AMD stats collection is unsupported
// off Linux, so it always returns errors.ErrUnsupported.
func (gm *GPUManager) collectAmdStats() error {
	return errors.ErrUnsupported
}
|
||||||
257
agent/gpu_darwin.go
Normal file
257
agent/gpu_darwin.go
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
//go:build darwin
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"log/slog"
|
||||||
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// powermetricsSampleIntervalMs is the sampling interval passed to powermetrics (-i).
	powermetricsSampleIntervalMs = 500
	// powermetricsPollInterval is how often we run powermetrics to collect a new sample.
	powermetricsPollInterval = 2 * time.Second
	// macmonIntervalMs is the sampling interval passed to macmon pipe (-i), in milliseconds.
	macmonIntervalMs = 2500
)

// appleGPUID is the single logical GPU id used for the Apple GPU in GpuDataMap.
const appleGPUID = "0"
|
||||||
|
|
||||||
|
// startPowermetricsCollector runs powermetrics --samplers gpu_power in a loop and updates
// GPU usage and power. Requires root (sudo) on macOS. A single logical GPU is reported as id "0".
// The goroutine stops permanently after maxFailureRetries consecutive failures.
func (gm *GPUManager) startPowermetricsCollector() {
	// Ensure single GPU entry for Apple GPU
	if _, ok := gm.GpuDataMap[appleGPUID]; !ok {
		gm.GpuDataMap[appleGPUID] = &system.GPUData{Name: "Apple GPU"}
	}

	go func() {
		// Consecutive-failure counter; reset to 0 after any successful collection.
		failures := 0
		for {
			if err := gm.collectPowermetrics(); err != nil {
				failures++
				if failures > maxFailureRetries {
					slog.Warn("powermetrics GPU collector failed repeatedly, stopping", "err", err)
					break
				}
				slog.Warn("Error collecting macOS GPU data via powermetrics (may require sudo)", "err", err)
				time.Sleep(retryWaitTime)
				continue
			}
			failures = 0
			time.Sleep(powermetricsPollInterval)
		}
	}()
}
|
||||||
|
|
||||||
|
// collectPowermetrics runs powermetrics once and parses GPU usage and power from its output.
|
||||||
|
func (gm *GPUManager) collectPowermetrics() error {
|
||||||
|
interval := strconv.Itoa(powermetricsSampleIntervalMs)
|
||||||
|
cmd := exec.Command(powermetricsCmd, "--samplers", "gpu_power", "-i", interval, "-n", "1")
|
||||||
|
cmd.Stderr = nil
|
||||||
|
out, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !gm.parsePowermetricsData(out) {
|
||||||
|
return errNoValidData
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// parsePowermetricsData parses powermetrics gpu_power output and updates GpuDataMap["0"].
// Usage and Power are *accumulated* (and Count incremented) so the caller can
// average over multiple samples; Usage is derived as 100 - idle residency.
// Returns false when no GPU metric could be parsed from the output.
// Example output:
//
//	**** GPU usage ****
//	GPU HW active frequency: 444 MHz
//	GPU HW active residency: 0.97% (444 MHz: .97% ...
//	GPU idle residency: 99.03%
//	GPU Power: 4 mW
func (gm *GPUManager) parsePowermetricsData(output []byte) bool {
	var idleResidency, powerMW float64
	// Track which metrics were actually present; either alone makes the sample valid.
	var gotIdle, gotPower bool

	scanner := bufio.NewScanner(bytes.NewReader(output))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if strings.HasPrefix(line, "GPU idle residency:") {
			// "GPU idle residency: 99.03%"
			fields := strings.Fields(strings.TrimPrefix(line, "GPU idle residency:"))
			if len(fields) >= 1 {
				pct := strings.TrimSuffix(fields[0], "%")
				if v, err := strconv.ParseFloat(pct, 64); err == nil {
					idleResidency = v
					gotIdle = true
				}
			}
		} else if strings.HasPrefix(line, "GPU Power:") {
			// "GPU Power: 4 mW"
			fields := strings.Fields(strings.TrimPrefix(line, "GPU Power:"))
			if len(fields) >= 1 {
				if v, err := strconv.ParseFloat(fields[0], 64); err == nil {
					powerMW = v
					gotPower = true
				}
			}
		}
	}
	if err := scanner.Err(); err != nil {
		return false
	}
	if !gotIdle && !gotPower {
		return false
	}

	// Mutate shared GPU state under the manager lock.
	gm.Lock()
	defer gm.Unlock()

	if _, ok := gm.GpuDataMap[appleGPUID]; !ok {
		gm.GpuDataMap[appleGPUID] = &system.GPUData{Name: "Apple GPU"}
	}
	gpu := gm.GpuDataMap[appleGPUID]

	if gotIdle {
		// Usage = 100 - idle residency (e.g. 100 - 99.03 = 0.97%)
		gpu.Usage += 100 - idleResidency
	}
	if gotPower {
		// mW -> W
		gpu.Power += powerMW / milliwattsInAWatt
	}
	gpu.Count++
	return true
}
|
||||||
|
|
||||||
|
// startMacmonCollector runs `macmon pipe` in a loop and parses one JSON object per line.
// This collector does not require sudo. A single logical GPU is reported as id "0".
// The goroutine stops permanently after maxFailureRetries consecutive failures.
func (gm *GPUManager) startMacmonCollector() {
	// Pre-create the Apple GPU entry so readers always find a slot.
	if _, ok := gm.GpuDataMap[appleGPUID]; !ok {
		gm.GpuDataMap[appleGPUID] = &system.GPUData{Name: "Apple GPU"}
	}

	go func() {
		// Consecutive-failure counter; reset after any successful run.
		failures := 0
		for {
			if err := gm.collectMacmonPipe(); err != nil {
				failures++
				if failures > maxFailureRetries {
					slog.Warn("macmon GPU collector failed repeatedly, stopping", "err", err)
					break
				}
				slog.Warn("Error collecting macOS GPU data via macmon", "err", err)
				time.Sleep(retryWaitTime)
				continue
			}
			failures = 0
			// `macmon pipe` is long-running; if it returns, wait a bit before restarting.
			time.Sleep(retryWaitTime)
		}
	}()
}
|
||||||
|
|
||||||
|
// macmonTemp holds the temperature section of a macmon JSON sample.
type macmonTemp struct {
	GPUTempAvg float64 `json:"gpu_temp_avg"`
}

// macmonSample is one line of `macmon pipe` JSON output; only the GPU-related
// fields we consume are decoded.
type macmonSample struct {
	GPUPower    float64    `json:"gpu_power"`     // watts (macmon reports fractional values)
	GPURAMPower float64    `json:"gpu_ram_power"` // watts
	GPUUsage    []float64  `json:"gpu_usage"`     // [freq_mhz, usage] where usage is typically 0..1
	Temp        macmonTemp `json:"temp"`
}
|
||||||
|
|
||||||
|
// collectMacmonPipe starts `macmon pipe`, streams its stdout line by line,
// and applies each valid JSON sample via parseMacmonLine. It blocks until the
// child process exits or the stream errors. Returns errNoValidData when the
// run produced no usable sample. The named return lets the deferred cleanup
// surface a non-zero exit status when no earlier error was recorded.
func (gm *GPUManager) collectMacmonPipe() (err error) {
	// macmon is typically Homebrew-installed; resolve it including brew paths.
	macmonPath, err := utils.LookPathHomebrew(macmonCmd)
	if err != nil {
		return err
	}
	cmd := exec.Command(macmonPath, "pipe", "-i", strconv.Itoa(macmonIntervalMs))
	// Avoid blocking if macmon writes to stderr.
	cmd.Stderr = io.Discard
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	if err := cmd.Start(); err != nil {
		return err
	}

	// Ensure we always reap the child to avoid zombies on any return path and
	// propagate a non-zero exit code if no other error was set.
	defer func() {
		_ = stdout.Close()
		if cmd.ProcessState == nil || !cmd.ProcessState.Exited() {
			_ = cmd.Process.Kill()
		}
		if waitErr := cmd.Wait(); err == nil && waitErr != nil {
			err = waitErr
		}
	}()

	scanner := bufio.NewScanner(stdout)
	// True once at least one line parsed into a usable GPU sample.
	var hadSample bool
	for scanner.Scan() {
		line := bytes.TrimSpace(scanner.Bytes())
		if len(line) == 0 {
			continue
		}
		if gm.parseMacmonLine(line) {
			hadSample = true
		}
	}
	if scanErr := scanner.Err(); scanErr != nil {
		return scanErr
	}
	if !hadSample {
		return errNoValidData
	}
	return nil
}
|
||||||
|
|
||||||
|
// parseMacmonLine parses a single macmon JSON line and updates Apple GPU metrics.
// Usage and Power are accumulated (Count incremented) for later averaging;
// Temperature is overwritten with the latest reading. Returns false for
// malformed JSON or lines with no non-zero GPU metric.
func (gm *GPUManager) parseMacmonLine(line []byte) bool {
	var sample macmonSample
	if err := json.Unmarshal(line, &sample); err != nil {
		return false
	}

	usage := 0.0
	if len(sample.GPUUsage) >= 2 {
		// gpu_usage is [freq_mhz, usage]; the second element is the utilization.
		usage = sample.GPUUsage[1]
		// Heuristic: macmon typically reports 0..1; convert to percentage.
		if usage <= 1.0 {
			usage *= 100
		}
	}

	// Consider the line valid if it contains at least one GPU metric.
	if usage == 0 && sample.GPUPower == 0 && sample.Temp.GPUTempAvg == 0 {
		return false
	}

	gm.Lock()
	defer gm.Unlock()

	gpu, ok := gm.GpuDataMap[appleGPUID]
	if !ok {
		gpu = &system.GPUData{Name: "Apple GPU"}
		gm.GpuDataMap[appleGPUID] = gpu
	}
	gpu.Temperature = sample.Temp.GPUTempAvg
	gpu.Usage += usage
	// macmon reports power in watts; include VRAM power if present.
	gpu.Power += sample.GPUPower + sample.GPURAMPower
	gpu.Count++
	return true
}
|
||||||
81
agent/gpu_darwin_test.go
Normal file
81
agent/gpu_darwin_test.go
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
//go:build darwin
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestParsePowermetricsData feeds a representative powermetrics gpu_power
// dump through parsePowermetricsData and checks the derived usage
// (100 - idle residency), the mW->W power conversion, and the sample count.
// NOTE(review): inner spacing of the fixture is irrelevant to the parser
// (it uses TrimSpace/Fields).
func TestParsePowermetricsData(t *testing.T) {
	input := `
Machine model: Mac14,10
OS version: 25D125

*** Sampled system activity (Sat Feb 14 00:42:06 2026 -0500) (503.05ms elapsed) ***

**** GPU usage ****

GPU HW active frequency: 444 MHz
GPU HW active residency: 0.97% (444 MHz: .97% 612 MHz: 0% 808 MHz: 0% 968 MHz: 0% 1110 MHz: 0% 1236 MHz: 0% 1338 MHz: 0% 1398 MHz: 0%)
GPU SW requested state: (P1 : 100% P2 : 0% P3 : 0% P4 : 0% P5 : 0% P6 : 0% P7 : 0% P8 : 0%)
GPU idle residency: 99.03%
GPU Power: 4 mW
`
	gm := &GPUManager{
		GpuDataMap: make(map[string]*system.GPUData),
	}
	valid := gm.parsePowermetricsData([]byte(input))
	require.True(t, valid)

	g0, ok := gm.GpuDataMap["0"]
	require.True(t, ok)
	assert.Equal(t, "Apple GPU", g0.Name)
	// Usage = 100 - 99.03 = 0.97
	assert.InDelta(t, 0.97, g0.Usage, 0.01)
	// 4 mW -> 0.004 W
	assert.InDelta(t, 0.004, g0.Power, 0.0001)
	assert.Equal(t, 1.0, g0.Count)
}
|
||||||
|
|
||||||
|
// TestParsePowermetricsDataPartial verifies that a sample containing only the
// power line (no idle-residency line) is still accepted: either metric alone
// makes the sample valid.
func TestParsePowermetricsDataPartial(t *testing.T) {
	// Only power line (e.g. older macOS or different sampler output)
	input := `
**** GPU usage ****
GPU Power: 120 mW
`
	gm := &GPUManager{
		GpuDataMap: make(map[string]*system.GPUData),
	}
	valid := gm.parsePowermetricsData([]byte(input))
	require.True(t, valid)

	g0, ok := gm.GpuDataMap["0"]
	require.True(t, ok)
	assert.Equal(t, "Apple GPU", g0.Name)
	// 120 mW -> 0.12 W
	assert.InDelta(t, 0.12, g0.Power, 0.001)
	assert.Equal(t, 1.0, g0.Count)
}
|
||||||
|
|
||||||
|
// TestParseMacmonLine parses a captured real macmon JSON line and checks the
// 0..1 -> percent usage conversion, combined GPU+VRAM power, temperature,
// and the sample count.
func TestParseMacmonLine(t *testing.T) {
	input := `{"all_power":0.6468324661254883,"ane_power":0.0,"cpu_power":0.6359732151031494,"ecpu_usage":[2061,0.1726151406764984],"gpu_power":0.010859241709113121,"gpu_ram_power":0.000965250947047025,"gpu_usage":[503,0.013633215799927711],"memory":{"ram_total":17179869184,"ram_usage":12322914304,"swap_total":0,"swap_usage":0},"pcpu_usage":[1248,0.11792058497667313],"ram_power":0.14885640144348145,"sys_power":10.4955415725708,"temp":{"cpu_temp_avg":23.041261672973633,"gpu_temp_avg":29.44516944885254},"timestamp":"2026-02-17T19:34:27.942556+00:00"}`

	gm := &GPUManager{
		GpuDataMap: make(map[string]*system.GPUData),
	}
	valid := gm.parseMacmonLine([]byte(input))
	require.True(t, valid)

	g0, ok := gm.GpuDataMap["0"]
	require.True(t, ok)
	assert.Equal(t, "Apple GPU", g0.Name)
	// macmon reports usage fraction 0..1; expect percent conversion.
	assert.InDelta(t, 1.3633, g0.Usage, 0.05)
	// power includes gpu_power + gpu_ram_power
	assert.InDelta(t, 0.011824, g0.Power, 0.0005)
	assert.InDelta(t, 29.445, g0.Temperature, 0.01)
	assert.Equal(t, 1.0, g0.Count)
}
|
||||||
9
agent/gpu_darwin_unsupported.go
Normal file
9
agent/gpu_darwin_unsupported.go
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
//go:build !darwin
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
// startPowermetricsCollector is a no-op on non-darwin platforms; the real implementation is in gpu_darwin.go.
func (gm *GPUManager) startPowermetricsCollector() {}

// startMacmonCollector is a no-op on non-darwin platforms; the real implementation is in gpu_darwin.go.
func (gm *GPUManager) startMacmonCollector() {}
|
||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -27,10 +28,11 @@ func (gm *GPUManager) updateIntelFromStats(sample *intelGpuStats) bool {
|
|||||||
defer gm.Unlock()
|
defer gm.Unlock()
|
||||||
|
|
||||||
// only one gpu for now - cmd doesn't provide all by default
|
// only one gpu for now - cmd doesn't provide all by default
|
||||||
gpuData, ok := gm.GpuDataMap["0"]
|
id := "i0" // prefix with i to avoid conflicts with nvidia card ids
|
||||||
|
gpuData, ok := gm.GpuDataMap[id]
|
||||||
if !ok {
|
if !ok {
|
||||||
gpuData = &system.GPUData{Name: "GPU", Engines: make(map[string]float64)}
|
gpuData = &system.GPUData{Name: "GPU", Engines: make(map[string]float64)}
|
||||||
gm.GpuDataMap["0"] = gpuData
|
gm.GpuDataMap[id] = gpuData
|
||||||
}
|
}
|
||||||
|
|
||||||
gpuData.Power += sample.PowerGPU
|
gpuData.Power += sample.PowerGPU
|
||||||
@@ -51,7 +53,7 @@ func (gm *GPUManager) updateIntelFromStats(sample *intelGpuStats) bool {
|
|||||||
func (gm *GPUManager) collectIntelStats() (err error) {
|
func (gm *GPUManager) collectIntelStats() (err error) {
|
||||||
// Build command arguments, optionally selecting a device via -d
|
// Build command arguments, optionally selecting a device via -d
|
||||||
args := []string{"-s", intelGpuStatsInterval, "-l"}
|
args := []string{"-s", intelGpuStatsInterval, "-l"}
|
||||||
if dev, ok := GetEnv("INTEL_GPU_DEVICE"); ok && dev != "" {
|
if dev, ok := utils.GetEnv("INTEL_GPU_DEVICE"); ok && dev != "" {
|
||||||
args = append(args, "-d", dev)
|
args = append(args, "-d", dev)
|
||||||
}
|
}
|
||||||
cmd := exec.Command(intelGpuStatsCmd, args...)
|
cmd := exec.Command(intelGpuStatsCmd, args...)
|
||||||
|
|||||||
224
agent/gpu_nvml.go
Normal file
224
agent/gpu_nvml.go
Normal file
@@ -0,0 +1,224 @@
|
|||||||
|
//go:build amd64 && (windows || (linux && glibc))
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/ebitengine/purego"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NVML constants and types
const (
	// nvmlSuccess is the NVML_SUCCESS return code.
	nvmlSuccess int = 0
)

// nvmlDevice is an opaque NVML device handle.
type nvmlDevice uintptr

// nvmlReturn is an NVML status code (0 == success).
type nvmlReturn int

// nvmlMemoryV1 mirrors nvmlMemory_t (bytes).
type nvmlMemoryV1 struct {
	Total uint64
	Free  uint64
	Used  uint64
}

// nvmlMemoryV2 mirrors nvmlMemory_v2_t; Version must be set before the call.
type nvmlMemoryV2 struct {
	Version  uint32
	Total    uint64
	Reserved uint64
	Free     uint64
	Used     uint64
}

// nvmlUtilization mirrors nvmlUtilization_t (percentages).
type nvmlUtilization struct {
	Gpu    uint32
	Memory uint32
}

// nvmlPciInfo mirrors the nvmlPciInfo_t layout consumed here; BusId is the
// NUL-terminated PCI BDF string. NOTE(review): layout assumed to match the
// NVML version loaded at runtime — verify against the installed driver headers.
type nvmlPciInfo struct {
	BusId          [16]byte
	Domain         uint32
	Bus            uint32
	Device         uint32
	PciDeviceId    uint32
	PciSubSystemId uint32
}

// NVML function signatures, bound at runtime via purego in init().
var (
	nvmlInit                      func() nvmlReturn
	nvmlShutdown                  func() nvmlReturn
	nvmlDeviceGetCount            func(count *uint32) nvmlReturn
	nvmlDeviceGetHandleByIndex    func(index uint32, device *nvmlDevice) nvmlReturn
	nvmlDeviceGetName             func(device nvmlDevice, name *byte, length uint32) nvmlReturn
	nvmlDeviceGetMemoryInfo       func(device nvmlDevice, memory uintptr) nvmlReturn
	nvmlDeviceGetUtilizationRates func(device nvmlDevice, utilization *nvmlUtilization) nvmlReturn
	nvmlDeviceGetTemperature      func(device nvmlDevice, sensorType int, temp *uint32) nvmlReturn
	nvmlDeviceGetPowerUsage       func(device nvmlDevice, power *uint32) nvmlReturn
	nvmlDeviceGetPciInfo          func(device nvmlDevice, pci *nvmlPciInfo) nvmlReturn
	nvmlErrorString               func(result nvmlReturn) string
)

// nvmlCollector gathers NVIDIA GPU stats through the NVML shared library.
type nvmlCollector struct {
	gm      *GPUManager   // owning manager whose GpuDataMap is updated
	lib     uintptr       // loaded NVML library handle
	devices []nvmlDevice  // handles discovered during init
	bdfs    []string      // lowercase PCI BDF per device ("" if unknown)
	isV2    bool          // true when nvmlDeviceGetMemoryInfo_v2 is available
}
|
||||||
|
|
||||||
|
// init loads the NVML shared library, binds the functions used by collect,
// initializes NVML, and enumerates device handles plus their PCI BDFs (used
// later for the runtime power-state check). Returns an error if the library
// cannot be loaded or NVML fails to initialize/count devices.
func (c *nvmlCollector) init() error {
	slog.Debug("NVML: Initializing")
	libPath := getNVMLPath()

	lib, err := openLibrary(libPath)
	if err != nil {
		return fmt.Errorf("failed to load %s: %w", libPath, err)
	}
	c.lib = lib

	purego.RegisterLibFunc(&nvmlInit, lib, "nvmlInit")
	purego.RegisterLibFunc(&nvmlShutdown, lib, "nvmlShutdown")
	purego.RegisterLibFunc(&nvmlDeviceGetCount, lib, "nvmlDeviceGetCount")
	purego.RegisterLibFunc(&nvmlDeviceGetHandleByIndex, lib, "nvmlDeviceGetHandleByIndex")
	purego.RegisterLibFunc(&nvmlDeviceGetName, lib, "nvmlDeviceGetName")
	// Try to get v2 memory info, fallback to v1 if not available
	if hasSymbol(lib, "nvmlDeviceGetMemoryInfo_v2") {
		c.isV2 = true
		purego.RegisterLibFunc(&nvmlDeviceGetMemoryInfo, lib, "nvmlDeviceGetMemoryInfo_v2")
	} else {
		purego.RegisterLibFunc(&nvmlDeviceGetMemoryInfo, lib, "nvmlDeviceGetMemoryInfo")
	}
	purego.RegisterLibFunc(&nvmlDeviceGetUtilizationRates, lib, "nvmlDeviceGetUtilizationRates")
	purego.RegisterLibFunc(&nvmlDeviceGetTemperature, lib, "nvmlDeviceGetTemperature")
	purego.RegisterLibFunc(&nvmlDeviceGetPowerUsage, lib, "nvmlDeviceGetPowerUsage")
	purego.RegisterLibFunc(&nvmlDeviceGetPciInfo, lib, "nvmlDeviceGetPciInfo")
	purego.RegisterLibFunc(&nvmlErrorString, lib, "nvmlErrorString")

	if ret := nvmlInit(); ret != nvmlReturn(nvmlSuccess) {
		return fmt.Errorf("nvmlInit failed: %v", ret)
	}

	var count uint32
	if ret := nvmlDeviceGetCount(&count); ret != nvmlReturn(nvmlSuccess) {
		return fmt.Errorf("nvmlDeviceGetCount failed: %v", ret)
	}

	for i := uint32(0); i < count; i++ {
		var device nvmlDevice
		if ret := nvmlDeviceGetHandleByIndex(i, &device); ret == nvmlReturn(nvmlSuccess) {
			c.devices = append(c.devices, device)
			// Get BDF for power state check
			var pci nvmlPciInfo
			if ret := nvmlDeviceGetPciInfo(device, &pci); ret == nvmlReturn(nvmlSuccess) {
				// BusId is NUL-terminated; trim at the first NUL if present.
				busID := string(pci.BusId[:])
				if idx := strings.Index(busID, "\x00"); idx != -1 {
					busID = busID[:idx]
				}
				c.bdfs = append(c.bdfs, strings.ToLower(busID))
			} else {
				// Keep bdfs index-aligned with devices even when PCI info fails.
				c.bdfs = append(c.bdfs, "")
			}
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
func (c *nvmlCollector) start() {
|
||||||
|
defer nvmlShutdown()
|
||||||
|
ticker := time.Tick(3 * time.Second)
|
||||||
|
|
||||||
|
for range ticker {
|
||||||
|
c.collect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *nvmlCollector) collect() {
|
||||||
|
c.gm.Lock()
|
||||||
|
defer c.gm.Unlock()
|
||||||
|
|
||||||
|
for i, device := range c.devices {
|
||||||
|
id := fmt.Sprintf("%d", i)
|
||||||
|
bdf := c.bdfs[i]
|
||||||
|
|
||||||
|
// Update GPUDataMap
|
||||||
|
if _, ok := c.gm.GpuDataMap[id]; !ok {
|
||||||
|
var nameBuf [64]byte
|
||||||
|
if ret := nvmlDeviceGetName(device, &nameBuf[0], 64); ret != nvmlReturn(nvmlSuccess) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name := string(nameBuf[:strings.Index(string(nameBuf[:]), "\x00")])
|
||||||
|
name = strings.TrimPrefix(name, "NVIDIA ")
|
||||||
|
c.gm.GpuDataMap[id] = &system.GPUData{Name: strings.TrimSuffix(name, " Laptop GPU")}
|
||||||
|
}
|
||||||
|
gpu := c.gm.GpuDataMap[id]
|
||||||
|
|
||||||
|
if bdf != "" && !c.isGPUActive(bdf) {
|
||||||
|
slog.Debug("NVML: GPU is suspended, skipping", "bdf", bdf)
|
||||||
|
gpu.Temperature = 0
|
||||||
|
gpu.MemoryUsed = 0
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Utilization
|
||||||
|
var utilization nvmlUtilization
|
||||||
|
if ret := nvmlDeviceGetUtilizationRates(device, &utilization); ret != nvmlReturn(nvmlSuccess) {
|
||||||
|
slog.Debug("NVML: Utilization failed (GPU likely suspended)", "bdf", bdf, "ret", ret)
|
||||||
|
gpu.Temperature = 0
|
||||||
|
gpu.MemoryUsed = 0
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
slog.Debug("NVML: Collecting data for GPU", "bdf", bdf)
|
||||||
|
|
||||||
|
// Temperature
|
||||||
|
var temp uint32
|
||||||
|
nvmlDeviceGetTemperature(device, 0, &temp) // 0 is NVML_TEMPERATURE_GPU
|
||||||
|
|
||||||
|
// Memory: only poll if GPU is active to avoid leaving D3cold state (#1522)
|
||||||
|
if utilization.Gpu > 0 {
|
||||||
|
var usedMem, totalMem uint64
|
||||||
|
if c.isV2 {
|
||||||
|
var memory nvmlMemoryV2
|
||||||
|
memory.Version = 0x02000028 // (2 << 24) | 40 bytes
|
||||||
|
if ret := nvmlDeviceGetMemoryInfo(device, uintptr(unsafe.Pointer(&memory))); ret != nvmlReturn(nvmlSuccess) {
|
||||||
|
slog.Debug("NVML: MemoryInfo_v2 failed", "bdf", bdf, "ret", ret)
|
||||||
|
} else {
|
||||||
|
usedMem = memory.Used
|
||||||
|
totalMem = memory.Total
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
var memory nvmlMemoryV1
|
||||||
|
if ret := nvmlDeviceGetMemoryInfo(device, uintptr(unsafe.Pointer(&memory))); ret != nvmlReturn(nvmlSuccess) {
|
||||||
|
slog.Debug("NVML: MemoryInfo failed", "bdf", bdf, "ret", ret)
|
||||||
|
} else {
|
||||||
|
usedMem = memory.Used
|
||||||
|
totalMem = memory.Total
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if totalMem > 0 {
|
||||||
|
gpu.MemoryUsed = float64(usedMem) / 1024 / 1024 / mebibytesInAMegabyte
|
||||||
|
gpu.MemoryTotal = float64(totalMem) / 1024 / 1024 / mebibytesInAMegabyte
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
slog.Debug("NVML: Skipping memory info (utilization=0)", "bdf", bdf)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Power
|
||||||
|
var power uint32
|
||||||
|
nvmlDeviceGetPowerUsage(device, &power)
|
||||||
|
|
||||||
|
gpu.Temperature = float64(temp)
|
||||||
|
gpu.Usage += float64(utilization.Gpu)
|
||||||
|
gpu.Power += float64(power) / 1000.0
|
||||||
|
gpu.Count++
|
||||||
|
slog.Debug("NVML: Collected data", "gpu", gpu)
|
||||||
|
}
|
||||||
|
}
|
||||||
57
agent/gpu_nvml_linux.go
Normal file
57
agent/gpu_nvml_linux.go
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
//go:build glibc && linux && amd64
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log/slog"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/ebitengine/purego"
|
||||||
|
)
|
||||||
|
|
||||||
|
// openLibrary loads a shared library by name using dlopen semantics.
func openLibrary(name string) (uintptr, error) {
	return purego.Dlopen(name, purego.RTLD_NOW|purego.RTLD_GLOBAL)
}

// getNVMLPath returns the NVML shared-object name on Linux; the dynamic
// linker resolves it via the standard search path.
func getNVMLPath() string {
	return "libnvidia-ml.so.1"
}

// hasSymbol reports whether the loaded library exports the given symbol.
func hasSymbol(lib uintptr, symbol string) bool {
	_, err := purego.Dlsym(lib, symbol)
	return err == nil
}
|
||||||
|
|
||||||
|
// isGPUActive reports whether the GPU at the given PCI BDF is powered up,
// using sysfs runtime PM state and (when available) the drm power_state.
// It errs on the side of true: unreadable sysfs entries count as active so
// collection is never blocked by missing files.
func (c *nvmlCollector) isGPUActive(bdf string) bool {
	// runtime_status
	statusPath := filepath.Join("/sys/bus/pci/devices", bdf, "power/runtime_status")
	status, err := os.ReadFile(statusPath)
	if err != nil {
		slog.Debug("NVML: Can't read runtime_status", "bdf", bdf, "err", err)
		return true // Assume active if we can't read status
	}
	statusStr := strings.TrimSpace(string(status))
	if statusStr != "active" && statusStr != "resuming" {
		slog.Debug("NVML: GPU not active", "bdf", bdf, "status", statusStr)
		return false
	}

	// power_state (D0 check)
	// Find any drm card device power_state
	pstatePathPattern := filepath.Join("/sys/bus/pci/devices", bdf, "drm/card*/device/power_state")
	matches, _ := filepath.Glob(pstatePathPattern)
	if len(matches) > 0 {
		pstate, err := os.ReadFile(matches[0])
		if err == nil {
			pstateStr := strings.TrimSpace(string(pstate))
			if pstateStr != "D0" {
				slog.Debug("NVML: GPU not in D0 state", "bdf", bdf, "pstate", pstateStr)
				return false
			}
		}
	}

	return true
}
|
||||||
15
agent/gpu_nvml_unsupported.go
Normal file
15
agent/gpu_nvml_unsupported.go
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
//go:build (!linux && !windows) || !amd64 || (linux && !glibc)
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// nvmlCollector is the stub for platforms without NVML support
// (non-amd64, or linux without glibc).
type nvmlCollector struct {
	gm *GPUManager
}

// init always fails on unsupported platforms so callers fall back to other collectors.
func (c *nvmlCollector) init() error {
	return fmt.Errorf("nvml not supported on this platform")
}

// start is a no-op on unsupported platforms.
func (c *nvmlCollector) start() {}
|
||||||
25
agent/gpu_nvml_windows.go
Normal file
25
agent/gpu_nvml_windows.go
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
//go:build windows && amd64
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
// openLibrary loads a DLL by name via LoadLibrary.
func openLibrary(name string) (uintptr, error) {
	handle, err := windows.LoadLibrary(name)
	return uintptr(handle), err
}

// getNVMLPath returns the NVML DLL name on Windows; resolved via the
// standard DLL search order.
func getNVMLPath() string {
	return "nvml.dll"
}

// hasSymbol reports whether the loaded DLL exports the given symbol.
func hasSymbol(lib uintptr, symbol string) bool {
	_, err := windows.GetProcAddress(windows.Handle(lib), symbol)
	return err == nil
}

// isGPUActive always reports true on Windows: the Linux sysfs runtime-PM
// check has no equivalent here.
func (c *nvmlCollector) isGPUActive(bdf string) bool {
	return true
}
|
||||||
160
agent/gpu_nvtop.go
Normal file
160
agent/gpu_nvtop.go
Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"log/slog"
|
||||||
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
)
|
||||||
|
|
||||||
|
// nvtopSnapshot is one GPU entry from nvtop's JSON snapshot output.
// Numeric fields arrive as strings with unit suffixes (e.g. "62C", "43%");
// pointers distinguish "absent" from "present but zero".
type nvtopSnapshot struct {
	DeviceName string  `json:"device_name"`
	Temp       *string `json:"temp"`
	PowerDraw  *string `json:"power_draw"`
	GpuUtil    *string `json:"gpu_util"`
	MemTotal   *string `json:"mem_total"`
	MemUsed    *string `json:"mem_used"`
}
|
||||||
|
|
||||||
|
// parseNvtopNumber parses nvtop numeric strings with unit suffixes
// (C for Celsius, W for watts, % for percent), e.g. "62C", "8.5 W", "43%".
// Unparseable input yields 0.
func parseNvtopNumber(raw string) float64 {
	cleaned := strings.TrimSpace(raw)
	cleaned = strings.TrimSuffix(cleaned, "C")
	cleaned = strings.TrimSuffix(cleaned, "W")
	cleaned = strings.TrimSuffix(cleaned, "%")
	// Trim again so values with a space before the unit ("8.5 W") parse too;
	// previously the leftover space made ParseFloat fail and return 0.
	cleaned = strings.TrimSpace(cleaned)
	val, _ := strconv.ParseFloat(cleaned, 64)
	return val
}
|
||||||
|
|
||||||
|
// parseNvtopData parses a single nvtop JSON snapshot payload.
|
||||||
|
func (gm *GPUManager) parseNvtopData(output []byte) bool {
|
||||||
|
var snapshots []nvtopSnapshot
|
||||||
|
if err := json.Unmarshal(output, &snapshots); err != nil || len(snapshots) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return gm.updateNvtopSnapshots(snapshots)
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateNvtopSnapshots applies one decoded nvtop snapshot batch to GPU accumulators.
// IDs are "n<index>" (the "n" prefix avoids clashing with other collectors' ids).
// Usage/Power are summed and Count incremented (for later averaging); memory and
// temperature are overwritten. Returns true if at least one entry was applied.
func (gm *GPUManager) updateNvtopSnapshots(snapshots []nvtopSnapshot) bool {
	gm.Lock()
	defer gm.Unlock()

	valid := false
	// Tracks slots already claimed this batch so two snapshots never share one.
	usedIDs := make(map[string]struct{}, len(snapshots))
	for i, sample := range snapshots {
		if sample.DeviceName == "" {
			continue
		}
		indexID := "n" + strconv.Itoa(i)
		id := indexID

		// nvtop ordering can change, so prefer reusing an existing slot with matching device name.
		if existingByIndex, ok := gm.GpuDataMap[indexID]; ok && existingByIndex.Name != "" && existingByIndex.Name != sample.DeviceName {
			for existingID, gpu := range gm.GpuDataMap {
				if !strings.HasPrefix(existingID, "n") {
					continue
				}
				if _, taken := usedIDs[existingID]; taken {
					continue
				}
				if gpu.Name == sample.DeviceName {
					id = existingID
					break
				}
			}
		}

		if _, ok := gm.GpuDataMap[id]; !ok {
			gm.GpuDataMap[id] = &system.GPUData{Name: sample.DeviceName}
		}
		gpu := gm.GpuDataMap[id]
		gpu.Name = sample.DeviceName

		// Nil pointer fields mean the metric was absent from this snapshot.
		if sample.Temp != nil {
			gpu.Temperature = parseNvtopNumber(*sample.Temp)
		}
		if sample.MemUsed != nil {
			gpu.MemoryUsed = utils.BytesToMegabytes(parseNvtopNumber(*sample.MemUsed))
		}
		if sample.MemTotal != nil {
			gpu.MemoryTotal = utils.BytesToMegabytes(parseNvtopNumber(*sample.MemTotal))
		}
		if sample.GpuUtil != nil {
			gpu.Usage += parseNvtopNumber(*sample.GpuUtil)
		}
		if sample.PowerDraw != nil {
			gpu.Power += parseNvtopNumber(*sample.PowerDraw)
		}
		gpu.Count++
		usedIDs[id] = struct{}{}
		valid = true
	}
	return valid
}
|
||||||
|
|
||||||
|
// collectNvtopStats runs nvtop loop mode and continuously decodes JSON snapshots.
|
||||||
|
func (gm *GPUManager) collectNvtopStats(interval string) error {
|
||||||
|
cmd := exec.Command(nvtopCmd, "-lP", "-d", interval)
|
||||||
|
stdout, err := cmd.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
_ = stdout.Close()
|
||||||
|
if cmd.ProcessState == nil || !cmd.ProcessState.Exited() {
|
||||||
|
_ = cmd.Process.Kill()
|
||||||
|
}
|
||||||
|
_ = cmd.Wait()
|
||||||
|
}()
|
||||||
|
|
||||||
|
decoder := json.NewDecoder(stdout)
|
||||||
|
foundValid := false
|
||||||
|
for {
|
||||||
|
var snapshots []nvtopSnapshot
|
||||||
|
if err := decoder.Decode(&snapshots); err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
if foundValid {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errNoValidData
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if gm.updateNvtopSnapshots(snapshots) {
|
||||||
|
foundValid = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// startNvtopCollector starts nvtop collection with retry or fallback callback handling.
|
||||||
|
func (gm *GPUManager) startNvtopCollector(interval string, onFailure func()) {
|
||||||
|
go func() {
|
||||||
|
failures := 0
|
||||||
|
for {
|
||||||
|
if err := gm.collectNvtopStats(interval); err != nil {
|
||||||
|
if onFailure != nil {
|
||||||
|
slog.Warn("Error collecting GPU data via nvtop", "err", err)
|
||||||
|
onFailure()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
failures++
|
||||||
|
if failures > maxFailureRetries {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
slog.Warn("Error collecting GPU data via nvtop", "err", err)
|
||||||
|
time.Sleep(retryWaitTime)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -11,6 +10,7 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -250,6 +250,100 @@ func TestParseAmdData(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParseNvtopData(t *testing.T) {
|
||||||
|
input, err := os.ReadFile("test-data/nvtop.json")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
gm := &GPUManager{
|
||||||
|
GpuDataMap: make(map[string]*system.GPUData),
|
||||||
|
}
|
||||||
|
valid := gm.parseNvtopData(input)
|
||||||
|
require.True(t, valid)
|
||||||
|
|
||||||
|
g0, ok := gm.GpuDataMap["n0"]
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, "NVIDIA GeForce RTX 3050 Ti Laptop GPU", g0.Name)
|
||||||
|
assert.Equal(t, 48.0, g0.Temperature)
|
||||||
|
assert.Equal(t, 5.0, g0.Usage)
|
||||||
|
assert.Equal(t, 13.0, g0.Power)
|
||||||
|
assert.Equal(t, utils.BytesToMegabytes(349372416), g0.MemoryUsed)
|
||||||
|
assert.Equal(t, utils.BytesToMegabytes(4294967296), g0.MemoryTotal)
|
||||||
|
assert.Equal(t, 1.0, g0.Count)
|
||||||
|
|
||||||
|
g1, ok := gm.GpuDataMap["n1"]
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, "AMD Radeon 680M", g1.Name)
|
||||||
|
assert.Equal(t, 48.0, g1.Temperature)
|
||||||
|
assert.Equal(t, 12.0, g1.Usage)
|
||||||
|
assert.Equal(t, 9.0, g1.Power)
|
||||||
|
assert.Equal(t, utils.BytesToMegabytes(1213784064), g1.MemoryUsed)
|
||||||
|
assert.Equal(t, utils.BytesToMegabytes(16929173504), g1.MemoryTotal)
|
||||||
|
assert.Equal(t, 1.0, g1.Count)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateNvtopSnapshotsKeepsDeviceAssociationWhenOrderChanges(t *testing.T) {
|
||||||
|
strPtr := func(s string) *string { return &s }
|
||||||
|
|
||||||
|
gm := &GPUManager{
|
||||||
|
GpuDataMap: make(map[string]*system.GPUData),
|
||||||
|
}
|
||||||
|
|
||||||
|
firstBatch := []nvtopSnapshot{
|
||||||
|
{
|
||||||
|
DeviceName: "NVIDIA GeForce RTX 3050 Ti Laptop GPU",
|
||||||
|
GpuUtil: strPtr("20%"),
|
||||||
|
PowerDraw: strPtr("10W"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
DeviceName: "AMD Radeon 680M",
|
||||||
|
GpuUtil: strPtr("30%"),
|
||||||
|
PowerDraw: strPtr("20W"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
secondBatchSwapped := []nvtopSnapshot{
|
||||||
|
{
|
||||||
|
DeviceName: "AMD Radeon 680M",
|
||||||
|
GpuUtil: strPtr("40%"),
|
||||||
|
PowerDraw: strPtr("25W"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
DeviceName: "NVIDIA GeForce RTX 3050 Ti Laptop GPU",
|
||||||
|
GpuUtil: strPtr("50%"),
|
||||||
|
PowerDraw: strPtr("15W"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
require.True(t, gm.updateNvtopSnapshots(firstBatch))
|
||||||
|
require.True(t, gm.updateNvtopSnapshots(secondBatchSwapped))
|
||||||
|
|
||||||
|
nvidia := gm.GpuDataMap["n0"]
|
||||||
|
require.NotNil(t, nvidia)
|
||||||
|
assert.Equal(t, "NVIDIA GeForce RTX 3050 Ti Laptop GPU", nvidia.Name)
|
||||||
|
assert.Equal(t, 70.0, nvidia.Usage)
|
||||||
|
assert.Equal(t, 25.0, nvidia.Power)
|
||||||
|
assert.Equal(t, 2.0, nvidia.Count)
|
||||||
|
|
||||||
|
amd := gm.GpuDataMap["n1"]
|
||||||
|
require.NotNil(t, amd)
|
||||||
|
assert.Equal(t, "AMD Radeon 680M", amd.Name)
|
||||||
|
assert.Equal(t, 70.0, amd.Usage)
|
||||||
|
assert.Equal(t, 45.0, amd.Power)
|
||||||
|
assert.Equal(t, 2.0, amd.Count)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseCollectorPriority(t *testing.T) {
|
||||||
|
got := parseCollectorPriority(" nvml, nvidia-smi, intel_gpu_top, amd_sysfs, nvtop, rocm-smi, bad ")
|
||||||
|
want := []collectorSource{
|
||||||
|
collectorSourceNVML,
|
||||||
|
collectorSourceNvidiaSMI,
|
||||||
|
collectorSourceIntelGpuTop,
|
||||||
|
collectorSourceAmdSysfs,
|
||||||
|
collectorSourceNVTop,
|
||||||
|
collectorSourceRocmSMI,
|
||||||
|
}
|
||||||
|
assert.Equal(t, want, got)
|
||||||
|
}
|
||||||
|
|
||||||
func TestParseJetsonData(t *testing.T) {
|
func TestParseJetsonData(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -307,6 +401,19 @@ func TestParseJetsonData(t *testing.T) {
|
|||||||
Count: 1,
|
Count: 1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "orin-style output with GPU@ temp and VDD_SYS_GPU power",
|
||||||
|
input: "RAM 3276/7859MB (lfb 5x4MB) SWAP 1626/12122MB (cached 181MB) CPU [44%@1421,49%@2031,67%@2034,17%@1420,25%@1419,8%@1420] EMC_FREQ 1%@1866 GR3D_FREQ 0%@114 APE 150 MTS fg 1% bg 1% PLL@42.5C MCPU@42.5C PMIC@50C Tboard@38C GPU@39.5C BCPU@42.5C thermal@41.3C Tdiode@39.25C VDD_SYS_GPU 182/182 VDD_SYS_SOC 730/730 VDD_4V0_WIFI 0/0 VDD_IN 5297/5297 VDD_SYS_CPU 1917/1917 VDD_SYS_DDR 1241/1241",
|
||||||
|
wantMetrics: &system.GPUData{
|
||||||
|
Name: "GPU",
|
||||||
|
MemoryUsed: 3276.0,
|
||||||
|
MemoryTotal: 7859.0,
|
||||||
|
Usage: 0.0,
|
||||||
|
Power: 0.182, // 182mW -> 0.182W
|
||||||
|
Temperature: 39.5,
|
||||||
|
Count: 1,
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
@@ -825,7 +932,7 @@ func TestInitializeSnapshots(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestCalculateGPUAverage(t *testing.T) {
|
func TestCalculateGPUAverage(t *testing.T) {
|
||||||
t.Run("returns zero value when deltaCount is zero", func(t *testing.T) {
|
t.Run("returns cached average when deltaCount is zero", func(t *testing.T) {
|
||||||
gm := &GPUManager{
|
gm := &GPUManager{
|
||||||
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||||
5000: {
|
5000: {
|
||||||
@@ -841,6 +948,7 @@ func TestCalculateGPUAverage(t *testing.T) {
|
|||||||
Count: 10.0, // Same as snapshot, so delta = 0
|
Count: 10.0, // Same as snapshot, so delta = 0
|
||||||
Usage: 100.0,
|
Usage: 100.0,
|
||||||
Power: 200.0,
|
Power: 200.0,
|
||||||
|
Temperature: 50.0, // Non-zero to avoid "suspended" check
|
||||||
}
|
}
|
||||||
|
|
||||||
result := gm.calculateGPUAverage("0", gpu, 5000)
|
result := gm.calculateGPUAverage("0", gpu, 5000)
|
||||||
@@ -849,6 +957,31 @@ func TestCalculateGPUAverage(t *testing.T) {
|
|||||||
assert.Equal(t, 100.0, result.Power, "Should return cached average")
|
assert.Equal(t, 100.0, result.Power, "Should return cached average")
|
||||||
})
|
})
|
||||||
|
|
||||||
|
t.Run("returns zero value when GPU is suspended", func(t *testing.T) {
|
||||||
|
gm := &GPUManager{
|
||||||
|
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||||
|
5000: {
|
||||||
|
"0": {count: 10, usage: 100, power: 200},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
lastAvgData: map[string]system.GPUData{
|
||||||
|
"0": {Usage: 50.0, Power: 100.0},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
gpu := &system.GPUData{
|
||||||
|
Name: "Test GPU",
|
||||||
|
Count: 10.0,
|
||||||
|
Temperature: 0,
|
||||||
|
MemoryUsed: 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
result := gm.calculateGPUAverage("0", gpu, 5000)
|
||||||
|
|
||||||
|
assert.Equal(t, 0.0, result.Usage, "Should return zero usage")
|
||||||
|
assert.Equal(t, 0.0, result.Power, "Should return zero power")
|
||||||
|
})
|
||||||
|
|
||||||
t.Run("calculates average for standard GPU", func(t *testing.T) {
|
t.Run("calculates average for standard GPU", func(t *testing.T) {
|
||||||
gm := &GPUManager{
|
gm := &GPUManager{
|
||||||
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
lastSnapshots: map[uint16]map[string]*gpuSnapshot{
|
||||||
@@ -948,36 +1081,33 @@ func TestCalculateGPUAverage(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestDetectGPUs(t *testing.T) {
|
func TestGPUCapabilitiesAndLegacyPriority(t *testing.T) {
|
||||||
// Save original PATH
|
// Save original PATH
|
||||||
origPath := os.Getenv("PATH")
|
hasAmdSysfs := (&GPUManager{}).hasAmdSysfs()
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
// Set up temp dir with the commands
|
|
||||||
tempDir := t.TempDir()
|
|
||||||
os.Setenv("PATH", tempDir)
|
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
setupCommands func() error
|
setupCommands func(string) error
|
||||||
wantNvidiaSmi bool
|
wantNvidiaSmi bool
|
||||||
wantRocmSmi bool
|
wantRocmSmi bool
|
||||||
wantTegrastats bool
|
wantTegrastats bool
|
||||||
|
wantNvtop bool
|
||||||
wantErr bool
|
wantErr bool
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
name: "nvidia-smi not available",
|
name: "nvidia-smi not available",
|
||||||
setupCommands: func() error {
|
setupCommands: func(_ string) error {
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
wantNvidiaSmi: false,
|
wantNvidiaSmi: false,
|
||||||
wantRocmSmi: false,
|
wantRocmSmi: false,
|
||||||
wantTegrastats: false,
|
wantTegrastats: false,
|
||||||
|
wantNvtop: false,
|
||||||
wantErr: true,
|
wantErr: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "nvidia-smi available",
|
name: "nvidia-smi available",
|
||||||
setupCommands: func() error {
|
setupCommands: func(tempDir string) error {
|
||||||
path := filepath.Join(tempDir, "nvidia-smi")
|
path := filepath.Join(tempDir, "nvidia-smi")
|
||||||
script := `#!/bin/sh
|
script := `#!/bin/sh
|
||||||
echo "test"`
|
echo "test"`
|
||||||
@@ -989,29 +1119,14 @@ echo "test"`
|
|||||||
wantNvidiaSmi: true,
|
wantNvidiaSmi: true,
|
||||||
wantTegrastats: false,
|
wantTegrastats: false,
|
||||||
wantRocmSmi: false,
|
wantRocmSmi: false,
|
||||||
|
wantNvtop: false,
|
||||||
wantErr: false,
|
wantErr: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "rocm-smi available",
|
name: "rocm-smi available",
|
||||||
setupCommands: func() error {
|
setupCommands: func(tempDir string) error {
|
||||||
path := filepath.Join(tempDir, "rocm-smi")
|
path := filepath.Join(tempDir, "rocm-smi")
|
||||||
script := `#!/bin/sh
|
script := `#!/bin/sh
|
||||||
echo "test"`
|
|
||||||
if err := os.WriteFile(path, []byte(script), 0755); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
wantNvidiaSmi: true,
|
|
||||||
wantRocmSmi: true,
|
|
||||||
wantTegrastats: false,
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "tegrastats available",
|
|
||||||
setupCommands: func() error {
|
|
||||||
path := filepath.Join(tempDir, "tegrastats")
|
|
||||||
script := `#!/bin/sh
|
|
||||||
echo "test"`
|
echo "test"`
|
||||||
if err := os.WriteFile(path, []byte(script), 0755); err != nil {
|
if err := os.WriteFile(path, []byte(script), 0755); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -1020,13 +1135,48 @@ echo "test"`
|
|||||||
},
|
},
|
||||||
wantNvidiaSmi: false,
|
wantNvidiaSmi: false,
|
||||||
wantRocmSmi: true,
|
wantRocmSmi: true,
|
||||||
|
wantTegrastats: false,
|
||||||
|
wantNvtop: false,
|
||||||
|
wantErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "tegrastats available",
|
||||||
|
setupCommands: func(tempDir string) error {
|
||||||
|
path := filepath.Join(tempDir, "tegrastats")
|
||||||
|
script := `#!/bin/sh
|
||||||
|
echo "test"`
|
||||||
|
if err := os.WriteFile(path, []byte(script), 0755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
wantNvidiaSmi: false,
|
||||||
|
wantRocmSmi: false,
|
||||||
wantTegrastats: true,
|
wantTegrastats: true,
|
||||||
|
wantNvtop: false,
|
||||||
|
wantErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "nvtop available",
|
||||||
|
setupCommands: func(tempDir string) error {
|
||||||
|
path := filepath.Join(tempDir, "nvtop")
|
||||||
|
script := `#!/bin/sh
|
||||||
|
echo "[]"`
|
||||||
|
if err := os.WriteFile(path, []byte(script), 0755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
wantNvidiaSmi: false,
|
||||||
|
wantRocmSmi: false,
|
||||||
|
wantTegrastats: false,
|
||||||
|
wantNvtop: true,
|
||||||
wantErr: false,
|
wantErr: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "no gpu tools available",
|
name: "no gpu tools available",
|
||||||
setupCommands: func() error {
|
setupCommands: func(_ string) error {
|
||||||
os.Setenv("PATH", "")
|
t.Setenv("PATH", "")
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
wantErr: true,
|
wantErr: true,
|
||||||
@@ -1035,36 +1185,56 @@ echo "test"`
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
if err := tt.setupCommands(); err != nil {
|
tempDir := t.TempDir()
|
||||||
|
t.Setenv("PATH", tempDir)
|
||||||
|
if err := tt.setupCommands(tempDir); err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
gm := &GPUManager{}
|
gm := &GPUManager{}
|
||||||
err := gm.detectGPUs()
|
caps := gm.discoverGpuCapabilities()
|
||||||
|
var err error
|
||||||
|
if !hasAnyGpuCollector(caps) {
|
||||||
|
err = fmt.Errorf(noGPUFoundMsg)
|
||||||
|
}
|
||||||
|
priorities := gm.resolveLegacyCollectorPriority(caps)
|
||||||
|
hasPriority := func(source collectorSource) bool {
|
||||||
|
for _, s := range priorities {
|
||||||
|
if s == source {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
gotNvidiaSmi := hasPriority(collectorSourceNvidiaSMI)
|
||||||
|
gotRocmSmi := hasPriority(collectorSourceRocmSMI)
|
||||||
|
gotTegrastats := caps.hasTegrastats
|
||||||
|
gotNvtop := caps.hasNvtop
|
||||||
|
|
||||||
t.Logf("nvidiaSmi: %v, rocmSmi: %v, tegrastats: %v", gm.nvidiaSmi, gm.rocmSmi, gm.tegrastats)
|
t.Logf("nvidiaSmi: %v, rocmSmi: %v, tegrastats: %v", gotNvidiaSmi, gotRocmSmi, gotTegrastats)
|
||||||
|
|
||||||
if tt.wantErr {
|
wantErr := tt.wantErr
|
||||||
|
if hasAmdSysfs && (tt.name == "nvidia-smi not available" || tt.name == "no gpu tools available") {
|
||||||
|
wantErr = false
|
||||||
|
}
|
||||||
|
if wantErr {
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, tt.wantNvidiaSmi, gm.nvidiaSmi)
|
assert.Equal(t, tt.wantNvidiaSmi, gotNvidiaSmi)
|
||||||
assert.Equal(t, tt.wantRocmSmi, gm.rocmSmi)
|
assert.Equal(t, tt.wantRocmSmi, gotRocmSmi)
|
||||||
assert.Equal(t, tt.wantTegrastats, gm.tegrastats)
|
assert.Equal(t, tt.wantTegrastats, gotTegrastats)
|
||||||
|
assert.Equal(t, tt.wantNvtop, gotNvtop)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStartCollector(t *testing.T) {
|
func TestCollectorStartHelpers(t *testing.T) {
|
||||||
// Save original PATH
|
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
// Set up temp dir with the commands
|
// Set up temp dir with the commands
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -1142,6 +1312,27 @@ echo "11-14-2024 22:54:33 RAM 1024/4096MB GR3D_FREQ 80% tj@70C VDD_GPU_SOC 1000m
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "nvtop collector",
|
||||||
|
command: "nvtop",
|
||||||
|
setup: func(t *testing.T) error {
|
||||||
|
path := filepath.Join(dir, "nvtop")
|
||||||
|
script := `#!/bin/sh
|
||||||
|
echo '[{"device_name":"NVIDIA Test GPU","temp":"52C","power_draw":"31W","gpu_util":"37%","mem_total":"4294967296","mem_used":"536870912","processes":[]}]'`
|
||||||
|
if err := os.WriteFile(path, []byte(script), 0755); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
},
|
||||||
|
validate: func(t *testing.T, gm *GPUManager) {
|
||||||
|
gpu, exists := gm.GpuDataMap["n0"]
|
||||||
|
assert.True(t, exists)
|
||||||
|
if exists {
|
||||||
|
assert.Equal(t, "NVIDIA Test GPU", gpu.Name)
|
||||||
|
assert.Equal(t, 52.0, gpu.Temperature)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
@@ -1154,13 +1345,161 @@ echo "11-14-2024 22:54:33 RAM 1024/4096MB GR3D_FREQ 80% tj@70C VDD_GPU_SOC 1000m
|
|||||||
GpuDataMap: make(map[string]*system.GPUData),
|
GpuDataMap: make(map[string]*system.GPUData),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
tt.gm.startCollector(tt.command)
|
switch tt.command {
|
||||||
|
case nvidiaSmiCmd:
|
||||||
|
tt.gm.startNvidiaSmiCollector("4")
|
||||||
|
case rocmSmiCmd:
|
||||||
|
tt.gm.startRocmSmiCollector(4300 * time.Millisecond)
|
||||||
|
case tegraStatsCmd:
|
||||||
|
tt.gm.startTegraStatsCollector("3700")
|
||||||
|
case nvtopCmd:
|
||||||
|
tt.gm.startNvtopCollector("30", nil)
|
||||||
|
default:
|
||||||
|
t.Fatalf("unknown test command %q", tt.command)
|
||||||
|
}
|
||||||
time.Sleep(50 * time.Millisecond) // Give collector time to run
|
time.Sleep(50 * time.Millisecond) // Give collector time to run
|
||||||
tt.validate(t, tt.gm)
|
tt.validate(t, tt.gm)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestNewGPUManagerPriorityNvtopFallback(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
t.Setenv("PATH", dir)
|
||||||
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvtop,nvidia-smi")
|
||||||
|
|
||||||
|
nvtopPath := filepath.Join(dir, "nvtop")
|
||||||
|
nvtopScript := `#!/bin/sh
|
||||||
|
echo 'not-json'`
|
||||||
|
require.NoError(t, os.WriteFile(nvtopPath, []byte(nvtopScript), 0755))
|
||||||
|
|
||||||
|
nvidiaPath := filepath.Join(dir, "nvidia-smi")
|
||||||
|
nvidiaScript := `#!/bin/sh
|
||||||
|
echo "0, NVIDIA Priority GPU, 45, 512, 2048, 12, 25"`
|
||||||
|
require.NoError(t, os.WriteFile(nvidiaPath, []byte(nvidiaScript), 0755))
|
||||||
|
|
||||||
|
gm, err := NewGPUManager()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, gm)
|
||||||
|
|
||||||
|
time.Sleep(150 * time.Millisecond)
|
||||||
|
gpu, ok := gm.GpuDataMap["0"]
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, "Priority GPU", gpu.Name)
|
||||||
|
assert.Equal(t, 45.0, gpu.Temperature)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewGPUManagerPriorityMixedCollectors(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
t.Setenv("PATH", dir)
|
||||||
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "intel_gpu_top,rocm-smi")
|
||||||
|
|
||||||
|
intelPath := filepath.Join(dir, "intel_gpu_top")
|
||||||
|
intelScript := `#!/bin/sh
|
||||||
|
echo "Freq MHz IRQ RC6 Power W IMC MiB/s RCS VCS"
|
||||||
|
echo " req act /s % gpu pkg rd wr % se wa % se wa"
|
||||||
|
echo "226 223 338 58 2.00 2.69 1820 965 0.00 0 0 0.00 0 0"
|
||||||
|
echo "189 187 412 67 1.80 2.45 1950 823 8.50 2 1 15.00 1 0"
|
||||||
|
`
|
||||||
|
require.NoError(t, os.WriteFile(intelPath, []byte(intelScript), 0755))
|
||||||
|
|
||||||
|
rocmPath := filepath.Join(dir, "rocm-smi")
|
||||||
|
rocmScript := `#!/bin/sh
|
||||||
|
echo '{"card0": {"Temperature (Sensor edge) (C)": "49.0", "Current Socket Graphics Package Power (W)": "28.159", "GPU use (%)": "0", "VRAM Total Memory (B)": "536870912", "VRAM Total Used Memory (B)": "445550592", "Card Series": "Rembrandt [Radeon 680M]", "GUID": "34756"}}'
|
||||||
|
`
|
||||||
|
require.NoError(t, os.WriteFile(rocmPath, []byte(rocmScript), 0755))
|
||||||
|
|
||||||
|
gm, err := NewGPUManager()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, gm)
|
||||||
|
|
||||||
|
time.Sleep(150 * time.Millisecond)
|
||||||
|
_, intelOk := gm.GpuDataMap["i0"]
|
||||||
|
_, amdOk := gm.GpuDataMap["34756"]
|
||||||
|
assert.True(t, intelOk)
|
||||||
|
assert.True(t, amdOk)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewGPUManagerPriorityNvmlFallbackToNvidiaSmi(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
t.Setenv("PATH", dir)
|
||||||
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvml,nvidia-smi")
|
||||||
|
|
||||||
|
nvidiaPath := filepath.Join(dir, "nvidia-smi")
|
||||||
|
nvidiaScript := `#!/bin/sh
|
||||||
|
echo "0, NVIDIA Fallback GPU, 41, 256, 1024, 8, 14"`
|
||||||
|
require.NoError(t, os.WriteFile(nvidiaPath, []byte(nvidiaScript), 0755))
|
||||||
|
|
||||||
|
gm, err := NewGPUManager()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, gm)
|
||||||
|
|
||||||
|
time.Sleep(150 * time.Millisecond)
|
||||||
|
gpu, ok := gm.GpuDataMap["0"]
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, "Fallback GPU", gpu.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewGPUManagerConfiguredCollectorsMustStart(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
t.Setenv("PATH", dir)
|
||||||
|
|
||||||
|
t.Run("configured valid collector unavailable", func(t *testing.T) {
|
||||||
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvidia-smi")
|
||||||
|
gm, err := NewGPUManager()
|
||||||
|
require.Nil(t, gm)
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "no configured GPU collectors are available")
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("configured collector list has only unknown entries", func(t *testing.T) {
|
||||||
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "bad,unknown")
|
||||||
|
gm, err := NewGPUManager()
|
||||||
|
require.Nil(t, gm)
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "no configured GPU collectors are available")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCollectorDefinitionsNvmlDoesNotRequireNvidiaSmi(t *testing.T) {
|
||||||
|
gm := &GPUManager{}
|
||||||
|
definitions := gm.collectorDefinitions(gpuCapabilities{})
|
||||||
|
require.Contains(t, definitions, collectorSourceNVML)
|
||||||
|
assert.True(t, definitions[collectorSourceNVML].available)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewGPUManagerConfiguredNvmlBypassesCapabilityGate(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
t.Setenv("PATH", dir)
|
||||||
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvml")
|
||||||
|
|
||||||
|
gm, err := NewGPUManager()
|
||||||
|
require.Nil(t, gm)
|
||||||
|
require.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "no configured GPU collectors are available")
|
||||||
|
assert.NotContains(t, err.Error(), noGPUFoundMsg)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestNewGPUManagerJetsonIgnoresCollectorConfig(t *testing.T) {
|
||||||
|
dir := t.TempDir()
|
||||||
|
t.Setenv("PATH", dir)
|
||||||
|
t.Setenv("BESZEL_AGENT_GPU_COLLECTOR", "nvidia-smi")
|
||||||
|
|
||||||
|
tegraPath := filepath.Join(dir, "tegrastats")
|
||||||
|
tegraScript := `#!/bin/sh
|
||||||
|
echo "11-14-2024 22:54:33 RAM 1024/4096MB GR3D_FREQ 80% tj@70C VDD_GPU_SOC 1000mW"`
|
||||||
|
require.NoError(t, os.WriteFile(tegraPath, []byte(tegraScript), 0755))
|
||||||
|
|
||||||
|
gm, err := NewGPUManager()
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.NotNil(t, gm)
|
||||||
|
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
gpu, ok := gm.GpuDataMap["0"]
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, "GPU", gpu.Name)
|
||||||
|
}
|
||||||
|
|
||||||
// TestAccumulationTableDriven tests the accumulation behavior for all three GPU types
|
// TestAccumulationTableDriven tests the accumulation behavior for all three GPU types
|
||||||
func TestAccumulation(t *testing.T) {
|
func TestAccumulation(t *testing.T) {
|
||||||
type expectedGPUValues struct {
|
type expectedGPUValues struct {
|
||||||
@@ -1346,7 +1685,7 @@ func TestIntelUpdateFromStats(t *testing.T) {
|
|||||||
ok := gm.updateIntelFromStats(&sample1)
|
ok := gm.updateIntelFromStats(&sample1)
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
|
|
||||||
gpu := gm.GpuDataMap["0"]
|
gpu := gm.GpuDataMap["i0"]
|
||||||
require.NotNil(t, gpu)
|
require.NotNil(t, gpu)
|
||||||
assert.Equal(t, "GPU", gpu.Name)
|
assert.Equal(t, "GPU", gpu.Name)
|
||||||
assert.EqualValues(t, 10.5, gpu.Power)
|
assert.EqualValues(t, 10.5, gpu.Power)
|
||||||
@@ -1368,7 +1707,7 @@ func TestIntelUpdateFromStats(t *testing.T) {
|
|||||||
ok = gm.updateIntelFromStats(&sample2)
|
ok = gm.updateIntelFromStats(&sample2)
|
||||||
assert.True(t, ok)
|
assert.True(t, ok)
|
||||||
|
|
||||||
gpu = gm.GpuDataMap["0"]
|
gpu = gm.GpuDataMap["i0"]
|
||||||
require.NotNil(t, gpu)
|
require.NotNil(t, gpu)
|
||||||
assert.EqualValues(t, 10.5, gpu.Power)
|
assert.EqualValues(t, 10.5, gpu.Power)
|
||||||
assert.EqualValues(t, 30.0, gpu.Engines["Render/3D"]) // 20 + 10
|
assert.EqualValues(t, 30.0, gpu.Engines["Render/3D"]) // 20 + 10
|
||||||
@@ -1378,12 +1717,8 @@ func TestIntelUpdateFromStats(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestIntelCollectorStreaming(t *testing.T) {
|
func TestIntelCollectorStreaming(t *testing.T) {
|
||||||
// Save and override PATH
|
|
||||||
origPath := os.Getenv("PATH")
|
|
||||||
defer os.Setenv("PATH", origPath)
|
|
||||||
|
|
||||||
dir := t.TempDir()
|
dir := t.TempDir()
|
||||||
os.Setenv("PATH", dir)
|
t.Setenv("PATH", dir)
|
||||||
|
|
||||||
// Create a fake intel_gpu_top that prints -l format with four samples (first will be skipped) and exits
|
// Create a fake intel_gpu_top that prints -l format with four samples (first will be skipped) and exits
|
||||||
scriptPath := filepath.Join(dir, "intel_gpu_top")
|
scriptPath := filepath.Join(dir, "intel_gpu_top")
|
||||||
@@ -1407,7 +1742,7 @@ echo "298 295 278 51 2.20 3.12 1675 942 5.75 1 2 9.50
|
|||||||
t.Fatalf("collectIntelStats error: %v", err)
|
t.Fatalf("collectIntelStats error: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
gpu := gm.GpuDataMap["0"]
|
gpu := gm.GpuDataMap["i0"]
|
||||||
require.NotNil(t, gpu)
|
require.NotNil(t, gpu)
|
||||||
// Power should be sum of samples 2-4 (first is skipped): 2.0 + 1.8 + 2.2 = 6.0
|
// Power should be sum of samples 2-4 (first is skipped): 2.0 + 1.8 + 2.2 = 6.0
|
||||||
assert.EqualValues(t, 6.0, gpu.Power)
|
assert.EqualValues(t, 6.0, gpu.Power)
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ import (
|
|||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
"github.com/henrygd/beszel/internal/entities/smart"
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
|
||||||
"golang.org/x/exp/slog"
|
"log/slog"
|
||||||
)
|
)
|
||||||
|
|
||||||
// HandlerContext provides context for request handlers
|
// HandlerContext provides context for request handlers
|
||||||
@@ -94,7 +94,7 @@ func (h *GetDataHandler) Handle(hctx *HandlerContext) error {
|
|||||||
var options common.DataRequestOptions
|
var options common.DataRequestOptions
|
||||||
_ = cbor.Unmarshal(hctx.Request.Data, &options)
|
_ = cbor.Unmarshal(hctx.Request.Data, &options)
|
||||||
|
|
||||||
sysStats := hctx.Agent.gatherStats(options.CacheTimeMs)
|
sysStats := hctx.Agent.gatherStats(options)
|
||||||
return hctx.SendResponse(sysStats, hctx.RequestID)
|
return hctx.SendResponse(sysStats, hctx.RequestID)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
|
|||||||
@@ -9,11 +9,31 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// healthFile is the path to the health file
|
// healthFile is the path to the health file
|
||||||
var healthFile = filepath.Join(os.TempDir(), "beszel_health")
|
var healthFile = getHealthFilePath()
|
||||||
|
|
||||||
|
func getHealthFilePath() string {
|
||||||
|
filename := "beszel_health"
|
||||||
|
if runtime.GOOS == "linux" {
|
||||||
|
fullPath := filepath.Join("/dev/shm", filename)
|
||||||
|
if err := updateHealthFile(fullPath); err == nil {
|
||||||
|
return fullPath
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return filepath.Join(os.TempDir(), filename)
|
||||||
|
}
|
||||||
|
|
||||||
|
func updateHealthFile(path string) error {
|
||||||
|
file, err := os.Create(path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return file.Close()
|
||||||
|
}
|
||||||
|
|
||||||
// Check checks if the agent is connected by checking the modification time of the health file
|
// Check checks if the agent is connected by checking the modification time of the health file
|
||||||
func Check() error {
|
func Check() error {
|
||||||
@@ -30,11 +50,7 @@ func Check() error {
|
|||||||
|
|
||||||
// Update updates the modification time of the health file
|
// Update updates the modification time of the health file
|
||||||
func Update() error {
|
func Update() error {
|
||||||
file, err := os.Create(healthFile)
|
return updateHealthFile(healthFile)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return file.Close()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CleanUp removes the health file
|
// CleanUp removes the health file
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package health
|
package health
|
||||||
|
|
||||||
@@ -37,7 +36,6 @@ func TestHealth(t *testing.T) {
|
|||||||
})
|
})
|
||||||
|
|
||||||
// This test uses synctest to simulate time passing.
|
// This test uses synctest to simulate time passing.
|
||||||
// NOTE: This test requires GOEXPERIMENT=synctest to run.
|
|
||||||
t.Run("check with simulated time", func(t *testing.T) {
|
t.Run("check with simulated time", func(t *testing.T) {
|
||||||
synctest.Test(t, func(t *testing.T) {
|
synctest.Test(t, func(t *testing.T) {
|
||||||
// Update the file to set the initial timestamp.
|
// Update the file to set the initial timestamp.
|
||||||
|
|||||||
@@ -52,7 +52,12 @@ class Program
|
|||||||
foreach (var sensor in hardware.Sensors)
|
foreach (var sensor in hardware.Sensors)
|
||||||
{
|
{
|
||||||
var validTemp = sensor.SensorType == SensorType.Temperature && sensor.Value.HasValue;
|
var validTemp = sensor.SensorType == SensorType.Temperature && sensor.Value.HasValue;
|
||||||
if (!validTemp || sensor.Name.Contains("Distance"))
|
if (!validTemp ||
|
||||||
|
sensor.Name.IndexOf("Distance", StringComparison.OrdinalIgnoreCase) >= 0 ||
|
||||||
|
sensor.Name.IndexOf("Limit", StringComparison.OrdinalIgnoreCase) >= 0 ||
|
||||||
|
sensor.Name.IndexOf("Critical", StringComparison.OrdinalIgnoreCase) >= 0 ||
|
||||||
|
sensor.Name.IndexOf("Warning", StringComparison.OrdinalIgnoreCase) >= 0 ||
|
||||||
|
sensor.Name.IndexOf("Resolution", StringComparison.OrdinalIgnoreCase) >= 0)
|
||||||
{
|
{
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,9 +3,11 @@
|
|||||||
<OutputType>Exe</OutputType>
|
<OutputType>Exe</OutputType>
|
||||||
<TargetFramework>net48</TargetFramework>
|
<TargetFramework>net48</TargetFramework>
|
||||||
<Platforms>x64</Platforms>
|
<Platforms>x64</Platforms>
|
||||||
|
<RuntimeIdentifier>win-x64</RuntimeIdentifier>
|
||||||
|
<AppendRuntimeIdentifierToOutputPath>false</AppendRuntimeIdentifierToOutputPath>
|
||||||
</PropertyGroup>
|
</PropertyGroup>
|
||||||
|
|
||||||
<ItemGroup>
|
<ItemGroup>
|
||||||
<PackageReference Include="LibreHardwareMonitorLib" Version="0.9.4" />
|
<PackageReference Include="LibreHardwareMonitorLib" Version="0.9.6" />
|
||||||
</ItemGroup>
|
</ItemGroup>
|
||||||
</Project>
|
</Project>
|
||||||
|
|||||||
233
agent/mdraid_linux.go
Normal file
233
agent/mdraid_linux.go
Normal file
@@ -0,0 +1,233 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mdraidSysfsRoot is a test hook; production value is "/sys".
|
||||||
|
var mdraidSysfsRoot = "/sys"
|
||||||
|
|
||||||
|
type mdraidHealth struct {
|
||||||
|
level string
|
||||||
|
arrayState string
|
||||||
|
degraded uint64
|
||||||
|
raidDisks uint64
|
||||||
|
syncAction string
|
||||||
|
syncCompleted string
|
||||||
|
syncSpeed string
|
||||||
|
mismatchCnt uint64
|
||||||
|
capacity uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanMdraidDevices discovers Linux md arrays exposed in sysfs.
|
||||||
|
func scanMdraidDevices() []*DeviceInfo {
|
||||||
|
blockDir := filepath.Join(mdraidSysfsRoot, "block")
|
||||||
|
entries, err := os.ReadDir(blockDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
devices := make([]*DeviceInfo, 0, 2)
|
||||||
|
for _, ent := range entries {
|
||||||
|
name := ent.Name()
|
||||||
|
if !isMdraidBlockName(name) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
mdDir := filepath.Join(blockDir, name, "md")
|
||||||
|
if !utils.FileExists(filepath.Join(mdDir, "array_state")) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
devPath := filepath.Join("/dev", name)
|
||||||
|
devices = append(devices, &DeviceInfo{
|
||||||
|
Name: devPath,
|
||||||
|
Type: "mdraid",
|
||||||
|
InfoName: devPath + " [mdraid]",
|
||||||
|
Protocol: "MD",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return devices
|
||||||
|
}
|
||||||
|
|
||||||
|
// collectMdraidHealth reads mdraid health and stores it in SmartDataMap.
|
||||||
|
func (sm *SmartManager) collectMdraidHealth(deviceInfo *DeviceInfo) (bool, error) {
|
||||||
|
if deviceInfo == nil || deviceInfo.Name == "" {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
base := filepath.Base(deviceInfo.Name)
|
||||||
|
if !isMdraidBlockName(base) && !strings.EqualFold(deviceInfo.Type, "mdraid") {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
health, ok := readMdraidHealth(base)
|
||||||
|
if !ok {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
deviceInfo.Type = "mdraid"
|
||||||
|
key := fmt.Sprintf("mdraid:%s", base)
|
||||||
|
status := mdraidSmartStatus(health)
|
||||||
|
|
||||||
|
attrs := make([]*smart.SmartAttribute, 0, 10)
|
||||||
|
if health.arrayState != "" {
|
||||||
|
attrs = append(attrs, &smart.SmartAttribute{Name: "ArrayState", RawString: health.arrayState})
|
||||||
|
}
|
||||||
|
if health.level != "" {
|
||||||
|
attrs = append(attrs, &smart.SmartAttribute{Name: "RaidLevel", RawString: health.level})
|
||||||
|
}
|
||||||
|
if health.raidDisks > 0 {
|
||||||
|
attrs = append(attrs, &smart.SmartAttribute{Name: "RaidDisks", RawValue: health.raidDisks})
|
||||||
|
}
|
||||||
|
if health.degraded > 0 {
|
||||||
|
attrs = append(attrs, &smart.SmartAttribute{Name: "Degraded", RawValue: health.degraded})
|
||||||
|
}
|
||||||
|
if health.syncAction != "" {
|
||||||
|
attrs = append(attrs, &smart.SmartAttribute{Name: "SyncAction", RawString: health.syncAction})
|
||||||
|
}
|
||||||
|
if health.syncCompleted != "" {
|
||||||
|
attrs = append(attrs, &smart.SmartAttribute{Name: "SyncCompleted", RawString: health.syncCompleted})
|
||||||
|
}
|
||||||
|
if health.syncSpeed != "" {
|
||||||
|
attrs = append(attrs, &smart.SmartAttribute{Name: "SyncSpeed", RawString: health.syncSpeed})
|
||||||
|
}
|
||||||
|
if health.mismatchCnt > 0 {
|
||||||
|
attrs = append(attrs, &smart.SmartAttribute{Name: "MismatchCount", RawValue: health.mismatchCnt})
|
||||||
|
}
|
||||||
|
|
||||||
|
sm.Lock()
|
||||||
|
defer sm.Unlock()
|
||||||
|
|
||||||
|
if _, exists := sm.SmartDataMap[key]; !exists {
|
||||||
|
sm.SmartDataMap[key] = &smart.SmartData{}
|
||||||
|
}
|
||||||
|
|
||||||
|
data := sm.SmartDataMap[key]
|
||||||
|
data.ModelName = "Linux MD RAID"
|
||||||
|
if health.level != "" {
|
||||||
|
data.ModelName = "Linux MD RAID (" + health.level + ")"
|
||||||
|
}
|
||||||
|
data.Capacity = health.capacity
|
||||||
|
data.SmartStatus = status
|
||||||
|
data.DiskName = filepath.Join("/dev", base)
|
||||||
|
data.DiskType = "mdraid"
|
||||||
|
data.Attributes = attrs
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// readMdraidHealth reads md array health fields from sysfs.
|
||||||
|
func readMdraidHealth(blockName string) (mdraidHealth, bool) {
|
||||||
|
var out mdraidHealth
|
||||||
|
|
||||||
|
if !isMdraidBlockName(blockName) {
|
||||||
|
return out, false
|
||||||
|
}
|
||||||
|
|
||||||
|
mdDir := filepath.Join(mdraidSysfsRoot, "block", blockName, "md")
|
||||||
|
arrayState, okState := utils.ReadStringFileOK(filepath.Join(mdDir, "array_state"))
|
||||||
|
if !okState {
|
||||||
|
return out, false
|
||||||
|
}
|
||||||
|
|
||||||
|
out.arrayState = arrayState
|
||||||
|
out.level = utils.ReadStringFile(filepath.Join(mdDir, "level"))
|
||||||
|
out.syncAction = utils.ReadStringFile(filepath.Join(mdDir, "sync_action"))
|
||||||
|
out.syncCompleted = utils.ReadStringFile(filepath.Join(mdDir, "sync_completed"))
|
||||||
|
out.syncSpeed = utils.ReadStringFile(filepath.Join(mdDir, "sync_speed"))
|
||||||
|
|
||||||
|
if val, ok := utils.ReadUintFile(filepath.Join(mdDir, "raid_disks")); ok {
|
||||||
|
out.raidDisks = val
|
||||||
|
}
|
||||||
|
if val, ok := utils.ReadUintFile(filepath.Join(mdDir, "degraded")); ok {
|
||||||
|
out.degraded = val
|
||||||
|
}
|
||||||
|
if val, ok := utils.ReadUintFile(filepath.Join(mdDir, "mismatch_cnt")); ok {
|
||||||
|
out.mismatchCnt = val
|
||||||
|
}
|
||||||
|
|
||||||
|
if capBytes, ok := readMdraidBlockCapacityBytes(blockName, mdraidSysfsRoot); ok {
|
||||||
|
out.capacity = capBytes
|
||||||
|
}
|
||||||
|
|
||||||
|
return out, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// mdraidSmartStatus maps md state/sync signals to a SMART-like status.
|
||||||
|
func mdraidSmartStatus(health mdraidHealth) string {
|
||||||
|
state := strings.ToLower(strings.TrimSpace(health.arrayState))
|
||||||
|
switch state {
|
||||||
|
case "inactive", "faulty", "broken", "stopped":
|
||||||
|
return "FAILED"
|
||||||
|
}
|
||||||
|
// During rebuild/recovery, arrays are often temporarily degraded; report as
|
||||||
|
// warning instead of hard failure while synchronization is in progress.
|
||||||
|
syncAction := strings.ToLower(strings.TrimSpace(health.syncAction))
|
||||||
|
switch syncAction {
|
||||||
|
case "resync", "recover", "reshape":
|
||||||
|
return "WARNING"
|
||||||
|
}
|
||||||
|
if health.degraded > 0 {
|
||||||
|
return "FAILED"
|
||||||
|
}
|
||||||
|
switch syncAction {
|
||||||
|
case "check", "repair":
|
||||||
|
return "WARNING"
|
||||||
|
}
|
||||||
|
switch state {
|
||||||
|
case "clean", "active", "active-idle", "write-pending", "read-auto", "readonly":
|
||||||
|
return "PASSED"
|
||||||
|
}
|
||||||
|
return "UNKNOWN"
|
||||||
|
}
|
||||||
|
|
||||||
|
// isMdraidBlockName matches /dev/mdN-style block device names.
|
||||||
|
func isMdraidBlockName(name string) bool {
|
||||||
|
if !strings.HasPrefix(name, "md") {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
suffix := strings.TrimPrefix(name, "md")
|
||||||
|
if suffix == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, c := range suffix {
|
||||||
|
if c < '0' || c > '9' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// readMdraidBlockCapacityBytes converts block size metadata into bytes.
|
||||||
|
func readMdraidBlockCapacityBytes(blockName, root string) (uint64, bool) {
|
||||||
|
sizePath := filepath.Join(root, "block", blockName, "size")
|
||||||
|
lbsPath := filepath.Join(root, "block", blockName, "queue", "logical_block_size")
|
||||||
|
|
||||||
|
sizeStr, ok := utils.ReadStringFileOK(sizePath)
|
||||||
|
if !ok {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
sectors, err := strconv.ParseUint(sizeStr, 10, 64)
|
||||||
|
if err != nil || sectors == 0 {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
logicalBlockSize := uint64(512)
|
||||||
|
if lbsStr, ok := utils.ReadStringFileOK(lbsPath); ok {
|
||||||
|
if parsed, err := strconv.ParseUint(lbsStr, 10, 64); err == nil && parsed > 0 {
|
||||||
|
logicalBlockSize = parsed
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return sectors * logicalBlockSize, true
|
||||||
|
}
|
||||||
103
agent/mdraid_linux_test.go
Normal file
103
agent/mdraid_linux_test.go
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestMdraidMockSysfsScanAndCollect(t *testing.T) {
|
||||||
|
tmp := t.TempDir()
|
||||||
|
prev := mdraidSysfsRoot
|
||||||
|
mdraidSysfsRoot = tmp
|
||||||
|
t.Cleanup(func() { mdraidSysfsRoot = prev })
|
||||||
|
|
||||||
|
mdDir := filepath.Join(tmp, "block", "md0", "md")
|
||||||
|
queueDir := filepath.Join(tmp, "block", "md0", "queue")
|
||||||
|
if err := os.MkdirAll(mdDir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if err := os.MkdirAll(queueDir, 0o755); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
write := func(path, content string) {
|
||||||
|
t.Helper()
|
||||||
|
if err := os.WriteFile(path, []byte(content), 0o644); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
write(filepath.Join(mdDir, "array_state"), "active\n")
|
||||||
|
write(filepath.Join(mdDir, "level"), "raid1\n")
|
||||||
|
write(filepath.Join(mdDir, "raid_disks"), "2\n")
|
||||||
|
write(filepath.Join(mdDir, "degraded"), "0\n")
|
||||||
|
write(filepath.Join(mdDir, "sync_action"), "resync\n")
|
||||||
|
write(filepath.Join(mdDir, "sync_completed"), "10%\n")
|
||||||
|
write(filepath.Join(mdDir, "sync_speed"), "100M\n")
|
||||||
|
write(filepath.Join(mdDir, "mismatch_cnt"), "0\n")
|
||||||
|
write(filepath.Join(queueDir, "logical_block_size"), "512\n")
|
||||||
|
write(filepath.Join(tmp, "block", "md0", "size"), "2048\n")
|
||||||
|
|
||||||
|
devs := scanMdraidDevices()
|
||||||
|
if len(devs) != 1 {
|
||||||
|
t.Fatalf("scanMdraidDevices() = %d devices, want 1", len(devs))
|
||||||
|
}
|
||||||
|
if devs[0].Name != "/dev/md0" || devs[0].Type != "mdraid" {
|
||||||
|
t.Fatalf("scanMdraidDevices()[0] = %+v, want Name=/dev/md0 Type=mdraid", devs[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{SmartDataMap: map[string]*smart.SmartData{}}
|
||||||
|
ok, err := sm.collectMdraidHealth(devs[0])
|
||||||
|
if err != nil || !ok {
|
||||||
|
t.Fatalf("collectMdraidHealth() = (ok=%v, err=%v), want (true,nil)", ok, err)
|
||||||
|
}
|
||||||
|
if len(sm.SmartDataMap) != 1 {
|
||||||
|
t.Fatalf("SmartDataMap len=%d, want 1", len(sm.SmartDataMap))
|
||||||
|
}
|
||||||
|
var got *smart.SmartData
|
||||||
|
for _, v := range sm.SmartDataMap {
|
||||||
|
got = v
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if got == nil {
|
||||||
|
t.Fatalf("SmartDataMap value nil")
|
||||||
|
}
|
||||||
|
if got.DiskType != "mdraid" || got.DiskName != "/dev/md0" {
|
||||||
|
t.Fatalf("disk fields = (type=%q name=%q), want (mdraid,/dev/md0)", got.DiskType, got.DiskName)
|
||||||
|
}
|
||||||
|
if got.SmartStatus != "WARNING" {
|
||||||
|
t.Fatalf("SmartStatus=%q, want WARNING", got.SmartStatus)
|
||||||
|
}
|
||||||
|
if got.ModelName == "" || got.Capacity == 0 {
|
||||||
|
t.Fatalf("identity fields = (model=%q cap=%d), want non-empty model and cap>0", got.ModelName, got.Capacity)
|
||||||
|
}
|
||||||
|
if len(got.Attributes) < 5 {
|
||||||
|
t.Fatalf("attributes len=%d, want >= 5", len(got.Attributes))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMdraidSmartStatus(t *testing.T) {
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "inactive"}); got != "FAILED" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(inactive) = %q, want FAILED", got)
|
||||||
|
}
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "active", degraded: 1, syncAction: "recover"}); got != "WARNING" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(degraded+recover) = %q, want WARNING", got)
|
||||||
|
}
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "active", degraded: 1}); got != "FAILED" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(degraded) = %q, want FAILED", got)
|
||||||
|
}
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "active", syncAction: "recover"}); got != "WARNING" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(recover) = %q, want WARNING", got)
|
||||||
|
}
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "clean"}); got != "PASSED" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(clean) = %q, want PASSED", got)
|
||||||
|
}
|
||||||
|
if got := mdraidSmartStatus(mdraidHealth{arrayState: "unknown"}); got != "UNKNOWN" {
|
||||||
|
t.Fatalf("mdraidSmartStatus(unknown) = %q, want UNKNOWN", got)
|
||||||
|
}
|
||||||
|
}
|
||||||
11
agent/mdraid_stub.go
Normal file
11
agent/mdraid_stub.go
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
//go:build !linux
|
||||||
|
|
||||||
|
package agent
|
||||||
|
|
||||||
|
func scanMdraidDevices() []*DeviceInfo {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *SmartManager) collectMdraidHealth(deviceInfo *DeviceInfo) (bool, error) {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/agent/deltatracker"
|
"github.com/henrygd/beszel/agent/deltatracker"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
psutilNet "github.com/shirou/gopsutil/v4/net"
|
psutilNet "github.com/shirou/gopsutil/v4/net"
|
||||||
)
|
)
|
||||||
@@ -94,7 +95,7 @@ func (a *Agent) initializeNetIoStats() {
|
|||||||
a.netInterfaces = make(map[string]struct{}, 0)
|
a.netInterfaces = make(map[string]struct{}, 0)
|
||||||
|
|
||||||
// parse NICS env var for whitelist / blacklist
|
// parse NICS env var for whitelist / blacklist
|
||||||
nicsEnvVal, nicsEnvExists := GetEnv("NICS")
|
nicsEnvVal, nicsEnvExists := utils.GetEnv("NICS")
|
||||||
var nicCfg *NicConfig
|
var nicCfg *NicConfig
|
||||||
if nicsEnvExists {
|
if nicsEnvExists {
|
||||||
nicCfg = newNicConfig(nicsEnvVal)
|
nicCfg = newNicConfig(nicsEnvVal)
|
||||||
@@ -103,10 +104,7 @@ func (a *Agent) initializeNetIoStats() {
|
|||||||
// get current network I/O stats and record valid interfaces
|
// get current network I/O stats and record valid interfaces
|
||||||
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
if netIO, err := psutilNet.IOCounters(true); err == nil {
|
||||||
for _, v := range netIO {
|
for _, v := range netIO {
|
||||||
if nicsEnvExists && !isValidNic(v.Name, nicCfg) {
|
if skipNetworkInterface(v, nicCfg) {
|
||||||
continue
|
|
||||||
}
|
|
||||||
if a.skipNetworkInterface(v) {
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
slog.Info("Detected network interface", "name", v.Name, "sent", v.BytesSent, "recv", v.BytesRecv)
|
slog.Info("Detected network interface", "name", v.Name, "sent", v.BytesSent, "recv", v.BytesRecv)
|
||||||
@@ -215,10 +213,8 @@ func (a *Agent) applyNetworkTotals(
|
|||||||
totalBytesSent, totalBytesRecv uint64,
|
totalBytesSent, totalBytesRecv uint64,
|
||||||
bytesSentPerSecond, bytesRecvPerSecond uint64,
|
bytesSentPerSecond, bytesRecvPerSecond uint64,
|
||||||
) {
|
) {
|
||||||
networkSentPs := bytesToMegabytes(float64(bytesSentPerSecond))
|
if bytesSentPerSecond > 10_000_000_000 || bytesRecvPerSecond > 10_000_000_000 {
|
||||||
networkRecvPs := bytesToMegabytes(float64(bytesRecvPerSecond))
|
slog.Warn("Invalid net stats. Resetting.", "sent", bytesSentPerSecond, "recv", bytesRecvPerSecond)
|
||||||
if networkSentPs > 10_000 || networkRecvPs > 10_000 {
|
|
||||||
slog.Warn("Invalid net stats. Resetting.", "sent", networkSentPs, "recv", networkRecvPs)
|
|
||||||
for _, v := range netIO {
|
for _, v := range netIO {
|
||||||
if _, exists := a.netInterfaces[v.Name]; !exists {
|
if _, exists := a.netInterfaces[v.Name]; !exists {
|
||||||
continue
|
continue
|
||||||
@@ -228,21 +224,29 @@ func (a *Agent) applyNetworkTotals(
|
|||||||
a.initializeNetIoStats()
|
a.initializeNetIoStats()
|
||||||
delete(a.netIoStats, cacheTimeMs)
|
delete(a.netIoStats, cacheTimeMs)
|
||||||
delete(a.netInterfaceDeltaTrackers, cacheTimeMs)
|
delete(a.netInterfaceDeltaTrackers, cacheTimeMs)
|
||||||
systemStats.NetworkSent = 0
|
|
||||||
systemStats.NetworkRecv = 0
|
|
||||||
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = 0, 0
|
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = 0, 0
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
systemStats.NetworkSent = networkSentPs
|
|
||||||
systemStats.NetworkRecv = networkRecvPs
|
|
||||||
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = bytesSentPerSecond, bytesRecvPerSecond
|
systemStats.Bandwidth[0], systemStats.Bandwidth[1] = bytesSentPerSecond, bytesRecvPerSecond
|
||||||
nis.BytesSent = totalBytesSent
|
nis.BytesSent = totalBytesSent
|
||||||
nis.BytesRecv = totalBytesRecv
|
nis.BytesRecv = totalBytesRecv
|
||||||
a.netIoStats[cacheTimeMs] = nis
|
a.netIoStats[cacheTimeMs] = nis
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *Agent) skipNetworkInterface(v psutilNet.IOCountersStat) bool {
|
// skipNetworkInterface returns true if the network interface should be ignored.
|
||||||
|
func skipNetworkInterface(v psutilNet.IOCountersStat, nicCfg *NicConfig) bool {
|
||||||
|
if nicCfg != nil {
|
||||||
|
if !isValidNic(v.Name, nicCfg) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// In whitelist mode, we honor explicit inclusion without auto-filtering.
|
||||||
|
if !nicCfg.isBlacklist {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// In blacklist mode, still apply the auto-filter below.
|
||||||
|
}
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case strings.HasPrefix(v.Name, "lo"),
|
case strings.HasPrefix(v.Name, "lo"),
|
||||||
strings.HasPrefix(v.Name, "docker"),
|
strings.HasPrefix(v.Name, "docker"),
|
||||||
|
|||||||
@@ -261,6 +261,39 @@ func TestNewNicConfig(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
func TestSkipNetworkInterface(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
nic psutilNet.IOCountersStat
|
||||||
|
nicCfg *NicConfig
|
||||||
|
expectSkip bool
|
||||||
|
}{
|
||||||
|
{"loopback lo", psutilNet.IOCountersStat{Name: "lo", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"loopback lo0", psutilNet.IOCountersStat{Name: "lo0", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"docker prefix", psutilNet.IOCountersStat{Name: "docker0", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"br- prefix", psutilNet.IOCountersStat{Name: "br-lan", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"veth prefix", psutilNet.IOCountersStat{Name: "veth0abc", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"bond prefix", psutilNet.IOCountersStat{Name: "bond0", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"cali prefix", psutilNet.IOCountersStat{Name: "cali1234", BytesSent: 100, BytesRecv: 100}, nil, true},
|
||||||
|
{"zero BytesRecv", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 100, BytesRecv: 0}, nil, true},
|
||||||
|
{"zero BytesSent", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 0, BytesRecv: 100}, nil, true},
|
||||||
|
{"both zero", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 0, BytesRecv: 0}, nil, true},
|
||||||
|
{"normal eth0", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 100, BytesRecv: 200}, nil, false},
|
||||||
|
{"normal wlan0", psutilNet.IOCountersStat{Name: "wlan0", BytesSent: 1, BytesRecv: 1}, nil, false},
|
||||||
|
{"whitelist overrides skip (docker)", psutilNet.IOCountersStat{Name: "docker0", BytesSent: 100, BytesRecv: 100}, newNicConfig("docker0"), false},
|
||||||
|
{"whitelist overrides skip (lo)", psutilNet.IOCountersStat{Name: "lo", BytesSent: 100, BytesRecv: 100}, newNicConfig("lo"), false},
|
||||||
|
{"whitelist exclusion", psutilNet.IOCountersStat{Name: "eth1", BytesSent: 100, BytesRecv: 100}, newNicConfig("eth0"), true},
|
||||||
|
{"blacklist skip lo", psutilNet.IOCountersStat{Name: "lo", BytesSent: 100, BytesRecv: 100}, newNicConfig("-eth0"), true},
|
||||||
|
{"blacklist explicit eth0", psutilNet.IOCountersStat{Name: "eth0", BytesSent: 100, BytesRecv: 100}, newNicConfig("-eth0"), true},
|
||||||
|
{"blacklist allow eth1", psutilNet.IOCountersStat{Name: "eth1", BytesSent: 100, BytesRecv: 100}, newNicConfig("-eth0"), false},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
assert.Equal(t, tt.expectSkip, skipNetworkInterface(tt.nic, tt.nicCfg))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestEnsureNetworkInterfacesMap(t *testing.T) {
|
func TestEnsureNetworkInterfacesMap(t *testing.T) {
|
||||||
var a Agent
|
var a Agent
|
||||||
var stats system.Stats
|
var stats system.Stats
|
||||||
@@ -383,8 +416,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesSent uint64
|
totalBytesSent uint64
|
||||||
totalBytesRecv uint64
|
totalBytesRecv uint64
|
||||||
expectReset bool
|
expectReset bool
|
||||||
expectedNetworkSent float64
|
|
||||||
expectedNetworkRecv float64
|
|
||||||
expectedBandwidthSent uint64
|
expectedBandwidthSent uint64
|
||||||
expectedBandwidthRecv uint64
|
expectedBandwidthRecv uint64
|
||||||
}{
|
}{
|
||||||
@@ -395,8 +426,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesSent: 10000000,
|
totalBytesSent: 10000000,
|
||||||
totalBytesRecv: 20000000,
|
totalBytesRecv: 20000000,
|
||||||
expectReset: false,
|
expectReset: false,
|
||||||
expectedNetworkSent: 0.95, // ~1 MB/s rounded to 2 decimals
|
|
||||||
expectedNetworkRecv: 1.91, // ~2 MB/s rounded to 2 decimals
|
|
||||||
expectedBandwidthSent: 1000000,
|
expectedBandwidthSent: 1000000,
|
||||||
expectedBandwidthRecv: 2000000,
|
expectedBandwidthRecv: 2000000,
|
||||||
},
|
},
|
||||||
@@ -424,18 +453,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesRecv: 20000000,
|
totalBytesRecv: 20000000,
|
||||||
expectReset: true,
|
expectReset: true,
|
||||||
},
|
},
|
||||||
{
|
|
||||||
name: "Valid network stats - at threshold boundary",
|
|
||||||
bytesSentPerSecond: 10485750000, // ~9999.99 MB/s (rounds to 9999.99)
|
|
||||||
bytesRecvPerSecond: 10485750000, // ~9999.99 MB/s (rounds to 9999.99)
|
|
||||||
totalBytesSent: 10000000,
|
|
||||||
totalBytesRecv: 20000000,
|
|
||||||
expectReset: false,
|
|
||||||
expectedNetworkSent: 9999.99,
|
|
||||||
expectedNetworkRecv: 9999.99,
|
|
||||||
expectedBandwidthSent: 10485750000,
|
|
||||||
expectedBandwidthRecv: 10485750000,
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
name: "Zero values",
|
name: "Zero values",
|
||||||
bytesSentPerSecond: 0,
|
bytesSentPerSecond: 0,
|
||||||
@@ -443,8 +460,6 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
totalBytesSent: 0,
|
totalBytesSent: 0,
|
||||||
totalBytesRecv: 0,
|
totalBytesRecv: 0,
|
||||||
expectReset: false,
|
expectReset: false,
|
||||||
expectedNetworkSent: 0.0,
|
|
||||||
expectedNetworkRecv: 0.0,
|
|
||||||
expectedBandwidthSent: 0,
|
expectedBandwidthSent: 0,
|
||||||
expectedBandwidthRecv: 0,
|
expectedBandwidthRecv: 0,
|
||||||
},
|
},
|
||||||
@@ -481,14 +496,10 @@ func TestApplyNetworkTotals(t *testing.T) {
|
|||||||
// Should have reset network tracking state - maps cleared and stats zeroed
|
// Should have reset network tracking state - maps cleared and stats zeroed
|
||||||
assert.NotContains(t, a.netIoStats, cacheTimeMs, "cache entry should be cleared after reset")
|
assert.NotContains(t, a.netIoStats, cacheTimeMs, "cache entry should be cleared after reset")
|
||||||
assert.NotContains(t, a.netInterfaceDeltaTrackers, cacheTimeMs, "tracker should be cleared on reset")
|
assert.NotContains(t, a.netInterfaceDeltaTrackers, cacheTimeMs, "tracker should be cleared on reset")
|
||||||
assert.Zero(t, systemStats.NetworkSent)
|
|
||||||
assert.Zero(t, systemStats.NetworkRecv)
|
|
||||||
assert.Zero(t, systemStats.Bandwidth[0])
|
assert.Zero(t, systemStats.Bandwidth[0])
|
||||||
assert.Zero(t, systemStats.Bandwidth[1])
|
assert.Zero(t, systemStats.Bandwidth[1])
|
||||||
} else {
|
} else {
|
||||||
// Should have applied stats
|
// Should have applied stats
|
||||||
assert.Equal(t, tt.expectedNetworkSent, systemStats.NetworkSent)
|
|
||||||
assert.Equal(t, tt.expectedNetworkRecv, systemStats.NetworkRecv)
|
|
||||||
assert.Equal(t, tt.expectedBandwidthSent, systemStats.Bandwidth[0])
|
assert.Equal(t, tt.expectedBandwidthSent, systemStats.Bandwidth[0])
|
||||||
assert.Equal(t, tt.expectedBandwidthRecv, systemStats.Bandwidth[1])
|
assert.Equal(t, tt.expectedBandwidthRecv, systemStats.Bandwidth[1])
|
||||||
|
|
||||||
|
|||||||
31
agent/response.go
Normal file
31
agent/response.go
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/fxamacker/cbor/v2"
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
|
)
|
||||||
|
|
||||||
|
// newAgentResponse creates an AgentResponse using legacy typed fields.
|
||||||
|
// This maintains backward compatibility with <= 0.17 hubs that expect specific fields.
|
||||||
|
func newAgentResponse(data any, requestID *uint32) common.AgentResponse {
|
||||||
|
response := common.AgentResponse{Id: requestID}
|
||||||
|
switch v := data.(type) {
|
||||||
|
case *system.CombinedData:
|
||||||
|
response.SystemData = v
|
||||||
|
case *common.FingerprintResponse:
|
||||||
|
response.Fingerprint = v
|
||||||
|
case string:
|
||||||
|
response.String = &v
|
||||||
|
case map[string]smart.SmartData:
|
||||||
|
response.SmartData = v
|
||||||
|
case systemd.ServiceDetails:
|
||||||
|
response.ServiceInfo = v
|
||||||
|
default:
|
||||||
|
// For unknown types, use the generic Data field
|
||||||
|
response.Data, _ = cbor.Marshal(data)
|
||||||
|
}
|
||||||
|
return response
|
||||||
|
}
|
||||||
@@ -2,48 +2,67 @@ package agent
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"path"
|
"path"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/shirou/gopsutil/v4/common"
|
"github.com/shirou/gopsutil/v4/common"
|
||||||
"github.com/shirou/gopsutil/v4/sensors"
|
"github.com/shirou/gopsutil/v4/sensors"
|
||||||
)
|
)
|
||||||
|
|
||||||
type SensorConfig struct {
|
var errTemperatureFetchTimeout = errors.New("temperature collection timed out")
|
||||||
context context.Context
|
|
||||||
sensors map[string]struct{}
|
|
||||||
primarySensor string
|
|
||||||
isBlacklist bool
|
|
||||||
hasWildcards bool
|
|
||||||
skipCollection bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *Agent) newSensorConfig() *SensorConfig {
|
|
||||||
primarySensor, _ := GetEnv("PRIMARY_SENSOR")
|
|
||||||
sysSensors, _ := GetEnv("SYS_SENSORS")
|
|
||||||
sensorsEnvVal, sensorsSet := GetEnv("SENSORS")
|
|
||||||
skipCollection := sensorsSet && sensorsEnvVal == ""
|
|
||||||
|
|
||||||
return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, skipCollection)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
// Matches sensors.TemperaturesWithContext to allow for panic recovery (gopsutil/issues/1832)
|
||||||
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
type getTempsFn func(ctx context.Context) ([]sensors.TemperatureStat, error)
|
||||||
|
|
||||||
|
type SensorConfig struct {
|
||||||
|
context context.Context
|
||||||
|
sensors map[string]struct{}
|
||||||
|
primarySensor string
|
||||||
|
timeout time.Duration
|
||||||
|
isBlacklist bool
|
||||||
|
hasWildcards bool
|
||||||
|
skipCollection bool
|
||||||
|
firstRun bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Agent) newSensorConfig() *SensorConfig {
|
||||||
|
primarySensor, _ := utils.GetEnv("PRIMARY_SENSOR")
|
||||||
|
sysSensors, _ := utils.GetEnv("SYS_SENSORS")
|
||||||
|
sensorsEnvVal, sensorsSet := utils.GetEnv("SENSORS")
|
||||||
|
skipCollection := sensorsSet && sensorsEnvVal == ""
|
||||||
|
sensorsTimeout, _ := utils.GetEnv("SENSORS_TIMEOUT")
|
||||||
|
|
||||||
|
return a.newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, sensorsTimeout, skipCollection)
|
||||||
|
}
|
||||||
|
|
||||||
// newSensorConfigWithEnv creates a SensorConfig with the provided environment variables
|
// newSensorConfigWithEnv creates a SensorConfig with the provided environment variables
|
||||||
// sensorsSet indicates if the SENSORS environment variable was explicitly set (even to empty string)
|
// sensorsSet indicates if the SENSORS environment variable was explicitly set (even to empty string)
|
||||||
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal string, skipCollection bool) *SensorConfig {
|
func (a *Agent) newSensorConfigWithEnv(primarySensor, sysSensors, sensorsEnvVal, sensorsTimeout string, skipCollection bool) *SensorConfig {
|
||||||
|
timeout := 2 * time.Second
|
||||||
|
if sensorsTimeout != "" {
|
||||||
|
if d, err := time.ParseDuration(sensorsTimeout); err == nil {
|
||||||
|
timeout = d
|
||||||
|
} else {
|
||||||
|
slog.Warn("Invalid SENSORS_TIMEOUT", "value", sensorsTimeout)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
config := &SensorConfig{
|
config := &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: primarySensor,
|
primarySensor: primarySensor,
|
||||||
|
timeout: timeout,
|
||||||
skipCollection: skipCollection,
|
skipCollection: skipCollection,
|
||||||
|
firstRun: true,
|
||||||
sensors: make(map[string]struct{}),
|
sensors: make(map[string]struct{}),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -85,10 +104,12 @@ func (a *Agent) updateTemperatures(systemStats *system.Stats) {
|
|||||||
// reset high temp
|
// reset high temp
|
||||||
a.systemInfo.DashboardTemp = 0
|
a.systemInfo.DashboardTemp = 0
|
||||||
|
|
||||||
temps, err := a.getTempsWithPanicRecovery(getSensorTemps)
|
temps, err := a.getTempsWithTimeout(getSensorTemps)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// retry once on panic (gopsutil/issues/1832)
|
// retry once on panic (gopsutil/issues/1832)
|
||||||
temps, err = a.getTempsWithPanicRecovery(getSensorTemps)
|
if !errors.Is(err, errTemperatureFetchTimeout) {
|
||||||
|
temps, err = a.getTempsWithTimeout(getSensorTemps)
|
||||||
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Warn("Error updating temperatures", "err", err)
|
slog.Warn("Error updating temperatures", "err", err)
|
||||||
if len(systemStats.Temperatures) > 0 {
|
if len(systemStats.Temperatures) > 0 {
|
||||||
@@ -135,7 +156,7 @@ func (a *Agent) updateTemperatures(systemStats *system.Stats) {
|
|||||||
case sensorName:
|
case sensorName:
|
||||||
a.systemInfo.DashboardTemp = sensor.Temperature
|
a.systemInfo.DashboardTemp = sensor.Temperature
|
||||||
}
|
}
|
||||||
systemStats.Temperatures[sensorName] = twoDecimals(sensor.Temperature)
|
systemStats.Temperatures[sensorName] = utils.TwoDecimals(sensor.Temperature)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -151,6 +172,34 @@ func (a *Agent) getTempsWithPanicRecovery(getTemps getTempsFn) (temps []sensors.
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (a *Agent) getTempsWithTimeout(getTemps getTempsFn) ([]sensors.TemperatureStat, error) {
|
||||||
|
type result struct {
|
||||||
|
temps []sensors.TemperatureStat
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use a longer timeout on the first run to allow for initialization
|
||||||
|
// (e.g. Windows LHM subprocess startup)
|
||||||
|
timeout := a.sensorConfig.timeout
|
||||||
|
if a.sensorConfig.firstRun {
|
||||||
|
a.sensorConfig.firstRun = false
|
||||||
|
timeout = 10 * time.Second
|
||||||
|
}
|
||||||
|
|
||||||
|
resultCh := make(chan result, 1)
|
||||||
|
go func() {
|
||||||
|
temps, err := a.getTempsWithPanicRecovery(getTemps)
|
||||||
|
resultCh <- result{temps: temps, err: err}
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case res := <-resultCh:
|
||||||
|
return res.temps, res.err
|
||||||
|
case <-time.After(timeout):
|
||||||
|
return nil, errTemperatureFetchTimeout
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// isValidSensor checks if a sensor is valid based on the sensor name and the sensor config
|
// isValidSensor checks if a sensor is valid based on the sensor name and the sensor config
|
||||||
func isValidSensor(sensorName string, config *SensorConfig) bool {
|
func isValidSensor(sensorName string, config *SensorConfig) bool {
|
||||||
// if no sensors configured, everything is valid
|
// if no sensors configured, everything is valid
|
||||||
|
|||||||
@@ -1,13 +1,12 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
@@ -169,6 +168,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
primarySensor string
|
primarySensor string
|
||||||
sysSensors string
|
sysSensors string
|
||||||
sensors string
|
sensors string
|
||||||
|
sensorsTimeout string
|
||||||
skipCollection bool
|
skipCollection bool
|
||||||
expectedConfig *SensorConfig
|
expectedConfig *SensorConfig
|
||||||
}{
|
}{
|
||||||
@@ -180,12 +180,37 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "",
|
primarySensor: "",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{},
|
sensors: map[string]struct{}{},
|
||||||
isBlacklist: false,
|
isBlacklist: false,
|
||||||
hasWildcards: false,
|
hasWildcards: false,
|
||||||
skipCollection: false,
|
skipCollection: false,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "Custom timeout",
|
||||||
|
primarySensor: "",
|
||||||
|
sysSensors: "",
|
||||||
|
sensors: "",
|
||||||
|
sensorsTimeout: "5s",
|
||||||
|
expectedConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 5 * time.Second,
|
||||||
|
sensors: map[string]struct{}{},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Invalid timeout falls back to default",
|
||||||
|
primarySensor: "",
|
||||||
|
sysSensors: "",
|
||||||
|
sensors: "",
|
||||||
|
sensorsTimeout: "notaduration",
|
||||||
|
expectedConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 2 * time.Second,
|
||||||
|
sensors: map[string]struct{}{},
|
||||||
|
},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
name: "Explicitly set to empty string",
|
name: "Explicitly set to empty string",
|
||||||
primarySensor: "",
|
primarySensor: "",
|
||||||
@@ -195,6 +220,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "",
|
primarySensor: "",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{},
|
sensors: map[string]struct{}{},
|
||||||
isBlacklist: false,
|
isBlacklist: false,
|
||||||
hasWildcards: false,
|
hasWildcards: false,
|
||||||
@@ -209,6 +235,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{},
|
sensors: map[string]struct{}{},
|
||||||
isBlacklist: false,
|
isBlacklist: false,
|
||||||
hasWildcards: false,
|
hasWildcards: false,
|
||||||
@@ -222,6 +249,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_temp": {},
|
"cpu_temp": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -238,6 +266,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_temp": {},
|
"cpu_temp": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -254,6 +283,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_*": {},
|
"cpu_*": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -270,6 +300,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
context: context.Background(),
|
context: context.Background(),
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_*": {},
|
"cpu_*": {},
|
||||||
"gpu_temp": {},
|
"gpu_temp": {},
|
||||||
@@ -285,6 +316,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
sensors: "cpu_temp",
|
sensors: "cpu_temp",
|
||||||
expectedConfig: &SensorConfig{
|
expectedConfig: &SensorConfig{
|
||||||
primarySensor: "cpu_temp",
|
primarySensor: "cpu_temp",
|
||||||
|
timeout: 2 * time.Second,
|
||||||
sensors: map[string]struct{}{
|
sensors: map[string]struct{}{
|
||||||
"cpu_temp": {},
|
"cpu_temp": {},
|
||||||
},
|
},
|
||||||
@@ -296,7 +328,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
result := agent.newSensorConfigWithEnv(tt.primarySensor, tt.sysSensors, tt.sensors, tt.skipCollection)
|
result := agent.newSensorConfigWithEnv(tt.primarySensor, tt.sysSensors, tt.sensors, tt.sensorsTimeout, tt.skipCollection)
|
||||||
|
|
||||||
// Check primary sensor
|
// Check primary sensor
|
||||||
assert.Equal(t, tt.expectedConfig.primarySensor, result.primarySensor)
|
assert.Equal(t, tt.expectedConfig.primarySensor, result.primarySensor)
|
||||||
@@ -315,6 +347,7 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
// Check flags
|
// Check flags
|
||||||
assert.Equal(t, tt.expectedConfig.isBlacklist, result.isBlacklist)
|
assert.Equal(t, tt.expectedConfig.isBlacklist, result.isBlacklist)
|
||||||
assert.Equal(t, tt.expectedConfig.hasWildcards, result.hasWildcards)
|
assert.Equal(t, tt.expectedConfig.hasWildcards, result.hasWildcards)
|
||||||
|
assert.Equal(t, tt.expectedConfig.timeout, result.timeout)
|
||||||
|
|
||||||
// Check context
|
// Check context
|
||||||
if tt.sysSensors != "" {
|
if tt.sysSensors != "" {
|
||||||
@@ -330,40 +363,18 @@ func TestNewSensorConfigWithEnv(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestNewSensorConfig(t *testing.T) {
|
func TestNewSensorConfig(t *testing.T) {
|
||||||
// Save original environment variables
|
|
||||||
originalPrimary, hasPrimary := os.LookupEnv("BESZEL_AGENT_PRIMARY_SENSOR")
|
|
||||||
originalSys, hasSys := os.LookupEnv("BESZEL_AGENT_SYS_SENSORS")
|
|
||||||
originalSensors, hasSensors := os.LookupEnv("BESZEL_AGENT_SENSORS")
|
|
||||||
|
|
||||||
// Restore environment variables after the test
|
|
||||||
defer func() {
|
|
||||||
// Clean up test environment variables
|
|
||||||
os.Unsetenv("BESZEL_AGENT_PRIMARY_SENSOR")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SYS_SENSORS")
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SENSORS")
|
|
||||||
|
|
||||||
// Restore original values if they existed
|
|
||||||
if hasPrimary {
|
|
||||||
os.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", originalPrimary)
|
|
||||||
}
|
|
||||||
if hasSys {
|
|
||||||
os.Setenv("BESZEL_AGENT_SYS_SENSORS", originalSys)
|
|
||||||
}
|
|
||||||
if hasSensors {
|
|
||||||
os.Setenv("BESZEL_AGENT_SENSORS", originalSensors)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Set test environment variables
|
// Set test environment variables
|
||||||
os.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", "test_primary")
|
t.Setenv("BESZEL_AGENT_PRIMARY_SENSOR", "test_primary")
|
||||||
os.Setenv("BESZEL_AGENT_SYS_SENSORS", "/test/path")
|
t.Setenv("BESZEL_AGENT_SYS_SENSORS", "/test/path")
|
||||||
os.Setenv("BESZEL_AGENT_SENSORS", "test_sensor1,test_*,test_sensor3")
|
t.Setenv("BESZEL_AGENT_SENSORS", "test_sensor1,test_*,test_sensor3")
|
||||||
|
t.Setenv("BESZEL_AGENT_SENSORS_TIMEOUT", "7s")
|
||||||
|
|
||||||
agent := &Agent{}
|
agent := &Agent{}
|
||||||
result := agent.newSensorConfig()
|
result := agent.newSensorConfig()
|
||||||
|
|
||||||
// Verify results
|
// Verify results
|
||||||
assert.Equal(t, "test_primary", result.primarySensor)
|
assert.Equal(t, "test_primary", result.primarySensor)
|
||||||
|
assert.Equal(t, 7*time.Second, result.timeout)
|
||||||
assert.NotNil(t, result.sensors)
|
assert.NotNil(t, result.sensors)
|
||||||
assert.Equal(t, 3, len(result.sensors))
|
assert.Equal(t, 3, len(result.sensors))
|
||||||
assert.True(t, result.hasWildcards)
|
assert.True(t, result.hasWildcards)
|
||||||
@@ -552,3 +563,59 @@ func TestGetTempsWithPanicRecovery(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestGetTempsWithTimeout(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
sensorConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 10 * time.Millisecond,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Run("returns temperatures before timeout", func(t *testing.T) {
|
||||||
|
temps, err := agent.getTempsWithTimeout(func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
return []sensors.TemperatureStat{{SensorKey: "cpu_temp", Temperature: 42}}, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Len(t, temps, 1)
|
||||||
|
assert.Equal(t, "cpu_temp", temps[0].SensorKey)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("returns timeout error when collector hangs", func(t *testing.T) {
|
||||||
|
temps, err := agent.getTempsWithTimeout(func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
return []sensors.TemperatureStat{{SensorKey: "cpu_temp", Temperature: 42}}, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.Nil(t, temps)
|
||||||
|
assert.ErrorIs(t, err, errTemperatureFetchTimeout)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateTemperaturesSkipsOnTimeout(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
systemInfo: system.Info{DashboardTemp: 99},
|
||||||
|
sensorConfig: &SensorConfig{
|
||||||
|
context: context.Background(),
|
||||||
|
timeout: 10 * time.Millisecond,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Cleanup(func() {
|
||||||
|
getSensorTemps = sensors.TemperaturesWithContext
|
||||||
|
})
|
||||||
|
getSensorTemps = func(ctx context.Context) ([]sensors.TemperatureStat, error) {
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
stats := &system.Stats{
|
||||||
|
Temperatures: map[string]float64{"stale": 50},
|
||||||
|
}
|
||||||
|
|
||||||
|
agent.updateTemperatures(stats)
|
||||||
|
|
||||||
|
assert.Equal(t, 0.0, agent.systemInfo.DashboardTemp)
|
||||||
|
assert.Equal(t, map[string]float64{}, stats.Temperatures)
|
||||||
|
}
|
||||||
|
|||||||
@@ -12,10 +12,9 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/common"
|
"github.com/henrygd/beszel/internal/common"
|
||||||
"github.com/henrygd/beszel/internal/entities/smart"
|
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
|
||||||
|
|
||||||
"github.com/blang/semver"
|
"github.com/blang/semver"
|
||||||
"github.com/fxamacker/cbor/v2"
|
"github.com/fxamacker/cbor/v2"
|
||||||
@@ -38,6 +37,9 @@ var hubVersions map[string]semver.Version
|
|||||||
// and begins listening for connections. Returns an error if the server
|
// and begins listening for connections. Returns an error if the server
|
||||||
// is already running or if there's an issue starting the server.
|
// is already running or if there's an issue starting the server.
|
||||||
func (a *Agent) StartServer(opts ServerOptions) error {
|
func (a *Agent) StartServer(opts ServerOptions) error {
|
||||||
|
if disableSSH, _ := utils.GetEnv("DISABLE_SSH"); disableSSH == "true" {
|
||||||
|
return errors.New("SSH disabled")
|
||||||
|
}
|
||||||
if a.server != nil {
|
if a.server != nil {
|
||||||
return errors.New("server already started")
|
return errors.New("server already started")
|
||||||
}
|
}
|
||||||
@@ -165,20 +167,9 @@ func (a *Agent) handleSSHRequest(w io.Writer, req *common.HubRequest[cbor.RawMes
|
|||||||
}
|
}
|
||||||
|
|
||||||
// responder that writes AgentResponse to stdout
|
// responder that writes AgentResponse to stdout
|
||||||
|
// Uses legacy typed fields for backward compatibility with <= 0.17
|
||||||
sshResponder := func(data any, requestID *uint32) error {
|
sshResponder := func(data any, requestID *uint32) error {
|
||||||
response := common.AgentResponse{Id: requestID}
|
response := newAgentResponse(data, requestID)
|
||||||
switch v := data.(type) {
|
|
||||||
case *system.CombinedData:
|
|
||||||
response.SystemData = v
|
|
||||||
case string:
|
|
||||||
response.String = &v
|
|
||||||
case map[string]smart.SmartData:
|
|
||||||
response.SmartData = v
|
|
||||||
case systemd.ServiceDetails:
|
|
||||||
response.ServiceInfo = v
|
|
||||||
default:
|
|
||||||
response.Error = fmt.Sprintf("unsupported response type: %T", data)
|
|
||||||
}
|
|
||||||
return cbor.NewEncoder(w).Encode(response)
|
return cbor.NewEncoder(w).Encode(response)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -202,7 +193,7 @@ func (a *Agent) handleSSHRequest(w io.Writer, req *common.HubRequest[cbor.RawMes
|
|||||||
|
|
||||||
// handleLegacyStats serves the legacy one-shot stats payload for older hubs
|
// handleLegacyStats serves the legacy one-shot stats payload for older hubs
|
||||||
func (a *Agent) handleLegacyStats(w io.Writer, hubVersion semver.Version) error {
|
func (a *Agent) handleLegacyStats(w io.Writer, hubVersion semver.Version) error {
|
||||||
stats := a.gatherStats(60_000)
|
stats := a.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||||
return a.writeToSession(w, stats, hubVersion)
|
return a.writeToSession(w, stats, hubVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -248,11 +239,11 @@ func ParseKeys(input string) ([]gossh.PublicKey, error) {
|
|||||||
// and finally defaults to ":45876".
|
// and finally defaults to ":45876".
|
||||||
func GetAddress(addr string) string {
|
func GetAddress(addr string) string {
|
||||||
if addr == "" {
|
if addr == "" {
|
||||||
addr, _ = GetEnv("LISTEN")
|
addr, _ = utils.GetEnv("LISTEN")
|
||||||
}
|
}
|
||||||
if addr == "" {
|
if addr == "" {
|
||||||
// Legacy PORT environment variable support
|
// Legacy PORT environment variable support
|
||||||
addr, _ = GetEnv("PORT")
|
addr, _ = utils.GetEnv("PORT")
|
||||||
}
|
}
|
||||||
if addr == "" {
|
if addr == "" {
|
||||||
return ":45876"
|
return ":45876"
|
||||||
@@ -268,7 +259,7 @@ func GetAddress(addr string) string {
|
|||||||
// It checks the NETWORK environment variable first, then infers from
|
// It checks the NETWORK environment variable first, then infers from
|
||||||
// the address format: addresses starting with "/" are "unix", others are "tcp".
|
// the address format: addresses starting with "/" are "unix", others are "tcp".
|
||||||
func GetNetwork(addr string) string {
|
func GetNetwork(addr string) string {
|
||||||
if network, ok := GetEnv("NETWORK"); ok && network != "" {
|
if network, ok := utils.GetEnv("NETWORK"); ok && network != "" {
|
||||||
return network
|
return network
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(addr, "/") {
|
if strings.HasPrefix(addr, "/") {
|
||||||
|
|||||||
@@ -1,3 +1,5 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -180,6 +182,22 @@ func TestStartServer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestStartServerDisableSSH(t *testing.T) {
|
||||||
|
t.Setenv("BESZEL_AGENT_DISABLE_SSH", "true")
|
||||||
|
|
||||||
|
agent, err := NewAgent("")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
opts := ServerOptions{
|
||||||
|
Network: "tcp",
|
||||||
|
Addr: ":45990",
|
||||||
|
}
|
||||||
|
|
||||||
|
err = agent.StartServer(opts)
|
||||||
|
assert.Error(t, err)
|
||||||
|
assert.Contains(t, err.Error(), "SSH disabled")
|
||||||
|
}
|
||||||
|
|
||||||
/////////////////////////////////////////////////////////////////
|
/////////////////////////////////////////////////////////////////
|
||||||
//////////////////// ParseKeys Tests ////////////////////////////
|
//////////////////// ParseKeys Tests ////////////////////////////
|
||||||
/////////////////////////////////////////////////////////////////
|
/////////////////////////////////////////////////////////////////
|
||||||
@@ -513,7 +531,7 @@ func TestWriteToSessionEncoding(t *testing.T) {
|
|||||||
err = json.Unmarshal([]byte(encodedData), &decodedJson)
|
err = json.Unmarshal([]byte(encodedData), &decodedJson)
|
||||||
assert.Error(t, err, "Should not be valid JSON data")
|
assert.Error(t, err, "Should not be valid JSON data")
|
||||||
|
|
||||||
assert.Equal(t, testData.Info.Hostname, decodedCbor.Info.Hostname)
|
assert.Equal(t, testData.Details.Hostname, decodedCbor.Details.Hostname)
|
||||||
assert.Equal(t, testData.Stats.Cpu, decodedCbor.Stats.Cpu)
|
assert.Equal(t, testData.Stats.Cpu, decodedCbor.Stats.Cpu)
|
||||||
} else {
|
} else {
|
||||||
// Should be JSON - try to decode as JSON
|
// Should be JSON - try to decode as JSON
|
||||||
@@ -526,7 +544,7 @@ func TestWriteToSessionEncoding(t *testing.T) {
|
|||||||
assert.Error(t, err, "Should not be valid CBOR data")
|
assert.Error(t, err, "Should not be valid CBOR data")
|
||||||
|
|
||||||
// Verify the decoded JSON data matches our test data
|
// Verify the decoded JSON data matches our test data
|
||||||
assert.Equal(t, testData.Info.Hostname, decodedJson.Info.Hostname)
|
assert.Equal(t, testData.Details.Hostname, decodedJson.Details.Hostname)
|
||||||
assert.Equal(t, testData.Stats.Cpu, decodedJson.Stats.Cpu)
|
assert.Equal(t, testData.Stats.Cpu, decodedJson.Stats.Cpu)
|
||||||
|
|
||||||
// Verify it looks like JSON (starts with '{' and contains readable field names)
|
// Verify it looks like JSON (starts with '{' and contains readable field names)
|
||||||
@@ -550,13 +568,12 @@ func createTestCombinedData() *system.CombinedData {
|
|||||||
DiskUsed: 549755813888, // 512GB
|
DiskUsed: 549755813888, // 512GB
|
||||||
DiskPct: 50.0,
|
DiskPct: 50.0,
|
||||||
},
|
},
|
||||||
Info: system.Info{
|
Details: &system.Details{
|
||||||
Hostname: "test-host",
|
Hostname: "test-host",
|
||||||
Cores: 8,
|
},
|
||||||
CpuModel: "Test CPU Model",
|
Info: system.Info{
|
||||||
Uptime: 3600,
|
Uptime: 3600,
|
||||||
AgentVersion: "0.12.0",
|
AgentVersion: "0.12.0",
|
||||||
Os: system.Linux,
|
|
||||||
},
|
},
|
||||||
Containers: []*container.Stats{
|
Containers: []*container.Stats{
|
||||||
{
|
{
|
||||||
|
|||||||
387
agent/smart.go
387
agent/smart.go
@@ -8,17 +8,18 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/smart"
|
"github.com/henrygd/beszel/internal/entities/smart"
|
||||||
|
|
||||||
"golang.org/x/exp/slog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// SmartManager manages data collection for SMART devices
|
// SmartManager manages data collection for SMART devices
|
||||||
@@ -28,8 +29,11 @@ type SmartManager struct {
|
|||||||
SmartDevices []*DeviceInfo
|
SmartDevices []*DeviceInfo
|
||||||
refreshMutex sync.Mutex
|
refreshMutex sync.Mutex
|
||||||
lastScanTime time.Time
|
lastScanTime time.Time
|
||||||
binPath string
|
smartctlPath string
|
||||||
excludedDevices map[string]struct{}
|
excludedDevices map[string]struct{}
|
||||||
|
darwinNvmeOnce sync.Once
|
||||||
|
darwinNvmeCapacity map[string]uint64 // serial → bytes cache, written once via darwinNvmeOnce
|
||||||
|
darwinNvmeProvider func() ([]byte, error) // overridable for testing
|
||||||
}
|
}
|
||||||
|
|
||||||
type scanOutput struct {
|
type scanOutput struct {
|
||||||
@@ -53,6 +57,12 @@ type DeviceInfo struct {
|
|||||||
parserType string
|
parserType string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// deviceKey is a composite key for a device, used to identify a device uniquely.
|
||||||
|
type deviceKey struct {
|
||||||
|
name string
|
||||||
|
deviceType string
|
||||||
|
}
|
||||||
|
|
||||||
var errNoValidSmartData = fmt.Errorf("no valid SMART data found") // Error for missing data
|
var errNoValidSmartData = fmt.Errorf("no valid SMART data found") // Error for missing data
|
||||||
|
|
||||||
// Refresh updates SMART data for all known devices
|
// Refresh updates SMART data for all known devices
|
||||||
@@ -150,7 +160,7 @@ func (sm *SmartManager) ScanDevices(force bool) error {
|
|||||||
currentDevices := sm.devicesSnapshot()
|
currentDevices := sm.devicesSnapshot()
|
||||||
|
|
||||||
var configuredDevices []*DeviceInfo
|
var configuredDevices []*DeviceInfo
|
||||||
if configuredRaw, ok := GetEnv("SMART_DEVICES"); ok {
|
if configuredRaw, ok := utils.GetEnv("SMART_DEVICES"); ok {
|
||||||
slog.Info("SMART_DEVICES", "value", configuredRaw)
|
slog.Info("SMART_DEVICES", "value", configuredRaw)
|
||||||
config := strings.TrimSpace(configuredRaw)
|
config := strings.TrimSpace(configuredRaw)
|
||||||
if config == "" {
|
if config == "" {
|
||||||
@@ -164,18 +174,18 @@ func (sm *SmartManager) ScanDevices(force bool) error {
|
|||||||
configuredDevices = parsedDevices
|
configuredDevices = parsedDevices
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
cmd := exec.CommandContext(ctx, sm.binPath, "--scan", "-j")
|
|
||||||
output, err := cmd.Output()
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
scanErr error
|
scanErr error
|
||||||
scannedDevices []*DeviceInfo
|
scannedDevices []*DeviceInfo
|
||||||
hasValidScan bool
|
hasValidScan bool
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if sm.smartctlPath != "" {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cmd := exec.CommandContext(ctx, sm.smartctlPath, "--scan", "-j")
|
||||||
|
output, err := cmd.Output()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
scanErr = err
|
scanErr = err
|
||||||
} else {
|
} else {
|
||||||
@@ -184,6 +194,21 @@ func (sm *SmartManager) ScanDevices(force bool) error {
|
|||||||
scanErr = errNoValidSmartData
|
scanErr = errNoValidSmartData
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add eMMC devices (Linux only) by reading sysfs health fields. This does not
|
||||||
|
// require smartctl and does not scan the whole device.
|
||||||
|
if emmcDevices := scanEmmcDevices(); len(emmcDevices) > 0 {
|
||||||
|
scannedDevices = append(scannedDevices, emmcDevices...)
|
||||||
|
hasValidScan = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add Linux mdraid arrays by reading sysfs health fields. This does not
|
||||||
|
// require smartctl and does not scan the whole device.
|
||||||
|
if raidDevices := scanMdraidDevices(); len(raidDevices) > 0 {
|
||||||
|
scannedDevices = append(scannedDevices, raidDevices...)
|
||||||
|
hasValidScan = true
|
||||||
|
}
|
||||||
|
|
||||||
finalDevices := mergeDeviceLists(currentDevices, scannedDevices, configuredDevices)
|
finalDevices := mergeDeviceLists(currentDevices, scannedDevices, configuredDevices)
|
||||||
finalDevices = sm.filterExcludedDevices(finalDevices)
|
finalDevices = sm.filterExcludedDevices(finalDevices)
|
||||||
@@ -201,7 +226,11 @@ func (sm *SmartManager) ScanDevices(force bool) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SmartManager) parseConfiguredDevices(config string) ([]*DeviceInfo, error) {
|
func (sm *SmartManager) parseConfiguredDevices(config string) ([]*DeviceInfo, error) {
|
||||||
entries := strings.Split(config, ",")
|
splitChar, _ := utils.GetEnv("SMART_DEVICES_SEPARATOR")
|
||||||
|
if splitChar == "" {
|
||||||
|
splitChar = ","
|
||||||
|
}
|
||||||
|
entries := strings.Split(config, splitChar)
|
||||||
devices := make([]*DeviceInfo, 0, len(entries))
|
devices := make([]*DeviceInfo, 0, len(entries))
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
entry = strings.TrimSpace(entry)
|
entry = strings.TrimSpace(entry)
|
||||||
@@ -235,7 +264,7 @@ func (sm *SmartManager) parseConfiguredDevices(config string) ([]*DeviceInfo, er
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (sm *SmartManager) refreshExcludedDevices() {
|
func (sm *SmartManager) refreshExcludedDevices() {
|
||||||
rawValue, _ := GetEnv("EXCLUDE_SMART")
|
rawValue, _ := utils.GetEnv("EXCLUDE_SMART")
|
||||||
sm.excludedDevices = make(map[string]struct{})
|
sm.excludedDevices = make(map[string]struct{})
|
||||||
|
|
||||||
for entry := range strings.SplitSeq(rawValue, ",") {
|
for entry := range strings.SplitSeq(rawValue, ",") {
|
||||||
@@ -325,6 +354,13 @@ func normalizeParserType(value string) string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// makeDeviceKey creates a composite key from device name and type.
|
||||||
|
// This allows multiple drives under the same device path (e.g., RAID controllers)
|
||||||
|
// to be tracked separately.
|
||||||
|
func makeDeviceKey(name, deviceType string) deviceKey {
|
||||||
|
return deviceKey{name: name, deviceType: deviceType}
|
||||||
|
}
|
||||||
|
|
||||||
// parseSmartOutput attempts each SMART parser, optionally detecting the type when
|
// parseSmartOutput attempts each SMART parser, optionally detecting the type when
|
||||||
// it is not provided, and updates the device info when a parser succeeds.
|
// it is not provided, and updates the device info when a parser succeeds.
|
||||||
func (sm *SmartManager) parseSmartOutput(deviceInfo *DeviceInfo, output []byte) bool {
|
func (sm *SmartManager) parseSmartOutput(deviceInfo *DeviceInfo, output []byte) bool {
|
||||||
@@ -425,35 +461,81 @@ func (sm *SmartManager) CollectSmart(deviceInfo *DeviceInfo) error {
|
|||||||
return errNoValidSmartData
|
return errNoValidSmartData
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// mdraid health is not exposed via SMART; Linux exposes array state in sysfs.
|
||||||
|
if deviceInfo != nil {
|
||||||
|
if ok, err := sm.collectMdraidHealth(deviceInfo); ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// eMMC health is not exposed via SMART on Linux, but the kernel provides
|
||||||
|
// wear / EOL indicators via sysfs. Prefer that path when available.
|
||||||
|
if deviceInfo != nil {
|
||||||
|
if ok, err := sm.collectEmmcHealth(deviceInfo); ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if sm.smartctlPath == "" {
|
||||||
|
return errNoValidSmartData
|
||||||
|
}
|
||||||
|
|
||||||
// slog.Info("collecting SMART data", "device", deviceInfo.Name, "type", deviceInfo.Type, "has_existing_data", sm.hasDataForDevice(deviceInfo.Name))
|
// slog.Info("collecting SMART data", "device", deviceInfo.Name, "type", deviceInfo.Type, "has_existing_data", sm.hasDataForDevice(deviceInfo.Name))
|
||||||
|
|
||||||
// Check if we have any existing data for this device
|
// Check if we have any existing data for this device
|
||||||
hasExistingData := sm.hasDataForDevice(deviceInfo.Name)
|
hasExistingData := sm.hasDataForDevice(deviceInfo.Name)
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
// Try with -n standby first if we have existing data
|
// Try with -n standby first if we have existing data
|
||||||
args := sm.smartctlArgs(deviceInfo, true)
|
args := sm.smartctlArgs(deviceInfo, hasExistingData)
|
||||||
cmd := exec.CommandContext(ctx, sm.binPath, args...)
|
cmd := exec.CommandContext(ctx, sm.smartctlPath, args...)
|
||||||
output, err := cmd.CombinedOutput()
|
output, err := cmd.CombinedOutput()
|
||||||
|
|
||||||
// Check if device is in standby (exit status 2)
|
// Check if device is in standby (exit status 2)
|
||||||
if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 2 {
|
if exitErr, ok := errors.AsType[*exec.ExitError](err); ok && exitErr.ExitCode() == 2 {
|
||||||
if hasExistingData {
|
if hasExistingData {
|
||||||
// Device is in standby and we have cached data, keep using cache
|
// Device is in standby and we have cached data, keep using cache
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// No cached data, need to collect initial data by bypassing standby
|
// No cached data, need to collect initial data by bypassing standby
|
||||||
ctx2, cancel2 := context.WithTimeout(context.Background(), 2*time.Second)
|
ctx2, cancel2 := context.WithTimeout(context.Background(), 15*time.Second)
|
||||||
defer cancel2()
|
defer cancel2()
|
||||||
args = sm.smartctlArgs(deviceInfo, false)
|
args = sm.smartctlArgs(deviceInfo, false)
|
||||||
cmd = exec.CommandContext(ctx2, sm.binPath, args...)
|
cmd = exec.CommandContext(ctx2, sm.smartctlPath, args...)
|
||||||
output, err = cmd.CombinedOutput()
|
output, err = cmd.CombinedOutput()
|
||||||
}
|
}
|
||||||
|
|
||||||
hasValidData := sm.parseSmartOutput(deviceInfo, output)
|
hasValidData := sm.parseSmartOutput(deviceInfo, output)
|
||||||
|
|
||||||
|
// If NVMe controller path failed, try namespace path as fallback.
|
||||||
|
// NVMe controllers (/dev/nvme0) don't always support SMART queries. See github.com/henrygd/beszel/issues/1504
|
||||||
|
if !hasValidData && err != nil && isNvmeControllerPath(deviceInfo.Name) {
|
||||||
|
controllerPath := deviceInfo.Name
|
||||||
|
namespacePath := controllerPath + "n1"
|
||||||
|
if !sm.isExcludedDevice(namespacePath) {
|
||||||
|
deviceInfo.Name = namespacePath
|
||||||
|
|
||||||
|
ctx3, cancel3 := context.WithTimeout(context.Background(), 15*time.Second)
|
||||||
|
defer cancel3()
|
||||||
|
args = sm.smartctlArgs(deviceInfo, false)
|
||||||
|
cmd = exec.CommandContext(ctx3, sm.smartctlPath, args...)
|
||||||
|
output, err = cmd.CombinedOutput()
|
||||||
|
hasValidData = sm.parseSmartOutput(deviceInfo, output)
|
||||||
|
|
||||||
|
// Auto-exclude the controller path so future scans don't re-add it
|
||||||
|
if hasValidData {
|
||||||
|
sm.Lock()
|
||||||
|
if sm.excludedDevices == nil {
|
||||||
|
sm.excludedDevices = make(map[string]struct{})
|
||||||
|
}
|
||||||
|
sm.excludedDevices[controllerPath] = struct{}{}
|
||||||
|
sm.Unlock()
|
||||||
|
slog.Debug("auto-excluded NVMe controller path", "path", controllerPath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if !hasValidData {
|
if !hasValidData {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Info("smartctl failed", "device", deviceInfo.Name, "err", err)
|
slog.Info("smartctl failed", "device", deviceInfo.Name, "err", err)
|
||||||
@@ -469,10 +551,12 @@ func (sm *SmartManager) CollectSmart(deviceInfo *DeviceInfo) error {
|
|||||||
// smartctlArgs returns the arguments for the smartctl command
|
// smartctlArgs returns the arguments for the smartctl command
|
||||||
// based on the device type and whether to include standby mode
|
// based on the device type and whether to include standby mode
|
||||||
func (sm *SmartManager) smartctlArgs(deviceInfo *DeviceInfo, includeStandby bool) []string {
|
func (sm *SmartManager) smartctlArgs(deviceInfo *DeviceInfo, includeStandby bool) []string {
|
||||||
args := make([]string, 0, 7)
|
args := make([]string, 0, 9)
|
||||||
|
var deviceType, parserType string
|
||||||
|
|
||||||
if deviceInfo != nil {
|
if deviceInfo != nil {
|
||||||
deviceType := strings.ToLower(deviceInfo.Type)
|
deviceType = strings.ToLower(deviceInfo.Type)
|
||||||
|
parserType = strings.ToLower(deviceInfo.parserType)
|
||||||
// types sometimes misidentified in scan; see github.com/henrygd/beszel/issues/1345
|
// types sometimes misidentified in scan; see github.com/henrygd/beszel/issues/1345
|
||||||
if deviceType != "" && deviceType != "scsi" && deviceType != "ata" {
|
if deviceType != "" && deviceType != "scsi" && deviceType != "ata" {
|
||||||
args = append(args, "-d", deviceInfo.Type)
|
args = append(args, "-d", deviceInfo.Type)
|
||||||
@@ -480,6 +564,13 @@ func (sm *SmartManager) smartctlArgs(deviceInfo *DeviceInfo, includeStandby bool
|
|||||||
}
|
}
|
||||||
|
|
||||||
args = append(args, "-a", "--json=c")
|
args = append(args, "-a", "--json=c")
|
||||||
|
effectiveType := parserType
|
||||||
|
if effectiveType == "" {
|
||||||
|
effectiveType = deviceType
|
||||||
|
}
|
||||||
|
if effectiveType == "sat" || effectiveType == "ata" {
|
||||||
|
args = append(args, "-l", "devstat")
|
||||||
|
}
|
||||||
|
|
||||||
if includeStandby {
|
if includeStandby {
|
||||||
args = append(args, "-n", "standby")
|
args = append(args, "-n", "standby")
|
||||||
@@ -540,6 +631,28 @@ func mergeDeviceLists(existing, scanned, configured []*DeviceInfo) []*DeviceInfo
|
|||||||
return existing
|
return existing
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// buildUniqueNameIndex returns devices that appear exactly once by name.
|
||||||
|
// It is used to safely apply name-only fallbacks without RAID ambiguity.
|
||||||
|
buildUniqueNameIndex := func(devices []*DeviceInfo) map[string]*DeviceInfo {
|
||||||
|
counts := make(map[string]int, len(devices))
|
||||||
|
for _, dev := range devices {
|
||||||
|
if dev == nil || dev.Name == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
counts[dev.Name]++
|
||||||
|
}
|
||||||
|
unique := make(map[string]*DeviceInfo, len(counts))
|
||||||
|
for _, dev := range devices {
|
||||||
|
if dev == nil || dev.Name == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if counts[dev.Name] == 1 {
|
||||||
|
unique[dev.Name] = dev
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return unique
|
||||||
|
}
|
||||||
|
|
||||||
// preserveVerifiedType copies the verified type/parser metadata from an existing
|
// preserveVerifiedType copies the verified type/parser metadata from an existing
|
||||||
// device record so that subsequent scans/config updates never downgrade a
|
// device record so that subsequent scans/config updates never downgrade a
|
||||||
// previously verified device.
|
// previously verified device.
|
||||||
@@ -552,69 +665,90 @@ func mergeDeviceLists(existing, scanned, configured []*DeviceInfo) []*DeviceInfo
|
|||||||
target.parserType = prev.parserType
|
target.parserType = prev.parserType
|
||||||
}
|
}
|
||||||
|
|
||||||
existingIndex := make(map[string]*DeviceInfo, len(existing))
|
// applyConfiguredMetadata updates a matched device with any configured
|
||||||
|
// overrides, preserving verified type data when present.
|
||||||
|
applyConfiguredMetadata := func(existingDev, configuredDev *DeviceInfo) {
|
||||||
|
// Only update the type if it has not been verified yet; otherwise we
|
||||||
|
// keep the existing verified metadata intact.
|
||||||
|
if configuredDev.Type != "" && !existingDev.typeVerified {
|
||||||
|
newType := strings.TrimSpace(configuredDev.Type)
|
||||||
|
existingDev.Type = newType
|
||||||
|
existingDev.typeVerified = false
|
||||||
|
existingDev.parserType = normalizeParserType(newType)
|
||||||
|
}
|
||||||
|
if configuredDev.InfoName != "" {
|
||||||
|
existingDev.InfoName = configuredDev.InfoName
|
||||||
|
}
|
||||||
|
if configuredDev.Protocol != "" {
|
||||||
|
existingDev.Protocol = configuredDev.Protocol
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
existingIndex := make(map[deviceKey]*DeviceInfo, len(existing))
|
||||||
for _, dev := range existing {
|
for _, dev := range existing {
|
||||||
if dev == nil || dev.Name == "" {
|
if dev == nil || dev.Name == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
existingIndex[dev.Name] = dev
|
existingIndex[makeDeviceKey(dev.Name, dev.Type)] = dev
|
||||||
}
|
}
|
||||||
|
existingByName := buildUniqueNameIndex(existing)
|
||||||
|
|
||||||
finalDevices := make([]*DeviceInfo, 0, len(scanned)+len(configured))
|
finalDevices := make([]*DeviceInfo, 0, len(scanned)+len(configured))
|
||||||
deviceIndex := make(map[string]*DeviceInfo, len(scanned)+len(configured))
|
deviceIndex := make(map[deviceKey]*DeviceInfo, len(scanned)+len(configured))
|
||||||
|
|
||||||
// Start with the newly scanned devices so we always surface fresh metadata,
|
// Start with the newly scanned devices so we always surface fresh metadata,
|
||||||
// but ensure we retain any previously verified parser assignment.
|
// but ensure we retain any previously verified parser assignment.
|
||||||
for _, dev := range scanned {
|
for _, scannedDevice := range scanned {
|
||||||
if dev == nil || dev.Name == "" {
|
if scannedDevice == nil || scannedDevice.Name == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Work on a copy so we can safely adjust metadata without mutating the
|
// Work on a copy so we can safely adjust metadata without mutating the
|
||||||
// input slices that may be reused elsewhere.
|
// input slices that may be reused elsewhere.
|
||||||
copyDev := *dev
|
copyDev := *scannedDevice
|
||||||
if prev := existingIndex[copyDev.Name]; prev != nil {
|
key := makeDeviceKey(copyDev.Name, copyDev.Type)
|
||||||
|
if prev := existingIndex[key]; prev != nil {
|
||||||
|
preserveVerifiedType(©Dev, prev)
|
||||||
|
} else if prev := existingByName[copyDev.Name]; prev != nil {
|
||||||
preserveVerifiedType(©Dev, prev)
|
preserveVerifiedType(©Dev, prev)
|
||||||
}
|
}
|
||||||
|
|
||||||
finalDevices = append(finalDevices, ©Dev)
|
finalDevices = append(finalDevices, ©Dev)
|
||||||
deviceIndex[copyDev.Name] = finalDevices[len(finalDevices)-1]
|
copyKey := makeDeviceKey(copyDev.Name, copyDev.Type)
|
||||||
|
deviceIndex[copyKey] = finalDevices[len(finalDevices)-1]
|
||||||
}
|
}
|
||||||
|
deviceIndexByName := buildUniqueNameIndex(finalDevices)
|
||||||
|
|
||||||
// Merge configured devices on top so users can override scan results (except
|
// Merge configured devices on top so users can override scan results (except
|
||||||
// for verified type information).
|
// for verified type information).
|
||||||
for _, dev := range configured {
|
for _, configuredDevice := range configured {
|
||||||
if dev == nil || dev.Name == "" {
|
if configuredDevice == nil || configuredDevice.Name == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if existingDev, ok := deviceIndex[dev.Name]; ok {
|
key := makeDeviceKey(configuredDevice.Name, configuredDevice.Type)
|
||||||
// Only update the type if it has not been verified yet; otherwise we
|
if existingDev, ok := deviceIndex[key]; ok {
|
||||||
// keep the existing verified metadata intact.
|
applyConfiguredMetadata(existingDev, configuredDevice)
|
||||||
if dev.Type != "" && !existingDev.typeVerified {
|
continue
|
||||||
newType := strings.TrimSpace(dev.Type)
|
|
||||||
existingDev.Type = newType
|
|
||||||
existingDev.typeVerified = false
|
|
||||||
existingDev.parserType = normalizeParserType(newType)
|
|
||||||
}
|
|
||||||
if dev.InfoName != "" {
|
|
||||||
existingDev.InfoName = dev.InfoName
|
|
||||||
}
|
|
||||||
if dev.Protocol != "" {
|
|
||||||
existingDev.Protocol = dev.Protocol
|
|
||||||
}
|
}
|
||||||
|
if existingDev := deviceIndexByName[configuredDevice.Name]; existingDev != nil {
|
||||||
|
applyConfiguredMetadata(existingDev, configuredDevice)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
copyDev := *dev
|
copyDev := *configuredDevice
|
||||||
if prev := existingIndex[copyDev.Name]; prev != nil {
|
key = makeDeviceKey(copyDev.Name, copyDev.Type)
|
||||||
|
if prev := existingIndex[key]; prev != nil {
|
||||||
|
preserveVerifiedType(©Dev, prev)
|
||||||
|
} else if prev := existingByName[copyDev.Name]; prev != nil {
|
||||||
preserveVerifiedType(©Dev, prev)
|
preserveVerifiedType(©Dev, prev)
|
||||||
} else if copyDev.Type != "" {
|
} else if copyDev.Type != "" {
|
||||||
copyDev.parserType = normalizeParserType(copyDev.Type)
|
copyDev.parserType = normalizeParserType(copyDev.Type)
|
||||||
}
|
}
|
||||||
|
|
||||||
finalDevices = append(finalDevices, ©Dev)
|
finalDevices = append(finalDevices, ©Dev)
|
||||||
deviceIndex[copyDev.Name] = finalDevices[len(finalDevices)-1]
|
copyKey := makeDeviceKey(copyDev.Name, copyDev.Type)
|
||||||
|
deviceIndex[copyKey] = finalDevices[len(finalDevices)-1]
|
||||||
}
|
}
|
||||||
|
|
||||||
return finalDevices
|
return finalDevices
|
||||||
@@ -632,12 +766,14 @@ func (sm *SmartManager) updateSmartDevices(devices []*DeviceInfo) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
validNames := make(map[string]struct{}, len(devices))
|
validKeys := make(map[deviceKey]struct{}, len(devices))
|
||||||
|
nameCounts := make(map[string]int, len(devices))
|
||||||
for _, device := range devices {
|
for _, device := range devices {
|
||||||
if device == nil || device.Name == "" {
|
if device == nil || device.Name == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
validNames[device.Name] = struct{}{}
|
validKeys[makeDeviceKey(device.Name, device.Type)] = struct{}{}
|
||||||
|
nameCounts[device.Name]++
|
||||||
}
|
}
|
||||||
|
|
||||||
for key, data := range sm.SmartDataMap {
|
for key, data := range sm.SmartDataMap {
|
||||||
@@ -646,7 +782,11 @@ func (sm *SmartManager) updateSmartDevices(devices []*DeviceInfo) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, ok := validNames[data.DiskName]; ok {
|
if data.DiskType == "" {
|
||||||
|
if nameCounts[data.DiskName] == 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if _, ok := validKeys[makeDeviceKey(data.DiskName, data.DiskType)]; ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -738,6 +878,14 @@ func (sm *SmartManager) parseSmartForSata(output []byte) (bool, int) {
|
|||||||
smartData.DiskName = data.Device.Name
|
smartData.DiskName = data.Device.Name
|
||||||
smartData.DiskType = data.Device.Type
|
smartData.DiskType = data.Device.Type
|
||||||
|
|
||||||
|
// get values from ata_device_statistics if necessary
|
||||||
|
var ataDeviceStats smart.AtaDeviceStatistics
|
||||||
|
if smartData.Temperature == 0 {
|
||||||
|
if temp := findAtaDeviceStatisticsValue(&data, &ataDeviceStats, 5, "Current Temperature", 0, 255); temp != nil {
|
||||||
|
smartData.Temperature = uint8(*temp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// update SmartAttributes
|
// update SmartAttributes
|
||||||
smartData.Attributes = make([]*smart.SmartAttribute, 0, len(data.AtaSmartAttributes.Table))
|
smartData.Attributes = make([]*smart.SmartAttribute, 0, len(data.AtaSmartAttributes.Table))
|
||||||
for _, attr := range data.AtaSmartAttributes.Table {
|
for _, attr := range data.AtaSmartAttributes.Table {
|
||||||
@@ -772,6 +920,36 @@ func getSmartStatus(temperature uint8, passed bool) string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// findAtaDeviceStatisticsEntry centralizes ATA devstat lookups so additional
|
||||||
|
// metrics can be pulled from the same structure in the future.
|
||||||
|
func findAtaDeviceStatisticsValue(data *smart.SmartInfoForSata, ataDeviceStats *smart.AtaDeviceStatistics, entryNumber uint8, entryName string, minValue, maxValue int64) *int64 {
|
||||||
|
if len(ataDeviceStats.Pages) == 0 {
|
||||||
|
if len(data.AtaDeviceStatistics) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(data.AtaDeviceStatistics, ataDeviceStats); err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for pageIdx := range ataDeviceStats.Pages {
|
||||||
|
page := &ataDeviceStats.Pages[pageIdx]
|
||||||
|
if page.Number != entryNumber {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for entryIdx := range page.Table {
|
||||||
|
entry := &page.Table[entryIdx]
|
||||||
|
if !strings.EqualFold(entry.Name, entryName) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if entry.Value == nil || *entry.Value < minValue || *entry.Value > maxValue {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return entry.Value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (sm *SmartManager) parseSmartForScsi(output []byte) (bool, int) {
|
func (sm *SmartManager) parseSmartForScsi(output []byte) (bool, int) {
|
||||||
var data smart.SmartInfoForScsi
|
var data smart.SmartInfoForScsi
|
||||||
|
|
||||||
@@ -858,6 +1036,52 @@ func parseScsiGigabytesProcessed(value string) int64 {
|
|||||||
return parsed
|
return parsed
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// lookupDarwinNvmeCapacity returns the capacity in bytes for a given NVMe serial number on Darwin.
|
||||||
|
// It uses system_profiler SPNVMeDataType to get capacity since Apple SSDs don't report user_capacity
|
||||||
|
// via smartctl. Results are cached after the first call via sync.Once.
|
||||||
|
func (sm *SmartManager) lookupDarwinNvmeCapacity(serial string) uint64 {
|
||||||
|
sm.darwinNvmeOnce.Do(func() {
|
||||||
|
sm.darwinNvmeCapacity = make(map[string]uint64)
|
||||||
|
|
||||||
|
provider := sm.darwinNvmeProvider
|
||||||
|
if provider == nil {
|
||||||
|
provider = func() ([]byte, error) {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
return exec.CommandContext(ctx, "system_profiler", "SPNVMeDataType", "-json").Output()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out, err := provider()
|
||||||
|
if err != nil {
|
||||||
|
slog.Debug("system_profiler NVMe lookup failed", "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var result struct {
|
||||||
|
SPNVMeDataType []struct {
|
||||||
|
Items []struct {
|
||||||
|
DeviceSerial string `json:"device_serial"`
|
||||||
|
SizeInBytes uint64 `json:"size_in_bytes"`
|
||||||
|
} `json:"_items"`
|
||||||
|
} `json:"SPNVMeDataType"`
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(out, &result); err != nil {
|
||||||
|
slog.Debug("system_profiler NVMe parse failed", "err", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, controller := range result.SPNVMeDataType {
|
||||||
|
for _, item := range controller.Items {
|
||||||
|
if item.DeviceSerial != "" && item.SizeInBytes > 0 {
|
||||||
|
sm.darwinNvmeCapacity[item.DeviceSerial] = item.SizeInBytes
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return sm.darwinNvmeCapacity[serial]
|
||||||
|
}
|
||||||
|
|
||||||
// parseSmartForNvme parses the output of smartctl --all -j /dev/nvmeX and updates the SmartDataMap
|
// parseSmartForNvme parses the output of smartctl --all -j /dev/nvmeX and updates the SmartDataMap
|
||||||
// Returns hasValidData and exitStatus
|
// Returns hasValidData and exitStatus
|
||||||
func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
||||||
@@ -894,6 +1118,9 @@ func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
|||||||
smartData.SerialNumber = data.SerialNumber
|
smartData.SerialNumber = data.SerialNumber
|
||||||
smartData.FirmwareVersion = data.FirmwareVersion
|
smartData.FirmwareVersion = data.FirmwareVersion
|
||||||
smartData.Capacity = data.UserCapacity.Bytes
|
smartData.Capacity = data.UserCapacity.Bytes
|
||||||
|
if smartData.Capacity == 0 && (runtime.GOOS == "darwin" || sm.darwinNvmeProvider != nil) {
|
||||||
|
smartData.Capacity = sm.lookupDarwinNvmeCapacity(data.SerialNumber)
|
||||||
|
}
|
||||||
smartData.Temperature = data.NVMeSmartHealthInformationLog.Temperature
|
smartData.Temperature = data.NVMeSmartHealthInformationLog.Temperature
|
||||||
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
smartData.SmartStatus = getSmartStatus(smartData.Temperature, data.SmartStatus.Passed)
|
||||||
smartData.DiskName = data.Device.Name
|
smartData.DiskName = data.Device.Name
|
||||||
@@ -929,32 +1156,42 @@ func (sm *SmartManager) parseSmartForNvme(output []byte) (bool, int) {
|
|||||||
|
|
||||||
// detectSmartctl checks if smartctl is installed, returns an error if not
|
// detectSmartctl checks if smartctl is installed, returns an error if not
|
||||||
func (sm *SmartManager) detectSmartctl() (string, error) {
|
func (sm *SmartManager) detectSmartctl() (string, error) {
|
||||||
isWindows := runtime.GOOS == "windows"
|
if runtime.GOOS == "windows" {
|
||||||
|
|
||||||
// Load embedded smartctl.exe for Windows amd64 builds.
|
// Load embedded smartctl.exe for Windows amd64 builds.
|
||||||
if isWindows && runtime.GOARCH == "amd64" {
|
if runtime.GOARCH == "amd64" {
|
||||||
if path, err := ensureEmbeddedSmartctl(); err == nil {
|
if path, err := ensureEmbeddedSmartctl(); err == nil {
|
||||||
return path, nil
|
return path, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// Try to find smartctl in the default installation location
|
||||||
if path, err := exec.LookPath("smartctl"); err == nil {
|
const location = "C:\\Program Files\\smartmontools\\bin\\smartctl.exe"
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
locations := []string{}
|
|
||||||
if isWindows {
|
|
||||||
locations = append(locations,
|
|
||||||
"C:\\Program Files\\smartmontools\\bin\\smartctl.exe",
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
locations = append(locations, "/opt/homebrew/bin/smartctl")
|
|
||||||
}
|
|
||||||
for _, location := range locations {
|
|
||||||
if _, err := os.Stat(location); err == nil {
|
if _, err := os.Stat(location); err == nil {
|
||||||
return location, nil
|
return location, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return "", errors.New("smartctl not found")
|
|
||||||
|
return utils.LookPathHomebrew("smartctl")
|
||||||
|
}
|
||||||
|
|
||||||
|
// isNvmeControllerPath checks if the path matches an NVMe controller pattern
|
||||||
|
// like /dev/nvme0, /dev/nvme1, etc. (without namespace suffix like n1)
|
||||||
|
func isNvmeControllerPath(path string) bool {
|
||||||
|
base := filepath.Base(path)
|
||||||
|
if !strings.HasPrefix(base, "nvme") {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
suffix := strings.TrimPrefix(base, "nvme")
|
||||||
|
if suffix == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
// Controller paths are just "nvme" + digits (e.g., nvme0, nvme1)
|
||||||
|
// Namespace paths have "n" after the controller number (e.g., nvme0n1)
|
||||||
|
for _, c := range suffix {
|
||||||
|
if c < '0' || c > '9' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSmartManager creates and initializes a new SmartManager
|
// NewSmartManager creates and initializes a new SmartManager
|
||||||
@@ -964,11 +1201,17 @@ func NewSmartManager() (*SmartManager, error) {
|
|||||||
}
|
}
|
||||||
sm.refreshExcludedDevices()
|
sm.refreshExcludedDevices()
|
||||||
path, err := sm.detectSmartctl()
|
path, err := sm.detectSmartctl()
|
||||||
|
slog.Debug("smartctl", "path", path, "err", err)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Debug(err.Error())
|
// Keep the previous fail-fast behavior unless this Linux host exposes
|
||||||
return nil, err
|
// eMMC or mdraid health via sysfs, in which case smartctl is optional.
|
||||||
}
|
if runtime.GOOS == "linux" {
|
||||||
slog.Debug("smartctl", "path", path)
|
if len(scanEmmcDevices()) > 0 || len(scanMdraidDevices()) > 0 {
|
||||||
sm.binPath = path
|
return sm, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
sm.smartctlPath = path
|
||||||
return sm, nil
|
return sm, nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package agent
|
package agent
|
||||||
|
|
||||||
@@ -89,6 +88,111 @@ func TestParseSmartForSata(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestParseSmartForSataDeviceStatisticsTemperature(t *testing.T) {
|
||||||
|
jsonPayload := []byte(`{
|
||||||
|
"smartctl": {"exit_status": 0},
|
||||||
|
"device": {"name": "/dev/sdb", "type": "sat"},
|
||||||
|
"model_name": "SanDisk SSD U110 16GB",
|
||||||
|
"serial_number": "DEVSTAT123",
|
||||||
|
"firmware_version": "U21B001",
|
||||||
|
"user_capacity": {"bytes": 16013942784},
|
||||||
|
"smart_status": {"passed": true},
|
||||||
|
"ata_smart_attributes": {"table": []},
|
||||||
|
"ata_device_statistics": {
|
||||||
|
"pages": [
|
||||||
|
{
|
||||||
|
"number": 5,
|
||||||
|
"name": "Temperature Statistics",
|
||||||
|
"table": [
|
||||||
|
{"name": "Current Temperature", "value": 22, "flags": {"valid": true}}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}`)
|
||||||
|
|
||||||
|
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||||
|
hasData, exitStatus := sm.parseSmartForSata(jsonPayload)
|
||||||
|
require.True(t, hasData)
|
||||||
|
assert.Equal(t, 0, exitStatus)
|
||||||
|
|
||||||
|
deviceData, ok := sm.SmartDataMap["DEVSTAT123"]
|
||||||
|
require.True(t, ok, "expected smart data entry for serial DEVSTAT123")
|
||||||
|
assert.Equal(t, uint8(22), deviceData.Temperature)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseSmartForSataAtaDeviceStatistics(t *testing.T) {
|
||||||
|
// tests that ata_device_statistics values are parsed correctly
|
||||||
|
jsonPayload := []byte(`{
|
||||||
|
"smartctl": {"exit_status": 0},
|
||||||
|
"device": {"name": "/dev/sdb", "type": "sat"},
|
||||||
|
"model_name": "SanDisk SSD U110 16GB",
|
||||||
|
"serial_number": "lksjfh23lhj",
|
||||||
|
"firmware_version": "U21B001",
|
||||||
|
"user_capacity": {"bytes": 16013942784},
|
||||||
|
"smart_status": {"passed": true},
|
||||||
|
"ata_smart_attributes": {"table": []},
|
||||||
|
"ata_device_statistics": {
|
||||||
|
"pages": [
|
||||||
|
{
|
||||||
|
"number": 5,
|
||||||
|
"name": "Temperature Statistics",
|
||||||
|
"table": [
|
||||||
|
{"name": "Current Temperature", "value": 43, "flags": {"valid": true}},
|
||||||
|
{"name": "Specified Minimum Operating Temperature", "value": -20, "flags": {"valid": true}}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}`)
|
||||||
|
|
||||||
|
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||||
|
hasData, exitStatus := sm.parseSmartForSata(jsonPayload)
|
||||||
|
require.True(t, hasData)
|
||||||
|
assert.Equal(t, 0, exitStatus)
|
||||||
|
|
||||||
|
deviceData, ok := sm.SmartDataMap["lksjfh23lhj"]
|
||||||
|
require.True(t, ok, "expected smart data entry for serial lksjfh23lhj")
|
||||||
|
assert.Equal(t, uint8(43), deviceData.Temperature)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseSmartForSataNegativeDeviceStatistics(t *testing.T) {
|
||||||
|
// Tests that negative values in ata_device_statistics (e.g. min operating temp)
|
||||||
|
// do not cause the entire SAT parser to fail.
|
||||||
|
jsonPayload := []byte(`{
|
||||||
|
"smartctl": {"exit_status": 0},
|
||||||
|
"device": {"name": "/dev/sdb", "type": "sat"},
|
||||||
|
"model_name": "SanDisk SSD U110 16GB",
|
||||||
|
"serial_number": "NEGATIVE123",
|
||||||
|
"firmware_version": "U21B001",
|
||||||
|
"user_capacity": {"bytes": 16013942784},
|
||||||
|
"smart_status": {"passed": true},
|
||||||
|
"temperature": {"current": 38},
|
||||||
|
"ata_smart_attributes": {"table": []},
|
||||||
|
"ata_device_statistics": {
|
||||||
|
"pages": [
|
||||||
|
{
|
||||||
|
"number": 5,
|
||||||
|
"name": "Temperature Statistics",
|
||||||
|
"table": [
|
||||||
|
{"name": "Current Temperature", "value": 38, "flags": {"valid": true}},
|
||||||
|
{"name": "Specified Minimum Operating Temperature", "value": -20, "flags": {"valid": true}}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}`)
|
||||||
|
|
||||||
|
sm := &SmartManager{SmartDataMap: make(map[string]*smart.SmartData)}
|
||||||
|
hasData, exitStatus := sm.parseSmartForSata(jsonPayload)
|
||||||
|
require.True(t, hasData)
|
||||||
|
assert.Equal(t, 0, exitStatus)
|
||||||
|
|
||||||
|
deviceData, ok := sm.SmartDataMap["NEGATIVE123"]
|
||||||
|
require.True(t, ok, "expected smart data entry for serial NEGATIVE123")
|
||||||
|
assert.Equal(t, uint8(38), deviceData.Temperature)
|
||||||
|
}
|
||||||
|
|
||||||
func TestParseSmartForSataParentheticalRawValue(t *testing.T) {
|
func TestParseSmartForSataParentheticalRawValue(t *testing.T) {
|
||||||
jsonPayload := []byte(`{
|
jsonPayload := []byte(`{
|
||||||
"smartctl": {"exit_status": 0},
|
"smartctl": {"exit_status": 0},
|
||||||
@@ -195,6 +299,24 @@ func TestDevicesSnapshotReturnsCopy(t *testing.T) {
|
|||||||
assert.Len(t, snapshot, 2)
|
assert.Len(t, snapshot, 2)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestScanDevicesWithEnvOverrideAndSeparator(t *testing.T) {
|
||||||
|
t.Setenv("SMART_DEVICES_SEPARATOR", "|")
|
||||||
|
t.Setenv("SMART_DEVICES", "/dev/sda:jmb39x-q,0|/dev/nvme0:nvme")
|
||||||
|
|
||||||
|
sm := &SmartManager{
|
||||||
|
SmartDataMap: make(map[string]*smart.SmartData),
|
||||||
|
}
|
||||||
|
|
||||||
|
err := sm.ScanDevices(true)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Len(t, sm.SmartDevices, 2)
|
||||||
|
assert.Equal(t, "/dev/sda", sm.SmartDevices[0].Name)
|
||||||
|
assert.Equal(t, "jmb39x-q,0", sm.SmartDevices[0].Type)
|
||||||
|
assert.Equal(t, "/dev/nvme0", sm.SmartDevices[1].Name)
|
||||||
|
assert.Equal(t, "nvme", sm.SmartDevices[1].Type)
|
||||||
|
}
|
||||||
|
|
||||||
func TestScanDevicesWithEnvOverride(t *testing.T) {
|
func TestScanDevicesWithEnvOverride(t *testing.T) {
|
||||||
t.Setenv("SMART_DEVICES", "/dev/sda:sat, /dev/nvme0:nvme")
|
t.Setenv("SMART_DEVICES", "/dev/sda:sat, /dev/nvme0:nvme")
|
||||||
|
|
||||||
@@ -249,15 +371,21 @@ func TestSmartctlArgs(t *testing.T) {
|
|||||||
|
|
||||||
sataDevice := &DeviceInfo{Name: "/dev/sda", Type: "sat"}
|
sataDevice := &DeviceInfo{Name: "/dev/sda", Type: "sat"}
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
[]string{"-d", "sat", "-a", "--json=c", "-n", "standby", "/dev/sda"},
|
[]string{"-d", "sat", "-a", "--json=c", "-l", "devstat", "-n", "standby", "/dev/sda"},
|
||||||
sm.smartctlArgs(sataDevice, true),
|
sm.smartctlArgs(sataDevice, true),
|
||||||
)
|
)
|
||||||
|
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
[]string{"-d", "sat", "-a", "--json=c", "/dev/sda"},
|
[]string{"-d", "sat", "-a", "--json=c", "-l", "devstat", "/dev/sda"},
|
||||||
sm.smartctlArgs(sataDevice, false),
|
sm.smartctlArgs(sataDevice, false),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
nvmeDevice := &DeviceInfo{Name: "/dev/nvme0", Type: "nvme"}
|
||||||
|
assert.Equal(t,
|
||||||
|
[]string{"-d", "nvme", "-a", "--json=c", "-n", "standby", "/dev/nvme0"},
|
||||||
|
sm.smartctlArgs(nvmeDevice, true),
|
||||||
|
)
|
||||||
|
|
||||||
assert.Equal(t,
|
assert.Equal(t,
|
||||||
[]string{"-a", "--json=c", "-n", "standby"},
|
[]string{"-a", "--json=c", "-n", "standby"},
|
||||||
sm.smartctlArgs(nil, true),
|
sm.smartctlArgs(nil, true),
|
||||||
@@ -442,6 +570,88 @@ func TestMergeDeviceListsUpdatesTypeWhenUnverified(t *testing.T) {
|
|||||||
assert.Equal(t, "", device.parserType)
|
assert.Equal(t, "", device.parserType)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestMergeDeviceListsHandlesDevicesWithSameNameAndDifferentTypes(t *testing.T) {
|
||||||
|
// There are use cases where the same device name is re-used,
|
||||||
|
// for example, a RAID controller with multiple drives.
|
||||||
|
scanned := []*DeviceInfo{
|
||||||
|
{Name: "/dev/sda", Type: "megaraid,0"},
|
||||||
|
{Name: "/dev/sda", Type: "megaraid,1"},
|
||||||
|
{Name: "/dev/sda", Type: "megaraid,2"},
|
||||||
|
}
|
||||||
|
|
||||||
|
merged := mergeDeviceLists(nil, scanned, nil)
|
||||||
|
require.Len(t, merged, 3, "should have 3 separate devices for RAID controller")
|
||||||
|
|
||||||
|
byKey := make(map[string]*DeviceInfo, len(merged))
|
||||||
|
for _, dev := range merged {
|
||||||
|
key := dev.Name + "|" + dev.Type
|
||||||
|
byKey[key] = dev
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Contains(t, byKey, "/dev/sda|megaraid,0")
|
||||||
|
assert.Contains(t, byKey, "/dev/sda|megaraid,1")
|
||||||
|
assert.Contains(t, byKey, "/dev/sda|megaraid,2")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMergeDeviceListsHandlesMixedRAIDAndRegular(t *testing.T) {
|
||||||
|
// Test mixing RAID drives with regular devices
|
||||||
|
scanned := []*DeviceInfo{
|
||||||
|
{Name: "/dev/sda", Type: "megaraid,0"},
|
||||||
|
{Name: "/dev/sda", Type: "megaraid,1"},
|
||||||
|
{Name: "/dev/sdb", Type: "sat"},
|
||||||
|
{Name: "/dev/nvme0", Type: "nvme"},
|
||||||
|
}
|
||||||
|
|
||||||
|
merged := mergeDeviceLists(nil, scanned, nil)
|
||||||
|
require.Len(t, merged, 4, "should have 4 separate devices")
|
||||||
|
|
||||||
|
byKey := make(map[string]*DeviceInfo, len(merged))
|
||||||
|
for _, dev := range merged {
|
||||||
|
key := dev.Name + "|" + dev.Type
|
||||||
|
byKey[key] = dev
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Contains(t, byKey, "/dev/sda|megaraid,0")
|
||||||
|
assert.Contains(t, byKey, "/dev/sda|megaraid,1")
|
||||||
|
assert.Contains(t, byKey, "/dev/sdb|sat")
|
||||||
|
assert.Contains(t, byKey, "/dev/nvme0|nvme")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateSmartDevicesPreservesRAIDDrives(t *testing.T) {
|
||||||
|
// Test that updateSmartDevices correctly validates RAID drives using composite keys
|
||||||
|
sm := &SmartManager{
|
||||||
|
SmartDevices: []*DeviceInfo{
|
||||||
|
{Name: "/dev/sda", Type: "megaraid,0"},
|
||||||
|
{Name: "/dev/sda", Type: "megaraid,1"},
|
||||||
|
},
|
||||||
|
SmartDataMap: map[string]*smart.SmartData{
|
||||||
|
"serial-0": {
|
||||||
|
DiskName: "/dev/sda",
|
||||||
|
DiskType: "megaraid,0",
|
||||||
|
SerialNumber: "serial-0",
|
||||||
|
},
|
||||||
|
"serial-1": {
|
||||||
|
DiskName: "/dev/sda",
|
||||||
|
DiskType: "megaraid,1",
|
||||||
|
SerialNumber: "serial-1",
|
||||||
|
},
|
||||||
|
"serial-stale": {
|
||||||
|
DiskName: "/dev/sda",
|
||||||
|
DiskType: "megaraid,2",
|
||||||
|
SerialNumber: "serial-stale",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
sm.updateSmartDevices(sm.SmartDevices)
|
||||||
|
|
||||||
|
// serial-0 and serial-1 should be preserved (matching devices exist)
|
||||||
|
assert.Contains(t, sm.SmartDataMap, "serial-0")
|
||||||
|
assert.Contains(t, sm.SmartDataMap, "serial-1")
|
||||||
|
// serial-stale should be removed (no matching device)
|
||||||
|
assert.NotContains(t, sm.SmartDataMap, "serial-stale")
|
||||||
|
}
|
||||||
|
|
||||||
func TestParseSmartOutputMarksVerified(t *testing.T) {
|
func TestParseSmartOutputMarksVerified(t *testing.T) {
|
||||||
fixturePath := filepath.Join("test-data", "smart", "nvme0.json")
|
fixturePath := filepath.Join("test-data", "smart", "nvme0.json")
|
||||||
data, err := os.ReadFile(fixturePath)
|
data, err := os.ReadFile(fixturePath)
|
||||||
@@ -589,6 +799,182 @@ func TestIsVirtualDeviceScsi(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestFindAtaDeviceStatisticsValue(t *testing.T) {
|
||||||
|
val42 := int64(42)
|
||||||
|
val100 := int64(100)
|
||||||
|
valMinus20 := int64(-20)
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
data smart.SmartInfoForSata
|
||||||
|
ataDeviceStats smart.AtaDeviceStatistics
|
||||||
|
entryNumber uint8
|
||||||
|
entryName string
|
||||||
|
minValue int64
|
||||||
|
maxValue int64
|
||||||
|
expectedValue *int64
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "value in ataDeviceStats",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Current Temperature", Value: &val42},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: &val42,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "value unmarshaled from data",
|
||||||
|
data: smart.SmartInfoForSata{
|
||||||
|
AtaDeviceStatistics: []byte(`{"pages":[{"number":5,"table":[{"name":"Current Temperature","value":100}]}]}`),
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 255,
|
||||||
|
expectedValue: &val100,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "value out of range (too high)",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Current Temperature", Value: &val100},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 50,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "value out of range (too low)",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Min Temp", Value: &valMinus20},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Min Temp",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "no statistics available",
|
||||||
|
data: smart.SmartInfoForSata{},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 255,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "wrong page number",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 1,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Current Temperature", Value: &val42},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "wrong entry name",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Other Stat", Value: &val42},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "case insensitive name match",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "CURRENT TEMPERATURE", Value: &val42},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: &val42,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "entry value is nil",
|
||||||
|
ataDeviceStats: smart.AtaDeviceStatistics{
|
||||||
|
Pages: []smart.AtaDeviceStatisticsPage{
|
||||||
|
{
|
||||||
|
Number: 5,
|
||||||
|
Table: []smart.AtaDeviceStatisticsEntry{
|
||||||
|
{Name: "Current Temperature", Value: nil},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
entryNumber: 5,
|
||||||
|
entryName: "Current Temperature",
|
||||||
|
minValue: 0,
|
||||||
|
maxValue: 100,
|
||||||
|
expectedValue: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
result := findAtaDeviceStatisticsValue(&tt.data, &tt.ataDeviceStats, tt.entryNumber, tt.entryName, tt.minValue, tt.maxValue)
|
||||||
|
if tt.expectedValue == nil {
|
||||||
|
assert.Nil(t, result)
|
||||||
|
} else {
|
||||||
|
require.NotNil(t, result)
|
||||||
|
assert.Equal(t, *tt.expectedValue, *result)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestRefreshExcludedDevices(t *testing.T) {
|
func TestRefreshExcludedDevices(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -649,7 +1035,7 @@ func TestRefreshExcludedDevices(t *testing.T) {
|
|||||||
t.Setenv("EXCLUDE_SMART", tt.envValue)
|
t.Setenv("EXCLUDE_SMART", tt.envValue)
|
||||||
} else {
|
} else {
|
||||||
// Ensure env var is not set for empty test
|
// Ensure env var is not set for empty test
|
||||||
os.Unsetenv("EXCLUDE_SMART")
|
t.Setenv("EXCLUDE_SMART", "")
|
||||||
}
|
}
|
||||||
|
|
||||||
sm := &SmartManager{}
|
sm := &SmartManager{}
|
||||||
@@ -780,3 +1166,114 @@ func TestFilterExcludedDevices(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestIsNvmeControllerPath(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
path string
|
||||||
|
expected bool
|
||||||
|
}{
|
||||||
|
// Controller paths (should return true)
|
||||||
|
{"/dev/nvme0", true},
|
||||||
|
{"/dev/nvme1", true},
|
||||||
|
{"/dev/nvme10", true},
|
||||||
|
{"nvme0", true},
|
||||||
|
|
||||||
|
// Namespace paths (should return false)
|
||||||
|
{"/dev/nvme0n1", false},
|
||||||
|
{"/dev/nvme1n1", false},
|
||||||
|
{"/dev/nvme0n1p1", false},
|
||||||
|
{"nvme0n1", false},
|
||||||
|
|
||||||
|
// Non-NVMe paths (should return false)
|
||||||
|
{"/dev/sda", false},
|
||||||
|
{"/dev/sda1", false},
|
||||||
|
{"/dev/hda", false},
|
||||||
|
{"", false},
|
||||||
|
{"/dev/nvme", false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.path, func(t *testing.T) {
|
||||||
|
result := isNvmeControllerPath(tt.path)
|
||||||
|
assert.Equal(t, tt.expected, result, "path: %s", tt.path)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestParseSmartForNvmeAppleSSD(t *testing.T) {
|
||||||
|
// Apple SSDs don't report user_capacity via smartctl; capacity should be fetched
|
||||||
|
// from system_profiler via the darwinNvmeProvider fallback.
|
||||||
|
fixturePath := filepath.Join("test-data", "smart", "apple_nvme.json")
|
||||||
|
data, err := os.ReadFile(fixturePath)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
providerCalls := 0
|
||||||
|
fakeProvider := func() ([]byte, error) {
|
||||||
|
providerCalls++
|
||||||
|
return []byte(`{
|
||||||
|
"SPNVMeDataType": [{
|
||||||
|
"_items": [{
|
||||||
|
"device_serial": "0ba0147940253c15",
|
||||||
|
"size_in_bytes": 251000193024
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}`), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{
|
||||||
|
SmartDataMap: make(map[string]*smart.SmartData),
|
||||||
|
darwinNvmeProvider: fakeProvider,
|
||||||
|
}
|
||||||
|
|
||||||
|
hasData, _ := sm.parseSmartForNvme(data)
|
||||||
|
require.True(t, hasData)
|
||||||
|
|
||||||
|
deviceData, ok := sm.SmartDataMap["0ba0147940253c15"]
|
||||||
|
require.True(t, ok)
|
||||||
|
assert.Equal(t, "APPLE SSD AP0256Q", deviceData.ModelName)
|
||||||
|
assert.Equal(t, uint64(251000193024), deviceData.Capacity)
|
||||||
|
assert.Equal(t, uint8(42), deviceData.Temperature)
|
||||||
|
assert.Equal(t, "PASSED", deviceData.SmartStatus)
|
||||||
|
assert.Equal(t, 1, providerCalls, "system_profiler should be called once")
|
||||||
|
|
||||||
|
// Second parse: provider should NOT be called again (cache hit)
|
||||||
|
_, _ = sm.parseSmartForNvme(data)
|
||||||
|
assert.Equal(t, 1, providerCalls, "system_profiler should not be called again after caching")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLookupDarwinNvmeCapacityMultipleDisks(t *testing.T) {
|
||||||
|
fakeProvider := func() ([]byte, error) {
|
||||||
|
return []byte(`{
|
||||||
|
"SPNVMeDataType": [
|
||||||
|
{
|
||||||
|
"_items": [
|
||||||
|
{"device_serial": "serial-disk0", "size_in_bytes": 251000193024},
|
||||||
|
{"device_serial": "serial-disk1", "size_in_bytes": 1000204886016}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"_items": [
|
||||||
|
{"device_serial": "serial-disk2", "size_in_bytes": 512110190592}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}`), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{darwinNvmeProvider: fakeProvider}
|
||||||
|
assert.Equal(t, uint64(251000193024), sm.lookupDarwinNvmeCapacity("serial-disk0"))
|
||||||
|
assert.Equal(t, uint64(1000204886016), sm.lookupDarwinNvmeCapacity("serial-disk1"))
|
||||||
|
assert.Equal(t, uint64(512110190592), sm.lookupDarwinNvmeCapacity("serial-disk2"))
|
||||||
|
assert.Equal(t, uint64(0), sm.lookupDarwinNvmeCapacity("unknown-serial"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLookupDarwinNvmeCapacityProviderError(t *testing.T) {
|
||||||
|
fakeProvider := func() ([]byte, error) {
|
||||||
|
return nil, errors.New("system_profiler not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
sm := &SmartManager{darwinNvmeProvider: fakeProvider}
|
||||||
|
assert.Equal(t, uint64(0), sm.lookupDarwinNvmeCapacity("any-serial"))
|
||||||
|
// Cache should be initialized even on error so we don't retry (Once already fired)
|
||||||
|
assert.NotNil(t, sm.darwinNvmeCapacity)
|
||||||
|
}
|
||||||
|
|||||||
168
agent/system.go
168
agent/system.go
@@ -2,15 +2,19 @@ package agent
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log/slog"
|
"log/slog"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/henrygd/beszel"
|
"github.com/henrygd/beszel"
|
||||||
"github.com/henrygd/beszel/agent/battery"
|
"github.com/henrygd/beszel/agent/battery"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
|
"github.com/henrygd/beszel/agent/zfs"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/container"
|
||||||
"github.com/henrygd/beszel/internal/entities/system"
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
|
||||||
"github.com/shirou/gopsutil/v4/cpu"
|
"github.com/shirou/gopsutil/v4/cpu"
|
||||||
@@ -27,52 +31,110 @@ type prevDisk struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Sets initial / non-changing values about the host system
|
// Sets initial / non-changing values about the host system
|
||||||
func (a *Agent) initializeSystemInfo() {
|
func (a *Agent) refreshSystemDetails() {
|
||||||
a.systemInfo.AgentVersion = beszel.Version
|
a.systemInfo.AgentVersion = beszel.Version
|
||||||
a.systemInfo.Hostname, _ = os.Hostname()
|
|
||||||
|
// get host info from Docker if available
|
||||||
|
var hostInfo container.HostInfo
|
||||||
|
|
||||||
|
if a.dockerManager != nil {
|
||||||
|
a.systemDetails.Podman = a.dockerManager.IsPodman()
|
||||||
|
hostInfo, _ = a.dockerManager.GetHostInfo()
|
||||||
|
}
|
||||||
|
|
||||||
|
a.systemDetails.Hostname, _ = os.Hostname()
|
||||||
|
if arch, err := host.KernelArch(); err == nil {
|
||||||
|
a.systemDetails.Arch = arch
|
||||||
|
} else {
|
||||||
|
a.systemDetails.Arch = runtime.GOARCH
|
||||||
|
}
|
||||||
|
|
||||||
platform, _, version, _ := host.PlatformInformation()
|
platform, _, version, _ := host.PlatformInformation()
|
||||||
|
|
||||||
if platform == "darwin" {
|
if platform == "darwin" {
|
||||||
a.systemInfo.KernelVersion = version
|
a.systemDetails.Os = system.Darwin
|
||||||
a.systemInfo.Os = system.Darwin
|
a.systemDetails.OsName = fmt.Sprintf("macOS %s", version)
|
||||||
} else if strings.Contains(platform, "indows") {
|
} else if strings.Contains(platform, "indows") {
|
||||||
a.systemInfo.KernelVersion = fmt.Sprintf("%s %s", strings.Replace(platform, "Microsoft ", "", 1), version)
|
a.systemDetails.Os = system.Windows
|
||||||
a.systemInfo.Os = system.Windows
|
a.systemDetails.OsName = strings.Replace(platform, "Microsoft ", "", 1)
|
||||||
|
a.systemDetails.Kernel = version
|
||||||
} else if platform == "freebsd" {
|
} else if platform == "freebsd" {
|
||||||
a.systemInfo.Os = system.Freebsd
|
a.systemDetails.Os = system.Freebsd
|
||||||
a.systemInfo.KernelVersion = version
|
a.systemDetails.Kernel, _ = host.KernelVersion()
|
||||||
|
if prettyName, err := getOsPrettyName(); err == nil {
|
||||||
|
a.systemDetails.OsName = prettyName
|
||||||
} else {
|
} else {
|
||||||
a.systemInfo.Os = system.Linux
|
a.systemDetails.OsName = "FreeBSD"
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
a.systemDetails.Os = system.Linux
|
||||||
|
a.systemDetails.OsName = hostInfo.OperatingSystem
|
||||||
|
if a.systemDetails.OsName == "" {
|
||||||
|
if prettyName, err := getOsPrettyName(); err == nil {
|
||||||
|
a.systemDetails.OsName = prettyName
|
||||||
|
} else {
|
||||||
|
a.systemDetails.OsName = platform
|
||||||
|
}
|
||||||
|
}
|
||||||
|
a.systemDetails.Kernel = hostInfo.KernelVersion
|
||||||
|
if a.systemDetails.Kernel == "" {
|
||||||
|
a.systemDetails.Kernel, _ = host.KernelVersion()
|
||||||
}
|
}
|
||||||
|
|
||||||
if a.systemInfo.KernelVersion == "" {
|
|
||||||
a.systemInfo.KernelVersion, _ = host.KernelVersion()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// cpu model
|
// cpu model
|
||||||
if info, err := cpu.Info(); err == nil && len(info) > 0 {
|
if info, err := cpu.Info(); err == nil && len(info) > 0 {
|
||||||
a.systemInfo.CpuModel = info[0].ModelName
|
a.systemDetails.CpuModel = info[0].ModelName
|
||||||
}
|
}
|
||||||
// cores / threads
|
// cores / threads
|
||||||
a.systemInfo.Cores, _ = cpu.Counts(false)
|
cores, _ := cpu.Counts(false)
|
||||||
if threads, err := cpu.Counts(true); err == nil {
|
threads := hostInfo.NCPU
|
||||||
if threads > 0 && threads < a.systemInfo.Cores {
|
if threads == 0 {
|
||||||
// in lxc logical cores reflects container limits, so use that as cores if lower
|
threads, _ = cpu.Counts(true)
|
||||||
a.systemInfo.Cores = threads
|
}
|
||||||
} else {
|
// in lxc, logical cores reflects container limits, so use that as cores if lower
|
||||||
a.systemInfo.Threads = threads
|
if threads > 0 && threads < cores {
|
||||||
|
cores = threads
|
||||||
|
}
|
||||||
|
a.systemDetails.Cores = cores
|
||||||
|
a.systemDetails.Threads = threads
|
||||||
|
|
||||||
|
// total memory
|
||||||
|
a.systemDetails.MemoryTotal = hostInfo.MemTotal
|
||||||
|
if a.systemDetails.MemoryTotal == 0 {
|
||||||
|
if v, err := mem.VirtualMemory(); err == nil {
|
||||||
|
a.systemDetails.MemoryTotal = v.Total
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// zfs
|
// zfs
|
||||||
if _, err := getARCSize(); err != nil {
|
if _, err := zfs.ARCSize(); err != nil {
|
||||||
slog.Debug("Not monitoring ZFS ARC", "err", err)
|
slog.Debug("Not monitoring ZFS ARC", "err", err)
|
||||||
} else {
|
} else {
|
||||||
a.zfs = true
|
a.zfs = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// attachSystemDetails returns details only for fresh default-interval responses.
|
||||||
|
func (a *Agent) attachSystemDetails(data *system.CombinedData, cacheTimeMs uint16, includeRequested bool) *system.CombinedData {
|
||||||
|
if cacheTimeMs != defaultDataCacheTimeMs || (!includeRequested && !a.detailsDirty) {
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
// copy data to avoid adding details to the original cached struct
|
||||||
|
response := *data
|
||||||
|
response.Details = &a.systemDetails
|
||||||
|
a.detailsDirty = false
|
||||||
|
return &response
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateSystemDetails applies a mutation to the static details payload and marks
|
||||||
|
// it for inclusion on the next fresh default-interval response.
|
||||||
|
func (a *Agent) updateSystemDetails(updateFunc func(details *system.Details)) {
|
||||||
|
updateFunc(&a.systemDetails)
|
||||||
|
a.detailsDirty = true
|
||||||
|
}
|
||||||
|
|
||||||
// Returns current info, stats about the host system
|
// Returns current info, stats about the host system
|
||||||
func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
||||||
var systemStats system.Stats
|
var systemStats system.Stats
|
||||||
@@ -86,13 +148,13 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
|||||||
// cpu metrics
|
// cpu metrics
|
||||||
cpuMetrics, err := getCpuMetrics(cacheTimeMs)
|
cpuMetrics, err := getCpuMetrics(cacheTimeMs)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
systemStats.Cpu = twoDecimals(cpuMetrics.Total)
|
systemStats.Cpu = utils.TwoDecimals(cpuMetrics.Total)
|
||||||
systemStats.CpuBreakdown = []float64{
|
systemStats.CpuBreakdown = []float64{
|
||||||
twoDecimals(cpuMetrics.User),
|
utils.TwoDecimals(cpuMetrics.User),
|
||||||
twoDecimals(cpuMetrics.System),
|
utils.TwoDecimals(cpuMetrics.System),
|
||||||
twoDecimals(cpuMetrics.Iowait),
|
utils.TwoDecimals(cpuMetrics.Iowait),
|
||||||
twoDecimals(cpuMetrics.Steal),
|
utils.TwoDecimals(cpuMetrics.Steal),
|
||||||
twoDecimals(cpuMetrics.Idle),
|
utils.TwoDecimals(cpuMetrics.Idle),
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
slog.Error("Error getting cpu metrics", "err", err)
|
slog.Error("Error getting cpu metrics", "err", err)
|
||||||
@@ -116,8 +178,8 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
|||||||
// memory
|
// memory
|
||||||
if v, err := mem.VirtualMemory(); err == nil {
|
if v, err := mem.VirtualMemory(); err == nil {
|
||||||
// swap
|
// swap
|
||||||
systemStats.Swap = bytesToGigabytes(v.SwapTotal)
|
systemStats.Swap = utils.BytesToGigabytes(v.SwapTotal)
|
||||||
systemStats.SwapUsed = bytesToGigabytes(v.SwapTotal - v.SwapFree - v.SwapCached)
|
systemStats.SwapUsed = utils.BytesToGigabytes(v.SwapTotal - v.SwapFree - v.SwapCached)
|
||||||
// cache + buffers value for default mem calculation
|
// cache + buffers value for default mem calculation
|
||||||
// note: gopsutil automatically adds SReclaimable to v.Cached
|
// note: gopsutil automatically adds SReclaimable to v.Cached
|
||||||
cacheBuff := v.Cached + v.Buffers - v.Shared
|
cacheBuff := v.Cached + v.Buffers - v.Shared
|
||||||
@@ -137,16 +199,16 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
|||||||
// }
|
// }
|
||||||
// subtract ZFS ARC size from used memory and add as its own category
|
// subtract ZFS ARC size from used memory and add as its own category
|
||||||
if a.zfs {
|
if a.zfs {
|
||||||
if arcSize, _ := getARCSize(); arcSize > 0 && arcSize < v.Used {
|
if arcSize, _ := zfs.ARCSize(); arcSize > 0 && arcSize < v.Used {
|
||||||
v.Used = v.Used - arcSize
|
v.Used = v.Used - arcSize
|
||||||
v.UsedPercent = float64(v.Used) / float64(v.Total) * 100.0
|
v.UsedPercent = float64(v.Used) / float64(v.Total) * 100.0
|
||||||
systemStats.MemZfsArc = bytesToGigabytes(arcSize)
|
systemStats.MemZfsArc = utils.BytesToGigabytes(arcSize)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
systemStats.Mem = bytesToGigabytes(v.Total)
|
systemStats.Mem = utils.BytesToGigabytes(v.Total)
|
||||||
systemStats.MemBuffCache = bytesToGigabytes(cacheBuff)
|
systemStats.MemBuffCache = utils.BytesToGigabytes(cacheBuff)
|
||||||
systemStats.MemUsed = bytesToGigabytes(v.Used)
|
systemStats.MemUsed = utils.BytesToGigabytes(v.Used)
|
||||||
systemStats.MemPct = twoDecimals(v.UsedPercent)
|
systemStats.MemPct = utils.TwoDecimals(v.UsedPercent)
|
||||||
}
|
}
|
||||||
|
|
||||||
// disk usage
|
// disk usage
|
||||||
@@ -195,47 +257,37 @@ func (a *Agent) getSystemStats(cacheTimeMs uint16) system.Stats {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// update base system info
|
// update system info
|
||||||
a.systemInfo.ConnectionType = a.connectionManager.ConnectionType
|
a.systemInfo.ConnectionType = a.connectionManager.ConnectionType
|
||||||
a.systemInfo.Cpu = systemStats.Cpu
|
a.systemInfo.Cpu = systemStats.Cpu
|
||||||
a.systemInfo.LoadAvg = systemStats.LoadAvg
|
a.systemInfo.LoadAvg = systemStats.LoadAvg
|
||||||
// TODO: remove these in future release in favor of load avg array
|
|
||||||
a.systemInfo.LoadAvg1 = systemStats.LoadAvg[0]
|
|
||||||
a.systemInfo.LoadAvg5 = systemStats.LoadAvg[1]
|
|
||||||
a.systemInfo.LoadAvg15 = systemStats.LoadAvg[2]
|
|
||||||
a.systemInfo.MemPct = systemStats.MemPct
|
a.systemInfo.MemPct = systemStats.MemPct
|
||||||
a.systemInfo.DiskPct = systemStats.DiskPct
|
a.systemInfo.DiskPct = systemStats.DiskPct
|
||||||
|
a.systemInfo.Battery = systemStats.Battery
|
||||||
a.systemInfo.Uptime, _ = host.Uptime()
|
a.systemInfo.Uptime, _ = host.Uptime()
|
||||||
// TODO: in future release, remove MB bandwidth values in favor of bytes
|
|
||||||
a.systemInfo.Bandwidth = twoDecimals(systemStats.NetworkSent + systemStats.NetworkRecv)
|
|
||||||
a.systemInfo.BandwidthBytes = systemStats.Bandwidth[0] + systemStats.Bandwidth[1]
|
a.systemInfo.BandwidthBytes = systemStats.Bandwidth[0] + systemStats.Bandwidth[1]
|
||||||
slog.Debug("sysinfo", "data", a.systemInfo)
|
a.systemInfo.Threads = a.systemDetails.Threads
|
||||||
|
|
||||||
return systemStats
|
return systemStats
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns the size of the ZFS ARC memory cache in bytes
|
// getOsPrettyName attempts to get the pretty OS name from /etc/os-release on Linux systems
|
||||||
func getARCSize() (uint64, error) {
|
func getOsPrettyName() (string, error) {
|
||||||
file, err := os.Open("/proc/spl/kstat/zfs/arcstats")
|
file, err := os.Open("/etc/os-release")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return "", err
|
||||||
}
|
}
|
||||||
defer file.Close()
|
defer file.Close()
|
||||||
|
|
||||||
// Scan the lines
|
|
||||||
scanner := bufio.NewScanner(file)
|
scanner := bufio.NewScanner(file)
|
||||||
for scanner.Scan() {
|
for scanner.Scan() {
|
||||||
line := scanner.Text()
|
line := scanner.Text()
|
||||||
if strings.HasPrefix(line, "size") {
|
if after, ok := strings.CutPrefix(line, "PRETTY_NAME="); ok {
|
||||||
// Example line: size 4 15032385536
|
value := after
|
||||||
fields := strings.Fields(line)
|
value = strings.Trim(value, `"`)
|
||||||
if len(fields) < 3 {
|
return value, nil
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
// Return the size as uint64
|
|
||||||
return strconv.ParseUint(fields[2], 10, 64)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0, fmt.Errorf("failed to parse size field")
|
return "", errors.New("pretty name not found")
|
||||||
}
|
}
|
||||||
|
|||||||
61
agent/system_test.go
Normal file
61
agent/system_test.go
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
package agent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/common"
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestGatherStatsDoesNotAttachDetailsToCachedRequests(t *testing.T) {
|
||||||
|
agent := &Agent{
|
||||||
|
cache: NewSystemDataCache(),
|
||||||
|
systemDetails: system.Details{Hostname: "updated-host", Podman: true},
|
||||||
|
detailsDirty: true,
|
||||||
|
}
|
||||||
|
cached := &system.CombinedData{
|
||||||
|
Info: system.Info{Hostname: "cached-host"},
|
||||||
|
}
|
||||||
|
agent.cache.Set(cached, defaultDataCacheTimeMs)
|
||||||
|
|
||||||
|
response := agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||||
|
|
||||||
|
assert.Same(t, cached, response)
|
||||||
|
assert.Nil(t, response.Details)
|
||||||
|
assert.True(t, agent.detailsDirty)
|
||||||
|
assert.Equal(t, "cached-host", response.Info.Hostname)
|
||||||
|
assert.Nil(t, cached.Details)
|
||||||
|
|
||||||
|
secondResponse := agent.gatherStats(common.DataRequestOptions{CacheTimeMs: defaultDataCacheTimeMs})
|
||||||
|
assert.Same(t, cached, secondResponse)
|
||||||
|
assert.Nil(t, secondResponse.Details)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestUpdateSystemDetailsMarksDetailsDirty(t *testing.T) {
|
||||||
|
agent := &Agent{}
|
||||||
|
|
||||||
|
agent.updateSystemDetails(func(details *system.Details) {
|
||||||
|
details.Hostname = "updated-host"
|
||||||
|
details.Podman = true
|
||||||
|
})
|
||||||
|
|
||||||
|
assert.True(t, agent.detailsDirty)
|
||||||
|
assert.Equal(t, "updated-host", agent.systemDetails.Hostname)
|
||||||
|
assert.True(t, agent.systemDetails.Podman)
|
||||||
|
|
||||||
|
original := &system.CombinedData{}
|
||||||
|
realTimeResponse := agent.attachSystemDetails(original, 1000, true)
|
||||||
|
assert.Same(t, original, realTimeResponse)
|
||||||
|
assert.Nil(t, realTimeResponse.Details)
|
||||||
|
assert.True(t, agent.detailsDirty)
|
||||||
|
|
||||||
|
response := agent.attachSystemDetails(original, defaultDataCacheTimeMs, false)
|
||||||
|
require.NotNil(t, response.Details)
|
||||||
|
assert.NotSame(t, original, response)
|
||||||
|
assert.Equal(t, "updated-host", response.Details.Hostname)
|
||||||
|
assert.True(t, response.Details.Podman)
|
||||||
|
assert.False(t, agent.detailsDirty)
|
||||||
|
assert.Nil(t, original.Details)
|
||||||
|
}
|
||||||
@@ -8,12 +8,14 @@ import (
|
|||||||
"log/slog"
|
"log/slog"
|
||||||
"maps"
|
"maps"
|
||||||
"math"
|
"math"
|
||||||
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/coreos/go-systemd/v22/dbus"
|
"github.com/coreos/go-systemd/v22/dbus"
|
||||||
|
"github.com/henrygd/beszel/agent/utils"
|
||||||
"github.com/henrygd/beszel/internal/entities/systemd"
|
"github.com/henrygd/beszel/internal/entities/systemd"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -28,11 +30,36 @@ type systemdManager struct {
|
|||||||
patterns []string
|
patterns []string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isSystemdAvailable checks if systemd is used on the system to avoid unnecessary connection attempts (#1548)
|
||||||
|
func isSystemdAvailable() bool {
|
||||||
|
paths := []string{
|
||||||
|
"/run/systemd/system",
|
||||||
|
"/run/dbus/system_bus_socket",
|
||||||
|
"/var/run/dbus/system_bus_socket",
|
||||||
|
}
|
||||||
|
for _, path := range paths {
|
||||||
|
if _, err := os.Stat(path); err == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if data, err := os.ReadFile("/proc/1/comm"); err == nil {
|
||||||
|
return strings.TrimSpace(string(data)) == "systemd"
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// newSystemdManager creates a new systemdManager.
|
// newSystemdManager creates a new systemdManager.
|
||||||
func newSystemdManager() (*systemdManager, error) {
|
func newSystemdManager() (*systemdManager, error) {
|
||||||
if skipSystemd, _ := GetEnv("SKIP_SYSTEMD"); skipSystemd == "true" {
|
if skipSystemd, _ := utils.GetEnv("SKIP_SYSTEMD"); skipSystemd == "true" {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check if systemd is available on the system before attempting connection
|
||||||
|
if !isSystemdAvailable() {
|
||||||
|
slog.Debug("Systemd not available")
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
conn, err := dbus.NewSystemConnectionContext(context.Background())
|
conn, err := dbus.NewSystemConnectionContext(context.Background())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
slog.Debug("Error connecting to systemd", "err", err, "ref", "https://beszel.dev/guide/systemd")
|
slog.Debug("Error connecting to systemd", "err", err, "ref", "https://beszel.dev/guide/systemd")
|
||||||
@@ -118,13 +145,27 @@ func (sm *systemdManager) getServiceStats(conn *dbus.Conn, refresh bool) []*syst
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Track which units are currently present to remove stale entries
|
||||||
|
currentUnits := make(map[string]struct{}, len(units))
|
||||||
|
|
||||||
for _, unit := range units {
|
for _, unit := range units {
|
||||||
|
currentUnits[unit.Name] = struct{}{}
|
||||||
service, err := sm.updateServiceStats(conn, unit)
|
service, err := sm.updateServiceStats(conn, unit)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
services = append(services, service)
|
services = append(services, service)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Remove services that no longer exist in systemd
|
||||||
|
sm.Lock()
|
||||||
|
for unitName := range sm.serviceStatsMap {
|
||||||
|
if _, exists := currentUnits[unitName]; !exists {
|
||||||
|
delete(sm.serviceStatsMap, unitName)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sm.Unlock()
|
||||||
|
|
||||||
sm.hasFreshStats = true
|
sm.hasFreshStats = true
|
||||||
return services
|
return services
|
||||||
}
|
}
|
||||||
@@ -254,13 +295,13 @@ func unescapeServiceName(name string) string {
|
|||||||
// otherwise defaults to "*service".
|
// otherwise defaults to "*service".
|
||||||
func getServicePatterns() []string {
|
func getServicePatterns() []string {
|
||||||
patterns := []string{}
|
patterns := []string{}
|
||||||
if envPatterns, _ := GetEnv("SERVICE_PATTERNS"); envPatterns != "" {
|
if envPatterns, _ := utils.GetEnv("SERVICE_PATTERNS"); envPatterns != "" {
|
||||||
for pattern := range strings.SplitSeq(envPatterns, ",") {
|
for pattern := range strings.SplitSeq(envPatterns, ",") {
|
||||||
pattern = strings.TrimSpace(pattern)
|
pattern = strings.TrimSpace(pattern)
|
||||||
if pattern == "" {
|
if pattern == "" {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !strings.HasSuffix(pattern, ".service") {
|
if !strings.HasSuffix(pattern, "timer") && !strings.HasSuffix(pattern, ".service") {
|
||||||
pattern += ".service"
|
pattern += ".service"
|
||||||
}
|
}
|
||||||
patterns = append(patterns, pattern)
|
patterns = append(patterns, pattern)
|
||||||
|
|||||||
@@ -19,11 +19,11 @@ func TestSystemdManagerGetServiceStats(t *testing.T) {
|
|||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
// Test with refresh = true
|
// Test with refresh = true
|
||||||
result := manager.getServiceStats(true)
|
result := manager.getServiceStats("any-service", true)
|
||||||
assert.Nil(t, result)
|
assert.Nil(t, result)
|
||||||
|
|
||||||
// Test with refresh = false
|
// Test with refresh = false
|
||||||
result = manager.getServiceStats(false)
|
result = manager.getServiceStats("any-service", false)
|
||||||
assert.Nil(t, result)
|
assert.Nil(t, result)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ package agent
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -48,6 +49,35 @@ func TestUnescapeServiceNameInvalid(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestIsSystemdAvailable(t *testing.T) {
|
||||||
|
// Note: This test's result will vary based on the actual system running the tests
|
||||||
|
// On systems with systemd, it should return true
|
||||||
|
// On systems without systemd, it should return false
|
||||||
|
result := isSystemdAvailable()
|
||||||
|
|
||||||
|
// Check if either the /run/systemd/system directory exists or PID 1 is systemd
|
||||||
|
runSystemdExists := false
|
||||||
|
if _, err := os.Stat("/run/systemd/system"); err == nil {
|
||||||
|
runSystemdExists = true
|
||||||
|
}
|
||||||
|
|
||||||
|
pid1IsSystemd := false
|
||||||
|
if data, err := os.ReadFile("/proc/1/comm"); err == nil {
|
||||||
|
pid1IsSystemd = strings.TrimSpace(string(data)) == "systemd"
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := runSystemdExists || pid1IsSystemd
|
||||||
|
|
||||||
|
assert.Equal(t, expected, result, "isSystemdAvailable should correctly detect systemd presence")
|
||||||
|
|
||||||
|
// Log the result for informational purposes
|
||||||
|
if result {
|
||||||
|
t.Log("Systemd is available on this system")
|
||||||
|
} else {
|
||||||
|
t.Log("Systemd is not available on this system")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestGetServicePatterns(t *testing.T) {
|
func TestGetServicePatterns(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
name string
|
name string
|
||||||
@@ -126,20 +156,23 @@ func TestGetServicePatterns(t *testing.T) {
|
|||||||
expected: []string{"*nginx*.service", "*apache*.service"},
|
expected: []string{"*nginx*.service", "*apache*.service"},
|
||||||
cleanupEnvVars: true,
|
cleanupEnvVars: true,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "opt into timer monitoring",
|
||||||
|
prefixedEnv: "nginx.service,docker,apache.timer",
|
||||||
|
unprefixedEnv: "",
|
||||||
|
expected: []string{"nginx.service", "docker.service", "apache.timer"},
|
||||||
|
cleanupEnvVars: true,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tt := range tests {
|
for _, tt := range tests {
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
// Clean up any existing env vars
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SERVICE_PATTERNS")
|
|
||||||
os.Unsetenv("SERVICE_PATTERNS")
|
|
||||||
|
|
||||||
// Set up environment variables
|
// Set up environment variables
|
||||||
if tt.prefixedEnv != "" {
|
if tt.prefixedEnv != "" {
|
||||||
os.Setenv("BESZEL_AGENT_SERVICE_PATTERNS", tt.prefixedEnv)
|
t.Setenv("BESZEL_AGENT_SERVICE_PATTERNS", tt.prefixedEnv)
|
||||||
}
|
}
|
||||||
if tt.unprefixedEnv != "" {
|
if tt.unprefixedEnv != "" {
|
||||||
os.Setenv("SERVICE_PATTERNS", tt.unprefixedEnv)
|
t.Setenv("SERVICE_PATTERNS", tt.unprefixedEnv)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Run the function
|
// Run the function
|
||||||
@@ -147,12 +180,6 @@ func TestGetServicePatterns(t *testing.T) {
|
|||||||
|
|
||||||
// Verify results
|
// Verify results
|
||||||
assert.Equal(t, tt.expected, result, "Patterns should match expected values")
|
assert.Equal(t, tt.expected, result, "Patterns should match expected values")
|
||||||
|
|
||||||
// Cleanup
|
|
||||||
if tt.cleanupEnvVars {
|
|
||||||
os.Unsetenv("BESZEL_AGENT_SERVICE_PATTERNS")
|
|
||||||
os.Unsetenv("SERVICE_PATTERNS")
|
|
||||||
}
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
700
agent/test-data/amdgpu.ids
Normal file
700
agent/test-data/amdgpu.ids
Normal file
@@ -0,0 +1,700 @@
|
|||||||
|
# List of AMDGPU IDs
|
||||||
|
#
|
||||||
|
# Syntax:
|
||||||
|
# device_id, revision_id, product_name <-- single tab after comma
|
||||||
|
|
||||||
|
1.0.0
|
||||||
|
1114, C2, AMD Radeon 860M Graphics
|
||||||
|
1114, C3, AMD Radeon 840M Graphics
|
||||||
|
1114, D2, AMD Radeon 860M Graphics
|
||||||
|
1114, D3, AMD Radeon 840M Graphics
|
||||||
|
1309, 00, AMD Radeon R7 Graphics
|
||||||
|
130A, 00, AMD Radeon R6 Graphics
|
||||||
|
130B, 00, AMD Radeon R4 Graphics
|
||||||
|
130C, 00, AMD Radeon R7 Graphics
|
||||||
|
130D, 00, AMD Radeon R6 Graphics
|
||||||
|
130E, 00, AMD Radeon R5 Graphics
|
||||||
|
130F, 00, AMD Radeon R7 Graphics
|
||||||
|
130F, D4, AMD Radeon R7 Graphics
|
||||||
|
130F, D5, AMD Radeon R7 Graphics
|
||||||
|
130F, D6, AMD Radeon R7 Graphics
|
||||||
|
130F, D7, AMD Radeon R7 Graphics
|
||||||
|
1313, 00, AMD Radeon R7 Graphics
|
||||||
|
1313, D4, AMD Radeon R7 Graphics
|
||||||
|
1313, D5, AMD Radeon R7 Graphics
|
||||||
|
1313, D6, AMD Radeon R7 Graphics
|
||||||
|
1315, 00, AMD Radeon R5 Graphics
|
||||||
|
1315, D4, AMD Radeon R5 Graphics
|
||||||
|
1315, D5, AMD Radeon R5 Graphics
|
||||||
|
1315, D6, AMD Radeon R5 Graphics
|
||||||
|
1315, D7, AMD Radeon R5 Graphics
|
||||||
|
1316, 00, AMD Radeon R5 Graphics
|
||||||
|
1318, 00, AMD Radeon R5 Graphics
|
||||||
|
131B, 00, AMD Radeon R4 Graphics
|
||||||
|
131C, 00, AMD Radeon R7 Graphics
|
||||||
|
131D, 00, AMD Radeon R6 Graphics
|
||||||
|
1435, AE, AMD Custom GPU 0932
|
||||||
|
1506, C1, AMD Radeon 610M
|
||||||
|
1506, C2, AMD Radeon 610M
|
||||||
|
1506, C3, AMD Radeon 610M
|
||||||
|
1506, C4, AMD Radeon 610M
|
||||||
|
150E, C1, AMD Radeon 890M Graphics
|
||||||
|
150E, C4, AMD Radeon 890M Graphics
|
||||||
|
150E, C5, AMD Radeon 890M Graphics
|
||||||
|
150E, C6, AMD Radeon 890M Graphics
|
||||||
|
150E, D1, AMD Radeon 890M Graphics
|
||||||
|
150E, D2, AMD Radeon 890M Graphics
|
||||||
|
150E, D3, AMD Radeon 890M Graphics
|
||||||
|
1586, C1, Radeon 8060S Graphics
|
||||||
|
1586, C2, Radeon 8050S Graphics
|
||||||
|
1586, C4, Radeon 8050S Graphics
|
||||||
|
1586, D1, Radeon 8060S Graphics
|
||||||
|
1586, D2, Radeon 8050S Graphics
|
||||||
|
1586, D4, Radeon 8050S Graphics
|
||||||
|
1586, D5, Radeon 8040S Graphics
|
||||||
|
15BF, 00, AMD Radeon 780M Graphics
|
||||||
|
15BF, 01, AMD Radeon 760M Graphics
|
||||||
|
15BF, 02, AMD Radeon 780M Graphics
|
||||||
|
15BF, 03, AMD Radeon 760M Graphics
|
||||||
|
15BF, C1, AMD Radeon 780M Graphics
|
||||||
|
15BF, C2, AMD Radeon 780M Graphics
|
||||||
|
15BF, C3, AMD Radeon 760M Graphics
|
||||||
|
15BF, C4, AMD Radeon 780M Graphics
|
||||||
|
15BF, C5, AMD Radeon 740M Graphics
|
||||||
|
15BF, C6, AMD Radeon 780M Graphics
|
||||||
|
15BF, C7, AMD Radeon 780M Graphics
|
||||||
|
15BF, C8, AMD Radeon 760M Graphics
|
||||||
|
15BF, C9, AMD Radeon 780M Graphics
|
||||||
|
15BF, CA, AMD Radeon 740M Graphics
|
||||||
|
15BF, CB, AMD Radeon 760M Graphics
|
||||||
|
15BF, CC, AMD Radeon 740M Graphics
|
||||||
|
15BF, CD, AMD Radeon 760M Graphics
|
||||||
|
15BF, CF, AMD Radeon 780M Graphics
|
||||||
|
15BF, D0, AMD Radeon 780M Graphics
|
||||||
|
15BF, D1, AMD Radeon 780M Graphics
|
||||||
|
15BF, D2, AMD Radeon 780M Graphics
|
||||||
|
15BF, D3, AMD Radeon 780M Graphics
|
||||||
|
15BF, D4, AMD Radeon 780M Graphics
|
||||||
|
15BF, D5, AMD Radeon 760M Graphics
|
||||||
|
15BF, D6, AMD Radeon 760M Graphics
|
||||||
|
15BF, D7, AMD Radeon 780M Graphics
|
||||||
|
15BF, D8, AMD Radeon 740M Graphics
|
||||||
|
15BF, D9, AMD Radeon 780M Graphics
|
||||||
|
15BF, DA, AMD Radeon 780M Graphics
|
||||||
|
15BF, DB, AMD Radeon 760M Graphics
|
||||||
|
15BF, DC, AMD Radeon 760M Graphics
|
||||||
|
15BF, DD, AMD Radeon 780M Graphics
|
||||||
|
15BF, DE, AMD Radeon 740M Graphics
|
||||||
|
15BF, DF, AMD Radeon 760M Graphics
|
||||||
|
15BF, F0, AMD Radeon 760M Graphics
|
||||||
|
15C8, C1, AMD Radeon 740M Graphics
|
||||||
|
15C8, C2, AMD Radeon 740M Graphics
|
||||||
|
15C8, C3, AMD Radeon 740M Graphics
|
||||||
|
15C8, C4, AMD Radeon 740M Graphics
|
||||||
|
15C8, D1, AMD Radeon 740M Graphics
|
||||||
|
15C8, D2, AMD Radeon 740M Graphics
|
||||||
|
15C8, D3, AMD Radeon 740M Graphics
|
||||||
|
15C8, D4, AMD Radeon 740M Graphics
|
||||||
|
15D8, 00, AMD Radeon RX Vega 8 Graphics WS
|
||||||
|
15D8, 91, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, 91, AMD Ryzen Embedded R1606G with Radeon Vega Gfx
|
||||||
|
15D8, 92, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, 92, AMD Ryzen Embedded R1505G with Radeon Vega Gfx
|
||||||
|
15D8, 93, AMD Radeon Vega 1 Graphics
|
||||||
|
15D8, A1, AMD Radeon Vega 10 Graphics
|
||||||
|
15D8, A2, AMD Radeon Vega 8 Graphics
|
||||||
|
15D8, A3, AMD Radeon Vega 6 Graphics
|
||||||
|
15D8, A4, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, B1, AMD Radeon Vega 10 Graphics
|
||||||
|
15D8, B2, AMD Radeon Vega 8 Graphics
|
||||||
|
15D8, B3, AMD Radeon Vega 6 Graphics
|
||||||
|
15D8, B4, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, C1, AMD Radeon Vega 10 Graphics
|
||||||
|
15D8, C2, AMD Radeon Vega 8 Graphics
|
||||||
|
15D8, C3, AMD Radeon Vega 6 Graphics
|
||||||
|
15D8, C4, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, C5, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, C8, AMD Radeon Vega 11 Graphics
|
||||||
|
15D8, C9, AMD Radeon Vega 8 Graphics
|
||||||
|
15D8, CA, AMD Radeon Vega 11 Graphics
|
||||||
|
15D8, CB, AMD Radeon Vega 8 Graphics
|
||||||
|
15D8, CC, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, CE, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, CF, AMD Ryzen Embedded R1305G with Radeon Vega Gfx
|
||||||
|
15D8, D1, AMD Radeon Vega 10 Graphics
|
||||||
|
15D8, D2, AMD Radeon Vega 8 Graphics
|
||||||
|
15D8, D3, AMD Radeon Vega 6 Graphics
|
||||||
|
15D8, D4, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, D8, AMD Radeon Vega 11 Graphics
|
||||||
|
15D8, D9, AMD Radeon Vega 8 Graphics
|
||||||
|
15D8, DA, AMD Radeon Vega 11 Graphics
|
||||||
|
15D8, DB, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, DB, AMD Radeon Vega 8 Graphics
|
||||||
|
15D8, DC, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, DD, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, DE, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, DF, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, E3, AMD Radeon Vega 3 Graphics
|
||||||
|
15D8, E4, AMD Ryzen Embedded R1102G with Radeon Vega Gfx
|
||||||
|
15DD, 81, AMD Ryzen Embedded V1807B with Radeon Vega Gfx
|
||||||
|
15DD, 82, AMD Ryzen Embedded V1756B with Radeon Vega Gfx
|
||||||
|
15DD, 83, AMD Ryzen Embedded V1605B with Radeon Vega Gfx
|
||||||
|
15DD, 84, AMD Radeon Vega 6 Graphics
|
||||||
|
15DD, 85, AMD Ryzen Embedded V1202B with Radeon Vega Gfx
|
||||||
|
15DD, 86, AMD Radeon Vega 11 Graphics
|
||||||
|
15DD, 88, AMD Radeon Vega 8 Graphics
|
||||||
|
15DD, C1, AMD Radeon Vega 11 Graphics
|
||||||
|
15DD, C2, AMD Radeon Vega 8 Graphics
|
||||||
|
15DD, C3, AMD Radeon Vega 3 / 10 Graphics
|
||||||
|
15DD, C4, AMD Radeon Vega 8 Graphics
|
||||||
|
15DD, C5, AMD Radeon Vega 3 Graphics
|
||||||
|
15DD, C6, AMD Radeon Vega 11 Graphics
|
||||||
|
15DD, C8, AMD Radeon Vega 8 Graphics
|
||||||
|
15DD, C9, AMD Radeon Vega 11 Graphics
|
||||||
|
15DD, CA, AMD Radeon Vega 8 Graphics
|
||||||
|
15DD, CB, AMD Radeon Vega 3 Graphics
|
||||||
|
15DD, CC, AMD Radeon Vega 6 Graphics
|
||||||
|
15DD, CE, AMD Radeon Vega 3 Graphics
|
||||||
|
15DD, CF, AMD Radeon Vega 3 Graphics
|
||||||
|
15DD, D0, AMD Radeon Vega 10 Graphics
|
||||||
|
15DD, D1, AMD Radeon Vega 8 Graphics
|
||||||
|
15DD, D3, AMD Radeon Vega 11 Graphics
|
||||||
|
15DD, D5, AMD Radeon Vega 8 Graphics
|
||||||
|
15DD, D6, AMD Radeon Vega 11 Graphics
|
||||||
|
15DD, D7, AMD Radeon Vega 8 Graphics
|
||||||
|
15DD, D8, AMD Radeon Vega 3 Graphics
|
||||||
|
15DD, D9, AMD Radeon Vega 6 Graphics
|
||||||
|
15DD, E1, AMD Radeon Vega 3 Graphics
|
||||||
|
15DD, E2, AMD Radeon Vega 3 Graphics
|
||||||
|
163F, AE, AMD Custom GPU 0405
|
||||||
|
163F, E1, AMD Custom GPU 0405
|
||||||
|
164E, D8, AMD Radeon 610M
|
||||||
|
164E, D9, AMD Radeon 610M
|
||||||
|
164E, DA, AMD Radeon 610M
|
||||||
|
164E, DB, AMD Radeon 610M
|
||||||
|
164E, DC, AMD Radeon 610M
|
||||||
|
1681, 06, AMD Radeon 680M
|
||||||
|
1681, 07, AMD Radeon 660M
|
||||||
|
1681, 0A, AMD Radeon 680M
|
||||||
|
1681, 0B, AMD Radeon 660M
|
||||||
|
1681, C7, AMD Radeon 680M
|
||||||
|
1681, C8, AMD Radeon 680M
|
||||||
|
1681, C9, AMD Radeon 660M
|
||||||
|
1900, 01, AMD Radeon 780M Graphics
|
||||||
|
1900, 02, AMD Radeon 760M Graphics
|
||||||
|
1900, 03, AMD Radeon 780M Graphics
|
||||||
|
1900, 04, AMD Radeon 760M Graphics
|
||||||
|
1900, 05, AMD Radeon 780M Graphics
|
||||||
|
1900, 06, AMD Radeon 780M Graphics
|
||||||
|
1900, 07, AMD Radeon 760M Graphics
|
||||||
|
1900, B0, AMD Radeon 780M Graphics
|
||||||
|
1900, B1, AMD Radeon 780M Graphics
|
||||||
|
1900, B2, AMD Radeon 780M Graphics
|
||||||
|
1900, B3, AMD Radeon 780M Graphics
|
||||||
|
1900, B4, AMD Radeon 780M Graphics
|
||||||
|
1900, B5, AMD Radeon 780M Graphics
|
||||||
|
1900, B6, AMD Radeon 780M Graphics
|
||||||
|
1900, B7, AMD Radeon 760M Graphics
|
||||||
|
1900, B8, AMD Radeon 760M Graphics
|
||||||
|
1900, B9, AMD Radeon 780M Graphics
|
||||||
|
1900, BA, AMD Radeon 780M Graphics
|
||||||
|
1900, BB, AMD Radeon 780M Graphics
|
||||||
|
1900, C0, AMD Radeon 780M Graphics
|
||||||
|
1900, C1, AMD Radeon 760M Graphics
|
||||||
|
1900, C2, AMD Radeon 780M Graphics
|
||||||
|
1900, C3, AMD Radeon 760M Graphics
|
||||||
|
1900, C4, AMD Radeon 780M Graphics
|
||||||
|
1900, C5, AMD Radeon 780M Graphics
|
||||||
|
1900, C6, AMD Radeon 760M Graphics
|
||||||
|
1900, C7, AMD Radeon 780M Graphics
|
||||||
|
1900, C8, AMD Radeon 760M Graphics
|
||||||
|
1900, C9, AMD Radeon 780M Graphics
|
||||||
|
1900, CA, AMD Radeon 760M Graphics
|
||||||
|
1900, CB, AMD Radeon 780M Graphics
|
||||||
|
1900, CC, AMD Radeon 780M Graphics
|
||||||
|
1900, CD, AMD Radeon 760M Graphics
|
||||||
|
1900, CE, AMD Radeon 780M Graphics
|
||||||
|
1900, CF, AMD Radeon 760M Graphics
|
||||||
|
1900, D0, AMD Radeon 780M Graphics
|
||||||
|
1900, D1, AMD Radeon 760M Graphics
|
||||||
|
1900, D2, AMD Radeon 780M Graphics
|
||||||
|
1900, D3, AMD Radeon 760M Graphics
|
||||||
|
1900, D4, AMD Radeon 780M Graphics
|
||||||
|
1900, D5, AMD Radeon 780M Graphics
|
||||||
|
1900, D6, AMD Radeon 760M Graphics
|
||||||
|
1900, D7, AMD Radeon 780M Graphics
|
||||||
|
1900, D8, AMD Radeon 760M Graphics
|
||||||
|
1900, D9, AMD Radeon 780M Graphics
|
||||||
|
1900, DA, AMD Radeon 760M Graphics
|
||||||
|
1900, DB, AMD Radeon 780M Graphics
|
||||||
|
1900, DC, AMD Radeon 780M Graphics
|
||||||
|
1900, DD, AMD Radeon 760M Graphics
|
||||||
|
1900, DE, AMD Radeon 780M Graphics
|
||||||
|
1900, DF, AMD Radeon 760M Graphics
|
||||||
|
1900, F0, AMD Radeon 780M Graphics
|
||||||
|
1900, F1, AMD Radeon 780M Graphics
|
||||||
|
1900, F2, AMD Radeon 780M Graphics
|
||||||
|
1901, C1, AMD Radeon 740M Graphics
|
||||||
|
1901, C2, AMD Radeon 740M Graphics
|
||||||
|
1901, C3, AMD Radeon 740M Graphics
|
||||||
|
1901, C6, AMD Radeon 740M Graphics
|
||||||
|
1901, C7, AMD Radeon 740M Graphics
|
||||||
|
1901, C8, AMD Radeon 740M Graphics
|
||||||
|
1901, C9, AMD Radeon 740M Graphics
|
||||||
|
1901, CA, AMD Radeon 740M Graphics
|
||||||
|
1901, D1, AMD Radeon 740M Graphics
|
||||||
|
1901, D2, AMD Radeon 740M Graphics
|
||||||
|
1901, D3, AMD Radeon 740M Graphics
|
||||||
|
1901, D4, AMD Radeon 740M Graphics
|
||||||
|
1901, D5, AMD Radeon 740M Graphics
|
||||||
|
1901, D6, AMD Radeon 740M Graphics
|
||||||
|
1901, D7, AMD Radeon 740M Graphics
|
||||||
|
1901, D8, AMD Radeon 740M Graphics
|
||||||
|
6600, 00, AMD Radeon HD 8600 / 8700M
|
||||||
|
6600, 81, AMD Radeon R7 M370
|
||||||
|
6601, 00, AMD Radeon HD 8500M / 8700M
|
||||||
|
6604, 00, AMD Radeon R7 M265 Series
|
||||||
|
6604, 81, AMD Radeon R7 M350
|
||||||
|
6605, 00, AMD Radeon R7 M260 Series
|
||||||
|
6605, 81, AMD Radeon R7 M340
|
||||||
|
6606, 00, AMD Radeon HD 8790M
|
||||||
|
6607, 00, AMD Radeon R5 M240
|
||||||
|
6608, 00, AMD FirePro W2100
|
||||||
|
6610, 00, AMD Radeon R7 200 Series
|
||||||
|
6610, 81, AMD Radeon R7 350
|
||||||
|
6610, 83, AMD Radeon R5 340
|
||||||
|
6610, 87, AMD Radeon R7 200 Series
|
||||||
|
6611, 00, AMD Radeon R7 200 Series
|
||||||
|
6611, 87, AMD Radeon R7 200 Series
|
||||||
|
6613, 00, AMD Radeon R7 200 Series
|
||||||
|
6617, 00, AMD Radeon R7 240 Series
|
||||||
|
6617, 87, AMD Radeon R7 200 Series
|
||||||
|
6617, C7, AMD Radeon R7 240 Series
|
||||||
|
6640, 00, AMD Radeon HD 8950
|
||||||
|
6640, 80, AMD Radeon R9 M380
|
||||||
|
6646, 00, AMD Radeon R9 M280X
|
||||||
|
6646, 80, AMD Radeon R9 M385
|
||||||
|
6646, 80, AMD Radeon R9 M470X
|
||||||
|
6647, 00, AMD Radeon R9 M200X Series
|
||||||
|
6647, 80, AMD Radeon R9 M380
|
||||||
|
6649, 00, AMD FirePro W5100
|
||||||
|
6658, 00, AMD Radeon R7 200 Series
|
||||||
|
665C, 00, AMD Radeon HD 7700 Series
|
||||||
|
665D, 00, AMD Radeon R7 200 Series
|
||||||
|
665F, 81, AMD Radeon R7 360 Series
|
||||||
|
6660, 00, AMD Radeon HD 8600M Series
|
||||||
|
6660, 81, AMD Radeon R5 M335
|
||||||
|
6660, 83, AMD Radeon R5 M330
|
||||||
|
6663, 00, AMD Radeon HD 8500M Series
|
||||||
|
6663, 83, AMD Radeon R5 M320
|
||||||
|
6664, 00, AMD Radeon R5 M200 Series
|
||||||
|
6665, 00, AMD Radeon R5 M230 Series
|
||||||
|
6665, 83, AMD Radeon R5 M320
|
||||||
|
6665, C3, AMD Radeon R5 M435
|
||||||
|
6666, 00, AMD Radeon R5 M200 Series
|
||||||
|
6667, 00, AMD Radeon R5 M200 Series
|
||||||
|
666F, 00, AMD Radeon HD 8500M
|
||||||
|
66A1, 02, AMD Instinct MI60 / MI50
|
||||||
|
66A1, 06, AMD Radeon Pro VII
|
||||||
|
66AF, C1, AMD Radeon VII
|
||||||
|
6780, 00, AMD FirePro W9000
|
||||||
|
6784, 00, ATI FirePro V (FireGL V) Graphics Adapter
|
||||||
|
6788, 00, ATI FirePro V (FireGL V) Graphics Adapter
|
||||||
|
678A, 00, AMD FirePro W8000
|
||||||
|
6798, 00, AMD Radeon R9 200 / HD 7900 Series
|
||||||
|
6799, 00, AMD Radeon HD 7900 Series
|
||||||
|
679A, 00, AMD Radeon HD 7900 Series
|
||||||
|
679B, 00, AMD Radeon HD 7900 Series
|
||||||
|
679E, 00, AMD Radeon HD 7800 Series
|
||||||
|
67A0, 00, AMD Radeon FirePro W9100
|
||||||
|
67A1, 00, AMD Radeon FirePro W8100
|
||||||
|
67B0, 00, AMD Radeon R9 200 Series
|
||||||
|
67B0, 80, AMD Radeon R9 390 Series
|
||||||
|
67B1, 00, AMD Radeon R9 200 Series
|
||||||
|
67B1, 80, AMD Radeon R9 390 Series
|
||||||
|
67B9, 00, AMD Radeon R9 200 Series
|
||||||
|
67C0, 00, AMD Radeon Pro WX 7100 Graphics
|
||||||
|
67C0, 80, AMD Radeon E9550
|
||||||
|
67C2, 01, AMD Radeon Pro V7350x2
|
||||||
|
67C2, 02, AMD Radeon Pro V7300X
|
||||||
|
67C4, 00, AMD Radeon Pro WX 7100 Graphics
|
||||||
|
67C4, 80, AMD Radeon E9560 / E9565 Graphics
|
||||||
|
67C7, 00, AMD Radeon Pro WX 5100 Graphics
|
||||||
|
67C7, 80, AMD Radeon E9390 Graphics
|
||||||
|
67D0, 01, AMD Radeon Pro V7350x2
|
||||||
|
67D0, 02, AMD Radeon Pro V7300X
|
||||||
|
67DF, C0, AMD Radeon Pro 580X
|
||||||
|
67DF, C1, AMD Radeon RX 580 Series
|
||||||
|
67DF, C2, AMD Radeon RX 570 Series
|
||||||
|
67DF, C3, AMD Radeon RX 580 Series
|
||||||
|
67DF, C4, AMD Radeon RX 480 Graphics
|
||||||
|
67DF, C5, AMD Radeon RX 470 Graphics
|
||||||
|
67DF, C6, AMD Radeon RX 570 Series
|
||||||
|
67DF, C7, AMD Radeon RX 480 Graphics
|
||||||
|
67DF, CF, AMD Radeon RX 470 Graphics
|
||||||
|
67DF, D7, AMD Radeon RX 470 Graphics
|
||||||
|
67DF, E0, AMD Radeon RX 470 Series
|
||||||
|
67DF, E1, AMD Radeon RX 590 Series
|
||||||
|
67DF, E3, AMD Radeon RX Series
|
||||||
|
67DF, E7, AMD Radeon RX 580 Series
|
||||||
|
67DF, EB, AMD Radeon Pro 580X
|
||||||
|
67DF, EF, AMD Radeon RX 570 Series
|
||||||
|
67DF, F7, AMD Radeon RX P30PH
|
||||||
|
67DF, FF, AMD Radeon RX 470 Series
|
||||||
|
67E0, 00, AMD Radeon Pro WX Series
|
||||||
|
67E3, 00, AMD Radeon Pro WX 4100
|
||||||
|
67E8, 00, AMD Radeon Pro WX Series
|
||||||
|
67E8, 01, AMD Radeon Pro WX Series
|
||||||
|
67E8, 80, AMD Radeon E9260 Graphics
|
||||||
|
67EB, 00, AMD Radeon Pro V5300X
|
||||||
|
67EF, C0, AMD Radeon RX Graphics
|
||||||
|
67EF, C1, AMD Radeon RX 460 Graphics
|
||||||
|
67EF, C2, AMD Radeon Pro Series
|
||||||
|
67EF, C3, AMD Radeon RX Series
|
||||||
|
67EF, C5, AMD Radeon RX 460 Graphics
|
||||||
|
67EF, C7, AMD Radeon RX Graphics
|
||||||
|
67EF, CF, AMD Radeon RX 460 Graphics
|
||||||
|
67EF, E0, AMD Radeon RX 560 Series
|
||||||
|
67EF, E1, AMD Radeon RX Series
|
||||||
|
67EF, E2, AMD Radeon RX 560X
|
||||||
|
67EF, E3, AMD Radeon RX Series
|
||||||
|
67EF, E5, AMD Radeon RX 560 Series
|
||||||
|
67EF, E7, AMD Radeon RX 560 Series
|
||||||
|
67EF, EF, AMD Radeon 550 Series
|
||||||
|
67EF, FF, AMD Radeon RX 460 Graphics
|
||||||
|
67FF, C0, AMD Radeon Pro 465
|
||||||
|
67FF, C1, AMD Radeon RX 560 Series
|
||||||
|
67FF, CF, AMD Radeon RX 560 Series
|
||||||
|
67FF, EF, AMD Radeon RX 560 Series
|
||||||
|
67FF, FF, AMD Radeon RX 550 Series
|
||||||
|
6800, 00, AMD Radeon HD 7970M
|
||||||
|
6801, 00, AMD Radeon HD 8970M
|
||||||
|
6806, 00, AMD Radeon R9 M290X
|
||||||
|
6808, 00, AMD FirePro W7000
|
||||||
|
6808, 00, ATI FirePro V (FireGL V) Graphics Adapter
|
||||||
|
6809, 00, ATI FirePro W5000
|
||||||
|
6810, 00, AMD Radeon R9 200 Series
|
||||||
|
6810, 81, AMD Radeon R9 370 Series
|
||||||
|
6811, 00, AMD Radeon R9 200 Series
|
||||||
|
6811, 81, AMD Radeon R7 370 Series
|
||||||
|
6818, 00, AMD Radeon HD 7800 Series
|
||||||
|
6819, 00, AMD Radeon HD 7800 Series
|
||||||
|
6820, 00, AMD Radeon R9 M275X
|
||||||
|
6820, 81, AMD Radeon R9 M375
|
||||||
|
6820, 83, AMD Radeon R9 M375X
|
||||||
|
6821, 00, AMD Radeon R9 M200X Series
|
||||||
|
6821, 83, AMD Radeon R9 M370X
|
||||||
|
6821, 87, AMD Radeon R7 M380
|
||||||
|
6822, 00, AMD Radeon E8860
|
||||||
|
6823, 00, AMD Radeon R9 M200X Series
|
||||||
|
6825, 00, AMD Radeon HD 7800M Series
|
||||||
|
6826, 00, AMD Radeon HD 7700M Series
|
||||||
|
6827, 00, AMD Radeon HD 7800M Series
|
||||||
|
6828, 00, AMD FirePro W600
|
||||||
|
682B, 00, AMD Radeon HD 8800M Series
|
||||||
|
682B, 87, AMD Radeon R9 M360
|
||||||
|
682C, 00, AMD FirePro W4100
|
||||||
|
682D, 00, AMD Radeon HD 7700M Series
|
||||||
|
682F, 00, AMD Radeon HD 7700M Series
|
||||||
|
6830, 00, AMD Radeon 7800M Series
|
||||||
|
6831, 00, AMD Radeon 7700M Series
|
||||||
|
6835, 00, AMD Radeon R7 Series / HD 9000 Series
|
||||||
|
6837, 00, AMD Radeon HD 7700 Series
|
||||||
|
683D, 00, AMD Radeon HD 7700 Series
|
||||||
|
683F, 00, AMD Radeon HD 7700 Series
|
||||||
|
684C, 00, ATI FirePro V (FireGL V) Graphics Adapter
|
||||||
|
6860, 00, AMD Radeon Instinct MI25
|
||||||
|
6860, 01, AMD Radeon Instinct MI25
|
||||||
|
6860, 02, AMD Radeon Instinct MI25
|
||||||
|
6860, 03, AMD Radeon Pro V340
|
||||||
|
6860, 04, AMD Radeon Instinct MI25x2
|
||||||
|
6860, 07, AMD Radeon Pro V320
|
||||||
|
6861, 00, AMD Radeon Pro WX 9100
|
||||||
|
6862, 00, AMD Radeon Pro SSG
|
||||||
|
6863, 00, AMD Radeon Vega Frontier Edition
|
||||||
|
6864, 03, AMD Radeon Pro V340
|
||||||
|
6864, 04, AMD Radeon Instinct MI25x2
|
||||||
|
6864, 05, AMD Radeon Pro V340
|
||||||
|
6868, 00, AMD Radeon Pro WX 8200
|
||||||
|
686C, 00, AMD Radeon Instinct MI25 MxGPU
|
||||||
|
686C, 01, AMD Radeon Instinct MI25 MxGPU
|
||||||
|
686C, 02, AMD Radeon Instinct MI25 MxGPU
|
||||||
|
686C, 03, AMD Radeon Pro V340 MxGPU
|
||||||
|
686C, 04, AMD Radeon Instinct MI25x2 MxGPU
|
||||||
|
686C, 05, AMD Radeon Pro V340L MxGPU
|
||||||
|
686C, 06, AMD Radeon Instinct MI25 MxGPU
|
||||||
|
687F, 01, AMD Radeon RX Vega
|
||||||
|
687F, C0, AMD Radeon RX Vega
|
||||||
|
687F, C1, AMD Radeon RX Vega
|
||||||
|
687F, C3, AMD Radeon RX Vega
|
||||||
|
687F, C7, AMD Radeon RX Vega
|
||||||
|
6900, 00, AMD Radeon R7 M260
|
||||||
|
6900, 81, AMD Radeon R7 M360
|
||||||
|
6900, 83, AMD Radeon R7 M340
|
||||||
|
6900, C1, AMD Radeon R5 M465 Series
|
||||||
|
6900, C3, AMD Radeon R5 M445 Series
|
||||||
|
6900, D1, AMD Radeon 530 Series
|
||||||
|
6900, D3, AMD Radeon 530 Series
|
||||||
|
6901, 00, AMD Radeon R5 M255
|
||||||
|
6902, 00, AMD Radeon Series
|
||||||
|
6907, 00, AMD Radeon R5 M255
|
||||||
|
6907, 87, AMD Radeon R5 M315
|
||||||
|
6920, 00, AMD Radeon R9 M395X
|
||||||
|
6920, 01, AMD Radeon R9 M390X
|
||||||
|
6921, 00, AMD Radeon R9 M390X
|
||||||
|
6929, 00, AMD FirePro S7150
|
||||||
|
6929, 01, AMD FirePro S7100X
|
||||||
|
692B, 00, AMD FirePro W7100
|
||||||
|
6938, 00, AMD Radeon R9 200 Series
|
||||||
|
6938, F0, AMD Radeon R9 200 Series
|
||||||
|
6938, F1, AMD Radeon R9 380 Series
|
||||||
|
6939, 00, AMD Radeon R9 200 Series
|
||||||
|
6939, F0, AMD Radeon R9 200 Series
|
||||||
|
6939, F1, AMD Radeon R9 380 Series
|
||||||
|
694C, C0, AMD Radeon RX Vega M GH Graphics
|
||||||
|
694E, C0, AMD Radeon RX Vega M GL Graphics
|
||||||
|
6980, 00, AMD Radeon Pro WX 3100
|
||||||
|
6981, 00, AMD Radeon Pro WX 3200 Series
|
||||||
|
6981, 01, AMD Radeon Pro WX 3200 Series
|
||||||
|
6981, 10, AMD Radeon Pro WX 3200 Series
|
||||||
|
6985, 00, AMD Radeon Pro WX 3100
|
||||||
|
6986, 00, AMD Radeon Pro WX 2100
|
||||||
|
6987, 80, AMD Embedded Radeon E9171
|
||||||
|
6987, C0, AMD Radeon 550X Series
|
||||||
|
6987, C1, AMD Radeon RX 640
|
||||||
|
6987, C3, AMD Radeon 540X Series
|
||||||
|
6987, C7, AMD Radeon 540
|
||||||
|
6995, 00, AMD Radeon Pro WX 2100
|
||||||
|
6997, 00, AMD Radeon Pro WX 2100
|
||||||
|
699F, 81, AMD Embedded Radeon E9170 Series
|
||||||
|
699F, C0, AMD Radeon 500 Series
|
||||||
|
699F, C1, AMD Radeon 540 Series
|
||||||
|
699F, C3, AMD Radeon 500 Series
|
||||||
|
699F, C7, AMD Radeon RX 550 / 550 Series
|
||||||
|
699F, C9, AMD Radeon 540
|
||||||
|
6FDF, E7, AMD Radeon RX 590 GME
|
||||||
|
6FDF, EF, AMD Radeon RX 580 2048SP
|
||||||
|
7300, C1, AMD FirePro S9300 x2
|
||||||
|
7300, C8, AMD Radeon R9 Fury Series
|
||||||
|
7300, C9, AMD Radeon Pro Duo
|
||||||
|
7300, CA, AMD Radeon R9 Fury Series
|
||||||
|
7300, CB, AMD Radeon R9 Fury Series
|
||||||
|
7312, 00, AMD Radeon Pro W5700
|
||||||
|
731E, C6, AMD Radeon RX 5700XTB
|
||||||
|
731E, C7, AMD Radeon RX 5700B
|
||||||
|
731F, C0, AMD Radeon RX 5700 XT 50th Anniversary
|
||||||
|
731F, C1, AMD Radeon RX 5700 XT
|
||||||
|
731F, C2, AMD Radeon RX 5600M
|
||||||
|
731F, C3, AMD Radeon RX 5700M
|
||||||
|
731F, C4, AMD Radeon RX 5700
|
||||||
|
731F, C5, AMD Radeon RX 5700 XT
|
||||||
|
731F, CA, AMD Radeon RX 5600 XT
|
||||||
|
731F, CB, AMD Radeon RX 5600 OEM
|
||||||
|
7340, C1, AMD Radeon RX 5500M
|
||||||
|
7340, C3, AMD Radeon RX 5300M
|
||||||
|
7340, C5, AMD Radeon RX 5500 XT
|
||||||
|
7340, C7, AMD Radeon RX 5500
|
||||||
|
7340, C9, AMD Radeon RX 5500XTB
|
||||||
|
7340, CF, AMD Radeon RX 5300
|
||||||
|
7341, 00, AMD Radeon Pro W5500
|
||||||
|
7347, 00, AMD Radeon Pro W5500M
|
||||||
|
7360, 41, AMD Radeon Pro 5600M
|
||||||
|
7360, C3, AMD Radeon Pro V520
|
||||||
|
7362, C1, AMD Radeon Pro V540
|
||||||
|
7362, C3, AMD Radeon Pro V520
|
||||||
|
738C, 01, AMD Instinct MI100
|
||||||
|
73A1, 00, AMD Radeon Pro V620
|
||||||
|
73A3, 00, AMD Radeon Pro W6800
|
||||||
|
73A5, C0, AMD Radeon RX 6950 XT
|
||||||
|
73AE, 00, AMD Radeon Pro V620 MxGPU
|
||||||
|
73AF, C0, AMD Radeon RX 6900 XT
|
||||||
|
73BF, C0, AMD Radeon RX 6900 XT
|
||||||
|
73BF, C1, AMD Radeon RX 6800 XT
|
||||||
|
73BF, C3, AMD Radeon RX 6800
|
||||||
|
73DF, C0, AMD Radeon RX 6750 XT
|
||||||
|
73DF, C1, AMD Radeon RX 6700 XT
|
||||||
|
73DF, C2, AMD Radeon RX 6800M
|
||||||
|
73DF, C3, AMD Radeon RX 6800M
|
||||||
|
73DF, C5, AMD Radeon RX 6700 XT
|
||||||
|
73DF, CF, AMD Radeon RX 6700M
|
||||||
|
73DF, D5, AMD Radeon RX 6750 GRE 12GB
|
||||||
|
73DF, D7, AMD TDC-235
|
||||||
|
73DF, DF, AMD Radeon RX 6700
|
||||||
|
73DF, E5, AMD Radeon RX 6750 GRE 12GB
|
||||||
|
73DF, FF, AMD Radeon RX 6700
|
||||||
|
73E0, 00, AMD Radeon RX 6600M
|
||||||
|
73E1, 00, AMD Radeon Pro W6600M
|
||||||
|
73E3, 00, AMD Radeon Pro W6600
|
||||||
|
73EF, C0, AMD Radeon RX 6800S
|
||||||
|
73EF, C1, AMD Radeon RX 6650 XT
|
||||||
|
73EF, C2, AMD Radeon RX 6700S
|
||||||
|
73EF, C3, AMD Radeon RX 6650M
|
||||||
|
73EF, C4, AMD Radeon RX 6650M XT
|
||||||
|
73FF, C1, AMD Radeon RX 6600 XT
|
||||||
|
73FF, C3, AMD Radeon RX 6600M
|
||||||
|
73FF, C7, AMD Radeon RX 6600
|
||||||
|
73FF, CB, AMD Radeon RX 6600S
|
||||||
|
73FF, CF, AMD Radeon RX 6600 LE
|
||||||
|
73FF, DF, AMD Radeon RX 6750 GRE 10GB
|
||||||
|
7408, 00, AMD Instinct MI250X
|
||||||
|
740C, 01, AMD Instinct MI250X / MI250
|
||||||
|
740F, 02, AMD Instinct MI210
|
||||||
|
7421, 00, AMD Radeon Pro W6500M
|
||||||
|
7422, 00, AMD Radeon Pro W6400
|
||||||
|
7423, 00, AMD Radeon Pro W6300M
|
||||||
|
7423, 01, AMD Radeon Pro W6300
|
||||||
|
7424, 00, AMD Radeon RX 6300
|
||||||
|
743F, C1, AMD Radeon RX 6500 XT
|
||||||
|
743F, C3, AMD Radeon RX 6500
|
||||||
|
743F, C3, AMD Radeon RX 6500M
|
||||||
|
743F, C7, AMD Radeon RX 6400
|
||||||
|
743F, C8, AMD Radeon RX 6500M
|
||||||
|
743F, CC, AMD Radeon 6550S
|
||||||
|
743F, CE, AMD Radeon RX 6450M
|
||||||
|
743F, CF, AMD Radeon RX 6300M
|
||||||
|
743F, D3, AMD Radeon RX 6550M
|
||||||
|
743F, D7, AMD Radeon RX 6400
|
||||||
|
7448, 00, AMD Radeon Pro W7900
|
||||||
|
7449, 00, AMD Radeon Pro W7800 48GB
|
||||||
|
744A, 00, AMD Radeon Pro W7900 Dual Slot
|
||||||
|
744B, 00, AMD Radeon Pro W7900D
|
||||||
|
744C, C8, AMD Radeon RX 7900 XTX
|
||||||
|
744C, CC, AMD Radeon RX 7900 XT
|
||||||
|
744C, CE, AMD Radeon RX 7900 GRE
|
||||||
|
744C, CF, AMD Radeon RX 7900M
|
||||||
|
745E, CC, AMD Radeon Pro W7800
|
||||||
|
7460, 00, AMD Radeon Pro V710
|
||||||
|
7461, 00, AMD Radeon Pro V710 MxGPU
|
||||||
|
7470, 00, AMD Radeon Pro W7700
|
||||||
|
747E, C8, AMD Radeon RX 7800 XT
|
||||||
|
747E, D8, AMD Radeon RX 7800M
|
||||||
|
747E, DB, AMD Radeon RX 7700
|
||||||
|
747E, FF, AMD Radeon RX 7700 XT
|
||||||
|
7480, 00, AMD Radeon Pro W7600
|
||||||
|
7480, C0, AMD Radeon RX 7600 XT
|
||||||
|
7480, C1, AMD Radeon RX 7700S
|
||||||
|
7480, C2, AMD Radeon RX 7650 GRE
|
||||||
|
7480, C3, AMD Radeon RX 7600S
|
||||||
|
7480, C7, AMD Radeon RX 7600M XT
|
||||||
|
7480, CF, AMD Radeon RX 7600
|
||||||
|
7481, C7, AMD Steam Machine
|
||||||
|
7483, CF, AMD Radeon RX 7600M
|
||||||
|
7489, 00, AMD Radeon Pro W7500
|
||||||
|
7499, 00, AMD Radeon Pro W7400
|
||||||
|
7499, C0, AMD Radeon RX 7400
|
||||||
|
7499, C1, AMD Radeon RX 7300
|
||||||
|
74A0, 00, AMD Instinct MI300A
|
||||||
|
74A1, 00, AMD Instinct MI300X
|
||||||
|
74A2, 00, AMD Instinct MI308X
|
||||||
|
74A5, 00, AMD Instinct MI325X
|
||||||
|
74A8, 00, AMD Instinct MI308X HF
|
||||||
|
74A9, 00, AMD Instinct MI300X HF
|
||||||
|
74B5, 00, AMD Instinct MI300X VF
|
||||||
|
74B6, 00, AMD Instinct MI308X
|
||||||
|
74BD, 00, AMD Instinct MI300X HF
|
||||||
|
7550, C0, AMD Radeon RX 9070 XT
|
||||||
|
7550, C2, AMD Radeon RX 9070 GRE
|
||||||
|
7550, C3, AMD Radeon RX 9070
|
||||||
|
7551, C0, AMD Radeon AI PRO R9700
|
||||||
|
7590, C0, AMD Radeon RX 9060 XT
|
||||||
|
7590, C7, AMD Radeon RX 9060
|
||||||
|
75A0, C0, AMD Instinct MI350X
|
||||||
|
75A3, C0, AMD Instinct MI355X
|
||||||
|
75B0, C0, AMD Instinct MI350X VF
|
||||||
|
75B3, C0, AMD Instinct MI355X VF
|
||||||
|
9830, 00, AMD Radeon HD 8400 / R3 Series
|
||||||
|
9831, 00, AMD Radeon HD 8400E
|
||||||
|
9832, 00, AMD Radeon HD 8330
|
||||||
|
9833, 00, AMD Radeon HD 8330E
|
||||||
|
9834, 00, AMD Radeon HD 8210
|
||||||
|
9835, 00, AMD Radeon HD 8210E
|
||||||
|
9836, 00, AMD Radeon HD 8200 / R3 Series
|
||||||
|
9837, 00, AMD Radeon HD 8280E
|
||||||
|
9838, 00, AMD Radeon HD 8200 / R3 series
|
||||||
|
9839, 00, AMD Radeon HD 8180
|
||||||
|
983D, 00, AMD Radeon HD 8250
|
||||||
|
9850, 00, AMD Radeon R3 Graphics
|
||||||
|
9850, 03, AMD Radeon R3 Graphics
|
||||||
|
9850, 40, AMD Radeon R2 Graphics
|
||||||
|
9850, 45, AMD Radeon R3 Graphics
|
||||||
|
9851, 00, AMD Radeon R4 Graphics
|
||||||
|
9851, 01, AMD Radeon R5E Graphics
|
||||||
|
9851, 05, AMD Radeon R5 Graphics
|
||||||
|
9851, 06, AMD Radeon R5E Graphics
|
||||||
|
9851, 40, AMD Radeon R4 Graphics
|
||||||
|
9851, 45, AMD Radeon R5 Graphics
|
||||||
|
9852, 00, AMD Radeon R2 Graphics
|
||||||
|
9852, 40, AMD Radeon E1 Graphics
|
||||||
|
9853, 00, AMD Radeon R2 Graphics
|
||||||
|
9853, 01, AMD Radeon R4E Graphics
|
||||||
|
9853, 03, AMD Radeon R2 Graphics
|
||||||
|
9853, 05, AMD Radeon R1E Graphics
|
||||||
|
9853, 06, AMD Radeon R1E Graphics
|
||||||
|
9853, 07, AMD Radeon R1E Graphics
|
||||||
|
9853, 08, AMD Radeon R1E Graphics
|
||||||
|
9853, 40, AMD Radeon R2 Graphics
|
||||||
|
9854, 00, AMD Radeon R3 Graphics
|
||||||
|
9854, 01, AMD Radeon R3E Graphics
|
||||||
|
9854, 02, AMD Radeon R3 Graphics
|
||||||
|
9854, 05, AMD Radeon R2 Graphics
|
||||||
|
9854, 06, AMD Radeon R4 Graphics
|
||||||
|
9854, 07, AMD Radeon R3 Graphics
|
||||||
|
9855, 02, AMD Radeon R6 Graphics
|
||||||
|
9855, 05, AMD Radeon R4 Graphics
|
||||||
|
9856, 00, AMD Radeon R2 Graphics
|
||||||
|
9856, 01, AMD Radeon R2E Graphics
|
||||||
|
9856, 02, AMD Radeon R2 Graphics
|
||||||
|
9856, 05, AMD Radeon R1E Graphics
|
||||||
|
9856, 06, AMD Radeon R2 Graphics
|
||||||
|
9856, 07, AMD Radeon R1E Graphics
|
||||||
|
9856, 08, AMD Radeon R1E Graphics
|
||||||
|
9856, 13, AMD Radeon R1E Graphics
|
||||||
|
9874, 81, AMD Radeon R6 Graphics
|
||||||
|
9874, 84, AMD Radeon R7 Graphics
|
||||||
|
9874, 85, AMD Radeon R6 Graphics
|
||||||
|
9874, 87, AMD Radeon R5 Graphics
|
||||||
|
9874, 88, AMD Radeon R7E Graphics
|
||||||
|
9874, 89, AMD Radeon R6E Graphics
|
||||||
|
9874, C4, AMD Radeon R7 Graphics
|
||||||
|
9874, C5, AMD Radeon R6 Graphics
|
||||||
|
9874, C6, AMD Radeon R6 Graphics
|
||||||
|
9874, C7, AMD Radeon R5 Graphics
|
||||||
|
9874, C8, AMD Radeon R7 Graphics
|
||||||
|
9874, C9, AMD Radeon R7 Graphics
|
||||||
|
9874, CA, AMD Radeon R5 Graphics
|
||||||
|
9874, CB, AMD Radeon R5 Graphics
|
||||||
|
9874, CC, AMD Radeon R7 Graphics
|
||||||
|
9874, CD, AMD Radeon R7 Graphics
|
||||||
|
9874, CE, AMD Radeon R5 Graphics
|
||||||
|
9874, E1, AMD Radeon R7 Graphics
|
||||||
|
9874, E2, AMD Radeon R7 Graphics
|
||||||
|
9874, E3, AMD Radeon R7 Graphics
|
||||||
|
9874, E4, AMD Radeon R7 Graphics
|
||||||
|
9874, E5, AMD Radeon R5 Graphics
|
||||||
|
9874, E6, AMD Radeon R5 Graphics
|
||||||
|
98E4, 80, AMD Radeon R5E Graphics
|
||||||
|
98E4, 81, AMD Radeon R4E Graphics
|
||||||
|
98E4, 83, AMD Radeon R2E Graphics
|
||||||
|
98E4, 84, AMD Radeon R2E Graphics
|
||||||
|
98E4, 86, AMD Radeon R1E Graphics
|
||||||
|
98E4, C0, AMD Radeon R4 Graphics
|
||||||
|
98E4, C1, AMD Radeon R5 Graphics
|
||||||
|
98E4, C2, AMD Radeon R4 Graphics
|
||||||
|
98E4, C4, AMD Radeon R5 Graphics
|
||||||
|
98E4, C6, AMD Radeon R5 Graphics
|
||||||
|
98E4, C8, AMD Radeon R4 Graphics
|
||||||
|
98E4, C9, AMD Radeon R4 Graphics
|
||||||
|
98E4, CA, AMD Radeon R5 Graphics
|
||||||
|
98E4, D0, AMD Radeon R2 Graphics
|
||||||
|
98E4, D1, AMD Radeon R2 Graphics
|
||||||
|
98E4, D2, AMD Radeon R2 Graphics
|
||||||
|
98E4, D4, AMD Radeon R2 Graphics
|
||||||
|
98E4, D9, AMD Radeon R5 Graphics
|
||||||
|
98E4, DA, AMD Radeon R5 Graphics
|
||||||
|
98E4, DB, AMD Radeon R3 Graphics
|
||||||
|
98E4, E1, AMD Radeon R3 Graphics
|
||||||
|
98E4, E2, AMD Radeon R3 Graphics
|
||||||
|
98E4, E9, AMD Radeon R4 Graphics
|
||||||
|
98E4, EA, AMD Radeon R4 Graphics
|
||||||
|
98E4, EB, AMD Radeon R3 Graphics
|
||||||
|
98E4, EB, AMD Radeon R4 Graphics
|
||||||
34
agent/test-data/nvtop.json
Normal file
34
agent/test-data/nvtop.json
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
[
|
||||||
|
{
|
||||||
|
"device_name": "NVIDIA GeForce RTX 3050 Ti Laptop GPU",
|
||||||
|
"gpu_clock": "1485MHz",
|
||||||
|
"mem_clock": "6001MHz",
|
||||||
|
"temp": "48C",
|
||||||
|
"fan_speed": null,
|
||||||
|
"power_draw": "13W",
|
||||||
|
"gpu_util": "5%",
|
||||||
|
"encode": "0%",
|
||||||
|
"decode": "0%",
|
||||||
|
"mem_util": "8%",
|
||||||
|
"mem_total": "4294967296",
|
||||||
|
"mem_used": "349372416",
|
||||||
|
"mem_free": "3945594880",
|
||||||
|
"processes" : []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"device_name": "AMD Radeon 680M",
|
||||||
|
"gpu_clock": "2200MHz",
|
||||||
|
"mem_clock": "2400MHz",
|
||||||
|
"temp": "48C",
|
||||||
|
"fan_speed": "CPU Fan",
|
||||||
|
"power_draw": "9W",
|
||||||
|
"gpu_util": "12%",
|
||||||
|
"encode": null,
|
||||||
|
"decode": "0%",
|
||||||
|
"mem_util": "7%",
|
||||||
|
"mem_total": "16929173504",
|
||||||
|
"mem_used": "1213784064",
|
||||||
|
"mem_free": "15715389440",
|
||||||
|
"processes" : []
|
||||||
|
}
|
||||||
|
]
|
||||||
51
agent/test-data/smart/apple_nvme.json
Normal file
51
agent/test-data/smart/apple_nvme.json
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
{
|
||||||
|
"json_format_version": [1, 0],
|
||||||
|
"smartctl": {
|
||||||
|
"version": [7, 4],
|
||||||
|
"argv": ["smartctl", "-aix", "-j", "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1"],
|
||||||
|
"exit_status": 4
|
||||||
|
},
|
||||||
|
"device": {
|
||||||
|
"name": "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1",
|
||||||
|
"info_name": "IOService:/AppleARMPE/arm-io@10F00000/AppleT810xIO/ans@77400000/AppleASCWrapV4/iop-ans-nub/RTBuddy(ANS2)/RTBuddyService/AppleANS3NVMeController/NS_01@1",
|
||||||
|
"type": "nvme",
|
||||||
|
"protocol": "NVMe"
|
||||||
|
},
|
||||||
|
"model_name": "APPLE SSD AP0256Q",
|
||||||
|
"serial_number": "0ba0147940253c15",
|
||||||
|
"firmware_version": "555",
|
||||||
|
"smart_support": {
|
||||||
|
"available": true,
|
||||||
|
"enabled": true
|
||||||
|
},
|
||||||
|
"smart_status": {
|
||||||
|
"passed": true,
|
||||||
|
"nvme": {
|
||||||
|
"value": 0
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nvme_smart_health_information_log": {
|
||||||
|
"critical_warning": 0,
|
||||||
|
"temperature": 42,
|
||||||
|
"available_spare": 100,
|
||||||
|
"available_spare_threshold": 99,
|
||||||
|
"percentage_used": 1,
|
||||||
|
"data_units_read": 270189386,
|
||||||
|
"data_units_written": 166753862,
|
||||||
|
"host_reads": 7543766995,
|
||||||
|
"host_writes": 3761621926,
|
||||||
|
"controller_busy_time": 0,
|
||||||
|
"power_cycles": 366,
|
||||||
|
"power_on_hours": 2850,
|
||||||
|
"unsafe_shutdowns": 195,
|
||||||
|
"media_errors": 0,
|
||||||
|
"num_err_log_entries": 0
|
||||||
|
},
|
||||||
|
"temperature": {
|
||||||
|
"current": 42
|
||||||
|
},
|
||||||
|
"power_cycle_count": 366,
|
||||||
|
"power_on_time": {
|
||||||
|
"hours": 2850
|
||||||
|
}
|
||||||
|
}
|
||||||
17
agent/test-data/system_info.json
Normal file
17
agent/test-data/system_info.json
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
{
|
||||||
|
"ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
|
||||||
|
"Containers": 14,
|
||||||
|
"ContainersRunning": 3,
|
||||||
|
"ContainersPaused": 1,
|
||||||
|
"ContainersStopped": 10,
|
||||||
|
"Images": 508,
|
||||||
|
"Driver": "overlay2",
|
||||||
|
"KernelVersion": "6.8.0-31-generic",
|
||||||
|
"OperatingSystem": "Ubuntu 24.04 LTS",
|
||||||
|
"OSVersion": "24.04",
|
||||||
|
"OSType": "linux",
|
||||||
|
"Architecture": "x86_64",
|
||||||
|
"NCPU": 4,
|
||||||
|
"MemTotal": 2095882240,
|
||||||
|
"ServerVersion": "27.0.1"
|
||||||
|
}
|
||||||
@@ -1,12 +1,10 @@
|
|||||||
package agent
|
package agent
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/henrygd/beszel/internal/ghupdate"
|
"github.com/henrygd/beszel/internal/ghupdate"
|
||||||
)
|
)
|
||||||
@@ -81,7 +79,7 @@ func detectRestarter() restarter {
|
|||||||
func Update(useMirror bool) error {
|
func Update(useMirror bool) error {
|
||||||
exePath, _ := os.Executable()
|
exePath, _ := os.Executable()
|
||||||
|
|
||||||
dataDir, err := getDataDir()
|
dataDir, err := GetDataDir()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dataDir = os.TempDir()
|
dataDir = os.TempDir()
|
||||||
}
|
}
|
||||||
@@ -108,12 +106,12 @@ func Update(useMirror bool) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// 6) Fix SELinux context if necessary
|
// Fix SELinux context if necessary
|
||||||
if err := handleSELinuxContext(exePath); err != nil {
|
if err := ghupdate.HandleSELinuxContext(exePath); err != nil {
|
||||||
ghupdate.ColorPrintf(ghupdate.ColorYellow, "Warning: SELinux context handling: %v", err)
|
ghupdate.ColorPrintf(ghupdate.ColorYellow, "Warning: SELinux context handling: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// 7) Restart service if running under a recognised init system
|
// Restart service if running under a recognised init system
|
||||||
if r := detectRestarter(); r != nil {
|
if r := detectRestarter(); r != nil {
|
||||||
if err := r.Restart(); err != nil {
|
if err := r.Restart(); err != nil {
|
||||||
ghupdate.ColorPrintf(ghupdate.ColorYellow, "Warning: failed to restart service: %v", err)
|
ghupdate.ColorPrintf(ghupdate.ColorYellow, "Warning: failed to restart service: %v", err)
|
||||||
@@ -127,42 +125,3 @@ func Update(useMirror bool) error {
|
|||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleSELinuxContext restores or applies the correct SELinux label to the binary.
|
|
||||||
func handleSELinuxContext(path string) error {
|
|
||||||
out, err := exec.Command("getenforce").Output()
|
|
||||||
if err != nil {
|
|
||||||
// SELinux not enabled or getenforce not available
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
state := strings.TrimSpace(string(out))
|
|
||||||
if state == "Disabled" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ghupdate.ColorPrint(ghupdate.ColorYellow, "SELinux is enabled; applying context…")
|
|
||||||
var errs []string
|
|
||||||
|
|
||||||
// Try persistent context via semanage+restorecon
|
|
||||||
if semanagePath, err := exec.LookPath("semanage"); err == nil {
|
|
||||||
if err := exec.Command(semanagePath, "fcontext", "-a", "-t", "bin_t", path).Run(); err != nil {
|
|
||||||
errs = append(errs, "semanage fcontext failed: "+err.Error())
|
|
||||||
} else if restoreconPath, err := exec.LookPath("restorecon"); err == nil {
|
|
||||||
if err := exec.Command(restoreconPath, "-v", path).Run(); err != nil {
|
|
||||||
errs = append(errs, "restorecon failed: "+err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback to temporary context via chcon
|
|
||||||
if chconPath, err := exec.LookPath("chcon"); err == nil {
|
|
||||||
if err := exec.Command(chconPath, "-t", "bin_t", path).Run(); err != nil {
|
|
||||||
errs = append(errs, "chcon failed: "+err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(errs) > 0 {
|
|
||||||
return fmt.Errorf("SELinux context errors: %s", strings.Join(errs, "; "))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,15 +0,0 @@
|
|||||||
package agent
|
|
||||||
|
|
||||||
import "math"
|
|
||||||
|
|
||||||
func bytesToMegabytes(b float64) float64 {
|
|
||||||
return twoDecimals(b / 1048576)
|
|
||||||
}
|
|
||||||
|
|
||||||
func bytesToGigabytes(b uint64) float64 {
|
|
||||||
return twoDecimals(float64(b) / 1073741824)
|
|
||||||
}
|
|
||||||
|
|
||||||
func twoDecimals(value float64) float64 {
|
|
||||||
return math.Round(value*100) / 100
|
|
||||||
}
|
|
||||||
112
agent/utils/utils.go
Normal file
112
agent/utils/utils.go
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetEnv retrieves an environment variable with a "BESZEL_AGENT_" prefix, or falls back to the unprefixed key.
|
||||||
|
func GetEnv(key string) (value string, exists bool) {
|
||||||
|
if value, exists = os.LookupEnv("BESZEL_AGENT_" + key); exists {
|
||||||
|
return value, exists
|
||||||
|
}
|
||||||
|
return os.LookupEnv(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesToMegabytes converts bytes to megabytes and rounds to two decimal places.
|
||||||
|
func BytesToMegabytes(b float64) float64 {
|
||||||
|
return TwoDecimals(b / 1048576)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BytesToGigabytes converts bytes to gigabytes and rounds to two decimal places.
|
||||||
|
func BytesToGigabytes(b uint64) float64 {
|
||||||
|
return TwoDecimals(float64(b) / 1073741824)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TwoDecimals rounds a float64 value to two decimal places.
|
||||||
|
func TwoDecimals(value float64) float64 {
|
||||||
|
return math.Round(value*100) / 100
|
||||||
|
}
|
||||||
|
|
||||||
|
// func RoundFloat(val float64, precision uint) float64 {
|
||||||
|
// ratio := math.Pow(10, float64(precision))
|
||||||
|
// return math.Round(val*ratio) / ratio
|
||||||
|
// }
|
||||||
|
|
||||||
|
// ReadStringFile returns trimmed file contents or empty string on error.
|
||||||
|
func ReadStringFile(path string) string {
|
||||||
|
content, _ := ReadStringFileOK(path)
|
||||||
|
return content
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadStringFileOK returns trimmed file contents and read success.
|
||||||
|
func ReadStringFileOK(path string) (string, bool) {
|
||||||
|
b, err := os.ReadFile(path)
|
||||||
|
if err != nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
return strings.TrimSpace(string(b)), true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadStringFileLimited reads a file into a string with a maximum size (in bytes) to avoid
|
||||||
|
// allocating large buffers and potential panics with pseudo-files when the size is misreported.
|
||||||
|
func ReadStringFileLimited(path string, maxSize int) (string, error) {
|
||||||
|
f, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
buf := make([]byte, maxSize)
|
||||||
|
n, err := f.Read(buf)
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return strings.TrimSpace(string(buf[:n])), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FileExists reports whether the given path exists.
|
||||||
|
func FileExists(path string) bool {
|
||||||
|
_, err := os.Stat(path)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadUintFile parses a decimal uint64 value from a file.
|
||||||
|
func ReadUintFile(path string) (uint64, bool) {
|
||||||
|
raw, ok := ReadStringFileOK(path)
|
||||||
|
if !ok {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
parsed, err := strconv.ParseUint(raw, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
return parsed, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookPathHomebrew is like exec.LookPath but also checks Homebrew paths.
|
||||||
|
func LookPathHomebrew(file string) (string, error) {
|
||||||
|
foundPath, lookPathErr := exec.LookPath(file)
|
||||||
|
if lookPathErr == nil {
|
||||||
|
return foundPath, nil
|
||||||
|
}
|
||||||
|
var homebrewPath string
|
||||||
|
switch runtime.GOOS {
|
||||||
|
case "darwin":
|
||||||
|
homebrewPath = filepath.Join("/opt", "homebrew", "bin", file)
|
||||||
|
case "linux":
|
||||||
|
homebrewPath = filepath.Join("/home", "linuxbrew", ".linuxbrew", "bin", file)
|
||||||
|
}
|
||||||
|
if homebrewPath != "" {
|
||||||
|
if _, err := os.Stat(homebrewPath); err == nil {
|
||||||
|
return homebrewPath, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", lookPathErr
|
||||||
|
}
|
||||||
158
agent/utils/utils_test.go
Normal file
158
agent/utils/utils_test.go
Normal file
@@ -0,0 +1,158 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestTwoDecimals(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input float64
|
||||||
|
expected float64
|
||||||
|
}{
|
||||||
|
{"round down", 1.234, 1.23},
|
||||||
|
{"round half up", 1.235, 1.24}, // math.Round rounds half up
|
||||||
|
{"no rounding needed", 1.23, 1.23},
|
||||||
|
{"negative number", -1.235, -1.24}, // math.Round rounds half up (more negative)
|
||||||
|
{"zero", 0.0, 0.0},
|
||||||
|
{"large number", 123.456, 123.46}, // rounds 5 up
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
result := TwoDecimals(tt.input)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBytesToMegabytes(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input float64
|
||||||
|
expected float64
|
||||||
|
}{
|
||||||
|
{"1 MB", 1048576, 1.0},
|
||||||
|
{"512 KB", 524288, 0.5},
|
||||||
|
{"zero", 0, 0},
|
||||||
|
{"large value", 1073741824, 1024}, // 1 GB = 1024 MB
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
result := BytesToMegabytes(tt.input)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBytesToGigabytes(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
input uint64
|
||||||
|
expected float64
|
||||||
|
}{
|
||||||
|
{"1 GB", 1073741824, 1.0},
|
||||||
|
{"512 MB", 536870912, 0.5},
|
||||||
|
{"0 GB", 0, 0},
|
||||||
|
{"2 GB", 2147483648, 2.0},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
result := BytesToGigabytes(tt.input)
|
||||||
|
assert.Equal(t, tt.expected, result)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFileFunctions(t *testing.T) {
|
||||||
|
tmpDir := t.TempDir()
|
||||||
|
testFilePath := filepath.Join(tmpDir, "test.txt")
|
||||||
|
testContent := "hello world"
|
||||||
|
|
||||||
|
// Test FileExists (false)
|
||||||
|
assert.False(t, FileExists(testFilePath))
|
||||||
|
|
||||||
|
// Test ReadStringFileOK (false)
|
||||||
|
content, ok := ReadStringFileOK(testFilePath)
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Empty(t, content)
|
||||||
|
|
||||||
|
// Test ReadStringFile (empty)
|
||||||
|
assert.Empty(t, ReadStringFile(testFilePath))
|
||||||
|
|
||||||
|
// Write file
|
||||||
|
err := os.WriteFile(testFilePath, []byte(testContent+"\n "), 0644)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
// Test FileExists (true)
|
||||||
|
assert.True(t, FileExists(testFilePath))
|
||||||
|
|
||||||
|
// Test ReadStringFileOK (true)
|
||||||
|
content, ok = ReadStringFileOK(testFilePath)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, testContent, content)
|
||||||
|
|
||||||
|
// Test ReadStringFile (content)
|
||||||
|
assert.Equal(t, testContent, ReadStringFile(testFilePath))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestReadUintFile(t *testing.T) {
|
||||||
|
tmpDir := t.TempDir()
|
||||||
|
|
||||||
|
t.Run("valid uint", func(t *testing.T) {
|
||||||
|
path := filepath.Join(tmpDir, "uint.txt")
|
||||||
|
os.WriteFile(path, []byte(" 12345\n"), 0644)
|
||||||
|
val, ok := ReadUintFile(path)
|
||||||
|
assert.True(t, ok)
|
||||||
|
assert.Equal(t, uint64(12345), val)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("invalid uint", func(t *testing.T) {
|
||||||
|
path := filepath.Join(tmpDir, "invalid.txt")
|
||||||
|
os.WriteFile(path, []byte("abc"), 0644)
|
||||||
|
val, ok := ReadUintFile(path)
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Equal(t, uint64(0), val)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("missing file", func(t *testing.T) {
|
||||||
|
path := filepath.Join(tmpDir, "missing.txt")
|
||||||
|
val, ok := ReadUintFile(path)
|
||||||
|
assert.False(t, ok)
|
||||||
|
assert.Equal(t, uint64(0), val)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetEnv(t *testing.T) {
|
||||||
|
key := "TEST_VAR"
|
||||||
|
prefixedKey := "BESZEL_AGENT_" + key
|
||||||
|
|
||||||
|
t.Run("prefixed variable exists", func(t *testing.T) {
|
||||||
|
t.Setenv(prefixedKey, "prefixed_val")
|
||||||
|
t.Setenv(key, "unprefixed_val")
|
||||||
|
|
||||||
|
val, exists := GetEnv(key)
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "prefixed_val", val)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("only unprefixed variable exists", func(t *testing.T) {
|
||||||
|
t.Setenv(key, "unprefixed_val")
|
||||||
|
|
||||||
|
val, exists := GetEnv(key)
|
||||||
|
assert.True(t, exists)
|
||||||
|
assert.Equal(t, "unprefixed_val", val)
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("neither variable exists", func(t *testing.T) {
|
||||||
|
val, exists := GetEnv(key)
|
||||||
|
assert.False(t, exists)
|
||||||
|
assert.Empty(t, val)
|
||||||
|
})
|
||||||
|
}
|
||||||
11
agent/zfs/zfs_freebsd.go
Normal file
11
agent/zfs/zfs_freebsd.go
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
//go:build freebsd
|
||||||
|
|
||||||
|
package zfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
func ARCSize() (uint64, error) {
|
||||||
|
return unix.SysctlUint64("kstat.zfs.misc.arcstats.size")
|
||||||
|
}
|
||||||
34
agent/zfs/zfs_linux.go
Normal file
34
agent/zfs/zfs_linux.go
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
//go:build linux
|
||||||
|
|
||||||
|
// Package zfs provides functions to read ZFS statistics.
|
||||||
|
package zfs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func ARCSize() (uint64, error) {
|
||||||
|
file, err := os.Open("/proc/spl/kstat/zfs/arcstats")
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(file)
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := scanner.Text()
|
||||||
|
if strings.HasPrefix(line, "size") {
|
||||||
|
fields := strings.Fields(line)
|
||||||
|
if len(fields) < 3 {
|
||||||
|
return 0, fmt.Errorf("unexpected arcstats size format: %s", line)
|
||||||
|
}
|
||||||
|
return strconv.ParseUint(fields[2], 10, 64)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, fmt.Errorf("size field not found in arcstats")
|
||||||
|
}
|
||||||
9
agent/zfs/zfs_unsupported.go
Normal file
9
agent/zfs/zfs_unsupported.go
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
//go:build !linux && !freebsd
|
||||||
|
|
||||||
|
package zfs
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
func ARCSize() (uint64, error) {
|
||||||
|
return 0, errors.ErrUnsupported
|
||||||
|
}
|
||||||
@@ -6,7 +6,7 @@ import "github.com/blang/semver"
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
// Version is the current version of the application.
|
// Version is the current version of the application.
|
||||||
Version = "0.16.1"
|
Version = "0.18.6"
|
||||||
// AppName is the name of the application.
|
// AppName is the name of the application.
|
||||||
AppName = "beszel"
|
AppName = "beszel"
|
||||||
)
|
)
|
||||||
|
|||||||
58
go.mod
58
go.mod
@@ -1,26 +1,28 @@
|
|||||||
module github.com/henrygd/beszel
|
module github.com/henrygd/beszel
|
||||||
|
|
||||||
go 1.25.3
|
go 1.26.1
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/blang/semver v3.5.1+incompatible
|
github.com/blang/semver v3.5.1+incompatible
|
||||||
github.com/coreos/go-systemd/v22 v22.6.0
|
github.com/coreos/go-systemd/v22 v22.7.0
|
||||||
github.com/distatus/battery v0.11.0
|
github.com/ebitengine/purego v0.10.0
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0
|
github.com/fxamacker/cbor/v2 v2.9.0
|
||||||
github.com/gliderlabs/ssh v0.3.8
|
github.com/gliderlabs/ssh v0.3.8
|
||||||
github.com/google/uuid v1.6.0
|
github.com/google/uuid v1.6.0
|
||||||
github.com/lxzan/gws v1.8.9
|
github.com/lxzan/gws v1.9.1
|
||||||
github.com/nicholas-fedor/shoutrrr v0.12.0
|
github.com/nicholas-fedor/shoutrrr v0.14.1
|
||||||
github.com/pocketbase/dbx v1.11.0
|
github.com/pocketbase/dbx v1.12.0
|
||||||
github.com/pocketbase/pocketbase v0.33.0
|
github.com/pocketbase/pocketbase v0.36.7
|
||||||
github.com/shirou/gopsutil/v4 v4.25.10
|
github.com/shirou/gopsutil/v4 v4.26.3
|
||||||
github.com/spf13/cast v1.10.0
|
github.com/spf13/cast v1.10.0
|
||||||
github.com/spf13/cobra v1.10.1
|
github.com/spf13/cobra v1.10.2
|
||||||
github.com/spf13/pflag v1.0.10
|
github.com/spf13/pflag v1.0.10
|
||||||
github.com/stretchr/testify v1.11.1
|
github.com/stretchr/testify v1.11.1
|
||||||
golang.org/x/crypto v0.44.0
|
golang.org/x/crypto v0.49.0
|
||||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90
|
||||||
|
golang.org/x/sys v0.42.0
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
|
howett.net/plist v1.0.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
@@ -28,21 +30,21 @@ require (
|
|||||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||||
github.com/disintegration/imaging v1.6.2 // indirect
|
github.com/disintegration/imaging v1.6.2 // indirect
|
||||||
github.com/dolthub/maphash v0.1.0 // indirect
|
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
|
github.com/domodwyer/mailyak/v3 v3.6.2 // indirect
|
||||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/ebitengine/purego v0.9.1 // indirect
|
github.com/eclipse/paho.golang v0.23.0 // indirect
|
||||||
github.com/fatih/color v1.18.0 // indirect
|
github.com/fatih/color v1.19.0 // indirect
|
||||||
github.com/gabriel-vasile/mimetype v1.4.11 // indirect
|
github.com/gabriel-vasile/mimetype v1.4.13 // indirect
|
||||||
github.com/ganigeorgiev/fexpr v0.5.0 // indirect
|
github.com/ganigeorgiev/fexpr v0.5.0 // indirect
|
||||||
github.com/go-ole/go-ole v1.3.0 // indirect
|
github.com/go-ole/go-ole v1.3.0 // indirect
|
||||||
github.com/go-ozzo/ozzo-validation/v4 v4.3.0 // indirect
|
github.com/go-ozzo/ozzo-validation/v4 v4.3.0 // indirect
|
||||||
github.com/go-sql-driver/mysql v1.9.1 // indirect
|
github.com/go-sql-driver/mysql v1.9.1 // indirect
|
||||||
github.com/godbus/dbus/v5 v5.2.0 // indirect
|
github.com/godbus/dbus/v5 v5.2.2 // indirect
|
||||||
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
|
github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
|
||||||
|
github.com/gorilla/websocket v1.5.3 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/klauspost/compress v1.18.1 // indirect
|
github.com/klauspost/compress v1.18.5 // indirect
|
||||||
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 // indirect
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749 // indirect
|
||||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
github.com/ncruces/go-strftime v1.0.0 // indirect
|
github.com/ncruces/go-strftime v1.0.0 // indirect
|
||||||
@@ -53,16 +55,14 @@ require (
|
|||||||
github.com/tklauser/numcpus v0.11.0 // indirect
|
github.com/tklauser/numcpus v0.11.0 // indirect
|
||||||
github.com/x448/float16 v0.8.4 // indirect
|
github.com/x448/float16 v0.8.4 // indirect
|
||||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||||
golang.org/x/image v0.33.0 // indirect
|
golang.org/x/image v0.38.0 // indirect
|
||||||
golang.org/x/net v0.47.0 // indirect
|
golang.org/x/net v0.52.0 // indirect
|
||||||
golang.org/x/oauth2 v0.33.0 // indirect
|
golang.org/x/oauth2 v0.36.0 // indirect
|
||||||
golang.org/x/sync v0.18.0 // indirect
|
golang.org/x/sync v0.20.0 // indirect
|
||||||
golang.org/x/sys v0.38.0 // indirect
|
golang.org/x/term v0.41.0 // indirect
|
||||||
golang.org/x/term v0.37.0 // indirect
|
golang.org/x/text v0.35.0 // indirect
|
||||||
golang.org/x/text v0.31.0 // indirect
|
modernc.org/libc v1.70.0 // indirect
|
||||||
howett.net/plist v1.0.1 // indirect
|
|
||||||
modernc.org/libc v1.66.10 // indirect
|
|
||||||
modernc.org/mathutil v1.7.1 // indirect
|
modernc.org/mathutil v1.7.1 // indirect
|
||||||
modernc.org/memory v1.11.0 // indirect
|
modernc.org/memory v1.11.0 // indirect
|
||||||
modernc.org/sqlite v1.40.0 // indirect
|
modernc.org/sqlite v1.46.2 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
150
go.sum
150
go.sum
@@ -9,32 +9,30 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d
|
|||||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
|
||||||
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
|
||||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||||
github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
|
github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA=
|
||||||
github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
|
github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
|
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
|
||||||
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
||||||
github.com/distatus/battery v0.11.0 h1:KJk89gz90Iq/wJtbjjM9yUzBXV+ASV/EG2WOOL7N8lc=
|
|
||||||
github.com/distatus/battery v0.11.0/go.mod h1:KmVkE8A8hpIX4T78QRdMktYpEp35QfOL8A8dwZBxq2k=
|
|
||||||
github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ=
|
|
||||||
github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4=
|
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8=
|
github.com/domodwyer/mailyak/v3 v3.6.2 h1:x3tGMsyFhTCaxp6ycgR0FE/bu5QiNp+hetUuCOBXMn8=
|
||||||
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
github.com/domodwyer/mailyak/v3 v3.6.2/go.mod h1:lOm/u9CyCVWHeaAmHIdF4RiKVxKUT/H5XX10lIKAL6c=
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
|
github.com/ebitengine/purego v0.10.0 h1:QIw4xfpWT6GWTzaW5XEKy3HXoqrJGx1ijYHzTF0/ISU=
|
||||||
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
github.com/ebitengine/purego v0.10.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
github.com/eclipse/paho.golang v0.23.0 h1:KHgl2wz6EJo7cMBmkuhpt7C576vP+kpPv7jjvSyR6Mk=
|
||||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
github.com/eclipse/paho.golang v0.23.0/go.mod h1:nQRhTkoZv8EAiNs5UU0/WdQIx2NrnWUpL9nsGJTQN04=
|
||||||
|
github.com/fatih/color v1.19.0 h1:Zp3PiM21/9Ld6FzSKyL5c/BULoe/ONr9KlbYVOfG8+w=
|
||||||
|
github.com/fatih/color v1.19.0/go.mod h1:zNk67I0ZUT1bEGsSGyCZYZNrHuTkJJB+r6Q9VuMi0LE=
|
||||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik=
|
github.com/gabriel-vasile/mimetype v1.4.13 h1:46nXokslUBsAJE/wMsp5gtO500a4F3Nkz9Ufpk2AcUM=
|
||||||
github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
|
github.com/gabriel-vasile/mimetype v1.4.13/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
|
||||||
github.com/ganigeorgiev/fexpr v0.5.0 h1:XA9JxtTE/Xm+g/JFI6RfZEHSiQlk+1glLvRK1Lpv/Tk=
|
github.com/ganigeorgiev/fexpr v0.5.0 h1:XA9JxtTE/Xm+g/JFI6RfZEHSiQlk+1glLvRK1Lpv/Tk=
|
||||||
github.com/ganigeorgiev/fexpr v0.5.0/go.mod h1:RyGiGqmeXhEQ6+mlGdnUleLHgtzzu/VGO2WtJkF5drE=
|
github.com/ganigeorgiev/fexpr v0.5.0/go.mod h1:RyGiGqmeXhEQ6+mlGdnUleLHgtzzu/VGO2WtJkF5drE=
|
||||||
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
|
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
|
||||||
@@ -51,17 +49,19 @@ github.com/go-sql-driver/mysql v1.9.1 h1:FrjNGn/BsJQjVRuSa8CBrM5BWA9BWoXXat3KrtS
|
|||||||
github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
github.com/go-sql-driver/mysql v1.9.1/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||||
github.com/godbus/dbus/v5 v5.2.0 h1:3WexO+U+yg9T70v9FdHr9kCxYlazaAXUhx2VMkbfax8=
|
github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ=
|
||||||
github.com/godbus/dbus/v5 v5.2.0/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
|
github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
|
||||||
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
|
||||||
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
|
github.com/google/pprof v0.0.0-20260302011040-a15ffb7f9dcc h1:VBbFa1lDYWEeV5FZKUiYKYT0VxCp9twUmmaq9eb8sXw=
|
||||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
|
github.com/google/pprof v0.0.0-20260302011040-a15ffb7f9dcc/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI=
|
||||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||||
|
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
@@ -69,35 +69,35 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf
|
|||||||
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
|
github.com/jarcoal/httpmock v1.4.1 h1:0Ju+VCFuARfFlhVXFc2HxlcQkfB+Xq12/EotHko+x2A=
|
||||||
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
|
github.com/jarcoal/httpmock v1.4.1/go.mod h1:ftW1xULwo+j0R0JJkJIIi7UKigZUXCLLanykgjwBXL0=
|
||||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||||
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
|
github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=
|
||||||
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
|
github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3 h1:PwQumkgq4/acIiZhtifTV5OUqqiP82UAl0h87xj/l9k=
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749 h1:Qj3hTcdWH8uMZDI41HNuTuJN525C7NBrbtH5kSO6fPk=
|
||||||
github.com/lufia/plan9stats v0.0.0-20251013123823-9fd1530e3ec3/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
github.com/lufia/plan9stats v0.0.0-20260324052639-156f7da3f749/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg=
|
||||||
github.com/lxzan/gws v1.8.9 h1:VU3SGUeWlQrEwfUSfokcZep8mdg/BrUF+y73YYshdBM=
|
github.com/lxzan/gws v1.9.1 h1:4lbIp4cW0hOLP3ejFHR/uWRy741AURx7oKkNNi2OT9o=
|
||||||
github.com/lxzan/gws v1.8.9/go.mod h1:d9yHaR1eDTBHagQC6KY7ycUOaz5KWeqQtP3xu7aMK8Y=
|
github.com/lxzan/gws v1.9.1/go.mod h1:gXHSCPmTGryWJ4icuqy8Yho32E4YIMHH0fkDRYJRbdc=
|
||||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.12.0 h1:8mwJdfU+uBEybSymwQJMGl/grG7lvVUKbVSNxn3XvUI=
|
github.com/nicholas-fedor/shoutrrr v0.14.1 h1:6sx4cJNfNuUtD6ygGlB0dqcCQ+abfsUh+b+6jgujf6A=
|
||||||
github.com/nicholas-fedor/shoutrrr v0.12.0/go.mod h1:WYiRalR4C43Qmd2zhPWGIFIxu633NB1hDM6Ap/DQcsA=
|
github.com/nicholas-fedor/shoutrrr v0.14.1/go.mod h1:U7IywBkLpBV7rgn8iLbQ9/LklJG1gm24bFv5cXXsDKs=
|
||||||
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
|
github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI=
|
||||||
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
|
github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE=
|
||||||
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
|
github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28=
|
||||||
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
|
github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/pocketbase/dbx v1.11.0 h1:LpZezioMfT3K4tLrqA55wWFw1EtH1pM4tzSVa7kgszU=
|
github.com/pocketbase/dbx v1.12.0 h1:/oLErM+A0b4xI0PWTGPqSDVjzix48PqI/bng2l0PzoA=
|
||||||
github.com/pocketbase/dbx v1.11.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
github.com/pocketbase/dbx v1.12.0/go.mod h1:xXRCIAKTHMgUCyCKZm55pUOdvFziJjQfXaWKhu2vhMs=
|
||||||
github.com/pocketbase/pocketbase v0.33.0 h1:v2EfiY3hxigzRJ/BwFuwVn0vUv7d2QQoD5zUFPaKR9o=
|
github.com/pocketbase/pocketbase v0.36.7 h1:MrViB7BptPYrf2Nt25pJEYBqUdFjuhRKu1p5GTrkvPA=
|
||||||
github.com/pocketbase/pocketbase v0.33.0/go.mod h1:9BEs+CRV7CrS+X5LfBh4bdJQsbzQAIklft3ovGe/c5A=
|
github.com/pocketbase/pocketbase v0.36.7/go.mod h1:qX4HuVjoKXtEg41fSJVM0JLfGWXbBmHxVv/FaE446r4=
|
||||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
|
||||||
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||||
@@ -105,16 +105,20 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qq
|
|||||||
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
|
||||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
github.com/shirou/gopsutil/v4 v4.25.10 h1:at8lk/5T1OgtuCp+AwrDofFRjnvosn0nkN2OLQ6g8tA=
|
github.com/shirou/gopsutil/v4 v4.26.2 h1:X8i6sicvUFih4BmYIGT1m2wwgw2VG9YgrDTi7cIRGUI=
|
||||||
github.com/shirou/gopsutil/v4 v4.25.10/go.mod h1:+kSwyC8DRUD9XXEHCAFjK+0nuArFJM0lva+StQAcskM=
|
github.com/shirou/gopsutil/v4 v4.26.2/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||||
|
github.com/shirou/gopsutil/v4 v4.26.3 h1:2ESdQt90yU3oXF/CdOlRCJxrP+Am1aBYubTMTfxJ1qc=
|
||||||
|
github.com/shirou/gopsutil/v4 v4.26.3/go.mod h1:LZ6ewCSkBqUpvSOf+LsTGnRinC6iaNUNMGBtDkJBaLQ=
|
||||||
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
|
||||||
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
|
||||||
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s=
|
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||||
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0=
|
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
|
github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4=
|
||||||
|
github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0=
|
||||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||||
@@ -126,44 +130,44 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
|||||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||||
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
|
||||||
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||||
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
|
golang.org/x/crypto v0.49.0 h1:+Ng2ULVvLHnJ/ZFEq4KdcDd/cfjrrjjNSXNzxg0Y4U4=
|
||||||
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
|
golang.org/x/crypto v0.49.0/go.mod h1:ErX4dUh2UM+CFYiXZRTcMpEcN8b/1gxEuv3nODoYtCA=
|
||||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 h1:jiDhWWeC7jfWqR9c/uplMOqJ0sbNlNWv0UkzE0vX1MA=
|
||||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
|
golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90/go.mod h1:xE1HEv6b+1SCZ5/uscMRjUBKtIxworgEcEi+/n9NQDQ=
|
||||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/image v0.33.0 h1:LXRZRnv1+zGd5XBUVRFmYEphyyKJjQjCRiOuAP3sZfQ=
|
golang.org/x/image v0.38.0 h1:5l+q+Y9JDC7mBOMjo4/aPhMDcxEptsX+Tt3GgRQRPuE=
|
||||||
golang.org/x/image v0.33.0/go.mod h1:DD3OsTYT9chzuzTQt+zMcOlBHgfoKQb1gry8p76Y1sc=
|
golang.org/x/image v0.38.0/go.mod h1:/3f6vaXC+6CEanU4KJxbcUZyEePbyKbaLoDOe4ehFYY=
|
||||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI=
|
||||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY=
|
||||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
golang.org/x/net v0.52.0 h1:He/TN1l0e4mmR3QqHMT2Xab3Aj3L9qjbhRm78/6jrW0=
|
||||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
golang.org/x/net v0.52.0/go.mod h1:R1MAz7uMZxVMualyPXb+VaqGSa3LIaUqk0eEt3w36Sw=
|
||||||
golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
|
golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs=
|
||||||
golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q=
|
||||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
|
||||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
golang.org/x/sys v0.42.0 h1:omrd2nAlyT5ESRdCLYdm3+fMfNFE/+Rf4bDIQImRJeo=
|
||||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.42.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
|
||||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
golang.org/x/term v0.41.0 h1:QCgPso/Q3RTJx2Th4bDLqML4W6iJiaXFq2/ftQF13YU=
|
||||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
golang.org/x/term v0.41.0/go.mod h1:3pfBgksrReYfZ5lvYM0kSO0LIkAl4Yl2bXOkKP7Ec2A=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
|
||||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s=
|
||||||
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0=
|
||||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
|
||||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
@@ -175,20 +179,18 @@ howett.net/plist v1.0.1 h1:37GdZ8tP09Q35o9ych3ehygcsL+HqKSwzctveSlarvM=
|
|||||||
howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
howett.net/plist v1.0.1/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
|
||||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||||
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||||
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
modernc.org/ccgo/v4 v4.32.0 h1:hjG66bI/kqIPX1b2yT6fr/jt+QedtP2fqojG2VrFuVw=
|
||||||
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
modernc.org/ccgo/v4 v4.32.0/go.mod h1:6F08EBCx5uQc38kMGl+0Nm0oWczoo1c7cgpzEry7Uc0=
|
||||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
modernc.org/fileutil v1.4.0 h1:j6ZzNTftVS054gi281TyLjHPp6CPHr2KCxEXjEbD6SM=
|
||||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
modernc.org/fileutil v1.4.0/go.mod h1:EqdKFDxiByqxLk8ozOxObDSfcVOv/54xDs/DUHdvCUU=
|
||||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||||
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
modernc.org/gc/v3 v3.1.2 h1:ZtDCnhonXSZexk/AYsegNRV1lJGgaNZJuKjJSWKyEqo=
|
||||||
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
modernc.org/gc/v3 v3.1.2/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||||
modernc.org/libc v1.66.10 h1:yZkb3YeLx4oynyR+iUsXsybsX4Ubx7MQlSYEw4yj59A=
|
modernc.org/libc v1.70.0 h1:U58NawXqXbgpZ/dcdS9kMshu08aiA6b7gusEusqzNkw=
|
||||||
modernc.org/libc v1.66.10/go.mod h1:8vGSEwvoUoltr4dlywvHqjtAqHBaw0j1jI7iFBTAr2I=
|
modernc.org/libc v1.70.0/go.mod h1:OVmxFGP1CI/Z4L3E0Q3Mf1PDE0BucwMkcXjjLntvHJo=
|
||||||
modernc.org/libc v1.67.0 h1:QzL4IrKab2OFmxA3/vRYl0tLXrIamwrhD6CKD4WBVjQ=
|
|
||||||
modernc.org/libc v1.67.0/go.mod h1:QvvnnJ5P7aitu0ReNpVIEyesuhmDLQ8kaEoyMjIFZJA=
|
|
||||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||||
@@ -197,8 +199,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
|||||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||||
modernc.org/sqlite v1.40.0 h1:bNWEDlYhNPAUdUdBzjAvn8icAs/2gaKlj4vM+tQ6KdQ=
|
modernc.org/sqlite v1.46.2 h1:gkXQ6R0+AjxFC/fTDaeIVLbNLNrRoOK7YYVz5BKhTcE=
|
||||||
modernc.org/sqlite v1.40.0/go.mod h1:9fjQZ0mB1LLP0GYrp39oOJXx/I2sxEnZtzCmEQIKvGE=
|
modernc.org/sqlite v1.46.2/go.mod h1:hWjRO6Tj/5Ik8ieqxQybiEOUXy0NJFNp2tpvVpKlvig=
|
||||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||||
|
|||||||
@@ -21,9 +21,9 @@ type hubLike interface {
|
|||||||
|
|
||||||
type AlertManager struct {
|
type AlertManager struct {
|
||||||
hub hubLike
|
hub hubLike
|
||||||
alertQueue chan alertTask
|
stopOnce sync.Once
|
||||||
stopChan chan struct{}
|
|
||||||
pendingAlerts sync.Map
|
pendingAlerts sync.Map
|
||||||
|
alertsCache *AlertsCache
|
||||||
}
|
}
|
||||||
|
|
||||||
type AlertMessageData struct {
|
type AlertMessageData struct {
|
||||||
@@ -40,15 +40,22 @@ type UserNotificationSettings struct {
|
|||||||
Webhooks []string `json:"webhooks"`
|
Webhooks []string `json:"webhooks"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type SystemAlertFsStats struct {
|
||||||
|
DiskTotal float64 `json:"d"`
|
||||||
|
DiskUsed float64 `json:"du"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Values pulled from system_stats.stats that are relevant to alerts.
|
||||||
type SystemAlertStats struct {
|
type SystemAlertStats struct {
|
||||||
Cpu float64 `json:"cpu"`
|
Cpu float64 `json:"cpu"`
|
||||||
Mem float64 `json:"mp"`
|
Mem float64 `json:"mp"`
|
||||||
Disk float64 `json:"dp"`
|
Disk float64 `json:"dp"`
|
||||||
NetSent float64 `json:"ns"`
|
Bandwidth [2]uint64 `json:"b"`
|
||||||
NetRecv float64 `json:"nr"`
|
|
||||||
GPU map[string]SystemAlertGPUData `json:"g"`
|
GPU map[string]SystemAlertGPUData `json:"g"`
|
||||||
Temperatures map[string]float32 `json:"t"`
|
Temperatures map[string]float32 `json:"t"`
|
||||||
LoadAvg [3]float64 `json:"la"`
|
LoadAvg [3]float64 `json:"la"`
|
||||||
|
Battery [2]uint8 `json:"bat"`
|
||||||
|
ExtraFs map[string]SystemAlertFsStats `json:"efs"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type SystemAlertGPUData struct {
|
type SystemAlertGPUData struct {
|
||||||
@@ -57,7 +64,7 @@ type SystemAlertGPUData struct {
|
|||||||
|
|
||||||
type SystemAlertData struct {
|
type SystemAlertData struct {
|
||||||
systemRecord *core.Record
|
systemRecord *core.Record
|
||||||
alertRecord *core.Record
|
alertData CachedAlertData
|
||||||
name string
|
name string
|
||||||
unit string
|
unit string
|
||||||
val float64
|
val float64
|
||||||
@@ -92,11 +99,9 @@ var supportsTitle = map[string]struct{}{
|
|||||||
func NewAlertManager(app hubLike) *AlertManager {
|
func NewAlertManager(app hubLike) *AlertManager {
|
||||||
am := &AlertManager{
|
am := &AlertManager{
|
||||||
hub: app,
|
hub: app,
|
||||||
alertQueue: make(chan alertTask, 5),
|
alertsCache: NewAlertsCache(app),
|
||||||
stopChan: make(chan struct{}),
|
|
||||||
}
|
}
|
||||||
am.bindEvents()
|
am.bindEvents()
|
||||||
go am.startWorker()
|
|
||||||
return am
|
return am
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -104,6 +109,20 @@ func NewAlertManager(app hubLike) *AlertManager {
|
|||||||
func (am *AlertManager) bindEvents() {
|
func (am *AlertManager) bindEvents() {
|
||||||
am.hub.OnRecordAfterUpdateSuccess("alerts").BindFunc(updateHistoryOnAlertUpdate)
|
am.hub.OnRecordAfterUpdateSuccess("alerts").BindFunc(updateHistoryOnAlertUpdate)
|
||||||
am.hub.OnRecordAfterDeleteSuccess("alerts").BindFunc(resolveHistoryOnAlertDelete)
|
am.hub.OnRecordAfterDeleteSuccess("alerts").BindFunc(resolveHistoryOnAlertDelete)
|
||||||
|
am.hub.OnRecordAfterUpdateSuccess("smart_devices").BindFunc(am.handleSmartDeviceAlert)
|
||||||
|
|
||||||
|
am.hub.OnServe().BindFunc(func(e *core.ServeEvent) error {
|
||||||
|
// Populate all alerts into cache on startup
|
||||||
|
_ = am.alertsCache.PopulateFromDB(true)
|
||||||
|
|
||||||
|
if err := resolveStatusAlerts(e.App); err != nil {
|
||||||
|
e.App.Logger().Error("Failed to resolve stale status alerts", "err", err)
|
||||||
|
}
|
||||||
|
if err := am.restorePendingStatusAlerts(); err != nil {
|
||||||
|
e.App.Logger().Error("Failed to restore pending status alerts", "err", err)
|
||||||
|
}
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsNotificationSilenced checks if a notification should be silenced based on configured quiet hours
|
// IsNotificationSilenced checks if a notification should be silenced based on configured quiet hours
|
||||||
@@ -153,25 +172,22 @@ func (am *AlertManager) IsNotificationSilenced(userID, systemID string) bool {
|
|||||||
// Handle case where window crosses midnight
|
// Handle case where window crosses midnight
|
||||||
if endMinutes < startMinutes {
|
if endMinutes < startMinutes {
|
||||||
// Window crosses midnight (e.g., 23:00 - 01:00)
|
// Window crosses midnight (e.g., 23:00 - 01:00)
|
||||||
if nowMinutes >= startMinutes || nowMinutes <= endMinutes {
|
if nowMinutes >= startMinutes || nowMinutes < endMinutes {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// Normal case (e.g., 09:00 - 17:00)
|
// Normal case (e.g., 09:00 - 17:00)
|
||||||
if nowMinutes >= startMinutes && nowMinutes <= endMinutes {
|
if nowMinutes >= startMinutes && nowMinutes < endMinutes {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// One-time window: check if current time is within the date range
|
// One-time window: check if current time is within the date range
|
||||||
if now.After(start) || now.Equal(start) {
|
if (now.After(start) || now.Equal(start)) && now.Before(end) {
|
||||||
// If end is zero/null, suppression continues indefinitely from start
|
|
||||||
if end.IsZero() || now.Before(end) || now.Equal(end) {
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -260,13 +276,14 @@ func (am *AlertManager) SendShoutrrrAlert(notificationUrl, title, message, link,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Add link
|
// Add link
|
||||||
if scheme == "ntfy" {
|
switch scheme {
|
||||||
|
case "ntfy":
|
||||||
queryParams.Add("Actions", fmt.Sprintf("view, %s, %s", linkText, link))
|
queryParams.Add("Actions", fmt.Sprintf("view, %s, %s", linkText, link))
|
||||||
} else if scheme == "lark" {
|
case "lark":
|
||||||
queryParams.Add("link", link)
|
queryParams.Add("link", link)
|
||||||
} else if scheme == "bark" {
|
case "bark":
|
||||||
queryParams.Add("url", link)
|
queryParams.Add("url", link)
|
||||||
} else {
|
default:
|
||||||
message += "\n\n" + link
|
message += "\n\n" + link
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -299,3 +316,13 @@ func (am *AlertManager) SendTestNotification(e *core.RequestEvent) error {
|
|||||||
}
|
}
|
||||||
return e.JSON(200, map[string]bool{"err": false})
|
return e.JSON(200, map[string]bool{"err": false})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// setAlertTriggered updates the "triggered" status of an alert record in the database
|
||||||
|
func (am *AlertManager) setAlertTriggered(alert CachedAlertData, triggered bool) error {
|
||||||
|
alertRecord, err := am.hub.FindRecordById("alerts", alert.Id)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
alertRecord.Set("triggered", triggered)
|
||||||
|
return am.hub.Save(alertRecord)
|
||||||
|
}
|
||||||
|
|||||||
386
internal/alerts/alerts_battery_test.go
Normal file
386
internal/alerts/alerts_battery_test.go
Normal file
@@ -0,0 +1,386 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/tools/types"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestBatteryAlertLogic tests that battery alerts trigger when value drops BELOW threshold
|
||||||
|
// (opposite of other alerts like CPU, Memory, etc. which trigger when exceeding threshold)
|
||||||
|
func TestBatteryAlertLogic(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// Create a system
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
systemRecord := systems[0]
|
||||||
|
|
||||||
|
// Create a battery alert with threshold of 20% and min of 1 minute (immediate trigger)
|
||||||
|
batteryAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Battery",
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 20, // threshold: 20%
|
||||||
|
"min": 1, // 1 minute (immediate trigger for testing)
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify alert is not triggered initially
|
||||||
|
assert.False(t, batteryAlert.GetBool("triggered"), "Alert should not be triggered initially")
|
||||||
|
|
||||||
|
// Create system stats with battery at 50% (above threshold - should NOT trigger)
|
||||||
|
statsHigh := system.Stats{
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
Battery: [2]uint8{50, 1}, // 50% battery, discharging
|
||||||
|
}
|
||||||
|
statsHighJSON, _ := json.Marshal(statsHigh)
|
||||||
|
_, err = beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"type": "1m",
|
||||||
|
"stats": string(statsHighJSON),
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create CombinedData for the alert handler
|
||||||
|
combinedDataHigh := &system.CombinedData{
|
||||||
|
Stats: statsHigh,
|
||||||
|
Info: system.Info{
|
||||||
|
AgentVersion: "0.12.0",
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Simulate system update time
|
||||||
|
systemRecord.Set("updated", time.Now().UTC())
|
||||||
|
err = hub.SaveNoValidate(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Handle system alerts with high battery
|
||||||
|
am := hub.GetAlertManager()
|
||||||
|
err = am.HandleSystemAlerts(systemRecord, combinedDataHigh)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify alert is still NOT triggered (battery 50% is above threshold 20%)
|
||||||
|
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, batteryAlert.GetBool("triggered"), "Alert should NOT be triggered when battery (50%%) is above threshold (20%%)")
|
||||||
|
|
||||||
|
// Now create stats with battery at 15% (below threshold - should trigger)
|
||||||
|
statsLow := system.Stats{
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
Battery: [2]uint8{15, 1}, // 15% battery, discharging
|
||||||
|
}
|
||||||
|
statsLowJSON, _ := json.Marshal(statsLow)
|
||||||
|
_, err = beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"type": "1m",
|
||||||
|
"stats": string(statsLowJSON),
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
combinedDataLow := &system.CombinedData{
|
||||||
|
Stats: statsLow,
|
||||||
|
Info: system.Info{
|
||||||
|
AgentVersion: "0.12.0",
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update system timestamp
|
||||||
|
systemRecord.Set("updated", time.Now().UTC())
|
||||||
|
err = hub.SaveNoValidate(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Handle system alerts with low battery
|
||||||
|
err = am.HandleSystemAlerts(systemRecord, combinedDataLow)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Wait for the alert to be processed
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
|
||||||
|
// Verify alert IS triggered (battery 15% is below threshold 20%)
|
||||||
|
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, batteryAlert.GetBool("triggered"), "Alert SHOULD be triggered when battery (15%%) drops below threshold (20%%)")
|
||||||
|
|
||||||
|
// Now test resolution: battery goes back above threshold
|
||||||
|
statsRecovered := system.Stats{
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
Battery: [2]uint8{25, 1}, // 25% battery, discharging
|
||||||
|
}
|
||||||
|
statsRecoveredJSON, _ := json.Marshal(statsRecovered)
|
||||||
|
_, err = beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"type": "1m",
|
||||||
|
"stats": string(statsRecoveredJSON),
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
combinedDataRecovered := &system.CombinedData{
|
||||||
|
Stats: statsRecovered,
|
||||||
|
Info: system.Info{
|
||||||
|
AgentVersion: "0.12.0",
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update system timestamp
|
||||||
|
systemRecord.Set("updated", time.Now().UTC())
|
||||||
|
err = hub.SaveNoValidate(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Handle system alerts with recovered battery
|
||||||
|
err = am.HandleSystemAlerts(systemRecord, combinedDataRecovered)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Wait for the alert to be processed
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
|
||||||
|
// Verify alert is now resolved (battery 25% is above threshold 20%)
|
||||||
|
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, batteryAlert.GetBool("triggered"), "Alert should be resolved when battery (25%%) goes above threshold (20%%)")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestBatteryAlertNoBattery verifies that systems without battery data don't trigger alerts
|
||||||
|
func TestBatteryAlertNoBattery(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// Create a system
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
systemRecord := systems[0]
|
||||||
|
|
||||||
|
// Create a battery alert
|
||||||
|
batteryAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Battery",
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 20,
|
||||||
|
"min": 1,
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Create stats with NO battery data (Battery[0] = 0)
|
||||||
|
statsNoBattery := system.Stats{
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
Battery: [2]uint8{0, 0}, // No battery
|
||||||
|
}
|
||||||
|
|
||||||
|
combinedData := &system.CombinedData{
|
||||||
|
Stats: statsNoBattery,
|
||||||
|
Info: system.Info{
|
||||||
|
AgentVersion: "0.12.0",
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Simulate system update time
|
||||||
|
systemRecord.Set("updated", time.Now().UTC())
|
||||||
|
err = hub.SaveNoValidate(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Handle system alerts
|
||||||
|
am := hub.GetAlertManager()
|
||||||
|
err = am.HandleSystemAlerts(systemRecord, combinedData)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Wait a moment for processing
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
|
||||||
|
// Verify alert is NOT triggered (no battery data should skip the alert)
|
||||||
|
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, batteryAlert.GetBool("triggered"), "Alert should NOT be triggered when system has no battery")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestBatteryAlertAveragedSamples tests battery alerts with min > 1 (averaging multiple samples)
|
||||||
|
// This ensures the inverted threshold logic works correctly across averaged time windows
|
||||||
|
func TestBatteryAlertAveragedSamples(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
// Create a system
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
systemRecord := systems[0]
|
||||||
|
|
||||||
|
// Create a battery alert with threshold of 25% and min of 2 minutes (requires averaging)
|
||||||
|
batteryAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Battery",
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 25, // threshold: 25%
|
||||||
|
"min": 2, // 2 minutes - requires averaging
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Verify alert is not triggered initially
|
||||||
|
assert.False(t, batteryAlert.GetBool("triggered"), "Alert should not be triggered initially")
|
||||||
|
|
||||||
|
am := hub.GetAlertManager()
|
||||||
|
now := time.Now().UTC()
|
||||||
|
|
||||||
|
// Create system_stats records with low battery (below threshold)
|
||||||
|
// The alert has min=2 minutes, so alert.time = now - 2 minutes
|
||||||
|
// For the alert to be valid, alert.time must be AFTER the oldest record's created time
|
||||||
|
// So we need records older than (now - 2 min), plus records within the window
|
||||||
|
// Records at: now-3min (oldest, before window), now-90s, now-60s, now-30s
|
||||||
|
recordTimes := []time.Duration{
|
||||||
|
-180 * time.Second, // 3 min ago - this makes the oldest record before alert.time
|
||||||
|
-90 * time.Second,
|
||||||
|
-60 * time.Second,
|
||||||
|
-30 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, offset := range recordTimes {
|
||||||
|
statsLow := system.Stats{
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
Battery: [2]uint8{15, 1}, // 15% battery (below 25% threshold)
|
||||||
|
}
|
||||||
|
statsLowJSON, _ := json.Marshal(statsLow)
|
||||||
|
|
||||||
|
recordTime := now.Add(offset)
|
||||||
|
record, err := beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"type": "1m",
|
||||||
|
"stats": string(statsLowJSON),
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
// Update created time to simulate historical records - use SetRaw with formatted string
|
||||||
|
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||||
|
err = hub.SaveNoValidate(record)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create combined data with low battery
|
||||||
|
combinedDataLow := &system.CombinedData{
|
||||||
|
Stats: system.Stats{
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
Battery: [2]uint8{15, 1},
|
||||||
|
},
|
||||||
|
Info: system.Info{
|
||||||
|
AgentVersion: "0.12.0",
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update system timestamp
|
||||||
|
systemRecord.Set("updated", now)
|
||||||
|
err = hub.SaveNoValidate(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Handle system alerts - should trigger because average battery is below threshold
|
||||||
|
err = am.HandleSystemAlerts(systemRecord, combinedDataLow)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Wait for alert processing
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
|
||||||
|
// Verify alert IS triggered (average battery 15% is below threshold 25%)
|
||||||
|
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, batteryAlert.GetBool("triggered"),
|
||||||
|
"Alert SHOULD be triggered when average battery (15%%) is below threshold (25%%) over min period")
|
||||||
|
|
||||||
|
// Now add records with high battery to test resolution
|
||||||
|
// Use a new time window 2 minutes later
|
||||||
|
newNow := now.Add(2 * time.Minute)
|
||||||
|
// Records need to span before the alert time window (newNow - 2 min)
|
||||||
|
recordTimesHigh := []time.Duration{
|
||||||
|
-180 * time.Second, // 3 min before newNow - makes oldest record before alert.time
|
||||||
|
-90 * time.Second,
|
||||||
|
-60 * time.Second,
|
||||||
|
-30 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, offset := range recordTimesHigh {
|
||||||
|
statsHigh := system.Stats{
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
Battery: [2]uint8{50, 1}, // 50% battery (above 25% threshold)
|
||||||
|
}
|
||||||
|
statsHighJSON, _ := json.Marshal(statsHigh)
|
||||||
|
|
||||||
|
recordTime := newNow.Add(offset)
|
||||||
|
record, err := beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"type": "1m",
|
||||||
|
"stats": string(statsHighJSON),
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||||
|
err = hub.SaveNoValidate(record)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create combined data with high battery
|
||||||
|
combinedDataHigh := &system.CombinedData{
|
||||||
|
Stats: system.Stats{
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
Battery: [2]uint8{50, 1},
|
||||||
|
},
|
||||||
|
Info: system.Info{
|
||||||
|
AgentVersion: "0.12.0",
|
||||||
|
Cpu: 10,
|
||||||
|
MemPct: 30,
|
||||||
|
DiskPct: 40,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update system timestamp to the new time window
|
||||||
|
systemRecord.Set("updated", newNow)
|
||||||
|
err = hub.SaveNoValidate(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Handle system alerts - should resolve because average battery is now above threshold
|
||||||
|
err = am.HandleSystemAlerts(systemRecord, combinedDataHigh)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
// Wait for alert processing
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
|
||||||
|
// Verify alert is resolved (average battery 50% is above threshold 25%)
|
||||||
|
batteryAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": batteryAlert.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, batteryAlert.GetBool("triggered"),
|
||||||
|
"Alert should be resolved when average battery (50%%) is above threshold (25%%) over min period")
|
||||||
|
}
|
||||||
177
internal/alerts/alerts_cache.go
Normal file
177
internal/alerts/alerts_cache.go
Normal file
@@ -0,0 +1,177 @@
|
|||||||
|
package alerts
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
"github.com/pocketbase/pocketbase/tools/store"
|
||||||
|
)
|
||||||
|
|
||||||
|
// CachedAlertData represents the relevant fields of an alert record for status checking and updates.
|
||||||
|
type CachedAlertData struct {
|
||||||
|
Id string
|
||||||
|
SystemID string
|
||||||
|
UserID string
|
||||||
|
Name string
|
||||||
|
Value float64
|
||||||
|
Triggered bool
|
||||||
|
Min uint8
|
||||||
|
// Created types.DateTime
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *CachedAlertData) PopulateFromRecord(record *core.Record) {
|
||||||
|
a.Id = record.Id
|
||||||
|
a.SystemID = record.GetString("system")
|
||||||
|
a.UserID = record.GetString("user")
|
||||||
|
a.Name = record.GetString("name")
|
||||||
|
a.Value = record.GetFloat("value")
|
||||||
|
a.Triggered = record.GetBool("triggered")
|
||||||
|
a.Min = uint8(record.GetInt("min"))
|
||||||
|
// a.Created = record.GetDateTime("created")
|
||||||
|
}
|
||||||
|
|
||||||
|
// AlertsCache provides an in-memory cache for system alerts.
|
||||||
|
type AlertsCache struct {
|
||||||
|
app core.App
|
||||||
|
store *store.Store[string, *store.Store[string, CachedAlertData]]
|
||||||
|
populated bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAlertsCache creates a new instance of SystemAlertsCache.
|
||||||
|
func NewAlertsCache(app core.App) *AlertsCache {
|
||||||
|
c := AlertsCache{
|
||||||
|
app: app,
|
||||||
|
store: store.New(map[string]*store.Store[string, CachedAlertData]{}),
|
||||||
|
}
|
||||||
|
return c.bindEvents()
|
||||||
|
}
|
||||||
|
|
||||||
|
// bindEvents sets up event listeners to keep the cache in sync with database changes.
|
||||||
|
func (c *AlertsCache) bindEvents() *AlertsCache {
|
||||||
|
c.app.OnRecordAfterUpdateSuccess("alerts").BindFunc(func(e *core.RecordEvent) error {
|
||||||
|
// c.Delete(e.Record.Original()) // this would be needed if the system field on an existing alert was changed, however we don't currently allow that in the UI so we'll leave it commented out
|
||||||
|
c.Update(e.Record)
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
c.app.OnRecordAfterDeleteSuccess("alerts").BindFunc(func(e *core.RecordEvent) error {
|
||||||
|
c.Delete(e.Record)
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
c.app.OnRecordAfterCreateSuccess("alerts").BindFunc(func(e *core.RecordEvent) error {
|
||||||
|
c.Update(e.Record)
|
||||||
|
return e.Next()
|
||||||
|
})
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// PopulateFromDB clears current entries and loads all alerts from the database into the cache.
|
||||||
|
func (c *AlertsCache) PopulateFromDB(force bool) error {
|
||||||
|
if !force && c.populated {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
records, err := c.app.FindAllRecords("alerts")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
c.store.RemoveAll()
|
||||||
|
for _, record := range records {
|
||||||
|
c.Update(record)
|
||||||
|
}
|
||||||
|
c.populated = true
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update adds or updates an alert record in the cache.
|
||||||
|
func (c *AlertsCache) Update(record *core.Record) {
|
||||||
|
systemID := record.GetString("system")
|
||||||
|
if systemID == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
systemStore, ok := c.store.GetOk(systemID)
|
||||||
|
if !ok {
|
||||||
|
systemStore = store.New(map[string]CachedAlertData{})
|
||||||
|
c.store.Set(systemID, systemStore)
|
||||||
|
}
|
||||||
|
var ca CachedAlertData
|
||||||
|
ca.PopulateFromRecord(record)
|
||||||
|
systemStore.Set(record.Id, ca)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes an alert record from the cache.
|
||||||
|
func (c *AlertsCache) Delete(record *core.Record) {
|
||||||
|
systemID := record.GetString("system")
|
||||||
|
if systemID == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if systemStore, ok := c.store.GetOk(systemID); ok {
|
||||||
|
systemStore.Remove(record.Id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetSystemAlerts returns all alerts for the specified system, lazy-loading if necessary.
|
||||||
|
func (c *AlertsCache) GetSystemAlerts(systemID string) []CachedAlertData {
|
||||||
|
systemStore, ok := c.store.GetOk(systemID)
|
||||||
|
if !ok {
|
||||||
|
// Populate cache for this system
|
||||||
|
records, err := c.app.FindAllRecords("alerts", dbx.NewExp("system={:system}", dbx.Params{"system": systemID}))
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
systemStore = store.New(map[string]CachedAlertData{})
|
||||||
|
for _, record := range records {
|
||||||
|
var ca CachedAlertData
|
||||||
|
ca.PopulateFromRecord(record)
|
||||||
|
systemStore.Set(record.Id, ca)
|
||||||
|
}
|
||||||
|
c.store.Set(systemID, systemStore)
|
||||||
|
}
|
||||||
|
all := systemStore.GetAll()
|
||||||
|
alerts := make([]CachedAlertData, 0, len(all))
|
||||||
|
for _, alert := range all {
|
||||||
|
alerts = append(alerts, alert)
|
||||||
|
}
|
||||||
|
return alerts
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAlert returns a specific alert by its ID from the cache.
|
||||||
|
func (c *AlertsCache) GetAlert(systemID, alertID string) (CachedAlertData, bool) {
|
||||||
|
if systemStore, ok := c.store.GetOk(systemID); ok {
|
||||||
|
return systemStore.GetOk(alertID)
|
||||||
|
}
|
||||||
|
return CachedAlertData{}, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAlertsByName returns all alerts of a specific type for the specified system.
|
||||||
|
func (c *AlertsCache) GetAlertsByName(systemID, alertName string) []CachedAlertData {
|
||||||
|
allAlerts := c.GetSystemAlerts(systemID)
|
||||||
|
var alerts []CachedAlertData
|
||||||
|
for _, record := range allAlerts {
|
||||||
|
if record.Name == alertName {
|
||||||
|
alerts = append(alerts, record)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return alerts
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAlertsExcludingNames returns all alerts for the specified system excluding the given types.
|
||||||
|
func (c *AlertsCache) GetAlertsExcludingNames(systemID string, excludedNames ...string) []CachedAlertData {
|
||||||
|
excludeMap := make(map[string]struct{})
|
||||||
|
for _, name := range excludedNames {
|
||||||
|
excludeMap[name] = struct{}{}
|
||||||
|
}
|
||||||
|
allAlerts := c.GetSystemAlerts(systemID)
|
||||||
|
var alerts []CachedAlertData
|
||||||
|
for _, record := range allAlerts {
|
||||||
|
if _, excluded := excludeMap[record.Name]; !excluded {
|
||||||
|
alerts = append(alerts, record)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return alerts
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refresh returns the latest cached copy for an alert snapshot if it still exists.
|
||||||
|
func (c *AlertsCache) Refresh(alert CachedAlertData) (CachedAlertData, bool) {
|
||||||
|
if alert.Id == "" {
|
||||||
|
return CachedAlertData{}, false
|
||||||
|
}
|
||||||
|
return c.GetAlert(alert.SystemID, alert.Id)
|
||||||
|
}
|
||||||
215
internal/alerts/alerts_cache_test.go
Normal file
215
internal/alerts/alerts_cache_test.go
Normal file
@@ -0,0 +1,215 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/alerts"
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestSystemAlertsCachePopulateAndFilter verifies that PopulateFromDB loads every
// alert record and that the name-based include (GetAlertsByName) and exclude
// (GetAlertsExcludingNames) filters return only the matching records per system.
func TestSystemAlertsCachePopulateAndFilter(t *testing.T) {
	hub, user := beszelTests.GetHubWithUser(t)
	defer hub.Cleanup()

	// Two systems so we can verify per-system isolation of the cache.
	systems, err := beszelTests.CreateSystems(hub, 2, user.Id, "up")
	require.NoError(t, err)
	system1 := systems[0]
	system2 := systems[1]

	statusAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
		"name":   "Status",
		"system": system1.Id,
		"user":   user.Id,
		"min":    1,
	})
	require.NoError(t, err)

	cpuAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
		"name":   "CPU",
		"system": system1.Id,
		"user":   user.Id,
		"value":  80,
		"min":    1,
	})
	require.NoError(t, err)

	memoryAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
		"name":   "Memory",
		"system": system2.Id,
		"user":   user.Id,
		"value":  90,
		"min":    1,
	})
	require.NoError(t, err)

	cache := alerts.NewAlertsCache(hub)
	cache.PopulateFromDB(false)

	// system1 has one "Status" alert; filter by name should find exactly it.
	statusAlerts := cache.GetAlertsByName(system1.Id, "Status")
	require.Len(t, statusAlerts, 1)
	assert.Equal(t, statusAlert.Id, statusAlerts[0].Id)

	// Excluding "Status" on system1 should leave only the CPU alert.
	nonStatusAlerts := cache.GetAlertsExcludingNames(system1.Id, "Status")
	require.Len(t, nonStatusAlerts, 1)
	assert.Equal(t, cpuAlert.Id, nonStatusAlerts[0].Id)

	// system2's alerts must not leak into system1's results and vice versa.
	system2Alerts := cache.GetSystemAlerts(system2.Id)
	require.Len(t, system2Alerts, 1)
	assert.Equal(t, memoryAlert.Id, system2Alerts[0].Id)
}
|
||||||
|
|
||||||
|
// TestSystemAlertsCacheLazyLoadUpdateAndDelete verifies that the cache lazily
// loads a system's alerts on first lookup, reflects explicit Update calls, and
// drops records after Delete.
func TestSystemAlertsCacheLazyLoadUpdateAndDelete(t *testing.T) {
	hub, user := beszelTests.GetHubWithUser(t)
	defer hub.Cleanup()

	systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
	require.NoError(t, err)
	systemRecord := systems[0]

	statusAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
		"name":   "Status",
		"system": systemRecord.Id,
		"user":   user.Id,
		"min":    1,
	})
	require.NoError(t, err)

	// Note: no PopulateFromDB here — the first Get must trigger the lazy load.
	cache := alerts.NewAlertsCache(hub)
	require.Len(t, cache.GetSystemAlerts(systemRecord.Id), 1, "first lookup should lazy-load alerts for the system")

	cpuAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
		"name":   "CPU",
		"system": systemRecord.Id,
		"user":   user.Id,
		"value":  80,
		"min":    1,
	})
	require.NoError(t, err)

	// Push the new record into the already-loaded cache entry.
	cache.Update(cpuAlert)

	nonStatusAlerts := cache.GetAlertsExcludingNames(systemRecord.Id, "Status")
	require.Len(t, nonStatusAlerts, 1)
	assert.Equal(t, cpuAlert.Id, nonStatusAlerts[0].Id)

	cache.Delete(statusAlert)
	assert.Empty(t, cache.GetAlertsByName(systemRecord.Id, "Status"), "deleted alerts should be removed from the in-memory cache")
}
|
||||||
|
|
||||||
|
// TestSystemAlertsCacheRefreshReturnsLatestCopy verifies that Refresh resolves a
// stale snapshot to the current cached value, and reports false once the
// underlying alert has been deleted.
func TestSystemAlertsCacheRefreshReturnsLatestCopy(t *testing.T) {
	hub, user := beszelTests.GetHubWithUser(t)
	defer hub.Cleanup()

	systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
	require.NoError(t, err)
	system := systems[0]

	alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
		"name":      "Status",
		"system":    system.Id,
		"user":      user.Id,
		"min":       1,
		"triggered": false,
	})
	require.NoError(t, err)

	cache := alerts.NewAlertsCache(hub)
	// Take a value snapshot before mutating the record.
	snapshot := cache.GetSystemAlerts(system.Id)[0]
	assert.False(t, snapshot.Triggered)

	// Mutate through PocketBase so the hub's event hooks update the cache.
	alert.Set("triggered", true)
	require.NoError(t, hub.Save(alert))

	refreshed, ok := cache.Refresh(snapshot)
	require.True(t, ok)
	assert.Equal(t, snapshot.Id, refreshed.Id)
	assert.True(t, refreshed.Triggered, "refresh should return the updated cached value rather than the stale snapshot")

	require.NoError(t, hub.Delete(alert))
	_, ok = cache.Refresh(snapshot)
	assert.False(t, ok, "refresh should report false when the cached alert no longer exists")
}
|
||||||
|
|
||||||
|
// TestAlertManagerCacheLifecycle verifies that the AlertManager's alerts cache
// tracks PocketBase record lifecycle events end-to-end: create, update, delete.
func TestAlertManagerCacheLifecycle(t *testing.T) {
	hub, user := beszelTests.GetHubWithUser(t)
	defer hub.Cleanup()

	systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
	require.NoError(t, err)
	system := systems[0]

	// Create an alert
	alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
		"name":   "CPU",
		"system": system.Id,
		"user":   user.Id,
		"value":  80,
		"min":    1,
	})
	require.NoError(t, err)

	am := hub.AlertManager
	cache := am.GetSystemAlertsCache()

	// Verify it's in cache (it should be since CreateRecord triggers the event)
	assert.Len(t, cache.GetSystemAlerts(system.Id), 1)
	assert.Equal(t, alert.Id, cache.GetSystemAlerts(system.Id)[0].Id)
	assert.EqualValues(t, 80, cache.GetSystemAlerts(system.Id)[0].Value)

	// Update the alert through PocketBase to trigger events
	alert.Set("value", 85)
	require.NoError(t, hub.Save(alert))

	// Check if updated value is reflected (or at least that it's still there)
	cachedAlerts := cache.GetSystemAlerts(system.Id)
	assert.Len(t, cachedAlerts, 1)
	assert.EqualValues(t, 85, cachedAlerts[0].Value)

	// Delete the alert through PocketBase to trigger events
	require.NoError(t, hub.Delete(alert))

	// Verify it's removed from cache
	assert.Empty(t, cache.GetSystemAlerts(system.Id), "alert should be removed from cache after PocketBase delete")
}
|
||||||
|
|
||||||
|
// func TestAlertManagerCacheMovesAlertToNewSystemOnUpdate(t *testing.T) {
|
||||||
|
// hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
// defer hub.Cleanup()
|
||||||
|
|
||||||
|
// systems, err := beszelTests.CreateSystems(hub, 2, user.Id, "up")
|
||||||
|
// require.NoError(t, err)
|
||||||
|
// system1 := systems[0]
|
||||||
|
// system2 := systems[1]
|
||||||
|
|
||||||
|
// alert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
// "name": "CPU",
|
||||||
|
// "system": system1.Id,
|
||||||
|
// "user": user.Id,
|
||||||
|
// "value": 80,
|
||||||
|
// "min": 1,
|
||||||
|
// })
|
||||||
|
// require.NoError(t, err)
|
||||||
|
|
||||||
|
// am := hub.AlertManager
|
||||||
|
// cache := am.GetSystemAlertsCache()
|
||||||
|
|
||||||
|
// // Initially in system1 cache
|
||||||
|
// assert.Len(t, cache.Get(system1.Id), 1)
|
||||||
|
// assert.Empty(t, cache.Get(system2.Id))
|
||||||
|
|
||||||
|
// // Move alert to system2
|
||||||
|
// alert.Set("system", system2.Id)
|
||||||
|
// require.NoError(t, hub.Save(alert))
|
||||||
|
|
||||||
|
// // DEBUG: print if it is found
|
||||||
|
// // fmt.Printf("system1 alerts after update: %v\n", cache.Get(system1.Id))
|
||||||
|
|
||||||
|
// // Should be removed from system1 and present in system2
|
||||||
|
// assert.Empty(t, cache.GetType(system1.Id, "CPU"), "updated alerts should be evicted from the previous system cache")
|
||||||
|
// require.Len(t, cache.Get(system2.Id), 1)
|
||||||
|
// assert.Equal(t, alert.Id, cache.Get(system2.Id)[0].Id)
|
||||||
|
// }
|
||||||
155
internal/alerts/alerts_disk_test.go
Normal file
155
internal/alerts/alerts_disk_test.go
Normal file
@@ -0,0 +1,155 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/henrygd/beszel/internal/entities/system"
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
|
||||||
|
"github.com/pocketbase/dbx"
|
||||||
|
"github.com/pocketbase/pocketbase/tools/types"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestDiskAlertExtraFsMultiMinute tests that multi-minute disk alerts correctly use
|
||||||
|
// historical per-minute values for extra (non-root) filesystems, not the current live snapshot.
|
||||||
|
func TestDiskAlertExtraFsMultiMinute(t *testing.T) {
|
||||||
|
hub, user := beszelTests.GetHubWithUser(t)
|
||||||
|
defer hub.Cleanup()
|
||||||
|
|
||||||
|
systems, err := beszelTests.CreateSystems(hub, 1, user.Id, "up")
|
||||||
|
require.NoError(t, err)
|
||||||
|
systemRecord := systems[0]
|
||||||
|
|
||||||
|
// Disk alert: threshold 80%, min=2 (requires historical averaging)
|
||||||
|
diskAlert, err := beszelTests.CreateRecord(hub, "alerts", map[string]any{
|
||||||
|
"name": "Disk",
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"user": user.Id,
|
||||||
|
"value": 80, // threshold: 80%
|
||||||
|
"min": 2, // 2 minutes - requires historical averaging
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, diskAlert.GetBool("triggered"), "Alert should not be triggered initially")
|
||||||
|
|
||||||
|
am := hub.GetAlertManager()
|
||||||
|
now := time.Now().UTC()
|
||||||
|
|
||||||
|
extraFsHigh := map[string]*system.FsStats{
|
||||||
|
"/mnt/data": {DiskTotal: 1000, DiskUsed: 920}, // 92% - above threshold
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert 4 historical records spread over 3 minutes (same pattern as battery tests).
|
||||||
|
// The oldest record must predate (now - 2min) so the alert time window is valid.
|
||||||
|
recordTimes := []time.Duration{
|
||||||
|
-180 * time.Second, // 3 min ago - anchors oldest record before alert.time
|
||||||
|
-90 * time.Second,
|
||||||
|
-60 * time.Second,
|
||||||
|
-30 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, offset := range recordTimes {
|
||||||
|
stats := system.Stats{
|
||||||
|
DiskPct: 30, // root disk at 30% - below threshold
|
||||||
|
ExtraFs: extraFsHigh,
|
||||||
|
}
|
||||||
|
statsJSON, _ := json.Marshal(stats)
|
||||||
|
|
||||||
|
recordTime := now.Add(offset)
|
||||||
|
record, err := beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"type": "1m",
|
||||||
|
"stats": string(statsJSON),
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||||
|
err = hub.SaveNoValidate(record)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
combinedDataHigh := &system.CombinedData{
|
||||||
|
Stats: system.Stats{
|
||||||
|
DiskPct: 30,
|
||||||
|
ExtraFs: extraFsHigh,
|
||||||
|
},
|
||||||
|
Info: system.Info{
|
||||||
|
DiskPct: 30,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
systemRecord.Set("updated", now)
|
||||||
|
err = hub.SaveNoValidate(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = am.HandleSystemAlerts(systemRecord, combinedDataHigh)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
|
||||||
|
diskAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": diskAlert.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.True(t, diskAlert.GetBool("triggered"),
|
||||||
|
"Alert SHOULD be triggered when extra disk average (92%%) exceeds threshold (80%%)")
|
||||||
|
|
||||||
|
// --- Resolution: extra disk drops to 50%, alert should resolve ---
|
||||||
|
|
||||||
|
extraFsLow := map[string]*system.FsStats{
|
||||||
|
"/mnt/data": {DiskTotal: 1000, DiskUsed: 500}, // 50% - below threshold
|
||||||
|
}
|
||||||
|
|
||||||
|
newNow := now.Add(2 * time.Minute)
|
||||||
|
recordTimesLow := []time.Duration{
|
||||||
|
-180 * time.Second,
|
||||||
|
-90 * time.Second,
|
||||||
|
-60 * time.Second,
|
||||||
|
-30 * time.Second,
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, offset := range recordTimesLow {
|
||||||
|
stats := system.Stats{
|
||||||
|
DiskPct: 30,
|
||||||
|
ExtraFs: extraFsLow,
|
||||||
|
}
|
||||||
|
statsJSON, _ := json.Marshal(stats)
|
||||||
|
|
||||||
|
recordTime := newNow.Add(offset)
|
||||||
|
record, err := beszelTests.CreateRecord(hub, "system_stats", map[string]any{
|
||||||
|
"system": systemRecord.Id,
|
||||||
|
"type": "1m",
|
||||||
|
"stats": string(statsJSON),
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
record.SetRaw("created", recordTime.Format(types.DefaultDateLayout))
|
||||||
|
err = hub.SaveNoValidate(record)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
combinedDataLow := &system.CombinedData{
|
||||||
|
Stats: system.Stats{
|
||||||
|
DiskPct: 30,
|
||||||
|
ExtraFs: extraFsLow,
|
||||||
|
},
|
||||||
|
Info: system.Info{
|
||||||
|
DiskPct: 30,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
systemRecord.Set("updated", newNow)
|
||||||
|
err = hub.SaveNoValidate(systemRecord)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
err = am.HandleSystemAlerts(systemRecord, combinedDataLow)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
time.Sleep(20 * time.Millisecond)
|
||||||
|
|
||||||
|
diskAlert, err = hub.FindFirstRecordByFilter("alerts", "id={:id}", dbx.Params{"id": diskAlert.Id})
|
||||||
|
require.NoError(t, err)
|
||||||
|
assert.False(t, diskAlert.GetBool("triggered"),
|
||||||
|
"Alert should be resolved when extra disk average (50%%) drops below threshold (80%%)")
|
||||||
|
}
|
||||||
@@ -1,5 +1,4 @@
|
|||||||
//go:build testing
|
//go:build testing
|
||||||
// +build testing
|
|
||||||
|
|
||||||
package alerts_test
|
package alerts_test
|
||||||
|
|
||||||
@@ -50,7 +49,7 @@ func TestAlertSilencedOneTime(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Test that alert is silenced
|
// Test that alert is silenced
|
||||||
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
||||||
@@ -107,7 +106,7 @@ func TestAlertSilencedDaily(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Get current hour and create a window that includes current time
|
// Get current hour and create a window that includes current time
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -171,7 +170,7 @@ func TestAlertSilencedDailyMidnightCrossing(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a window that crosses midnight: 22:00 - 02:00
|
// Create a window that crosses midnight: 22:00 - 02:00
|
||||||
startTime := time.Date(2000, 1, 1, 22, 0, 0, 0, time.UTC)
|
startTime := time.Date(2000, 1, 1, 22, 0, 0, 0, time.UTC)
|
||||||
@@ -212,7 +211,7 @@ func TestAlertSilencedGlobal(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a global quiet hours window (no system specified)
|
// Create a global quiet hours window (no system specified)
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -251,7 +250,7 @@ func TestAlertSilencedSystemSpecific(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a system-specific quiet hours window for system1 only
|
// Create a system-specific quiet hours window for system1 only
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -297,7 +296,7 @@ func TestAlertSilencedMultiUser(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Create a quiet hours window for user1 only
|
// Create a quiet hours window for user1 only
|
||||||
now := time.Now().UTC()
|
now := time.Now().UTC()
|
||||||
@@ -418,7 +417,7 @@ func TestAlertSilencedNoWindows(t *testing.T) {
|
|||||||
|
|
||||||
// Get alert manager
|
// Get alert manager
|
||||||
am := alerts.NewAlertManager(hub)
|
am := alerts.NewAlertManager(hub)
|
||||||
defer am.StopWorker()
|
defer am.Stop()
|
||||||
|
|
||||||
// Without any quiet hours windows, alert should NOT be silenced
|
// Without any quiet hours windows, alert should NOT be silenced
|
||||||
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
silenced := am.IsNotificationSilenced(user.Id, system.Id)
|
||||||
|
|||||||
107
internal/alerts/alerts_smart.go
Normal file
107
internal/alerts/alerts_smart.go
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
package alerts
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/pocketbase/pocketbase/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
// handleSmartDeviceAlert sends alerts when a SMART device state worsens into WARNING/FAILED.
// This is automatic and does not require user opt-in.
//
// It is invoked as a PocketBase record-event hook; every exit path calls
// e.Next() so the rest of the hook chain always runs, even on lookup/send
// failures (errors are logged, not propagated).
func (am *AlertManager) handleSmartDeviceAlert(e *core.RecordEvent) error {
	// Compare the persisted state against the incoming one.
	oldState := e.Record.Original().GetString("state")
	newState := e.Record.GetString("state")

	// Only alert on worsening transitions (see shouldSendSmartDeviceAlert).
	if !shouldSendSmartDeviceAlert(oldState, newState) {
		return e.Next()
	}

	systemID := e.Record.GetString("system")
	if systemID == "" {
		return e.Next()
	}

	// Fetch the system record to get the name and users
	systemRecord, err := e.App.FindRecordById("systems", systemID)
	if err != nil {
		e.App.Logger().Error("Failed to find system for SMART alert", "err", err, "systemID", systemID)
		return e.Next()
	}

	systemName := systemRecord.GetString("name")
	deviceName := e.Record.GetString("name")
	model := e.Record.GetString("model")
	statusLabel := smartStateLabel(newState)

	// Build alert message
	title := fmt.Sprintf("SMART %s on %s: %s %s", statusLabel, systemName, deviceName, smartStateEmoji(newState))
	var message string
	// Omit the parenthesized model when it's empty to avoid "()" in the text.
	if model != "" {
		message = fmt.Sprintf("Disk %s (%s) SMART status changed to %s", deviceName, model, newState)
	} else {
		message = fmt.Sprintf("Disk %s SMART status changed to %s", deviceName, newState)
	}

	// Get users associated with the system
	userIDs := systemRecord.GetStringSlice("users")
	if len(userIDs) == 0 {
		return e.Next()
	}

	// Send alert to each user
	for _, userID := range userIDs {
		if err := am.SendAlert(AlertMessageData{
			UserID:   userID,
			SystemID: systemID,
			Title:    title,
			Message:  message,
			Link:     am.hub.MakeLink("system", systemID),
			LinkText: "View " + systemName,
		}); err != nil {
			// A failed send for one user must not block the others.
			e.App.Logger().Error("Failed to send SMART alert", "err", err, "userID", userID)
		}
	}

	return e.Next()
}
|
||||||
|
|
||||||
|
func shouldSendSmartDeviceAlert(oldState, newState string) bool {
|
||||||
|
oldSeverity := smartStateSeverity(oldState)
|
||||||
|
newSeverity := smartStateSeverity(newState)
|
||||||
|
|
||||||
|
// Ignore unknown states and recoveries; only alert on worsening transitions
|
||||||
|
// from known-good/degraded states into WARNING/FAILED.
|
||||||
|
return oldSeverity >= 1 && newSeverity > oldSeverity
|
||||||
|
}
|
||||||
|
|
||||||
|
// smartStateSeverity ranks a SMART state for comparison: FAILED (3) is worse
// than WARNING (2), which is worse than PASSED (1). Anything unrecognized
// maps to 0 (unknown).
func smartStateSeverity(state string) int {
	switch state {
	case "FAILED":
		return 3
	case "WARNING":
		return 2
	case "PASSED":
		return 1
	}
	return 0
}
|
||||||
|
|
||||||
|
// smartStateEmoji picks the status emoji for an alert title: an orange circle
// for WARNING, a red circle for everything else (FAILED and any other state
// that reached this point).
func smartStateEmoji(state string) string {
	if state == "WARNING" {
		return "\U0001F7E0" // orange circle
	}
	return "\U0001F534" // red circle
}
|
||||||
|
|
||||||
|
// smartStateLabel converts a SMART state into the human-readable word used in
// alert titles: "FAILED" becomes "failure"; any other state is simply
// lowercased (e.g. "WARNING" -> "warning").
func smartStateLabel(state string) string {
	if state == "FAILED" {
		return "failure"
	}
	return strings.ToLower(state)
}
|
||||||
264
internal/alerts/alerts_smart_test.go
Normal file
264
internal/alerts/alerts_smart_test.go
Normal file
@@ -0,0 +1,264 @@
|
|||||||
|
//go:build testing
|
||||||
|
|
||||||
|
package alerts_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
beszelTests "github.com/henrygd/beszel/internal/tests"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestSmartDeviceAlert verifies that a PASSED -> FAILED transition on a SMART
// device sends exactly one email containing the system, device, and model.
func TestSmartDeviceAlert(t *testing.T) {
	hub, user := beszelTests.GetHubWithUser(t)
	defer hub.Cleanup()

	// Create a system for the user
	system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
		"name":  "test-system",
		"users": []string{user.Id},
		"host":  "127.0.0.1",
	})
	assert.NoError(t, err)

	// Create a smart_device with state PASSED
	smartDevice, err := beszelTests.CreateRecord(hub, "smart_devices", map[string]any{
		"system": system.Id,
		"name":   "/dev/sda",
		"model":  "Samsung SSD 970 EVO",
		"state":  "PASSED",
	})
	assert.NoError(t, err)

	// Verify no emails sent initially
	assert.Zero(t, hub.TestMailer.TotalSend(), "should have 0 emails sent initially")

	// Re-fetch the record so PocketBase can properly track original values
	smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
	assert.NoError(t, err)

	// Update the smart device state to FAILED
	smartDevice.Set("state", "FAILED")
	err = hub.Save(smartDevice)
	assert.NoError(t, err)

	// Wait for the alert to be processed
	time.Sleep(50 * time.Millisecond)

	// Verify that an email was sent
	assert.EqualValues(t, 1, hub.TestMailer.TotalSend(), "should have 1 email sent after state changed to FAILED")

	// Check the email content
	lastMessage := hub.TestMailer.LastMessage()
	assert.Contains(t, lastMessage.Subject, "SMART failure on test-system")
	assert.Contains(t, lastMessage.Subject, "/dev/sda")
	assert.Contains(t, lastMessage.Text, "Samsung SSD 970 EVO")
	assert.Contains(t, lastMessage.Text, "FAILED")
}
|
||||||
|
|
||||||
|
// TestSmartDeviceAlertPassedToWarning verifies that a PASSED -> WARNING
// transition also triggers a notification, with a "warning" subject label.
func TestSmartDeviceAlertPassedToWarning(t *testing.T) {
	hub, user := beszelTests.GetHubWithUser(t)
	defer hub.Cleanup()

	system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
		"name":  "test-system",
		"users": []string{user.Id},
		"host":  "127.0.0.1",
	})
	assert.NoError(t, err)

	smartDevice, err := beszelTests.CreateRecord(hub, "smart_devices", map[string]any{
		"system": system.Id,
		"name":   "/dev/mmcblk0",
		"model":  "eMMC",
		"state":  "PASSED",
	})
	assert.NoError(t, err)

	// Re-fetch so the update hook sees the correct original state.
	smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
	assert.NoError(t, err)

	smartDevice.Set("state", "WARNING")
	err = hub.Save(smartDevice)
	assert.NoError(t, err)

	// Alert delivery is asynchronous; allow it to complete.
	time.Sleep(50 * time.Millisecond)

	assert.EqualValues(t, 1, hub.TestMailer.TotalSend(), "should have 1 email sent after state changed to WARNING")
	lastMessage := hub.TestMailer.LastMessage()
	assert.Contains(t, lastMessage.Subject, "SMART warning on test-system")
	assert.Contains(t, lastMessage.Text, "WARNING")
}
|
||||||
|
|
||||||
|
// TestSmartDeviceAlertWarningToFailed verifies that a further deterioration
// from WARNING to FAILED sends another alert (worsening transitions always
// notify).
func TestSmartDeviceAlertWarningToFailed(t *testing.T) {
	hub, user := beszelTests.GetHubWithUser(t)
	defer hub.Cleanup()

	system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
		"name":  "test-system",
		"users": []string{user.Id},
		"host":  "127.0.0.1",
	})
	assert.NoError(t, err)

	smartDevice, err := beszelTests.CreateRecord(hub, "smart_devices", map[string]any{
		"system": system.Id,
		"name":   "/dev/mmcblk0",
		"model":  "eMMC",
		"state":  "WARNING",
	})
	assert.NoError(t, err)

	// Re-fetch so the update hook sees the correct original state.
	smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
	assert.NoError(t, err)

	smartDevice.Set("state", "FAILED")
	err = hub.Save(smartDevice)
	assert.NoError(t, err)

	// Alert delivery is asynchronous; allow it to complete.
	time.Sleep(50 * time.Millisecond)

	assert.EqualValues(t, 1, hub.TestMailer.TotalSend(), "should have 1 email sent after state changed from WARNING to FAILED")
	lastMessage := hub.TestMailer.LastMessage()
	assert.Contains(t, lastMessage.Subject, "SMART failure on test-system")
	assert.Contains(t, lastMessage.Text, "FAILED")
}
|
||||||
|
|
||||||
|
// TestSmartDeviceAlertNoAlertOnNonPassedToFailed verifies the two no-op cases:
// transitions out of an UNKNOWN state and recoveries (FAILED -> PASSED) must
// not send any notification.
func TestSmartDeviceAlertNoAlertOnNonPassedToFailed(t *testing.T) {
	hub, user := beszelTests.GetHubWithUser(t)
	defer hub.Cleanup()

	// Create a system for the user
	system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
		"name":  "test-system",
		"users": []string{user.Id},
		"host":  "127.0.0.1",
	})
	assert.NoError(t, err)

	// Create a smart_device with state UNKNOWN
	smartDevice, err := beszelTests.CreateRecord(hub, "smart_devices", map[string]any{
		"system": system.Id,
		"name":   "/dev/sda",
		"model":  "Samsung SSD 970 EVO",
		"state":  "UNKNOWN",
	})
	assert.NoError(t, err)

	// Re-fetch the record so PocketBase can properly track original values
	smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
	assert.NoError(t, err)

	// Update the state from UNKNOWN to FAILED - should NOT trigger alert.
	// We only alert from known healthy/degraded states.
	smartDevice.Set("state", "FAILED")
	err = hub.Save(smartDevice)
	assert.NoError(t, err)

	time.Sleep(50 * time.Millisecond)

	// Verify no email was sent (only PASSED -> FAILED triggers alert)
	assert.Zero(t, hub.TestMailer.TotalSend(), "should have 0 emails when changing from UNKNOWN to FAILED")

	// Re-fetch the record again
	smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
	assert.NoError(t, err)

	// Update state from FAILED to PASSED - should NOT trigger alert
	smartDevice.Set("state", "PASSED")
	err = hub.Save(smartDevice)
	assert.NoError(t, err)

	time.Sleep(50 * time.Millisecond)

	// Verify no email was sent
	assert.Zero(t, hub.TestMailer.TotalSend(), "should have 0 emails when changing from FAILED to PASSED")
}
|
||||||
|
|
||||||
|
// TestSmartDeviceAlertMultipleUsers verifies that every user attached to a
// system receives a notification when one of its SMART devices fails.
func TestSmartDeviceAlertMultipleUsers(t *testing.T) {
	hub, user1 := beszelTests.GetHubWithUser(t)
	defer hub.Cleanup()

	// Create a second user
	user2, err := beszelTests.CreateUser(hub, "test2@example.com", "password")
	assert.NoError(t, err)

	// Create user settings for the second user
	_, err = beszelTests.CreateRecord(hub, "user_settings", map[string]any{
		"user":     user2.Id,
		"settings": `{"emails":["test2@example.com"],"webhooks":[]}`,
	})
	assert.NoError(t, err)

	// Create a system with both users
	system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
		"name":  "shared-system",
		"users": []string{user1.Id, user2.Id},
		"host":  "127.0.0.1",
	})
	assert.NoError(t, err)

	// Create a smart_device with state PASSED
	smartDevice, err := beszelTests.CreateRecord(hub, "smart_devices", map[string]any{
		"system": system.Id,
		"name":   "/dev/nvme0n1",
		"model":  "WD Black SN850",
		"state":  "PASSED",
	})
	assert.NoError(t, err)

	// Re-fetch the record so PocketBase can properly track original values
	smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
	assert.NoError(t, err)

	// Update the smart device state to FAILED
	smartDevice.Set("state", "FAILED")
	err = hub.Save(smartDevice)
	assert.NoError(t, err)

	time.Sleep(50 * time.Millisecond)

	// Verify that two emails were sent (one for each user)
	assert.EqualValues(t, 2, hub.TestMailer.TotalSend(), "should have 2 emails sent for 2 users")
}
|
||||||
|
|
||||||
|
// TestSmartDeviceAlertWithoutModel verifies that the alert message omits the
// parenthesized model section entirely when the device has no model string.
func TestSmartDeviceAlertWithoutModel(t *testing.T) {
	hub, user := beszelTests.GetHubWithUser(t)
	defer hub.Cleanup()

	// Create a system for the user
	system, err := beszelTests.CreateRecord(hub, "systems", map[string]any{
		"name":  "test-system",
		"users": []string{user.Id},
		"host":  "127.0.0.1",
	})
	assert.NoError(t, err)

	// Create a smart_device with state PASSED but no model
	smartDevice, err := beszelTests.CreateRecord(hub, "smart_devices", map[string]any{
		"system": system.Id,
		"name":   "/dev/sdb",
		"state":  "PASSED",
	})
	assert.NoError(t, err)

	// Re-fetch the record so PocketBase can properly track original values
	smartDevice, err = hub.FindRecordById("smart_devices", smartDevice.Id)
	assert.NoError(t, err)

	// Update the smart device state to FAILED
	smartDevice.Set("state", "FAILED")
	err = hub.Save(smartDevice)
	assert.NoError(t, err)

	time.Sleep(50 * time.Millisecond)

	// Verify that an email was sent
	assert.EqualValues(t, 1, hub.TestMailer.TotalSend(), "should have 1 email sent")

	// Check that the email doesn't have empty parentheses for missing model
	lastMessage := hub.TestMailer.LastMessage()
	assert.NotContains(t, lastMessage.Text, "()", "should not have empty parentheses for missing model")
	assert.Contains(t, lastMessage.Text, "/dev/sdb")
}
|
||||||
@@ -5,67 +5,28 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pocketbase/dbx"
|
|
||||||
"github.com/pocketbase/pocketbase/core"
|
"github.com/pocketbase/pocketbase/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
type alertTask struct {
|
|
||||||
action string // "schedule" or "cancel"
|
|
||||||
systemName string
|
|
||||||
alertRecord *core.Record
|
|
||||||
delay time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
type alertInfo struct {
|
type alertInfo struct {
|
||||||
systemName string
|
systemName string
|
||||||
alertRecord *core.Record
|
alertData CachedAlertData
|
||||||
expireTime time.Time
|
expireTime time.Time
|
||||||
|
timer *time.Timer
|
||||||
}
|
}
|
||||||
|
|
||||||
// startWorker is a long-running goroutine that processes alert tasks
|
// Stop cancels all pending status alert timers.
|
||||||
// every x seconds. It must be running to process status alerts.
|
func (am *AlertManager) Stop() {
|
||||||
func (am *AlertManager) startWorker() {
|
am.stopOnce.Do(func() {
|
||||||
processPendingAlerts := time.Tick(15 * time.Second)
|
am.pendingAlerts.Range(func(key, value any) bool {
|
||||||
|
|
||||||
// check for status alerts that are not resolved when system comes up
|
|
||||||
// (can be removed if we figure out core bug in #1052)
|
|
||||||
checkStatusAlerts := time.Tick(561 * time.Second)
|
|
||||||
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-am.stopChan:
|
|
||||||
return
|
|
||||||
case task := <-am.alertQueue:
|
|
||||||
switch task.action {
|
|
||||||
case "schedule":
|
|
||||||
am.pendingAlerts.Store(task.alertRecord.Id, &alertInfo{
|
|
||||||
systemName: task.systemName,
|
|
||||||
alertRecord: task.alertRecord,
|
|
||||||
expireTime: time.Now().Add(task.delay),
|
|
||||||
})
|
|
||||||
case "cancel":
|
|
||||||
am.pendingAlerts.Delete(task.alertRecord.Id)
|
|
||||||
}
|
|
||||||
case <-checkStatusAlerts:
|
|
||||||
resolveStatusAlerts(am.hub)
|
|
||||||
case <-processPendingAlerts:
|
|
||||||
// Check for expired alerts every tick
|
|
||||||
now := time.Now()
|
|
||||||
for key, value := range am.pendingAlerts.Range {
|
|
||||||
info := value.(*alertInfo)
|
info := value.(*alertInfo)
|
||||||
if now.After(info.expireTime) {
|
if info.timer != nil {
|
||||||
// Downtime delay has passed, process alert
|
info.timer.Stop()
|
||||||
am.sendStatusAlert("down", info.systemName, info.alertRecord)
|
}
|
||||||
am.pendingAlerts.Delete(key)
|
am.pendingAlerts.Delete(key)
|
||||||
}
|
return true
|
||||||
}
|
})
|
||||||
}
|
})
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// StopWorker shuts down the AlertManager.worker goroutine
|
|
||||||
func (am *AlertManager) StopWorker() {
|
|
||||||
close(am.stopChan)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// HandleStatusAlerts manages the logic when system status changes.
|
// HandleStatusAlerts manages the logic when system status changes.
|
||||||
@@ -74,82 +35,116 @@ func (am *AlertManager) HandleStatusAlerts(newStatus string, systemRecord *core.
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
alertRecords, err := am.getSystemStatusAlerts(systemRecord.Id)
|
alerts := am.alertsCache.GetAlertsByName(systemRecord.Id, "Status")
|
||||||
if err != nil {
|
if len(alerts) == 0 {
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(alertRecords) == 0 {
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
systemName := systemRecord.GetString("name")
|
systemName := systemRecord.GetString("name")
|
||||||
if newStatus == "down" {
|
if newStatus == "down" {
|
||||||
am.handleSystemDown(systemName, alertRecords)
|
am.handleSystemDown(systemName, alerts)
|
||||||
} else {
|
} else {
|
||||||
am.handleSystemUp(systemName, alertRecords)
|
am.handleSystemUp(systemName, alerts)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getSystemStatusAlerts retrieves all "Status" alert records for a given system ID.
|
// handleSystemDown manages the logic when a system status changes to "down". It schedules pending alerts for each alert record.
|
||||||
func (am *AlertManager) getSystemStatusAlerts(systemID string) ([]*core.Record, error) {
|
func (am *AlertManager) handleSystemDown(systemName string, alerts []CachedAlertData) {
|
||||||
alertRecords, err := am.hub.FindAllRecords("alerts", dbx.HashExp{
|
for _, alertData := range alerts {
|
||||||
"system": systemID,
|
min := max(1, int(alertData.Min))
|
||||||
"name": "Status",
|
am.schedulePendingStatusAlert(systemName, alertData, time.Duration(min)*time.Minute)
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
return alertRecords, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Schedules delayed "down" alerts for each alert record.
|
// schedulePendingStatusAlert sets up a timer to send a "down" alert after the specified delay if the system is still down.
|
||||||
func (am *AlertManager) handleSystemDown(systemName string, alertRecords []*core.Record) {
|
// It returns true if the alert was scheduled, or false if an alert was already pending for the given alert record.
|
||||||
for _, alertRecord := range alertRecords {
|
func (am *AlertManager) schedulePendingStatusAlert(systemName string, alertData CachedAlertData, delay time.Duration) bool {
|
||||||
// Continue if alert is already scheduled
|
alert := &alertInfo{
|
||||||
if _, exists := am.pendingAlerts.Load(alertRecord.Id); exists {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Schedule by adding to queue
|
|
||||||
min := max(1, alertRecord.GetInt("min"))
|
|
||||||
am.alertQueue <- alertTask{
|
|
||||||
action: "schedule",
|
|
||||||
systemName: systemName,
|
systemName: systemName,
|
||||||
alertRecord: alertRecord,
|
alertData: alertData,
|
||||||
delay: time.Duration(min) * time.Minute,
|
expireTime: time.Now().Add(delay),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
storedAlert, loaded := am.pendingAlerts.LoadOrStore(alertData.Id, alert)
|
||||||
|
if loaded {
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
stored := storedAlert.(*alertInfo)
|
||||||
|
stored.timer = time.AfterFunc(time.Until(stored.expireTime), func() {
|
||||||
|
am.processPendingAlert(alertData.Id)
|
||||||
|
})
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleSystemUp manages the logic when a system status changes to "up".
|
// handleSystemUp manages the logic when a system status changes to "up".
|
||||||
// It cancels any pending alerts and sends "up" alerts.
|
// It cancels any pending alerts and sends "up" alerts.
|
||||||
func (am *AlertManager) handleSystemUp(systemName string, alertRecords []*core.Record) {
|
func (am *AlertManager) handleSystemUp(systemName string, alerts []CachedAlertData) {
|
||||||
for _, alertRecord := range alertRecords {
|
for _, alertData := range alerts {
|
||||||
alertRecordID := alertRecord.Id
|
|
||||||
// If alert exists for record, delete and continue (down alert not sent)
|
// If alert exists for record, delete and continue (down alert not sent)
|
||||||
if _, exists := am.pendingAlerts.Load(alertRecordID); exists {
|
if am.cancelPendingAlert(alertData.Id) {
|
||||||
am.alertQueue <- alertTask{
|
|
||||||
action: "cancel",
|
|
||||||
alertRecord: alertRecord,
|
|
||||||
}
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// No alert scheduled for this record, send "up" alert
|
if !alertData.Triggered {
|
||||||
if err := am.sendStatusAlert("up", systemName, alertRecord); err != nil {
|
continue
|
||||||
|
}
|
||||||
|
if err := am.sendStatusAlert("up", systemName, alertData); err != nil {
|
||||||
am.hub.Logger().Error("Failed to send alert", "err", err)
|
am.hub.Logger().Error("Failed to send alert", "err", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// sendStatusAlert sends a status alert ("up" or "down") to the users associated with the alert records.
|
// cancelPendingAlert stops the timer and removes the pending alert for the given alert ID. Returns true if a pending alert was found and cancelled.
|
||||||
func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, alertRecord *core.Record) error {
|
func (am *AlertManager) cancelPendingAlert(alertID string) bool {
|
||||||
switch alertStatus {
|
value, loaded := am.pendingAlerts.LoadAndDelete(alertID)
|
||||||
case "up":
|
if !loaded {
|
||||||
alertRecord.Set("triggered", false)
|
return false
|
||||||
case "down":
|
}
|
||||||
alertRecord.Set("triggered", true)
|
|
||||||
|
info := value.(*alertInfo)
|
||||||
|
if info.timer != nil {
|
||||||
|
info.timer.Stop()
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// CancelPendingStatusAlerts cancels all pending status alert timers for a given system.
|
||||||
|
// This is called when a system is paused to prevent delayed alerts from firing.
|
||||||
|
func (am *AlertManager) CancelPendingStatusAlerts(systemID string) {
|
||||||
|
am.pendingAlerts.Range(func(key, value any) bool {
|
||||||
|
info := value.(*alertInfo)
|
||||||
|
if info.alertData.SystemID == systemID {
|
||||||
|
am.cancelPendingAlert(key.(string))
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// processPendingAlert sends a "down" alert if the pending alert has expired and the system is still down.
|
||||||
|
func (am *AlertManager) processPendingAlert(alertID string) {
|
||||||
|
value, loaded := am.pendingAlerts.LoadAndDelete(alertID)
|
||||||
|
if !loaded {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
info := value.(*alertInfo)
|
||||||
|
refreshedAlertData, ok := am.alertsCache.Refresh(info.alertData)
|
||||||
|
if !ok || refreshedAlertData.Triggered {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := am.sendStatusAlert("down", info.systemName, refreshedAlertData); err != nil {
|
||||||
|
am.hub.Logger().Error("Failed to send alert", "err", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendStatusAlert sends a status alert ("up" or "down") to the users associated with the alert records.
|
||||||
|
func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, alertData CachedAlertData) error {
|
||||||
|
// Update trigger state for alert record before sending alert
|
||||||
|
triggered := alertStatus == "down"
|
||||||
|
if err := am.setAlertTriggered(alertData, triggered); err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
am.hub.Save(alertRecord)
|
|
||||||
|
|
||||||
var emoji string
|
var emoji string
|
||||||
if alertStatus == "up" {
|
if alertStatus == "up" {
|
||||||
@@ -162,10 +157,10 @@ func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, a
|
|||||||
message := strings.TrimSuffix(title, emoji)
|
message := strings.TrimSuffix(title, emoji)
|
||||||
|
|
||||||
// Get system ID for the link
|
// Get system ID for the link
|
||||||
systemID := alertRecord.GetString("system")
|
systemID := alertData.SystemID
|
||||||
|
|
||||||
return am.SendAlert(AlertMessageData{
|
return am.SendAlert(AlertMessageData{
|
||||||
UserID: alertRecord.GetString("user"),
|
UserID: alertData.UserID,
|
||||||
SystemID: systemID,
|
SystemID: systemID,
|
||||||
Title: title,
|
Title: title,
|
||||||
Message: message,
|
Message: message,
|
||||||
@@ -174,8 +169,8 @@ func (am *AlertManager) sendStatusAlert(alertStatus string, systemName string, a
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// resolveStatusAlerts resolves any status alerts that weren't resolved
|
// resolveStatusAlerts resolves any triggered status alerts that weren't resolved
|
||||||
// when system came up (https://github.com/henrygd/beszel/issues/1052)
|
// when system came up (https://github.com/henrygd/beszel/issues/1052).
|
||||||
func resolveStatusAlerts(app core.App) error {
|
func resolveStatusAlerts(app core.App) error {
|
||||||
db := app.DB()
|
db := app.DB()
|
||||||
// Find all active status alerts where the system is actually up
|
// Find all active status alerts where the system is actually up
|
||||||
@@ -205,3 +200,40 @@ func resolveStatusAlerts(app core.App) error {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// restorePendingStatusAlerts re-queues untriggered status alerts for systems that
|
||||||
|
// are still down after a hub restart. This rebuilds the lost in-memory timer state.
|
||||||
|
func (am *AlertManager) restorePendingStatusAlerts() error {
|
||||||
|
type pendingStatusAlert struct {
|
||||||
|
AlertID string `db:"alert_id"`
|
||||||
|
SystemID string `db:"system_id"`
|
||||||
|
SystemName string `db:"system_name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
var pending []pendingStatusAlert
|
||||||
|
err := am.hub.DB().NewQuery(`
|
||||||
|
SELECT a.id AS alert_id, a.system AS system_id, s.name AS system_name
|
||||||
|
FROM alerts a
|
||||||
|
JOIN systems s ON a.system = s.id
|
||||||
|
WHERE a.name = 'Status'
|
||||||
|
AND a.triggered = false
|
||||||
|
AND s.status = 'down'
|
||||||
|
`).All(&pending)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure cache is populated before trying to restore pending alerts
|
||||||
|
_ = am.alertsCache.PopulateFromDB(false)
|
||||||
|
|
||||||
|
for _, item := range pending {
|
||||||
|
alertData, ok := am.alertsCache.GetAlert(item.SystemID, item.AlertID)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
min := max(1, int(alertData.Min))
|
||||||
|
am.schedulePendingStatusAlert(item.SystemName, alertData, time.Duration(min)*time.Minute)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|||||||
1008
internal/alerts/alerts_status_test.go
Normal file
1008
internal/alerts/alerts_status_test.go
Normal file
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user