Compare commits

...

135 Commits

Author SHA1 Message Date
Dominik Eisenberg
ec6f705410 os: rename install_time_timestamp to install_time_timestamp_seconds (#2315)
Co-authored-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
2026-02-04 13:23:28 +01:00
renovate[bot]
7119da5cf5 fix(deps): update golang.org/x/ (#2311)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-30 16:14:03 +00:00
renovate[bot]
856c108e7f fix(deps): update module github.com/bmatcuk/doublestar/v4 to v4.10.0 (#2312)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-30 16:10:39 +00:00
renovate[bot]
eaecea7797 chore(deps): update module github.com/coreos/go-systemd/v22 to v22.7.0 (#2310)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-30 17:07:14 +01:00
renovate[bot]
a95df3d6f2 chore(deps): update module github.com/golang-jwt/jwt/v5 to v5.3.1 (#2308)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-30 16:54:30 +01:00
renovate[bot]
3fcdaca34a chore(deps): update github actions (#2307)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-30 16:54:18 +01:00
Calle Pettersson
2a3f0ff9d9 Formalize retirement (#2303) 2026-01-27 12:05:07 +01:00
Jan-Otto Kröpke
7dcf0d3137 gpu: skip Microsoft Basic Render Driver metrics (#2275)
Co-authored-by: Elliot Nevills <elliotnev27@users.noreply.github.com>
2026-01-17 12:32:31 +01:00
renovate[bot]
b584539387 chore(deps): update actions/setup-go action to v6.2.0 (#2298)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-16 20:33:48 +01:00
renovate[bot]
7cec563af2 fix(deps): update module github.com/bmatcuk/doublestar/v4 to v4.9.2 (#2297)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-16 20:33:36 +01:00
renovate[bot]
f0d5fd9ba0 fix(deps): update module github.com/prometheus/exporter-toolkit to v0.15.1 (#2295)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-10 20:03:30 +01:00
renovate[bot]
2efe98d8ec fix(deps): update module github.com/prometheus/common to v0.67.5 (#2294)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-10 13:31:12 +01:00
Corporte Gadfly
5ab9019509 chore: Add sample dashboard (#2255) 2026-01-06 13:20:13 +00:00
renovate[bot]
2b4576ecd4 chore(deps): update github actions (major) (#2289)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-02 21:16:51 +01:00
Dominik Eisenberg
9a666ace81 cpu: add example query for Task Manager-style CPU utilization (#2286)
Co-authored-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
2025-12-30 16:00:24 +01:00
Dominik Eisenberg
27186f7e78 os: add system installation date to metrics (#2284)
Co-authored-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
2025-12-29 20:23:02 +01:00
PrometheusBot
0c1336b845 Synchronize common files from prometheus/prometheus (#2279) 2025-12-24 09:40:03 +00:00
PrometheusBot
f3e50f4db4 Synchronize common files from prometheus/prometheus (#2278) 2025-12-21 20:52:20 +00:00
renovate[bot]
04714a3fbd chore(deps): update docker/setup-buildx-action action to v3.12.0 (#2276)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-19 18:02:20 +01:00
renovate[bot]
43bd352cfd fix(deps): update golang.org/x/ (#2277)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-19 18:01:20 +01:00
renovate[bot]
6e831b0176 chore(deps): update module google.golang.org/protobuf to v1.36.11 (#2273)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-17 20:59:05 +01:00
renovate[bot]
84dc1977de chore(deps): update dependency golangci/golangci-lint to v2.7.2 (#2272)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-17 20:59:00 +01:00
renovate[bot]
a944cd02a8 chore(deps): update dependency golangci/golangci-lint to v2.7.1 (#2269)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-06 10:04:32 +01:00
renovate[bot]
65ac3585a3 chore(deps): update github actions (#2268)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-06 10:04:25 +01:00
renovate[bot]
0dbea50704 chore(deps): update docker/metadata-action action to v5.10.0 (#2264)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-28 18:27:51 +01:00
renovate[bot]
7585044277 chore(deps): update github actions (major) (#2265)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-28 18:27:40 +01:00
renovate[bot]
aaf22e7322 chore(deps): update module golang.org/x/oauth2 to v0.33.0 (#2262)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-21 15:34:20 +00:00
renovate[bot]
7671e42c70 fix(deps): update module github.com/prometheus/common to v0.67.4 (#2261)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-21 16:33:50 +01:00
renovate[bot]
1e16767afb chore(deps): update module golang.org/x/crypto to v0.45.0 [security] (#2257)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-21 16:30:00 +01:00
renovate[bot]
81ff006f08 chore(deps): update dependency golangci/golangci-lint to v2.6.2 (#2259)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-21 16:29:56 +01:00
renovate[bot]
7c586b204d chore(deps): update github actions (#2260)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-21 16:29:50 +01:00
buckleyGI
5351641287 docs: Fix metric name for Windows Disk Alerts (#2254) 2025-11-12 14:54:33 +01:00
renovate[bot]
462a495514 chore(deps): update docker/metadata-action action to v5.9.0 (#2253)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-07 18:59:17 +01:00
renovate[bot]
16ee024a1b chore(deps): update dependency golangci/golangci-lint to v2.6.1 (#2252)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-07 18:58:28 +01:00
PrometheusBot
afe1bfc29d Synchronize common files from prometheus/prometheus (#2251) 2025-11-06 18:35:21 +00:00
Szilard Parrag
ebbad8943b collector: fix race condition in Build() (#2250)
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-11-06 02:33:05 +00:00
renovate[bot]
3d0587d28c chore(deps): update dependency golangci/golangci-lint to v2.6.0 (#2246)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-11-02 19:41:27 +01:00
renovate[bot]
a523ef69fd chore(deps): update module github.com/prometheus/procfs to v0.19.2 (#2247)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-02 18:50:59 +01:00
Jan-Otto Kröpke
cd088325ef installer: remove repair option (#2243) 2025-11-02 17:40:39 +00:00
renovate[bot]
8fe118bff9 chore(deps): update github actions (major) (#2249)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-02 18:38:10 +01:00
Jan-Otto Kröpke
81051791e2 filetime: replace collector with file (#2244) 2025-11-02 18:38:01 +01:00
renovate[bot]
4fc7402985 chore(deps): update module golang.org/x/time to v0.14.0 (#2248)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-02 18:37:49 +01:00
renovate[bot]
52efb1c61c fix(deps): update module github.com/prometheus/common to v0.67.2 (#2245)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-02 18:37:38 +01:00
xieshujian
3a9e227bd9 file: add file collector to scrape file size and file modify time which can replace filetime collector (#2205)
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-11-02 18:27:34 +01:00
Johan Thomsen
ed11d8e8fa netframework: add process_id label to clrmemory (#2242) 2025-11-02 18:26:55 +01:00
renovate[bot]
402eb6ef4e chore(deps): update module github.com/prometheus/procfs to v0.18.0 (#2238)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-10-24 13:24:57 +00:00
renovate[bot]
767367edc4 fix(deps): update module github.com/prometheus/exporter-toolkit to v0.15.0 (#2239)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-10-24 15:20:30 +02:00
Jan-Otto Kröpke
370a42b79a Update documentation for collector update flags (#2235) 2025-10-17 22:46:45 +02:00
renovate[bot]
ccd977177c fix(deps): update module github.com/prometheus/common to v0.67.1 (#2228)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-10-10 15:34:29 +02:00
renovate[bot]
33fe157545 fix(deps): update golang.org/x/ (#2227)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-10-10 15:34:22 +02:00
renovate[bot]
9e32f62ca4 chore(deps): update dependency golangci/golangci-lint to v2.5.0 (#2220)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-10-06 03:30:11 +02:00
Jan-Otto Kröpke
c26c27a7f4 Fix collector flag names in documentation (#2225) 2025-10-05 15:26:29 +00:00
renovate[bot]
1f43ca4d8f chore(deps): update module google.golang.org/protobuf to v1.36.10 (#2223)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-10-05 17:01:42 +02:00
renovate[bot]
d818d96e07 chore(deps): update github actions (#2224) 2025-10-04 20:39:49 +02:00
Jan-Otto Kröpke
7c108ea5be thermalzone: deprecate collector (#2201) 2025-09-26 10:29:55 +00:00
Jan-Otto Kröpke
bbe0d1aba7 os: include installation type in Windows version retrieval (#2217) 2025-09-26 10:27:31 +00:00
Jan-Otto Kröpke
1394f2399d Update renovate.json (#2219) 2025-09-20 20:41:05 +02:00
PrometheusBot
f4d77803ea Synchronize common files from prometheus/prometheus (#2218) 2025-09-20 20:40:16 +02:00
renovate[bot]
a19af1b695 chore(deps): update github actions (major) (#2215)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 20:39:10 +02:00
renovate[bot]
95bf157049 fix(deps): update golang.org/x/ (#2214)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 20:39:02 +02:00
Jan-Otto Kröpke
9969618026 Update renovate.json (#2213) 2025-09-19 19:55:26 +02:00
PrometheusBot
f2e62c6f53 Synchronize common files from prometheus/prometheus (#2212) 2025-09-19 19:53:50 +02:00
renovate[bot]
a9b42ab3a4 fix(deps): update module github.com/prometheus/exporter-toolkit to v0.14.1 (#2210)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 14:23:10 +00:00
renovate[bot]
e778eea250 chore(deps): update actions/checkout action to v4.3.0 (#2211)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 14:22:56 +00:00
renovate[bot]
c0eb53812d chore(deps): update module google.golang.org/protobuf to v1.36.9 (#2209)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 16:19:18 +02:00
renovate[bot]
21d9fb057d chore(deps): update module go.yaml.in/yaml/v2 to v2.4.3 (#2208)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 16:19:10 +02:00
Jan-Otto Kröpke
f1772a742f pdh: added logging, if PDH CStatus is not valid (#2203) 2025-09-07 13:31:29 +02:00
renovate[bot]
fcf21bb600 fix(deps): update module github.com/prometheus/client_golang to v1.23.1 (#2199)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-05 21:37:06 +00:00
renovate[bot]
cd5f136079 chore(deps): update module google.golang.org/protobuf to v1.36.8 (#2198)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-05 18:52:32 +02:00
Jan-Otto Kröpke
4171ec17a5 chore: switch to go.yaml.in/yaml/v3 (#2196) 2025-09-04 23:50:27 +02:00
Karthik Panjaje
6289499dee docs: Fixed HTTP request metrics documentation (#2192) 2025-08-31 14:41:40 +00:00
Jan-Otto Kröpke
79917893d1 installer: set failureflag for Windows service (#2191) 2025-08-29 21:57:28 +02:00
Jan-Otto Kröpke
0b8a257b31 gpu: add device id label (#2186) 2025-08-28 06:36:10 +02:00
Jan-Otto Kröpke
71cedbc4d0 mi: remove callbacks (#2188) 2025-08-26 21:04:56 +02:00
Jan-Otto Kröpke
c8a4cb3806 mssql: expose correct patch level without restart (#2187) 2025-08-26 20:52:09 +02:00
Jan-Otto Kröpke
558629dff5 chore: update to go 1.25 (#2185) 2025-08-24 14:27:00 +02:00
Jan-Otto Kröpke
5a8ebf0c44 collector: support sub-second timeout values. (#2181) 2025-08-15 23:55:24 +02:00
PrometheusBot
acbabb926d Synchronize common files from prometheus/prometheus (#2180) 2025-08-15 20:34:45 +02:00
renovate[bot]
e37392c00b chore(deps): update dependency golangci/golangci-lint to v2.4.0 (#2179)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-15 16:43:04 +02:00
renovate[bot]
00d86ba792 chore(deps): update actions/checkout action to v4.3.0 (#2178)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-15 16:42:52 +02:00
renovate[bot]
691f64f5cc fix(deps): update golang.org/x/ (#2170)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-10 15:39:40 +02:00
renovate[bot]
19999dea49 chore(deps): update docker/login-action action to v3.5.0 (#2169)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-10 15:39:24 +02:00
renovate[bot]
c2df4d7514 chore(deps): update actions/download-artifact action to v5 (#2171)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-08 15:48:04 +02:00
renovate[bot]
8937a5ac91 chore(deps): update module google.golang.org/protobuf to v1.36.7 (#2168)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-08 15:30:46 +02:00
Jan-Otto Kröpke
930130f58a collector: Add disable flag (#2165)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-07 08:58:29 +02:00
Jan-Otto Kröpke
0e85959a4d installer: do not fail, if service can't be started. (#2163) 2025-08-03 20:11:57 +02:00
Jan-Otto Kröpke
6253bf812d process: Add flag to control the export of the process cmdline (#2153) 2025-08-03 20:09:03 +02:00
Jan-Otto Kröpke
6c2380bd04 installer: disable config file creation, if CONFIG_FILE is set to a non default location. (#2162) 2025-08-03 20:08:13 +02:00
Jan-Otto Kröpke
5266f9ebfe installer: add quote to avoid argument splitting (#2161) 2025-08-03 19:39:59 +02:00
renovate[bot]
6c9a5b66e2 chore(deps): update dependency golangci/golangci-lint to v2.3.1 (#2158)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-08-03 04:27:21 +02:00
renovate[bot]
c4ab8cb8a5 chore(deps): update docker/metadata-action action to v5.8.0 (#2159)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-01 21:01:20 +02:00
renovate[bot]
7bcaf81d26 fix(deps): update module github.com/prometheus/client_golang to v1.23.0 (#2160)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-01 20:49:03 +02:00
renovate[bot]
5f6ba2c6e7 fix(deps): update module github.com/bmatcuk/doublestar/v4 to v4.9.1 (#2157)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-01 15:47:11 +02:00
Jan-Otto Kröpke
75c85fbde1 docs: add note about property preferences (#2155) 2025-07-29 20:53:50 +02:00
Jan-Otto Kröpke
120c244313 docs: Update example_config.yml (#2152) 2025-07-28 22:53:13 +02:00
Jan-Otto Kröpke
0e2d78affe docs: allow backport PR title prefix. (#2142) 2025-07-20 02:30:42 +02:00
Jan-Otto Kröpke
f0591d85cd process: fix windows_process_start_time_seconds_timestamp return relative time on Windows Server 2019 (#2137) 2025-07-20 02:14:17 +02:00
Jan-Otto Kröpke
255b01f610 container: fix network metrics (#2136) 2025-07-17 21:07:55 +02:00
Jan-Otto Kröpke
ab7db07836 filetime: support windows paths (#2118) 2025-07-13 02:01:44 +02:00
Jan-Otto Kröpke
52056a5cd9 filetime: support case insensitive matching (#2132) 2025-07-12 23:51:40 +00:00
Jan-Otto Kröpke
524fea08c4 gpu: fix windows_gpu_info metric (#2130) 2025-07-13 01:05:59 +02:00
Jan-Otto Kröpke
6b8c895a68 container: fix memory leaks (#2129) 2025-07-11 20:10:45 +02:00
Jan-Otto Kröpke
eade0da514 config: fix lists (#2124) 2025-07-11 19:50:58 +02:00
renovate[bot]
b07e866b4a chore(deps): update dependency golangci/golangci-lint to v2.2.2 (#2126)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 19:50:19 +02:00
renovate[bot]
98618408ce fix(deps): update golang.org/x/ (#2127)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-11 18:33:04 +02:00
Jan-Otto Kröpke
56b9f7fd27 docs: strip readme to avoid dockerhub limitations (#2123) 2025-07-09 20:17:26 +02:00
Jan-Otto Kröpke
8d267336c1 Update README.md (#2121) 2025-07-08 18:17:07 +02:00
Jan-Otto Kröpke
fd7070354a cs: remove deprecated cs collector (#2115) 2025-07-05 15:47:57 +02:00
Jan-Otto Kröpke
373d741260 os: remove deprecated metrics (#2116) 2025-07-05 15:44:57 +02:00
Jan-Otto Kröpke
ed15b3c671 system: remove windows_system_boot_time_timestamp_seconds (#2112) 2025-07-05 15:40:09 +02:00
Jan-Otto Kröpke
f8805932b2 logon: remove logon collector. Use terminal_services instead. (#2114) 2025-07-05 15:34:04 +02:00
Jan-Otto Kröpke
4fd26fa0fa update: remove deprecated flags (#2113) 2025-07-05 15:32:59 +02:00
Jan-Otto Kröpke
bf722630d6 mssql: fix ratio based counter (#2096) 2025-07-05 00:20:28 +02:00
renovate[bot]
9320e992cc chore(deps): update dependency golangci/golangci-lint to v2.2.1 (#2110)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-07-05 00:08:07 +02:00
renovate[bot]
7a4e92a473 chore(deps): update module github.com/prometheus/procfs to v0.17.0 (#2111)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-07-04 21:51:47 +02:00
Szymon Sobocki
02b9ab4058 mssql: fix incorrect patch version in windows_mssql_instance_info (#2109) 2025-07-04 09:42:26 +00:00
Jan-Otto Kröpke
c3043693df fix: add missing concurrency lock (#2098) 2025-07-04 11:14:49 +02:00
Jan-Otto Kröpke
7377d48f07 iis: missing metrics if app-include is set. (#2103) 2025-07-02 16:32:29 +02:00
Jan-Otto Kröpke
d64f1316ca os: missing deprecated metric windows_os_processes (#2104) 2025-07-02 00:50:03 +02:00
Jan-Otto Kröpke
492f3af317 diskdrive: fix not exposing state "Pred Fail" (#2101) 2025-06-30 19:55:45 +02:00
nbav12
116203fd19 Update collector.mscluster.md (#2099) 2025-06-30 11:54:41 +02:00
Jan-Otto Kröpke
66751baef6 process: do not fail, if collector.process.iis is enabled and WMI WebAdministration is not present. (#2082) 2025-06-29 14:12:56 +02:00
renovate[bot]
b02bddd38e fix(deps): update module github.com/prometheus/common to v0.65.0 (#2095)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-27 15:30:59 +00:00
Jan-Otto Kröpke
3dbc19e18b docs: add note about Server 2012 (#2093) 2025-06-21 11:28:58 +02:00
renovate[bot]
be481e8776 chore(deps): update docker/setup-buildx-action action to v3.11.1 (#2088)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-20 15:40:55 +02:00
Jan-Otto Kröpke
aea5c5a2fb docs: Clearify Windows Server 2012R2 support (#2087) 2025-06-20 15:33:34 +02:00
Lapo Luchini
59ac3072b1 feat: increase time resolution when possible (collectors: os, system, time) (#2047) 2025-06-20 13:12:26 +02:00
Jan-Otto Kröpke
66cd489c4a dhcp: fix log level for dhcp server, if not present (#2086) 2025-06-20 10:15:14 +02:00
Jan-Otto Kröpke
4891e23d29 fix: added count checks (#2083) 2025-06-19 16:59:19 +02:00
Nic Jansma
f285c3d1e2 logical_disk: skip unmounted volumes (#2084) 2025-06-18 07:59:50 +02:00
Jan-Otto Kröpke
90dac66bf5 config: fix validation error with empty config files (#2080) 2025-06-16 19:53:14 +02:00
Jan-Otto Kröpke
34cfda306b logical_disk: add bitlocker status sub-collector (#2077) 2025-06-16 12:48:23 +02:00
Sanjeevi Subramani
3e8693f1e3 iis: Add HTTP Service Request Queues (#1948)
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-06-15 07:40:21 +02:00
renovate[bot]
036c858355 chore(deps): update golang.org/x/ (#2078)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-06-13 16:38:52 +02:00
Sam DeHaan
a69720ae1d docs: fix typo in dns docs subcollector name (#2073) 2025-06-05 01:14:51 +02:00
205 changed files with 7884 additions and 2475 deletions


@@ -4,8 +4,12 @@ labels: [ 🐞 bug ]
body:
- type: markdown
attributes:
value: Thanks for taking the time to fill out this bug report!
value: |-
> [!NOTE]
> Windows Server 2012 and Windows Server 2012 R2 are no longer supported by the windows_exporter project.
Thanks for taking the time to fill out this bug report!
- type: markdown
attributes:
value: |-
@@ -15,18 +19,18 @@ body:
```
PS C:\WINDOWS\system32> cd c:\windows\system32
PS C:\windows\system32> lodctr /R
Error: Unable to rebuild performance counter setting from system backup store, error code is 2
PS C:\windows\system32> cd ..
PS C:\windows> cd syswow64
PS C:\windows\syswow64> lodctr /R
Info: Successfully rebuilt performance counter setting from system backup store
PS C:\windows\syswow64> winmgmt.exe /RESYNCPERF
```
----
- type: textarea
attributes:
label: Current Behavior
@@ -36,7 +40,7 @@ body:
```...```
validations:
required: true
- type: textarea
attributes:
label: Expected Behavior
@@ -44,7 +48,7 @@ body:
placeholder: When I do <X>, <Z> should happen instead.
validations:
required: true
- type: textarea
attributes:
label: Steps To Reproduce
@@ -57,7 +61,7 @@ body:
render: Markdown
validations:
required: false
- type: textarea
attributes:
label: Environment
@@ -70,7 +74,7 @@ body:
- Windows Server Version:
validations:
required: true
- type: textarea
attributes:
label: windows_exporter logs
@@ -80,7 +84,7 @@ body:
render: shell
validations:
required: true
- type: textarea
attributes:
label: Anything else?


@@ -18,7 +18,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- name: Set docker hub repo name
@@ -42,7 +42,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
steps:
- name: git checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
persist-credentials: false
- name: Set quay.io org name


@@ -20,8 +20,8 @@ jobs:
test:
runs-on: windows-2025
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: 'go.mod'
@@ -43,8 +43,8 @@ jobs:
promtool:
runs-on: windows-2025
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: 'go.mod'
@@ -65,7 +65,7 @@ jobs:
run: make promtool
- name: Upload windows_exporter.exe
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
if: always()
with:
name: windows_exporter.amd64.exe
@@ -82,14 +82,14 @@ jobs:
git config --global core.autocrlf false
git config --global core.eol lf
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: 'go.mod'
- name: golangci-lint
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
with:
# renovate: github=golangci/golangci-lint
version: v2.1.6
version: v2.7.2
args: "--max-same-issues=0"


@@ -33,11 +33,11 @@ jobs:
name: check title prefix
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: check
run: |
PR_TITLE_PREFIX=$(echo "$PR_TITLE" | cut -d':' -f1)
if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "fix(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Release"* ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then
if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "fix(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Release"* ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]] || [[ "$PR_TITLE_PREFIX" == "[0."* ]] || [[ "$PR_TITLE_PREFIX" == "[1."* ]]; then
exit 0
fi


@@ -24,11 +24,11 @@ jobs:
runs-on: windows-2025
environment: build
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: '0'
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: 'go.mod'
@@ -157,7 +157,7 @@ jobs:
cat output\sha256sums.txt
- name: Upload Artifacts
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: windows_exporter_binaries
path: |
@@ -180,25 +180,25 @@ jobs:
DOCKER_BUILD_SUMMARY: false
DOCKER_BUILD_RECORD_UPLOAD: false
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: '0'
- name: Download Artifacts
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
with:
name: windows_exporter_binaries
- name: Login to Docker Hub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
with:
username: ${{ secrets.DOCKER_HUB_LOGIN }}
password: ${{ secrets.DOCKER_HUB_PASSWORD }}
- name: Login to quay.io
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
with:
registry: quay.io
username: ${{ secrets.QUAY_IO_LOGIN }}
@@ -206,7 +206,7 @@ jobs:
- name: Login to GitHub container registry
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@@ -214,7 +214,7 @@ jobs:
- name: Docker meta
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
with:
images: |
ghcr.io/prometheus-community/windows-exporter
@@ -231,7 +231,7 @@ jobs:
org.opencontainers.image.licenses=MIT
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
- name: Build and push
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0


@@ -17,7 +17,7 @@ jobs:
name: Check for spelling errors
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: codespell-project/actions-codespell@master
with:
check_filenames: true


@@ -11,7 +11,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
runs-on: ubuntu-latest
steps:
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# opt out of defaults to avoid marking issues as stale and closing them


@@ -11,7 +11,7 @@ jobs:
if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
runs-on: ubuntu-latest
steps:
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# opt out of defaults to avoid marking issues as stale and closing them

.gitignore (vendored, 1 line changed)

@@ -6,6 +6,7 @@ output/
.vscode
*.syso
installer/*.msi
installer/*.log
installer/*.wixpdb
local/


@@ -15,15 +15,19 @@ linters:
- gocognit
- goconst
- gocyclo
- godoclint
- godot
- lll
- maintidx
- mnd
- noinlineerr
- paralleltest
- tagliatelle
- testpackage
- unqueryvet
- varnamelen
- wrapcheck
- wsl
settings:
forbidigo:
forbid:
@@ -62,6 +66,13 @@ linters:
disable:
- fieldalignment
- shadow
revive:
rules:
- name: var-naming
arguments:
- [ ] # AllowList - do not remove as args for the rule are positional and won't work without lists first
- [ ] # DenyList
- - skip-package-name-checks: true
sloglint:
no-mixed-args: true
kv-only: false


@@ -2,8 +2,15 @@
<dictionary name="project">
<words>
<w>containerd</w>
<w>endpointstats</w>
<w>gochecknoglobals</w>
<w>lpwstr</w>
<w>luid</w>
<w>operationoptions</w>
<w>setupapi</w>
<w>spdx</w>
<w>textfile</w>
<w>vmcompute</w>
</words>
</dictionary>
</component>


@@ -19,7 +19,7 @@
<configuration default="false" name="all" type="GoApplicationRunConfiguration" factoryName="Go Application" folderName="run">
<module name="windows_exporter" />
<working_directory value="$PROJECT_DIR$" />
<parameters value="--web.listen-address=127.0.0.1:9182 --log.level=info --collectors.enabled=ad,adcs,adfs,cache,container,cpu,cpu_info,cs,dfsr,dhcp,diskdrive,dns,exchange,filetime,fsrmquota,hyperv,iis,license,logical_disk,logon,memory,mscluster,msmq,mssql,net,netframework,nps,os,pagefile,performancecounter,physical_disk,printer,process,remote_fx,scheduled_task,service,smb,smbclient,smtp,system,tcp,terminal_services,thermalzone,time,udp,update,vmware,performancecounter --debug.enabled --collector.performancecounter.objects='[{ &quot;name&quot;: &quot;memory&quot;, &quot;type&quot;: &quot;formatted&quot;, &quot;object&quot;: &quot;Memory&quot;, &quot;counters&quot;: [{ &quot;name&quot;:&quot;Cache Faults/sec&quot;, &quot;type&quot;:&quot;counter&quot; }]}]'" />
<parameters value="--web.listen-address=127.0.0.1:9182 --log.level=info --collectors.enabled=ad,adcs,adfs,cache,container,cpu,cpu_info,dfsr,dhcp,diskdrive,dns,exchange,file,fsrmquota,hyperv,iis,license,logical_disk,memory,mscluster,msmq,mssql,net,netframework,nps,os,pagefile,performancecounter,physical_disk,printer,process,remote_fx,scheduled_task,service,smb,smbclient,smtp,system,tcp,terminal_services,thermalzone,time,udp,update,vmware,performancecounter --debug.enabled --collector.performancecounter.objects='[{ &quot;name&quot;: &quot;memory&quot;, &quot;type&quot;: &quot;formatted&quot;, &quot;object&quot;: &quot;Memory&quot;, &quot;counters&quot;: [{ &quot;name&quot;:&quot;Cache Faults/sec&quot;, &quot;type&quot;:&quot;counter&quot; }]}]'" />
<sudo value="true" />
<kind value="PACKAGE" />
<package value="github.com/prometheus-community/windows_exporter/cmd/windows_exporter" />


@@ -1,10 +1,11 @@
Maintainers in alphabetical order
* [Ben Reedy](https://github.com/breed808) - breed808@breed808.com
* [Calle Pettersson](https://github.com/carlpett) - calle@cape.nu
* [Jan-Otto Kröpke](https://github.com/jkroepke) - github@jkroepke.de
Alumni
* [Brian Brazil](https://github.com/brian-brazil)
* [Calle Pettersson](https://github.com/carlpett)
* [Martin Lindhe](https://github.com/martinlindhe)


@@ -29,7 +29,7 @@ test:
go test -v ./...
bench:
go test -v -bench='benchmarkcollector' ./internal/collectors/{cpu,logical_disk,physical_disk,logon,memory,net,printer,process,service,system,tcp,time}
go test -v -bench='benchmarkcollector' ./internal/collectors/{cpu,logical_disk,physical_disk,memory,net,printer,process,service,system,tcp,time}
lint:
golangci-lint -c .golangci.yaml run

README.md (179 lines changed)

@@ -1,4 +1,4 @@
config.file# windows_exporter
# windows_exporter
[![CI](https://github.com/prometheus-community/windows_exporter/actions/workflows/release.yml/badge.svg)](https://github.com/prometheus-community/windows_exporter)
[![Linting](https://github.com/prometheus-community/windows_exporter/actions/workflows/lint.yml/badge.svg)](https://github.com/prometheus-community/windows_exporter)
@@ -12,55 +12,53 @@ A Prometheus exporter for Windows machines.
## Collectors
Name | Description | Enabled by default
------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------
[ad](docs/collector.ad.md) | Active Directory Domain Services |
[adcs](docs/collector.adcs.md) | Active Directory Certificate Services |
[adfs](docs/collector.adfs.md) | Active Directory Federation Services |
[cache](docs/collector.cache.md) | Cache metrics |
[cpu](docs/collector.cpu.md) | CPU usage | &#10003;
[cpu_info](docs/collector.cpu_info.md) | CPU Information |
[cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) |
[container](docs/collector.container.md) | Container metrics |
[diskdrive](docs/collector.diskdrive.md) | Diskdrive metrics |
[dfsr](docs/collector.dfsr.md) | DFSR metrics |
[dhcp](docs/collector.dhcp.md) | DHCP Server |
[dns](docs/collector.dns.md) | DNS Server |
[exchange](docs/collector.exchange.md) | Exchange metrics |
[filetime](docs/collector.filetime.md) | FileTime metrics |
[fsrmquota](docs/collector.fsrmquota.md) | Microsoft File Server Resource Manager (FSRM) Quotas collector |
[gpu](docs/collector.gpu.md) | GPU metrics |
[hyperv](docs/collector.hyperv.md) | Hyper-V hosts |
[iis](docs/collector.iis.md) | IIS sites and applications |
[license](docs/collector.license.md) | Windows license status |
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | &#10003;
[memory](docs/collector.memory.md) | Memory usage metrics | &#10003;
[mscluster](docs/collector.mscluster.md) | MSCluster metrics |
[msmq](docs/collector.msmq.md) | MSMQ queues |
[mssql](docs/collector.mssql.md) | [SQL Server Performance Objects](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects#SQLServerPOs) metrics |
[netframework](docs/collector.netframework.md) | .NET Framework metrics |
[net](docs/collector.net.md) | Network interface I/O | &#10003;
[os](docs/collector.os.md) | OS metrics (memory, processes, users) | &#10003;
[pagefile](docs/collector.pagefile.md) | pagefile metrics |
[performancecounter](docs/collector.performancecounter.md) | Custom performance counter metrics |
[physical_disk](docs/collector.physical_disk.md) | physical disk metrics | &#10003;
[printer](docs/collector.printer.md) | Printer metrics |
[process](docs/collector.process.md) | Per-process metrics |
[remote_fx](docs/collector.remote_fx.md) | RemoteFX protocol (RDP) metrics |
[scheduled_task](docs/collector.scheduled_task.md) | Scheduled Tasks metrics |
[service](docs/collector.service.md) | Service state metrics | &#10003;
[smb](docs/collector.smb.md) | SMB Server |
[smbclient](docs/collector.smbclient.md) | SMB Client |
[smtp](docs/collector.smtp.md) | IIS SMTP Server |
[system](docs/collector.system.md) | System calls | &#10003;
[tcp](docs/collector.tcp.md) | TCP connections |
[terminal_services](docs/collector.terminal_services.md) | Terminal services (RDS)
[textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file |
[thermalzone](docs/collector.thermalzone.md) | Thermal information |
[time](docs/collector.time.md) | Windows Time Service |
[udp](docs/collector.udp.md) | UDP connections |
[update](docs/collector.update.md) | Windows Update Service |
[vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent |
| Name | Description | Enabled by default |
|------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------|
| [ad](docs/collector.ad.md) | Active Directory Domain Services | |
| [adcs](docs/collector.adcs.md) | Active Directory Certificate Services | |
| [adfs](docs/collector.adfs.md) | Active Directory Federation Services | |
| [cache](docs/collector.cache.md) | Cache metrics | |
| [cpu](docs/collector.cpu.md) | CPU usage | &#10003; |
| [cpu_info](docs/collector.cpu_info.md) | CPU Information | |
| [container](docs/collector.container.md) | Container metrics | |
| [diskdrive](docs/collector.diskdrive.md) | Diskdrive metrics | |
| [dfsr](docs/collector.dfsr.md) | DFSR metrics | |
| [dhcp](docs/collector.dhcp.md) | DHCP Server | |
| [dns](docs/collector.dns.md) | DNS Server | |
| [exchange](docs/collector.exchange.md) | Exchange metrics | |
| [file](docs/collector.file.md) | File metrics | |
| [fsrmquota](docs/collector.fsrmquota.md) | Microsoft File Server Resource Manager (FSRM) Quotas collector | |
| [gpu](docs/collector.gpu.md) | GPU metrics | |
| [hyperv](docs/collector.hyperv.md) | Hyper-V hosts | |
| [iis](docs/collector.iis.md) | IIS sites and applications | |
| [license](docs/collector.license.md) | Windows license status | |
| [logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | &#10003; |
| [memory](docs/collector.memory.md) | Memory usage metrics | &#10003; |
| [mscluster](docs/collector.mscluster.md) | MSCluster metrics | |
| [msmq](docs/collector.msmq.md) | MSMQ queues | |
| [mssql](docs/collector.mssql.md) | [SQL Server Performance Objects](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects#SQLServerPOs) metrics | |
| [netframework](docs/collector.netframework.md) | .NET Framework metrics | |
| [net](docs/collector.net.md) | Network interface I/O | &#10003; |
| [os](docs/collector.os.md) | OS metrics (memory, processes, users) | &#10003; |
| [pagefile](docs/collector.pagefile.md) | pagefile metrics | |
| [performancecounter](docs/collector.performancecounter.md) | Custom performance counter metrics | |
| [physical_disk](docs/collector.physical_disk.md) | physical disk metrics | &#10003; |
| [printer](docs/collector.printer.md) | Printer metrics | |
| [process](docs/collector.process.md) | Per-process metrics | |
| [remote_fx](docs/collector.remote_fx.md) | RemoteFX protocol (RDP) metrics | |
| [scheduled_task](docs/collector.scheduled_task.md) | Scheduled Tasks metrics | |
| [service](docs/collector.service.md) | Service state metrics | &#10003; |
| [smb](docs/collector.smb.md) | SMB Server | |
| [smbclient](docs/collector.smbclient.md) | SMB Client | |
| [smtp](docs/collector.smtp.md) | IIS SMTP Server | |
| [system](docs/collector.system.md) | System calls | &#10003; |
| [tcp](docs/collector.tcp.md) | TCP connections | |
| [terminal_services](docs/collector.terminal_services.md) | Terminal services (RDS) | |
| [textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | |
| [time](docs/collector.time.md) | Windows Time Service | |
| [udp](docs/collector.udp.md) | UDP connections | |
| [update](docs/collector.update.md) | Windows Update Service | |
| [vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent | |
See the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples.
@@ -83,15 +81,15 @@ This can be useful for having different Prometheus servers collect specific metr
windows_exporter accepts flags to configure certain behaviours. The ones configuring the global behaviour of the exporter are listed below, while collector-specific ones are documented in the respective collector documentation above.
| Flag | Description | Default value |
|--------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
| `--web.listen-address` | host:port for exporter. | `:9182` |
| `--telemetry.path` | URL path for surfacing collected metrics. | `/metrics` |
| `--collectors.enabled` | Comma-separated list of collectors to use. Use `[defaults]` as a placeholder which gets expanded containing all the collectors enabled by default. | `[defaults]` |
| `--scrape.timeout-margin` | Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads. | `0.5` |
| `--web.config.file` | A [web config][web_config] for setting up TLS and Auth | None |
| `--config.file` | [Using a config file](#using-a-configuration-file) from path or URL | None |
| `--log.file` | Output file of log messages. One of [stdout, stderr, eventlog, \<path to log file>]<br>**NOTE:** The MSI installer will add a default argument to the installed service setting this to eventlog | stderr |
| Flag | Description | Default value |
|---------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
| `--web.listen-address` | host:port for exporter. | `:9182` |
| `--telemetry.path` | URL path for surfacing collected metrics. | `/metrics` |
| `--collectors.enabled` | Comma-separated list of collectors to use. Use `[defaults]` as a placeholder which gets expanded containing all the collectors enabled by default. | `[defaults]` |
| `--scrape.timeout-margin` | Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads. | `0.5` |
| `--web.config.file` | A [web config][web_config] for setting up TLS and Auth | None |
| `--config.file` | [Using a config file](#using-a-configuration-file) from path | None |
| `--log.file` | Output file of log messages. One of [stdout, stderr, eventlog, \<path to log file>]<br>**NOTE:** The MSI installer will add a default argument to the installed service setting this to eventlog | stderr |
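For illustration, here is a minimal sketch combining a few of the global flags from the table above; the values shown are simply the documented defaults (with logging to stdout) and not a recommendation:
```powershell
.\windows_exporter.exe --web.listen-address=":9182" --telemetry.path="/metrics" --collectors.enabled="[defaults]" --log.file=stdout
```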
## Installation
@@ -113,20 +111,22 @@ The configuration file
The following parameters are available:
| Name | Description |
|----------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `ENABLED_COLLECTORS` | As the `--collectors.enabled` flag, provide a comma-separated list of enabled collectors |
| `CONFIG_FILE` | Use the `--config.file` flag to specify a config file. If empty, no config file will be set. The special value `config.yaml` set the path to the config.yaml at install dir | |
| `LISTEN_ADDR` | The IP address to bind to. Defaults to an empty string. (any local address) |
| `LISTEN_PORT` | The port to bind to. Defaults to `9182`. |
| `METRICS_PATH` | The path at which to serve metrics. Defaults to `/metrics` |
| `TEXTFILE_DIRS` | Use the `--collector.textfile.directories` flag to specify one or more directories, separated by commas, where the collector should read text files containing metrics |
| `REMOTE_ADDR` | Allows setting comma separated remote IP addresses for the Windows Firewall exception (allow list). Defaults to an empty string (any remote address). |
| `EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string. For `--collectors.enabled` and `--config.file`, use the specialized properties `ENABLED_COLLECTORS` and `CONFIG_FILE` |
| `ADDLOCAL` | Enables features within the windows_exporter installer. Supported values: `FirewallException` |
| `REMOVE` | Disables features within the windows_exporter installer. Supported values: `FirewallException` |
| `APPLICATIONFOLDER` | Directory to install windows_exporter. Defaults to `C:\Program Files\windows_exporter` |
| Name | Description |
|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `ENABLED_COLLECTORS` | As the `--collectors.enabled` flag, provide a comma-separated list of enabled collectors |
| `CONFIG_FILE` | Use the `--config.file` flag to specify a config file. If empty, the default config file in the install dir will be used. If set, the config file must exist before the installation starts. |
| `LISTEN_ADDR` | The IP address to bind to. Defaults to an empty string. (any local address) |
| `LISTEN_PORT` | The port to bind to. Defaults to `9182`. |
| `METRICS_PATH` | The path at which to serve metrics. Defaults to `/metrics` |
| `TEXTFILE_DIRS` | Use the `--collector.textfile.directories` flag to specify one or more directories, separated by commas, where the collector should read text files containing metrics |
| `REMOTE_ADDR` | Allows setting comma separated remote IP addresses for the Windows Firewall exception (allow list). Defaults to an empty string (any remote address). |
| `EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string. For `--collectors.enabled` and `--config.file`, use the specialized properties `ENABLED_COLLECTORS` and `CONFIG_FILE` |
| `ADDLOCAL` | Enables features within the windows_exporter installer. Supported values: `FirewallException` |
| `REMOVE` | Disables features within the windows_exporter installer. Supported values: `FirewallException` |
| `APPLICATIONFOLDER` | Directory to install windows_exporter. Defaults to `C:\Program Files\windows_exporter` |
> [!NOTE]
> The installer properties are always preferred over the values defined in the config file. If you prefer to configure via the config file, avoid using any of the properties listed above.
Parameters are sent to the installer via `msiexec`.
In PowerShell, the `--%` token should be passed before defining properties.
@@ -155,7 +155,7 @@ msiexec /i <path-to-msi-file> --% ADDLOCAL=FirewallException APPLICATIONFOLDER="
On some older versions of Windows,
you may need to surround parameter values with double quotes to get the installation command parsing properly:
```powershell
msiexec /i C:\Users\Administrator\Downloads\windows_exporter.msi --% ENABLED_COLLECTORS="ad,iis,logon,memory,process,tcp,textfile,thermalzone" TEXTFILE_DIRS="C:\custom_metrics\"
msiexec /i C:\Users\Administrator\Downloads\windows_exporter.msi --% ENABLED_COLLECTORS="ad,iis,memory,process,tcp,textfile,thermalzone" TEXTFILE_DIRS="C:\custom_metrics\"
```
To install the exporter with creating a firewall exception, use the following command:
@@ -183,9 +183,6 @@ The windows_exporter can be run as a Docker container. The Docker image is avail
The Docker image is tagged with the version of the exporter. The `latest` tag is also available and points to the latest release.
Additionally, a flavor `hostprocess` (tags with a `-hostprocess` suffix) is based on https://github.com/microsoft/windows-host-process-containers-base-image,
which is designed to run as a Windows host process container. That image is smaller than the default one.
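As a rough sketch only: assuming the `ghcr.io/prometheus-community/windows-exporter` image name that appears in the release workflow above, a pull-and-run on a Windows container host could look like the following (the exact tag and isolation settings depend on the host):
```powershell
docker pull ghcr.io/prometheus-community/windows-exporter:latest
docker run -d -p 9182:9182 ghcr.io/prometheus-community/windows-exporter:latest
```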
## Kubernetes Implementation
See detailed steps to install on Windows Kubernetes [here](./kubernetes/kubernetes.md).
@@ -194,17 +191,7 @@ See detailed steps to install on Windows Kubernetes [here](./kubernetes/kubernet
`windows_exporter` supports Windows Server versions 2016 and later, and desktop Windows version 10 and 11 (21H2 or later).
Windows Server 2012 and 2012R2 are supported as best-effort only, but not guaranteed to work.
## Usage
go get -u github.com/prometheus/promu
go get -u github.com/prometheus-community/windows_exporter
cd $env:GOPATH/src/github.com/prometheus-community/windows_exporter
promu build -v
.\windows_exporter.exe
The prometheus metrics will be exposed on [localhost:9182](http://localhost:9182)
There are known compatibility issues with Windows Server 2012 R2 and earlier versions.
### HTTP Endpoints
@@ -214,18 +201,6 @@ windows_exporter provides the following HTTP endpoints:
* `/health`: Returns 200 OK when the exporter is running.
* `/debug/pprof/`: Exposes the [pprof](https://golang.org/pkg/net/http/pprof/) endpoints. Only, if `--debug.enabled` is set.
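As a quick liveness check, the `/health` endpoint can be probed from PowerShell; this sketch assumes the default listen address `:9182`:
```powershell
# Expect status code 200 while the exporter is running
(Invoke-WebRequest -Uri "http://localhost:9182/health" -UseBasicParsing).StatusCode
```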
## Examples
### Enable only service collector and specify a custom query
.\windows_exporter.exe --collectors.enabled "service" --collector.service.include="windows_exporter"
### Enable only process collector and specify a custom query
.\windows_exporter.exe --collectors.enabled "process" --collector.process.include="firefox.+"
When there are multiple processes with the same name, WMI represents those after the first instance as `process-name#index`. So to get them all, rather than just the first one, the [regular expression](https://en.wikipedia.org/wiki/Regular_expression) must use `.+`. See [process](docs/collector.process.md) for more information.
### Using [defaults] with `--collectors.enabled` argument
Using `[defaults]` with `--collectors.enabled` argument which gets expanded with all default collectors.
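For illustration, an invocation matching the context line that follows, expanding the defaults and adding the process and container collectors:
```powershell
.\windows_exporter.exe --collectors.enabled "[defaults],process,container"
```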
@@ -238,10 +213,6 @@ This enables the additional process and container collectors on top of the defau
YAML configuration files can be specified with the `--config.file` flag. e.g. `.\windows_exporter.exe --config.file=config.yml`. If you are using the absolute path, make sure to quote the path, e.g. `.\windows_exporter.exe --config.file="C:\Program Files\windows_exporter\config.yml"`
It is also possible to load the configuration from a URL. e.g. `.\windows_exporter.exe --config.file="https://example.com/config.yml"`
If you need to skip TLS verification, you can use the `--config.file.insecure-skip-verify` flag. e.g. `.\windows_exporter.exe --config.file="https://example.com/config.yml" --config.file.insecure-skip-verify`
```yaml
collectors:
enabled: cpu,net,service
@@ -258,7 +229,7 @@ An example configuration file can be found [here](docs/example_config.yml).
Configuration file values can be mixed with CLI flags. E.G.
`.\windows_exporter.exe --collectors.enabled=cpu,logon`
`.\windows_exporter.exe --collectors.enabled=cpu`
```yaml
log:


@@ -102,6 +102,7 @@ type windowsExporterService struct{}
// Execute is the entry point for the Windows service manager.
func (s *windowsExporterService) Execute(_ []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (bool, uint32) {
changes <- svc.Status{State: svc.StartPending}
// Send a signal to the main function that the service is running.
changes <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown}
for {
@@ -179,6 +180,7 @@ func logToFile(msg string) {
// https://github.com/DataDog/datadog-agent/blob/46740e82ef40a04c4be545ed8c16a4b0d1f046cf/pkg/util/winutil/servicemain/servicemain.go#L128
func isWindowsService() (bool, error) {
var currentProcess windows.PROCESS_BASIC_INFORMATION
infoSize := uint32(unsafe.Sizeof(currentProcess))
err := windows.NtQueryInformationProcess(windows.CurrentProcess(), windows.ProcessBasicInformation, unsafe.Pointer(&currentProcess), infoSize, &infoSize)


@@ -89,6 +89,10 @@ func run(ctx context.Context, args []string) int {
"collectors.enabled",
"Comma-separated list of collectors to use. Use '[defaults]' as a placeholder for all the collectors enabled by default.").
Default(collector.DefaultCollectors).String()
disabledCollectors = app.Flag(
"collectors.disabled",
"Comma-separated list of collectors to exclude. Can be used to disable collector from the defaults.").
Default("").String()
timeoutMargin = app.Flag(
"scrape.timeout-margin",
"Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads.",
@@ -166,6 +170,10 @@ func run(ctx context.Context, args []string) int {
return 1
}
if *disabledCollectors != "" {
collectors.Disable(slices.Compact(strings.Split(*disabledCollectors, ",")))
}
// Initialize collectors before loading
if err = collectors.Build(ctx, logger); err != nil {
for _, err := range utils.SplitError(err) {

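The new `--collectors.disabled` flag complements `--collectors.enabled`; as a hypothetical example (the collector name is illustrative only), this keeps the defaults but drops one collector:
```powershell
.\windows_exporter.exe --collectors.enabled="[defaults]" --collectors.disabled="printer"
```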

@@ -171,8 +171,10 @@ func waitUntilListening(tb testing.TB, network, address string) error {
err error
)
dialer := &net.Dialer{Timeout: 100 * time.Millisecond}
for range 20 {
conn, err = net.DialTimeout(network, address, 100*time.Millisecond)
conn, err = dialer.DialContext(tb.Context(), network, address)
if err == nil {
_ = conn.Close()
@@ -189,7 +191,6 @@ func waitUntilListening(tb testing.TB, network, address string) error {
}
var winErr windows.Errno
if errors.As(err, &winErr) {
return fmt.Errorf("listener not listening: %w (#%d)", winErr, uint32(winErr))
}


@@ -1,8 +1,12 @@
# example configuration file for windows_exporter
collectors:
enabled: cpu,cpu_info,exchange,iis,logical_disk,logon,memory,net,os,performancecounter,process,remote_fx,service,system,tcp,time,terminal_services,textfile
enabled: cpu,cpu_info,exchange,iis,logical_disk,memory,net,os,performancecounter,process,remote_fx,service,system,tcp,time,terminal_services,textfile
collector:
textfile:
directories:
- 'C:\MyDir1'
- 'C:\MyDir2'
service:
include: "windows_exporter"
performancecounter:

dashboard/README.md (new file, 13 lines)

@@ -0,0 +1,13 @@
## Sample dashboard for Windows Exporter
This sample dashboard is heavily inspired by [this dashboard in Chinese](https://grafana.com/grafana/dashboards/10467-windows-exporter-for-prometheus-dashboard-cn-v20230531/).
The first row shows an overview of your Windows landscape.
<br/>
![Screenshot of overview row.](dashboard-overview.png)
The second row provides resource details about a specific Windows VM, picked from the variables at the top.
<br/>
![Screenshot of resource details (part 1).](resource-details-part1.png)
<br/>
![Screenshot of resource details (part 2).](resource-details-part2.png)

Three binary image files added (the dashboard screenshots referenced above, not shown): 1.1 MiB, 649 KiB, and 511 KiB.

File diff suppressed because it is too large.


@@ -9,18 +9,17 @@ This directory contains documentation of the collectors in the windows_exporter,
- [`container`](collector.container.md)
- [`cpu`](collector.cpu.md)
- [`cpu_info`](collector.cpu_info.md)
- [`cs`](collector.cs.md)
- [`dfsr`](collector.dfsr.md)
- [`dhcp`](collector.dhcp.md)
- [`diskdrive`](collector.diskdrive.md)
- [`dns`](collector.dns.md)
- [`exchange`](collector.exchange.md)
- [`file`](collector.file.md)
- [`fsrmquota`](collector.fsrmquota.md)
- [`hyperv`](collector.hyperv.md)
- [`iis`](collector.iis.md)
- [`license`](collector.license.md)
- [`logical_disk`](collector.logical_disk.md)
- [`logon`](collector.logon.md)
- [`memory`](collector.memory.md)
- [`mscluster`](collector.mscluster.md)
- [`msmq`](collector.msmq.md)
@@ -44,7 +43,6 @@ This directory contains documentation of the collectors in the windows_exporter,
- [`tcp`](collector.tcp.md)
- [`terminal_services`](collector.terminal_services.md)
- [`textfile`](collector.textfile.md)
- [`thermalzone`](collector.thermalzone.md)
- [`time`](collector.time.md)
- [`udp`](collector.udp.md)
- [`update`](collector.update.md)

View File

@@ -48,6 +48,27 @@ Show per-cpu utilisation using the processor utility metrics
```
rate(windows_cpu_processor_utility_total{instance="localhost"}[5m]) / rate(windows_cpu_processor_rtc_total{instance="localhost"}[5m])
```
Show average CPU utilization percentage (like Windows Task Manager)
```
sum by (instance) (
clamp_max(
(
rate(windows_cpu_processor_utility_total{
job=~"$job",
}[1m])
/
rate(windows_cpu_processor_rtc_total{
job=~"$job",
}[1m])
), 100
)
) /
count by (instance) (
windows_cpu_processor_utility_total{
job=~"$job"
}
)
```
Show actual average CPU frequency in Hz
```
avg by(instance) (

View File

@@ -1,34 +0,0 @@
# cs collector
> [!CAUTION]
> This collector is deprecated and will be removed in a future release.
> See https://github.com/prometheus-community/windows_exporter/pull/1596 for more information.
The cs collector exposes metrics detailing the hardware of the computer system
|||
-|-
Metric name prefix | `cs`
Classes | [`Win32_ComputerSystem`](https://msdn.microsoft.com/en-us/library/aa394102)
Enabled by default? | Yes
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_cs_logical_processors` | Number of installed logical processors | gauge | None
`windows_cs_physical_memory_bytes` | Total installed physical memory | gauge | None
`windows_cs_hostname` | Labelled system hostname information | gauge | `hostname`, `domain`, `fqdn`
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -30,7 +30,7 @@ groups:
rules:
- alert: Drive_Status
expr: windows_disk_drive_status{status="OK"} != 1
expr: windows_diskdrive_status{status="OK"} != 1
for: 10m
labels:
severity: high

View File

@@ -15,7 +15,7 @@ Enabled by default (error stats)? | Yes |
Name | Description
-----|------------
`collector.dns.enabled` | Comma-separated list of collectors to use. Available collectors: `metrics`, `error_stats`. Defaults to all collectors if not specified.
`collector.dns.enabled` | Comma-separated list of collectors to use. Available collectors: `metrics`, `wmi_stats`. Defaults to all collectors if not specified.
## Metrics
@@ -95,4 +95,4 @@ windows_dns_wmi_stats_total{collection_name="Error Stats",dns_server="EC2AMAZ-5N
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

40
docs/collector.file.md Normal file
View File

@@ -0,0 +1,40 @@
# file collector
The file collector exposes the modification timestamps and sizes of files in the filesystem.
|||
-|-
Metric name prefix | `file`
Enabled by default? | No
## Flags
### `--collector.file.file-patterns`
Comma-separated list of file patterns. Each pattern is a glob pattern that can contain `*`, `?`, and `**` (recursive).
See https://github.com/bmatcuk/doublestar#patterns for an extended description of the pattern syntax.
## Metrics
| Name | Description | Type | Labels |
|----------------------------------------|------------------------|-------|--------|
| `windows_file_mtime_timestamp_seconds` | File modification time | gauge | `file` |
| `windows_file_size_bytes` | File size | gauge | `file` |
### Example metric
```
# HELP windows_file_mtime_timestamp_seconds File modification time
# TYPE windows_file_mtime_timestamp_seconds gauge
windows_file_mtime_timestamp_seconds{file="C:\\Users\\admin\\Desktop\\Dashboard.lnk"} 1.726434517e+09
# HELP windows_file_size_bytes File size
# TYPE windows_file_size_bytes gauge
windows_file_size_bytes{file="C:\\Users\\admin\\Desktop\\Dashboard.lnk"} 123
```
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
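As an illustrative starting point (the 24-hour threshold is arbitrary), files that have not been modified recently could be flagged with:

```
time() - windows_file_mtime_timestamp_seconds > 86400
```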
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -1,36 +0,0 @@
# filetime collector
The filetime collector exposes modified timestamps of files in the filesystem.
The collector
|||
-|-
Metric name prefix | `filetime`
Enabled by default? | No
## Flags
### `--collectors.filetime.file-patterns`
Comma-separated list of file patterns. Each pattern is a glob pattern that can contain `*`, `?`, and `**` (recursive).
See https://github.com/bmatcuk/doublestar#patterns for an extended description of the pattern syntax.
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_filetime_mtime_timestamp_seconds` | File modification time | gauge | `file`
### Example metric
```
# HELP windows_filetime_mtime_timestamp_seconds File modification time
# TYPE windows_filetime_mtime_timestamp_seconds gauge
windows_filetime_mtime_timestamp_seconds{file="C:\\Users\\admin\\Desktop\\Dashboard.lnk"} 1.726434517e+09
```
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -20,29 +20,32 @@ These metrics are available on supported versions of Windows with compatible GPU
### Adapter-level Metrics
| Name | Description | Type | Labels |
|----------------------------------------------|-------------------------------------------------------------------------|-------|--------------------------------------------------------------------------------------|
| `windows_gpu_adapter_memory_committed_bytes` | Total committed GPU memory in bytes per physical GPU | gauge | `phys` |
| `windows_gpu_adapter_memory_dedicated_bytes` | Dedicated GPU memory usage in bytes per physical GPU | gauge | `phys` |
| `windows_gpu_adapter_memory_shared_bytes` | Shared GPU memory usage in bytes per physical GPU | gauge | `phys` |
| `windows_gpu_info` | A metric with a constant '1' value labeled with gpu device information. | gauge | `phys`, `physical_device_object_name`, `hardware_id`, `friendly_name`, `description` |
| `windows_gpu_local_adapter_memory_bytes` | Local adapter memory usage in bytes per physical GPU | gauge | `phys` |
| `windows_gpu_non_local_adapter_memory_bytes` | Non-local adapter memory usage in bytes per physical GPU | gauge | `phys` |
| Name | Description | Type | Labels |
|--------------------------------------------------|------------------------------------------------------------------------------------|-------|-----------------------------------------------------------------|
| `windows_gpu_info` | A metric with a constant '1' value labeled with gpu device information. | gauge | `bus_number`,`device_id`,`function_number`,`luid`,`name`,`phys` |
| `windows_gpu_dedicated_system_memory_size_bytes` | The size, in bytes, of memory that is dedicated from system memory. | gauge | `device_id`,`luid` |
| `windows_gpu_dedicated_video_memory_size_bytes` | The size, in bytes, of memory that is dedicated from video memory. | gauge | `device_id`,`luid` |
| `windows_gpu_shared_system_memory_size_bytes` | The size, in bytes, of memory from system memory that can be shared by many users. | gauge | `device_id`,`luid` |
| `windows_gpu_adapter_memory_committed_bytes` | Total committed GPU memory in bytes per physical GPU | gauge | `device_id`,`luid`,`phys` |
| `windows_gpu_adapter_memory_dedicated_bytes` | Dedicated GPU memory usage in bytes per physical GPU | gauge | `device_id`,`luid`,`phys` |
| `windows_gpu_adapter_memory_shared_bytes` | Shared GPU memory usage in bytes per physical GPU | gauge | `device_id`,`luid`,`phys` |
| `windows_gpu_local_adapter_memory_bytes` | Local adapter memory usage in bytes per physical GPU | gauge | `device_id`,`luid`,`phys`,`part` |
| `windows_gpu_non_local_adapter_memory_bytes` | Non-local adapter memory usage in bytes per physical GPU | gauge | `device_id`,`luid`,`phys`,`part` |
### Per-process Metrics
| Name | Description | Type | Labels |
|----------------------------------------------|-------------------------------------------------------------------------|---------|--------------------------------------------------------------------------------------|
| `windows_gpu_engine_time_seconds` | Total running time of the GPU engine in seconds | counter | `phys`, `eng`, `engtype`, `process_id` |
| `windows_gpu_process_memory_committed_bytes` | Total committed GPU memory in bytes per process | gauge | `phys`,`process_id` |
| `windows_gpu_process_memory_dedicated_bytes` | Dedicated GPU memory usage in bytes per process | gauge | `phys`,`process_id` |
| `windows_gpu_process_memory_local_bytes` | Local GPU memory usage in bytes per process | gauge | `phys`,`process_id` |
| `windows_gpu_process_memory_non_local_bytes` | Non-local GPU memory usage in bytes per process | gauge | `phys`,`process_id` |
| `windows_gpu_process_memory_shared_bytes` | Shared GPU memory usage in bytes per process | gauge | `phys`,`process_id` |
| Name | Description | Type | Labels |
|----------------------------------------------|-------------------------------------------------|---------|-----------------------------------------------------------|
| `windows_gpu_engine_time_seconds` | Total running time of the GPU engine in seconds | counter | `device_id`,`luid`,`phys`, `eng`, `engtype`, `process_id` |
| `windows_gpu_process_memory_committed_bytes` | Total committed GPU memory in bytes per process | gauge | `device_id`,`luid`,`phys`,`process_id` |
| `windows_gpu_process_memory_dedicated_bytes` | Dedicated GPU memory usage in bytes per process | gauge | `device_id`,`luid`,`phys`,`process_id` |
| `windows_gpu_process_memory_local_bytes` | Local GPU memory usage in bytes per process | gauge | `device_id`,`luid`,`phys`,`process_id` |
| `windows_gpu_process_memory_non_local_bytes` | Non-local GPU memory usage in bytes per process | gauge | `device_id`,`luid`,`phys`,`process_id` |
| `windows_gpu_process_memory_shared_bytes` | Shared GPU memory usage in bytes per process | gauge | `device_id`,`luid`,`phys`,`process_id` |
## Metric Labels
* `phys`: Physical GPU index (e.g., "0")
* `luid`: Locally unique identifier (LUID) of the GPU adapter
* `phys`: Physical GPU index (e.g., "0")
* `eng`: GPU engine index (e.g., "0", "1", ...)
* `engtype`: GPU engine type (e.g., "3D", "Copy", "VideoDecode", etc.)
* `process_id`: Process ID
@@ -54,7 +57,7 @@ These are basic queries to help you get started with GPU monitoring on Windows u
**Show GPU information for a specific physical GPU (0):**
```promql
windows_gpu_info{description="NVIDIA GeForce GTX 1070",friendly_name="",hardware_id="PCI\\VEN_10DE&DEV_1B81&SUBSYS_61733842&REV_A1",phys="0",physical_device_object_name="\\Device\\NTPNP_PCI0027"} 1
windows_gpu_info{bus_number="8",device_id="PCI\\VEN_10DE&DEV_1B81&SUBSYS_61733842&REV_A1",function_number="0",luid="0x00000000_0x00010F8A",name="NVIDIA GeForce GTX 1070",phys="0"} 1
```
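As a sketch building on the table above (not part of this change), approximate per-engine-type utilization of GPU 0 could be derived from the engine time counter:

```promql
sum by (phys, engtype) (rate(windows_gpu_engine_time_seconds{phys="0"}[5m]))
```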
**Show total dedicated GPU memory (in bytes) usage on GPU 0:**

View File

@@ -130,6 +130,10 @@ If given, an application needs to *not* match the exclude regexp in order for th
| `windows_iis_server_output_cache_hits_total` | Total number of successful lookups in output cache (since service startup) | counter | None |
| `windows_iis_server_output_cache_items_flushed_total` | Total number of items flushed from output cache (since service startup) | counter | None |
| `windows_iis_server_output_cache_flushes_total` | Total number of flushes of output cache (since service startup) | counter | None |
| `windows_iis_http_requests_current_queue_size` | Current HTTP request queue size | counter | None |
| `windows_iis_http_request_total_rejected_request` | Total rejected HTTP requests | counter | None |
| `windows_iis_http_requests_max_queue_item_age` | Maximum age of an item in the HTTP request queue | counter | None |
| `windows_iis_http_requests_arrival_rate` | HTTP request arrival rate | counter | None |
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

View File

@@ -2,12 +2,12 @@
The logical_disk collector exposes metrics about logical disks (in contrast to physical disks)
|||
-|-
Metric name prefix | `logical_disk`
Data source | Perflib
Counters | `LogicalDisk` ([`Win32_PerfRawData_PerfDisk_LogicalDisk`](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71)))
Enabled by default? | Yes
| | |
|---------------------|------------------|
| Metric name prefix | `logical_disk` |
| Data source | Performance Data |
| Counters | `LogicalDisk` |
| Enabled by default? | Yes |
## Flags
@@ -19,25 +19,30 @@ If given, a disk needs to match the include regexp in order for the correspondin
If given, a disk needs to *not* match the exclude regexp in order for the corresponding disk metrics to be reported
### `--collector.logical_disk.enabled`
Comma-separated list of collectors to use. Available collectors: metrics, bitlocker_status. Defaults to metrics, if not specified.
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_logical_disk_info` | A metric with a constant '1' value labeled with logical disk information | gauge | `disk`,`filesystem`,`serial_number`,`volume`,`volume_name`,`type`
`windows_logical_disk_requests_queued` | Number of requests outstanding on the disk at the time the performance data is collected | gauge | `volume`
`windows_logical_disk_avg_read_requests_queued` | Average number of read requests that were queued for the selected disk during the sample interval | gauge | `volume`
`windows_logical_disk_avg_write_requests_queued` | Average number of write requests that were queued for the selected disk during the sample interval | gauge | `volume`
`windows_logical_disk_read_bytes_total` | Rate at which bytes are transferred from the disk during read operations | counter | `volume`
`windows_logical_disk_reads_total` | Rate of read operations on the disk | counter | `volume`
`windows_logical_disk_write_bytes_total` | Rate at which bytes are transferred to the disk during write operations | counter | `volume`
`windows_logical_disk_writes_total` | Rate of write operations on the disk | counter | `volume`
`windows_logical_disk_read_seconds_total` | Seconds the disk was busy servicing read requests | counter | `volume`
`windows_logical_disk_write_seconds_total` | Seconds the disk was busy servicing write requests | counter | `volume`
`windows_logical_disk_free_bytes` | Unused space of the disk in bytes (not real time, updates every 10-15 min) | gauge | `volume`
`windows_logical_disk_size_bytes` | Total size of the disk in bytes (not real time, updates every 10-15 min) | gauge | `volume`
`windows_logical_disk_idle_seconds_total` | Seconds the disk was idle (not servicing read/write requests) | counter | `volume`
`windows_logical_disk_split_ios_total` | Number of I/Os to the disk split into multiple I/Os | counter | `volume`
`windows_logical_disk_readonly` | Whether the logical disk is read-only | gauge | `volume`
| Name | Description | Type | Labels |
|--------------------------------------------------|----------------------------------------------------------------------------------------------------|---------|-------------------------------------------------------------------|
| `windows_logical_disk_info` | A metric with a constant '1' value labeled with logical disk information | gauge | `disk`,`filesystem`,`serial_number`,`volume`,`volume_name`,`type` |
| `windows_logical_disk_requests_queued` | Number of requests outstanding on the disk at the time the performance data is collected | gauge | `volume` |
| `windows_logical_disk_avg_read_requests_queued` | Average number of read requests that were queued for the selected disk during the sample interval | gauge | `volume` |
| `windows_logical_disk_avg_write_requests_queued` | Average number of write requests that were queued for the selected disk during the sample interval | gauge | `volume` |
| `windows_logical_disk_read_bytes_total` | Rate at which bytes are transferred from the disk during read operations | counter | `volume` |
| `windows_logical_disk_reads_total` | Rate of read operations on the disk | counter | `volume` |
| `windows_logical_disk_write_bytes_total` | Rate at which bytes are transferred to the disk during write operations | counter | `volume` |
| `windows_logical_disk_writes_total` | Rate of write operations on the disk | counter | `volume` |
| `windows_logical_disk_read_seconds_total` | Seconds the disk was busy servicing read requests | counter | `volume` |
| `windows_logical_disk_write_seconds_total` | Seconds the disk was busy servicing write requests | counter | `volume` |
| `windows_logical_disk_free_bytes` | Unused space of the disk in bytes (not real time, updates every 10-15 min) | gauge | `volume` |
| `windows_logical_disk_size_bytes` | Total size of the disk in bytes (not real time, updates every 10-15 min) | gauge | `volume` |
| `windows_logical_disk_idle_seconds_total` | Seconds the disk was idle (not servicing read/write requests) | counter | `volume` |
| `windows_logical_disk_split_ios_total` | Number of I/Os to the disk split into multiple I/Os | counter | `volume` |
| `windows_logical_disk_readonly` | Whether the logical disk is read-only | gauge | `volume` |
| `windows_logical_disk_bitlocker_status` | BitLocker status for the logical disk | gauge | `volume`,`status` |
### Warning about size metrics
The `free_bytes` and `size_bytes` metrics are not updated in real time and might have a delay of 10-15min.
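As an illustrative query, the percentage of free space per volume can be derived from these two gauges:

```
100 * windows_logical_disk_free_bytes / windows_logical_disk_size_bytes
```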

View File

@@ -109,7 +109,7 @@ Matching is case-sensitive.
| `mscluster_network_Role` | Provides access to the network's Role property. The Role property describes the role of the network in the cluster. 0: None; 1: Cluster; 2: Client; 3: Both | gauge | `name` |
| `mscluster_network_State` | Provides the current state of the network. 1-1: Unknown; 0: Unavailable; 1: Down; 2: Partitioned; 3: Up | gauge | `name` |
### Network
### Node
| Name | Description | Type | Labels |
|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------|--------|

View File

@@ -72,18 +72,18 @@ Comma-separated list of collectors to use. Defaults to all, if not specified.
| Name | Description | Type | Labels |
|----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|-----------|
| `windows_netframework_clrmemory_allocated_bytes_total` | Displays the total number of bytes allocated on the garbage collection heap. | counter | `process` |
| `windows_netframework_clrmemory_finalization_survivors` | Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized. | gauge | `process` |
| `windows_netframework_clrmemory_heap_size_bytes` | Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated. | gauge | `process` |
| `windows_netframework_clrmemory_promoted_bytes` | Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection. | gauge | `process` |
| `windows_netframework_clrmemory_number_gc_handles` | Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment. | gauge | `process` |
| `windows_netframework_clrmemory_collections_total` | Displays the number of times the generation objects are garbage collected since the application started. | counter | `process` |
| `windows_netframework_clrmemory_induced_gc_total` | Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect. | counter | `process` |
| `windows_netframework_clrmemory_number_pinned_objects` | Displays the number of pinned objects encountered in the last garbage collection. | gauge | `process` |
| `windows_netframework_clrmemory_number_sink_blocksinuse` | Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector. | gauge | `process` |
| `windows_netframework_clrmemory_committed_bytes` | Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file. | gauge | `process` |
| `windows_netframework_clrmemory_reserved_bytes` | Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used. | gauge | `process` |
| `windows_netframework_clrmemory_gc_time_percent` | Displays the percentage of time that was spent performing a garbage collection in the last sample. | gauge | `process` |
| `windows_netframework_clrmemory_allocated_bytes_total` | Displays the total number of bytes allocated on the garbage collection heap. | counter | `process`, `process_id` |
| `windows_netframework_clrmemory_finalization_survivors` | Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_heap_size_bytes` | Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_promoted_bytes` | Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_number_gc_handles` | Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_collections_total` | Displays the number of times the generation objects are garbage collected since the application started. | counter | `process`, `process_id` |
| `windows_netframework_clrmemory_induced_gc_total` | Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect. | counter | `process`, `process_id` |
| `windows_netframework_clrmemory_number_pinned_objects` | Displays the number of pinned objects encountered in the last garbage collection. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_number_sink_blocksinuse` | Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_committed_bytes` | Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_reserved_bytes` | Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_gc_time_percent` | Displays the percentage of time that was spent performing a garbage collection in the last sample. | gauge | `process`, `process_id` |
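For example (illustrative only), the five largest .NET garbage-collection heaps across processes could be listed with:

```
topk(5, windows_netframework_clrmemory_heap_size_bytes)
```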
### CLR Remoting

View File

@@ -14,10 +14,11 @@ None
## Metrics
| Name | Description | Type | Labels |
|-----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|-------|------------------------------------------------------------------------|
| `windows_os_hostname` | Labelled system hostname information as provided by ComputerSystem.DNSHostName and ComputerSystem.Domain | gauge | `domain`, `fqdn`, `hostname` |
| `windows_os_info` | Contains full product name & version in labels. Note that the `major_version` for Windows 11 is "10"; a build number greater than 22000 represents Windows 11. | gauge | `product`, `version`, `major_version`, `minor_version`, `build_number` |
| Name | Description | Type | Labels |
|----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|-------|-----------------------------------------------------------------------------------------------------------------|
| `windows_os_hostname` | Labelled system hostname information as provided by ComputerSystem.DNSHostName and ComputerSystem.Domain | gauge | `domain`, `fqdn`, `hostname` |
| `windows_os_info` | Contains full product name & version in labels. Note that the `major_version` for Windows 11 is "10"; a build number greater than 22000 represents Windows 11. | gauge | `product`, `version`, `major_version`, `minor_version`, `build_number`, `revision`, `installation_type` |
| `windows_os_install_time_timestamp_seconds` | Unix timestamp of OS installation time | gauge | None |
### Example metric
@@ -27,7 +28,10 @@ None
windows_os_hostname{domain="",fqdn="PC",hostname="PC"} 1
# HELP windows_os_info Contains full product name & version in labels. Note that the "major_version" for Windows 11 is \\"10\\"; a build number greater than 22000 represents Windows 11.
# TYPE windows_os_info gauge
windows_os_info{build_number="19045",major_version="10",minor_version="0",product="Windows 10 Pro",revision="4842",version="10.0.19045"} 1
windows_os_info{build_number="19045",installation_type="Client",major_version="10",minor_version="0",product="Windows 10 Pro",revision="4842",version="10.0.19045"} 1
# HELP windows_os_install_time_timestamp_seconds Unix timestamp of OS installation time
# TYPE windows_os_install_time_timestamp_seconds gauge
windows_os_install_time_timestamp_seconds 1.6725312e+09
```
## Useful queries
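A simple derived query (sketch): the age of the OS installation, in seconds.

```
time() - windows_os_install_time_timestamp_seconds
```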

View File

@@ -260,3 +260,27 @@ collector:
The performancecounter collector returns metrics based on the user configuration.
The metrics are named based on the object name and the counter name.
The instance name is added as a label to the metric.
# Examples
## thermalzone collector
```yaml
collector:
performancecounter:
objects: |-
- name: thermalzone
object: "Thermal Zone Information"
instances: ["*"]
type: formatted
counters:
- name: "Temperature"
type: "gauge"
metric: windows_thermalzone_temperature_celsius
- name: "% Passive Limit"
type: "gauge"
metric: windows_thermalzone_percent_passive_limit
- name: "Throttle Reasons"
type: "gauge"
metric: windows_thermalzone_throttle_reasons
```

View File

@@ -42,6 +42,11 @@ Disabled by default, and can be enabled with `--collector.process.iis`. NOTE: Ju
Version of the process collector to use. 1 for Process V1, 2 for Process V2.
Defaults to 0 which will use the latest version available.
### `--collector.process.cmdline`
Enables the `cmdline` label for the process metrics.
This label contains the command line used to start the process.
Enabled by default, and can be turned off with `--no-collector.process.cmdline`.
### Example
To match all firefox processes: `--collector.process.include="firefox.*"`.

View File

@@ -10,13 +10,8 @@ Enabled by default? | No
## Flags
### `--collector.textfile.directory`
:warning: DEPRECATED Use `--collector.textfile.directories`
<br>
### `--collector.textfile.directories`
One or multiple directories containing the files to be ingested.
One or multiple directories containing the files to be ingested.
E.g. `--collector.textfile.directories="C:\MyDir1,C:\MyDir2"`
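For illustration, a file dropped into one of these directories is expected to contain standard Prometheus text-format samples; the metric below is hypothetical:

```
# HELP example_backup_last_success_timestamp_seconds Last successful backup (hypothetical metric).
# TYPE example_backup_last_success_timestamp_seconds gauge
example_backup_last_success_timestamp_seconds 1.726434517e+09
```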

View File

@@ -1,32 +0,0 @@
# thermalzone collector
The thermalzone collector exposes metrics about system temps. Note that temperature is given in Kelvin
|||
-|-
Metric name prefix | `thermalzone`
Classes | [`Win32_PerfRawData_Counters_ThermalZoneInformation`](https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_thermalzoneinformation/#temperature_properties)
Enabled by default? | No
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_thermalzone_percent_passive_limit` | % Passive Limit is the current limit this thermal zone is placing on the devices it controls. A limit of 100% indicates the devices are unconstrained. A limit of 0% indicates the devices are fully constrained. | gauge | None
`windows_thermalzone_temperature_celsius ` | Temperature of the thermal zone, in degrees Celsius. | gauge | None
`windows_thermalzone_throttle_reasons ` | Throttle Reasons indicate reasons why the thermal zone is limiting performance of the devices it controls. 0x0 - The zone is not throttled. 0x1 - The zone is throttled for thermal reasons. 0x2 - The zone is throttled to limit electrical current. | gauge | None
[`Throttle reasons` source](https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/examples--requirements-and-diagnostics)
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -30,7 +30,7 @@ Matching is case-sensitive.
| `windows_time_ntp_round_trip_delay_seconds` | Total roundtrip delay experienced by the NTP client in receiving a response from the server for the most recent request, in seconds. This is the time elapsed on the NTP client between transmitting a request to the NTP server and receiving a valid response from the server. | gauge | None |
| `windows_time_ntp_server_outgoing_responses_total` | Total number of requests responded to by the NTP server. | counter | None |
| `windows_time_ntp_server_incoming_requests_total` | Total number of requests received by the NTP server. | counter | None |
| `windows_time_current_timestamp_seconds` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.Unix()](https://golang.org/pkg/time/#Unix) for details | gauge | None |
| `windows_time_current_timestamp_seconds` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.UnixMicro()](https://golang.org/pkg/time/#UnixMicro) for details | gauge | None |
| `windows_time_timezone` | Current timezone as reported by the operating system. | gauge | `timezone` |
| `windows_time_clock_sync_source` | This value reflects the sync source of the system clock. | gauge | `type` |

View File

@@ -14,14 +14,11 @@ The Windows Update service is responsible for managing the installation of updat
## Flags
> [!NOTE]
> The collector name used in the CLI flags is `updates`, while the metric prefix is `update`. This naming mismatch is known and intentional for compatibility reasons.
### `--collector.update.online`
Whether to search for updates online. If set to `false` via `--no-collector.update.online`, the collector will only list updates that are already found by the Windows Update service.
Set to `true` via `--collector.update.online` to search for updates online, which will take longer to complete.
### `--collector.updates.online`
Whether to search for updates online. If set to `false`, the collector will only list updates that are already found by the Windows Update service.
Set to `true` to search for updates online, which will take longer to complete.
### `--collector.updates.scrape-interval`
### `--collector.update.scrape-interval`
Define the interval of scraping Windows Update information
## Metrics

View File

@@ -1,7 +1,7 @@
---
# Note this is not an exhaustive list of all configuration values
collectors:
enabled: cpu,cs,logical_disk,net,os,service,system
enabled: cpu,logical_disk,net,os,service,system
collector:
service:
include: "windows_exporter"
@@ -13,6 +13,5 @@ scrape:
timeout-margin: 0.5
telemetry:
path: /metrics
max-requests: 5
web:
listen-address: ":9182"

38
go.mod
View File

@@ -1,40 +1,44 @@
module github.com/prometheus-community/windows_exporter
go 1.24
go 1.25
require (
github.com/alecthomas/kingpin/v2 v2.4.0
github.com/bmatcuk/doublestar/v4 v4.8.1
github.com/bmatcuk/doublestar/v4 v4.10.0
github.com/dimchansky/utfbom v1.1.1
github.com/go-ole/go-ole v1.3.0
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_golang v1.23.2
github.com/prometheus/client_model v0.6.2
github.com/prometheus/common v0.64.0
github.com/prometheus/exporter-toolkit v0.14.0
github.com/stretchr/testify v1.10.0
golang.org/x/sys v0.33.0
gopkg.in/yaml.v3 v3.0.1
github.com/prometheus/common v0.67.5
github.com/prometheus/exporter-toolkit v0.15.1
github.com/stretchr/testify v1.11.1
go.yaml.in/yaml/v3 v3.0.4
golang.org/x/sys v0.40.0
)
require (
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/coreos/go-systemd/v22 v22.7.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/mdlayher/socket v0.5.1 // indirect
github.com/mdlayher/vsock v1.2.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/prometheus/procfs v0.19.2 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
golang.org/x/crypto v0.38.0 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sync v0.14.0 // indirect
golang.org/x/text v0.25.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
golang.org/x/crypto v0.47.0 // indirect
golang.org/x/net v0.49.0 // indirect
golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/text v0.33.0 // indirect
golang.org/x/time v0.14.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

71
go.sum
View File

@@ -4,12 +4,12 @@ github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vS
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38=
github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bmatcuk/doublestar/v4 v4.10.0 h1:zU9WiOla1YA122oLM6i4EXvGW62DvKZVxIe6TYWexEs=
github.com/bmatcuk/doublestar/v4 v4.10.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA=
github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -17,9 +17,12 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
@@ -41,16 +44,16 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/exporter-toolkit v0.15.1 h1:XrGGr/qWl8Gd+pqJqTkNLww9eG8vR/CoRk0FubOKfLE=
github.com/prometheus/exporter-toolkit v0.15.1/go.mod h1:P/NR9qFRGbCFgpklyhix9F6v6fFr/VQB/CVsrMDGKo4=
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -61,30 +64,36 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -21,6 +21,14 @@
<DirectoryRef Id="APPLICATIONFOLDER">
<Component Transitive="yes">
<File Id="windows_exporter.exe" Name="windows_exporter.exe" Source="Work\windows_exporter.exe" KeyPath="yes" Vital="yes" Checksum="yes"/>
<!-- The "Name" field must match the argument to eventlog.Open() -->
<util:EventSource Log="Application" Name="windows_exporter"
EventMessageFile="%SystemRoot%\System32\EventCreate.exe"
SupportsErrors="yes"
SupportsInformationals="yes"
SupportsWarnings="yes"/>
<ServiceInstall
Id="InstallExporterService"
Name="windows_exporter"
@@ -45,13 +53,8 @@
/>
<ServiceDependency Id="wmiApSrv" />
</ServiceInstall>
<ServiceControl Id="ServiceStateControl" Name="windows_exporter" Remove="uninstall" Start="install" Stop="both"/>
<!-- The "Name" field must match the argument to eventlog.Open() -->
<util:EventSource Log="Application" Name="windows_exporter"
EventMessageFile="%SystemRoot%\System32\EventCreate.exe"
SupportsErrors="yes"
SupportsInformationals="yes"
SupportsWarnings="yes"/>
<ServiceControl Id="StartService" Name="windows_exporter" Start="install" Wait="no" />
<ServiceControl Id="StopService" Name="windows_exporter" Remove="uninstall" Stop="both" Wait="yes" />
</Component>
<Component Id="CreateTextfileDirectory" Directory="textfile_inputs" Guid="d03ef58a-9cbf-4165-ad39-d143e9b27e14">
<CreateFolder />

BIN
installer/icon.ico Normal file


View File

@@ -45,6 +45,45 @@
Property="OLDERVERSIONBEINGUPGRADED" />
</Upgrade>
<Media Id="1" Cabinet="windows_exporter.cab" EmbedCab="yes" />
<MajorUpgrade Schedule="afterInstallInitialize" DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit." AllowSameVersionUpgrades="yes" />
<Property Id="ENABLED_COLLECTORS" Secure="yes" />
<SetProperty Id="CollectorsFlag" After="InstallFiles" Sequence="execute" Value="--collectors.enabled [ENABLED_COLLECTORS]" Condition="ENABLED_COLLECTORS" />
<Property Id="EXTRA_FLAGS" Secure="yes" />
<SetProperty Id="ExtraFlags" After="InstallFiles" Sequence="execute" Value="[EXTRA_FLAGS]" Condition="EXTRA_FLAGS" />
<Property Id="CONFIG_FILE" Secure="yes" Value="config.yaml" />
<SetProperty Id="ConfigFile_NonDefault" After="InstallFiles" Sequence="execute" Value="[CONFIG_FILE]" Condition="CONFIG_FILE AND CONFIG_FILE&lt;&gt;&quot;config.yaml&quot;" />
<SetProperty Id="ConfigFile_Default" After="InstallFiles" Sequence="execute" Value="[APPLICATIONFOLDER]config.yaml" Condition="CONFIG_FILE=&quot;config.yaml&quot;" />
<SetProperty Id="ConfigFileFlag" After="InstallFiles" Sequence="execute" Value="--config.file=&quot;[ConfigFile_NonDefault][ConfigFile_Default]&quot;" Condition="ConfigFile_NonDefault OR ConfigFile_Default" />
<Property Id="LISTEN_PORT" Secure="yes" Value="9182" />
<SetProperty Id="ListenFlag" After="InstallFiles" Sequence="execute" Value="--web.listen-address=&quot;[LISTEN_ADDR]:[LISTEN_PORT]&quot;" Condition="LISTEN_ADDR&lt;&gt;&quot;&quot; OR LISTEN_PORT&lt;&gt;9182" />
<Property Id="METRICS_PATH" Secure="yes" />
<SetProperty Id="MetricsPathFlag" After="InstallFiles" Sequence="execute" Value="--telemetry.path=&quot;[METRICS_PATH]&quot;" Condition="METRICS_PATH" />
<Property Id="REMOTE_ADDR" Secure="yes" />
<SetProperty Id="RemoteAddressFlag" After="InstallFiles" Sequence="execute" Value="[REMOTE_ADDR]" Condition="REMOTE_ADDR" />
<Property Id="TEXTFILE_DIRS" Secure="yes" />
<SetProperty Id="TextfileDirsFlag" After="InstallFiles" Sequence="execute" Value="--collector.textfile.directories=&quot;[TEXTFILE_DIRS]&quot;" Condition="TEXTFILE_DIRS" />
<!-- Configuration for how the installer shows in Add/Remove Programs. -->
<Icon Id="icon.ico" SourceFile=".\icon.ico"/>
<Property Id="ARPPRODUCTICON" Value="icon.ico" />
<Property Id="ARPHELPLINK" Value="https://github.com/prometheus-community/windows_exporter/issues" />
<Property Id="ARPURLINFOABOUT" Value="https://github.com/prometheus-community/windows_exporter" />
<Property Id="ARPCOMMENTS" Value="Prometheus exporter for Windows machines" />
<!-- Disable the repair option; the functionality is available through Change instead. -->
<Property Id="ARPNOREPAIR" Value="1" />
<Property Id="ARPSIZE" Value="10000" />
<Property Id="START_MENU_FOLDER" Value="0" />
<Property Id="NOSTART" Value="0" />
<CustomAction Id="CheckExtraFlags"
Error="The parameter '--config.file' must not be included in EXTRA_FLAGS. Use CONFIG_FILE instead. Please remove it and try again." />
@@ -93,6 +132,23 @@
/>
<!-- END CUSTOM ACTION FOR KILLING THE PROCESS -->
<!-- START CUSTOM ACTION FOR SET SERVICE FAILUREFLAG -->
<SetProperty
Id="ConfigureServiceRecovery"
Value="&quot;[WindowsFolder]System32\sc.exe&quot; failureflag &quot;windows_exporter&quot; 1"
Before="ConfigureServiceRecovery"
Sequence="execute"
/>
<CustomAction
Id="ConfigureServiceRecovery"
BinaryRef="Wix4UtilCA_$(sys.BUILDARCHSHORT)"
DllEntry="WixQuietExec"
Execute="deferred"
Return="ignore"
Impersonate="no"
/>
<!-- END CUSTOM ACTION FOR SET SERVICE FAILUREFLAG -->
<InstallExecuteSequence>
<!-- Set REINSTALL=all and REINSTALLMODE=amus if the user reruns the
MSI, which will force reinstalling all files and services. -->
@@ -100,47 +156,14 @@
Condition="Installed AND (NOT REMOVE) AND (NOT UPGRADINGPRODUCTCODE)"/>
<Custom Action="set_reinstall_all_property" Before="set_reinstallmode_property" Condition="MAINTENANCE"/>
<Custom Action="set_reinstallmode_property" Before="LaunchConditions" Condition="MAINTENANCE"/>
<Custom Action="CreateConfigFile" Before="InstallServices" Condition="ConfigFile_NonDefault OR ConfigFile_Default" />
<Custom Action="CreateConfigFile" Before="InstallServices" Condition="ConfigFile_Default" />
<Custom Action="ConfigureServiceRecovery" After="InstallServices" Condition="NOT REMOVE" />
<Custom Action="KillProcess" Before="RemoveFiles" />
<Custom Action="CheckExtraFlags" Before="InstallInitialize"
Condition="EXTRA_FLAGS AND (EXTRA_FLAGS&gt;&lt;&quot;--config.file&quot;)" />
</InstallExecuteSequence>
<Media Id="1" Cabinet="windows_exporter.cab" EmbedCab="yes" />
<MajorUpgrade Schedule="afterInstallInitialize" DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit." AllowSameVersionUpgrades="yes" />
<Property Id="ENABLED_COLLECTORS" Secure="yes" />
<SetProperty Id="CollectorsFlag" After="InstallFiles" Sequence="execute" Value="--collectors.enabled [ENABLED_COLLECTORS]" Condition="ENABLED_COLLECTORS" />
<Property Id="EXTRA_FLAGS" Secure="yes" />
<SetProperty Id="ExtraFlags" After="InstallFiles" Sequence="execute" Value="[EXTRA_FLAGS]" Condition="EXTRA_FLAGS" />
<Property Id="CONFIG_FILE" Secure="yes" Value="config.yaml" />
<SetProperty Id="ConfigFile_NonDefault" After="InstallFiles" Sequence="execute" Value="[CONFIG_FILE]" Condition="CONFIG_FILE AND CONFIG_FILE&lt;&gt;&quot;config.yaml&quot;" />
<SetProperty Id="ConfigFile_Default" After="InstallFiles" Sequence="execute" Value="[APPLICATIONFOLDER]config.yaml" Condition="CONFIG_FILE=&quot;config.yaml&quot;" />
<SetProperty Id="ConfigFileFlag" After="InstallFiles" Sequence="execute" Value="--config.file=&quot;[ConfigFile_NonDefault][ConfigFile_Default]&quot;" Condition="ConfigFile_NonDefault OR ConfigFile_Default" />
<Property Id="LISTEN_PORT" Secure="yes" Value="9182" />
<SetProperty Id="ListenFlag" After="InstallFiles" Sequence="execute" Value="--web.listen-address [LISTEN_ADDR]:[LISTEN_PORT]" Condition="LISTEN_ADDR&lt;&gt;&quot;&quot; OR LISTEN_PORT&lt;&gt;9182" />
<Property Id="METRICS_PATH" Secure="yes" />
<SetProperty Id="MetricsPathFlag" After="InstallFiles" Sequence="execute" Value="--telemetry.path [METRICS_PATH]" Condition="METRICS_PATH" />
<Property Id="REMOTE_ADDR" Secure="yes" />
<SetProperty Id="RemoteAddressFlag" After="InstallFiles" Sequence="execute" Value="[REMOTE_ADDR]" Condition="REMOTE_ADDR" />
<Property Id="TEXTFILE_DIRS" Secure="yes" />
<SetProperty Id="TextfileDirsFlag" After="InstallFiles" Sequence="execute" Value="--collector.textfile.directories [TEXTFILE_DIRS]" Condition="TEXTFILE_DIRS" />
<Property Id="ARPHELPLINK" Value="https://github.com/prometheus-community/windows_exporter/issues" />
<Property Id="ARPSIZE" Value="9000" />
<Property Id="ARPURLINFOABOUT" Value="https://github.com/prometheus-community/windows_exporter" />
<!--<Property Id="ARPNOMODIFY" Value="0" />-->
<!--<Property Id="ARPNOREPAIR" Value="1" />-->
<Property Id="START_MENU_FOLDER" Value="0" />
<Property Id="NOSTART" Value="0" />
<Feature
Id="DefaultFeature"
Level="1"
@@ -178,7 +201,7 @@
<Control Id="Title" Type="Text" X="15" Y="6" Width="210" Height="15" Transparent="yes" NoPrefix="yes" Text="{\WixUI_Font_Title}windows_exporter configuration" />
<!-- Edit box for property input -->
<!-- cpu,cs,logical_disk,physical_disk,net,os,service,system -->
<!-- cpu,logical_disk,physical_disk,net,os,service,system -->
<Control Id="PropertyEdit_ENABLED_COLLECTORS_Title1" Type="Text" X="25" Y="55" Width="300" Height="15" Transparent="yes" NoPrefix="yes" Text="Comma-separated list of collectors to use. Use '[\[]defaults[\]]' as a placeholder for all" />
<Control Id="PropertyEdit_ENABLED_COLLECTORS_Title2" Type="Text" X="25" Y="65" Width="300" Height="15" Transparent="yes" NoPrefix="yes" Text="the collectors enabled by default. If value is empty, the exporter default will be used." />
<Control Id="PropertyEdit_ENABLED_COLLECTORS" Type="Edit" X="24" Y="77" Width="300" Height="18" Property="ENABLED_COLLECTORS" Text="[ENABLED_COLLECTORS]" Indirect="no" />

View File

@@ -131,7 +131,7 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.addressBookOperationsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "address_book_operations_total"),
"",
@@ -508,7 +508,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
}
@@ -522,6 +522,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(
@@ -530,30 +532,35 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].AbANRPerSec,
"ambiguous_name_resolution",
)
ch <- prometheus.MustNewConstMetric(
c.addressBookOperationsTotal,
prometheus.CounterValue,
c.perfDataObject[0].AbBrowsesPerSec,
"browse",
)
ch <- prometheus.MustNewConstMetric(
c.addressBookOperationsTotal,
prometheus.CounterValue,
c.perfDataObject[0].AbMatchesPerSec,
"find",
)
ch <- prometheus.MustNewConstMetric(
c.addressBookOperationsTotal,
prometheus.CounterValue,
c.perfDataObject[0].AbPropertyReadsPerSec,
"property_read",
)
ch <- prometheus.MustNewConstMetric(
c.addressBookOperationsTotal,
prometheus.CounterValue,
c.perfDataObject[0].AbSearchesPerSec,
"search",
)
ch <- prometheus.MustNewConstMetric(
c.addressBookOperationsTotal,
prometheus.CounterValue,
@@ -578,22 +585,26 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.GaugeValue,
c.perfDataObject[0].AtqEstimatedQueueDelay/1000,
)
ch <- prometheus.MustNewConstMetric(
c.atqOutstandingRequests,
prometheus.GaugeValue,
c.perfDataObject[0].AtqOutstandingQueuedRequests,
)
ch <- prometheus.MustNewConstMetric(
c.atqAverageRequestLatency,
prometheus.GaugeValue,
c.perfDataObject[0].AtqRequestLatency,
)
ch <- prometheus.MustNewConstMetric(
c.atqCurrentThreads,
prometheus.GaugeValue,
c.perfDataObject[0].AtqThreadsLDAP,
"ldap",
)
ch <- prometheus.MustNewConstMetric(
c.atqCurrentThreads,
prometheus.GaugeValue,
@@ -607,12 +618,14 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].BaseSearchesPerSec,
"base",
)
ch <- prometheus.MustNewConstMetric(
c.searchesTotal,
prometheus.CounterValue,
c.perfDataObject[0].SubtreeSearchesPerSec,
"subtree",
)
ch <- prometheus.MustNewConstMetric(
c.searchesTotal,
prometheus.CounterValue,
@@ -626,18 +639,21 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].DatabaseAddsPerSec,
"add",
)
ch <- prometheus.MustNewConstMetric(
c.databaseOperationsTotal,
prometheus.CounterValue,
c.perfDataObject[0].DatabaseDeletesPerSec,
"delete",
)
ch <- prometheus.MustNewConstMetric(
c.databaseOperationsTotal,
prometheus.CounterValue,
c.perfDataObject[0].DatabaseModifiesPerSec,
"modify",
)
ch <- prometheus.MustNewConstMetric(
c.databaseOperationsTotal,
prometheus.CounterValue,
@@ -651,48 +667,56 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].DigestBindsPerSec,
"digest",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
c.perfDataObject[0].DsClientBindsPerSec,
"ds_client",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
c.perfDataObject[0].DsServerBindsPerSec,
"ds_server",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
c.perfDataObject[0].ExternalBindsPerSec,
"external",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
c.perfDataObject[0].FastBindsPerSec,
"fast",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
c.perfDataObject[0].NegotiatedBindsPerSec,
"negotiate",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
c.perfDataObject[0].NTLMBindsPerSec,
"ntlm",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
c.perfDataObject[0].SimpleBindsPerSec,
"simple",
)
ch <- prometheus.MustNewConstMetric(
c.bindsTotal,
prometheus.CounterValue,
@@ -706,6 +730,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
float64(uint64(c.perfDataObject[0].DRAHighestUSNCommittedHighPart)<<32)+c.perfDataObject[0].DRAHighestUSNCommittedLowPart,
"committed",
)
ch <- prometheus.MustNewConstMetric(
c.replicationHighestUsn,
prometheus.CounterValue,
@@ -744,6 +769,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].DRAInboundBytesNotCompressedWithinSitePerSec,
"inbound",
)
ch <- prometheus.MustNewConstMetric(
c.intraSiteReplicationDataBytesTotal,
prometheus.CounterValue,
@@ -768,6 +794,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].DRAInboundObjectsAppliedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.replicationInboundObjectsFilteredTotal,
prometheus.CounterValue,
@@ -779,6 +806,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].DRAInboundPropertiesAppliedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.replicationInboundPropertiesFilteredTotal,
prometheus.CounterValue,
@@ -790,6 +818,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.GaugeValue,
c.perfDataObject[0].DRAPendingReplicationOperations,
)
ch <- prometheus.MustNewConstMetric(
c.replicationPendingSynchronizations,
prometheus.GaugeValue,
@@ -801,11 +830,13 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].DRASyncRequestsMade,
)
ch <- prometheus.MustNewConstMetric(
c.replicationSyncRequestsSuccessTotal,
prometheus.CounterValue,
c.perfDataObject[0].DRASyncRequestsSuccessful,
)
ch <- prometheus.MustNewConstMetric(
c.replicationSyncRequestsSchemaMismatchFailureTotal,
prometheus.CounterValue,
@@ -818,6 +849,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].DsClientNameTranslationsPerSec,
"client",
)
ch <- prometheus.MustNewConstMetric(
c.nameTranslationsTotal,
prometheus.CounterValue,
@@ -830,6 +862,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.GaugeValue,
c.perfDataObject[0].DsMonitorListSize,
)
ch <- prometheus.MustNewConstMetric(
c.changeMonitorUpdatesPending,
prometheus.GaugeValue,
@@ -841,6 +874,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].DsNameCacheHitRate,
)
ch <- prometheus.MustNewConstMetric(
c.nameCacheLookupsTotal,
prometheus.CounterValue,
@@ -854,6 +888,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"read",
"replication_agent",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -861,6 +896,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"read",
"knowledge_consistency_checker",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -868,6 +904,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"read",
"local_security_authority",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -875,6 +912,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"read",
"name_service_provider_interface",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -882,6 +920,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"read",
"directory_service_api",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -889,6 +928,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"read",
"security_account_manager",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -896,6 +936,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"read",
"other",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -903,6 +944,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"search",
"replication_agent",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -910,6 +952,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"search",
"knowledge_consistency_checker",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -917,6 +960,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"search",
"ldap",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -924,6 +968,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"search",
"local_security_authority",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -931,6 +976,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"search",
"name_service_provider_interface",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -938,6 +984,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"search",
"directory_service_api",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -945,6 +992,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"search",
"security_account_manager",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -952,6 +1000,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"search",
"other",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -959,6 +1008,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"write",
"replication_agent",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -966,6 +1016,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"write",
"knowledge_consistency_checker",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -973,6 +1024,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"write",
"ldap",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -980,6 +1032,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"write",
"local_security_authority",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -987,6 +1040,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"write",
"name_service_provider_interface",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -994,6 +1048,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"write",
"directory_service_api",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -1001,6 +1056,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
"write",
"security_account_manager",
)
ch <- prometheus.MustNewConstMetric(
c.directoryOperationsTotal,
prometheus.CounterValue,
@@ -1020,16 +1076,19 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].DsSecurityDescriptorSubOperationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.securityDescriptorPropagationEventsQueued,
prometheus.GaugeValue,
c.perfDataObject[0].DsSecurityDescriptorPropagationsEvents,
)
ch <- prometheus.MustNewConstMetric(
c.securityDescriptorPropagationAccessWaitTotalSeconds,
prometheus.GaugeValue,
c.perfDataObject[0].DsSecurityDescriptorPropagatorAverageExclusionTime,
)
ch <- prometheus.MustNewConstMetric(
c.securityDescriptorPropagationItemsQueuedTotal,
prometheus.CounterValue,
@@ -1047,12 +1106,14 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].LdapClosedConnectionsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.ldapOpenedConnectionsTotal,
prometheus.CounterValue,
c.perfDataObject[0].LdapNewConnectionsPerSec,
"ldap",
)
ch <- prometheus.MustNewConstMetric(
c.ldapOpenedConnectionsTotal,
prometheus.CounterValue,
@@ -1083,11 +1144,13 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].LdapUDPOperationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.ldapWritesTotal,
prometheus.CounterValue,
c.perfDataObject[0].LdapWritesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.ldapClientSessions,
prometheus.GaugeValue,
@@ -1105,6 +1168,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].PhantomsCleanedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.phantomObjectsVisitedTotal,
prometheus.CounterValue,
@@ -1117,18 +1181,21 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].SamGlobalGroupMembershipEvaluationsPerSec,
"global",
)
ch <- prometheus.MustNewConstMetric(
c.samGroupMembershipEvaluationsTotal,
prometheus.CounterValue,
c.perfDataObject[0].SamDomainLocalGroupMembershipEvaluationsPerSec,
"domain_local",
)
ch <- prometheus.MustNewConstMetric(
c.samGroupMembershipEvaluationsTotal,
prometheus.CounterValue,
c.perfDataObject[0].SamUniversalGroupMembershipEvaluationsPerSec,
"universal",
)
ch <- prometheus.MustNewConstMetric(
c.samGroupMembershipGlobalCatalogEvaluationsTotal,
prometheus.CounterValue,
@@ -1140,6 +1207,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].SamNonTransitiveMembershipEvaluationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samGroupMembershipEvaluationsTransitiveTotal,
prometheus.CounterValue,
@@ -1152,6 +1220,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].SamAccountGroupEvaluationLatency,
"account_group",
)
ch <- prometheus.MustNewConstMetric(
c.samGroupEvaluationLatency,
prometheus.GaugeValue,
@@ -1164,6 +1233,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].SamSuccessfulComputerCreationsPerSecIncludesAllRequests,
)
ch <- prometheus.MustNewConstMetric(
c.samComputerCreationSuccessfulRequestsTotal,
prometheus.CounterValue,
@@ -1175,6 +1245,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].SamUserCreationAttemptsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samUserCreationSuccessfulRequestsTotal,
prometheus.CounterValue,
@@ -1186,6 +1257,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].SamDisplayInformationQueriesPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.samEnumerationsTotal,
prometheus.CounterValue,
@@ -1209,6 +1281,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].TombstonesGarbageCollectedPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.tombstonesObjectsVisitedTotal,
prometheus.CounterValue,
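
The hunks in this file follow a pattern repeated across the collectors below: Build now receives the *slog.Logger instead of discarding it and hands a collector-scoped logger to pdh.NewCollector, and Collect returns types.ErrNoDataUnexpected when the PDH query yields no instances. A minimal sketch of that shape, with the internal pdh and types packages stubbed out (only the names visible in the diff are reused; everything else is illustrative):

```go
package example

import (
	"errors"
	"fmt"
	"log/slog"
)

// Stand-ins for internal/types.ErrNoDataUnexpected and the internal/pdh
// collector; only the call shape matters for this sketch.
var errNoDataUnexpected = errors.New("no data returned, this is unexpected")

type perfDataCounterValues struct {
	AbANRPerSec float64 // one of the DirectoryServices counters shown above
}

type pdhCollector struct{}

// Collect would fill dst with one entry per PDH instance; stubbed here.
func (p *pdhCollector) Collect(dst *[]perfDataCounterValues) error { return nil }

// newPdhCollector mirrors pdh.NewCollector taking the logger as its first argument.
func newPdhCollector(logger *slog.Logger, object string) (*pdhCollector, error) {
	logger.Debug("creating PDH collector", slog.String("object", object))

	return &pdhCollector{}, nil
}

type Collector struct {
	perfDataCollector *pdhCollector
	perfDataObject    []perfDataCounterValues
}

// Build scopes the logger to the collector name before handing it down.
func (c *Collector) Build(logger *slog.Logger) error {
	var err error

	c.perfDataCollector, err = newPdhCollector(logger.With(slog.String("collector", "ad")), "DirectoryServices")
	if err != nil {
		return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
	}

	return nil
}

// Collect treats an empty result set as an error instead of silently emitting nothing.
func (c *Collector) Collect() error {
	if err := c.perfDataCollector.Collect(&c.perfDataObject); err != nil {
		return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", err)
	} else if len(c.perfDataObject) == 0 {
		return fmt.Errorf("failed to collect DirectoryServices (AD) metrics: %w", errNoDataUnexpected)
	}

	return nil
}
```

Scoping the logger with slog.String("collector", Name) before passing it down presumably lets the PDH layer tag its own warnings with the originating collector; the stubs above only mimic the call shape, not the real internal/pdh behavior.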


@@ -83,7 +83,7 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.requestsPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
"Total certificate requests processed",
@@ -165,7 +165,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll)
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Certification Authority collector: %w", err)
}
@@ -186,72 +186,84 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
data.RequestsPerSecond,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.requestProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(data.RequestProcessingTime),
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.retrievalsPerSecond,
prometheus.CounterValue,
data.RetrievalsPerSecond,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.retrievalProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(data.RetrievalProcessingTime),
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.failedRequestsPerSecond,
prometheus.CounterValue,
data.FailedRequestsPerSecond,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.issuedRequestsPerSecond,
prometheus.CounterValue,
data.IssuedRequestsPerSecond,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.pendingRequestsPerSecond,
prometheus.CounterValue,
data.PendingRequestsPerSecond,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.requestCryptographicSigningTime,
prometheus.GaugeValue,
utils.MilliSecToSec(data.RequestCryptographicSigningTime),
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.requestPolicyModuleProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(data.RequestPolicyModuleProcessingTime),
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.challengeResponsesPerSecond,
prometheus.CounterValue,
data.ChallengeResponsesPerSecond,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.challengeResponseProcessingTime,
prometheus.GaugeValue,
utils.MilliSecToSec(data.ChallengeResponseProcessingTime),
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.signedCertificateTimestampListsPerSecond,
prometheus.CounterValue,
data.SignedCertificateTimestampListsPerSecond,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.signedCertificateTimestampListProcessingTime,
prometheus.GaugeValue,


@@ -113,7 +113,7 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.adLoginConnectionFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"),
"Total number of connection failures to an Active Directory domain controller",
@@ -375,7 +375,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil)
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "AD FS", nil)
if err != nil {
return fmt.Errorf("failed to create AD FS collector: %w", err)
}
@@ -387,6 +387,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect ADFS metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect ADFS metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(


@@ -99,7 +99,7 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.asyncCopyReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"),
"(AsyncCopyReadsTotal)",
@@ -277,7 +277,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll)
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "Cache", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Cache collector: %w", err)
}
@@ -290,6 +290,8 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect Cache metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect Cache metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(


@@ -29,10 +29,8 @@ import (
"unsafe"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/guid"
"github.com/prometheus-community/windows_exporter/internal/headers/hcn"
"github.com/prometheus-community/windows_exporter/internal/headers/hcs"
"github.com/prometheus-community/windows_exporter/internal/headers/iphlpapi"
"github.com/prometheus-community/windows_exporter/internal/headers/kernel32"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/pdh"
@@ -437,6 +435,7 @@ func (c *Collector) collectHCSContainer(ch chan<- prometheus.Metric, containerDe
1,
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container, "false",
)
ch <- prometheus.MustNewConstMetric(
c.usageCommitBytes,
prometheus.GaugeValue,
@@ -444,6 +443,7 @@ func (c *Collector) collectHCSContainer(ch chan<- prometheus.Metric, containerDe
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.usageCommitPeakBytes,
prometheus.GaugeValue,
@@ -451,6 +451,7 @@ func (c *Collector) collectHCSContainer(ch chan<- prometheus.Metric, containerDe
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.usagePrivateWorkingSetBytes,
prometheus.GaugeValue,
@@ -458,6 +459,7 @@ func (c *Collector) collectHCSContainer(ch chan<- prometheus.Metric, containerDe
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.runtimeTotal,
prometheus.CounterValue,
@@ -465,6 +467,7 @@ func (c *Collector) collectHCSContainer(ch chan<- prometheus.Metric, containerDe
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.runtimeUser,
prometheus.CounterValue,
@@ -472,6 +475,7 @@ func (c *Collector) collectHCSContainer(ch chan<- prometheus.Metric, containerDe
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.runtimeKernel,
prometheus.CounterValue,
@@ -479,6 +483,7 @@ func (c *Collector) collectHCSContainer(ch chan<- prometheus.Metric, containerDe
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.readCountNormalized,
prometheus.CounterValue,
@@ -486,6 +491,7 @@ func (c *Collector) collectHCSContainer(ch chan<- prometheus.Metric, containerDe
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.readSizeBytes,
prometheus.CounterValue,
@@ -493,6 +499,7 @@ func (c *Collector) collectHCSContainer(ch chan<- prometheus.Metric, containerDe
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.writeCountNormalized,
prometheus.CounterValue,
@@ -500,6 +507,7 @@ func (c *Collector) collectHCSContainer(ch chan<- prometheus.Metric, containerDe
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.writeSizeBytes,
prometheus.CounterValue,
@@ -513,7 +521,7 @@ func (c *Collector) collectHCSContainer(ch chan<- prometheus.Metric, containerDe
// collectNetworkMetrics collects network metrics for containers.
func (c *Collector) collectNetworkMetrics(ch chan<- prometheus.Metric) error {
endpoints, err := hcn.EnumerateEndpoints()
endpoints, err := hcn.ListEndpoints()
if err != nil {
return fmt.Errorf("error in fetching HCN endpoints: %w", err)
}
@@ -523,56 +531,24 @@ func (c *Collector) collectNetworkMetrics(ch chan<- prometheus.Metric) error {
}
for _, endpoint := range endpoints {
properties, err := hcn.GetEndpointProperties(endpoint)
if len(endpoint.SharedContainers) == 0 {
continue
}
endpointStats, err := hcn.GetHNSEndpointStats(endpoint.ID)
if err != nil {
c.logger.Warn("Failed to collect properties for interface "+endpoint.String(),
c.logger.Warn("Failed to collect network stats for interface "+endpoint.ID,
slog.Any("err", err),
)
continue
}
if len(properties.SharedContainers) == 0 {
continue
}
var nicGUID *guid.GUID
for _, allocator := range properties.Resources.Allocators {
if allocator.AdapterNetCfgInstanceId != nil {
nicGUID = allocator.AdapterNetCfgInstanceId
break
}
}
if nicGUID == nil {
c.logger.Warn("Failed to get nic GUID for endpoint " + endpoint.String())
continue
}
luid, err := iphlpapi.ConvertInterfaceGUIDToLUID(*nicGUID)
if err != nil {
return fmt.Errorf("error in converting interface GUID to LUID: %w", err)
}
var endpointStats iphlpapi.MIB_IF_ROW2
endpointStats.InterfaceLuid = luid
if err := iphlpapi.GetIfEntry2Ex(&endpointStats); err != nil {
c.logger.Warn("Failed to get interface entry for endpoint "+endpoint.String(),
slog.Any("err", err),
)
continue
}
for _, containerId := range properties.SharedContainers {
for _, containerId := range endpoint.SharedContainers {
containerInfo, ok := c.annotationsCacheHCS[containerId]
if !ok {
c.logger.Debug("Unknown container " + containerId + " for endpoint " + endpoint.String())
c.logger.Debug("Unknown container " + containerId + " for endpoint " + endpoint.ID)
continue
}
@@ -582,43 +558,47 @@ func (c *Collector) collectNetworkMetrics(ch chan<- prometheus.Metric) error {
continue
}
endpointId := strings.ToUpper(endpoint.String())
endpointId := strings.ToUpper(endpoint.ID)
ch <- prometheus.MustNewConstMetric(
c.bytesReceived,
prometheus.CounterValue,
float64(endpointStats.InOctets),
float64(endpointStats.BytesReceived),
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container, endpointId,
)
ch <- prometheus.MustNewConstMetric(
c.bytesSent,
prometheus.CounterValue,
float64(endpointStats.OutOctets),
float64(endpointStats.BytesSent),
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container, endpointId,
)
ch <- prometheus.MustNewConstMetric(
c.packetsReceived,
prometheus.CounterValue,
float64(endpointStats.InUcastPkts+endpointStats.InNUcastPkts),
float64(endpointStats.PacketsReceived),
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container, endpointId,
)
ch <- prometheus.MustNewConstMetric(
c.packetsSent,
prometheus.CounterValue,
float64(endpointStats.OutUcastPkts+endpointStats.OutNUcastPkts),
float64(endpointStats.PacketsSent),
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container, endpointId,
)
ch <- prometheus.MustNewConstMetric(
c.droppedPacketsIncoming,
prometheus.CounterValue,
float64(endpointStats.InDiscards+endpointStats.InErrors),
float64(endpointStats.DroppedPacketsIncoming),
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container, endpointId,
)
ch <- prometheus.MustNewConstMetric(
c.droppedPacketsOutgoing,
prometheus.CounterValue,
float64(endpointStats.OutDiscards+endpointStats.OutErrors),
float64(endpointStats.DroppedPacketsOutgoing),
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container, endpointId,
)
}
@@ -768,6 +748,7 @@ func (c *Collector) collectJobContainer(ch chan<- prometheus.Metric, containerID
1,
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container, "true",
)
ch <- prometheus.MustNewConstMetric(
c.usageCommitBytes,
prometheus.GaugeValue,
@@ -775,6 +756,7 @@ func (c *Collector) collectJobContainer(ch chan<- prometheus.Metric, containerID
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.usageCommitPeakBytes,
prometheus.GaugeValue,
@@ -782,6 +764,7 @@ func (c *Collector) collectJobContainer(ch chan<- prometheus.Metric, containerID
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.usagePrivateWorkingSetBytes,
prometheus.GaugeValue,
@@ -789,6 +772,7 @@ func (c *Collector) collectJobContainer(ch chan<- prometheus.Metric, containerID
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.runtimeTotal,
prometheus.CounterValue,
@@ -796,6 +780,7 @@ func (c *Collector) collectJobContainer(ch chan<- prometheus.Metric, containerID
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.runtimeUser,
prometheus.CounterValue,
@@ -803,6 +788,7 @@ func (c *Collector) collectJobContainer(ch chan<- prometheus.Metric, containerID
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.runtimeKernel,
prometheus.CounterValue,
@@ -810,6 +796,7 @@ func (c *Collector) collectJobContainer(ch chan<- prometheus.Metric, containerID
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.readCountNormalized,
prometheus.CounterValue,
@@ -817,6 +804,7 @@ func (c *Collector) collectJobContainer(ch chan<- prometheus.Metric, containerID
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.readSizeBytes,
prometheus.CounterValue,
@@ -824,6 +812,7 @@ func (c *Collector) collectJobContainer(ch chan<- prometheus.Metric, containerID
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.writeCountNormalized,
prometheus.CounterValue,
@@ -831,6 +820,7 @@ func (c *Collector) collectJobContainer(ch chan<- prometheus.Metric, containerID
containerInfo.id, containerInfo.namespace, containerInfo.pod, containerInfo.container,
)
ch <- prometheus.MustNewConstMetric(
c.writeSizeBytes,
prometheus.CounterValue,


@@ -90,7 +90,7 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.mu = sync.Mutex{}
c.logicalProcessors = prometheus.NewDesc(
@@ -183,7 +183,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll)
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Processor Information collector: %w", err)
}
@@ -234,12 +234,14 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
coreData.C1TimeSeconds,
core, "c1",
)
ch <- prometheus.MustNewConstMetric(
c.cStateSecondsTotal,
prometheus.CounterValue,
coreData.C2TimeSeconds,
core, "c2",
)
ch <- prometheus.MustNewConstMetric(
c.cStateSecondsTotal,
prometheus.CounterValue,
@@ -253,24 +255,28 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
coreData.IdleTimeSeconds,
core, "idle",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
coreData.InterruptTimeSeconds,
core, "interrupt",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
coreData.DpcTimeSeconds,
core, "dpc",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
coreData.PrivilegedTimeSeconds,
core, "privileged",
)
ch <- prometheus.MustNewConstMetric(
c.timeTotal,
prometheus.CounterValue,
@@ -284,18 +290,21 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
coreData.InterruptsTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.dpcsTotal,
prometheus.CounterValue,
coreData.DpcQueuedPerSecond,
core,
)
ch <- prometheus.MustNewConstMetric(
c.clockInterruptsTotal,
prometheus.CounterValue,
coreData.ClockInterruptsTotal,
core,
)
ch <- prometheus.MustNewConstMetric(
c.idleBreakEventsTotal,
prometheus.CounterValue,
@@ -316,30 +325,35 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
coreData.ProcessorFrequencyMHz,
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorPerformance,
prometheus.CounterValue,
coreData.ProcessorPerformance,
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorMPerf,
prometheus.CounterValue,
counterProcessorMPerfValues.Value(),
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorRTC,
prometheus.CounterValue,
counterProcessorRTCValues.Value(),
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorUtility,
prometheus.CounterValue,
coreData.ProcessorUtilityRate,
core,
)
ch <- prometheus.MustNewConstMetric(
c.processorPrivilegedUtility,
prometheus.CounterValue,


@@ -194,36 +194,42 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
strconv.Itoa(int(processor.Family)),
strings.TrimRight(processor.Name, " "),
)
ch <- prometheus.MustNewConstMetric(
c.cpuCoreCount,
prometheus.GaugeValue,
float64(processor.NumberOfCores),
strings.TrimRight(processor.DeviceID, " "),
)
ch <- prometheus.MustNewConstMetric(
c.cpuEnabledCoreCount,
prometheus.GaugeValue,
float64(processor.NumberOfEnabledCore),
strings.TrimRight(processor.DeviceID, " "),
)
ch <- prometheus.MustNewConstMetric(
c.cpuLogicalProcessorsCount,
prometheus.GaugeValue,
float64(processor.NumberOfLogicalProcessors),
strings.TrimRight(processor.DeviceID, " "),
)
ch <- prometheus.MustNewConstMetric(
c.cpuThreadCount,
prometheus.GaugeValue,
float64(processor.ThreadCount),
strings.TrimRight(processor.DeviceID, " "),
)
ch <- prometheus.MustNewConstMetric(
c.cpuL2CacheSize,
prometheus.GaugeValue,
float64(processor.L2CacheSize),
strings.TrimRight(processor.DeviceID, " "),
)
ch <- prometheus.MustNewConstMetric(
c.cpuL3CacheSize,
prometheus.GaugeValue,


@@ -1,157 +0,0 @@
// SPDX-License-Identifier: Apache-2.0
//
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package cs
import (
"log/slog"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/sysinfoapi"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
const Name = "cs"
type Config struct{}
//nolint:gochecknoglobals
var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for WMI metrics.
type Collector struct {
config Config
// physicalMemoryBytes
// Deprecated: Use windows_memory_physical_total_bytes instead
physicalMemoryBytes *prometheus.Desc
// logicalProcessors
// Deprecated: Use windows_cpu_logical_processor instead
logicalProcessors *prometheus.Desc
// hostname
// Deprecated: Use windows_os_hostname instead
hostname *prometheus.Desc
}
func New(config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
}
c := &Collector{
config: *config,
}
return c
}
func NewWithFlags(_ *kingpin.Application) *Collector {
return &Collector{}
}
func (c *Collector) GetName() string {
return Name
}
func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger.Warn("The cs collector is deprecated and will be removed in a future release. " +
"Logical processors has been moved to cpu_info collector. " +
"Physical memory has been moved to memory collector. " +
"Hostname has been moved to os collector.")
c.logicalProcessors = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "logical_processors"),
"Deprecated: Use windows_cpu_logical_processor instead",
nil,
nil,
)
c.physicalMemoryBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "physical_memory_bytes"),
"Deprecated: Use windows_memory_physical_total_bytes instead",
nil,
nil,
)
c.hostname = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "hostname"),
"Deprecated: Use windows_os_hostname instead",
[]string{
"hostname",
"domain",
"fqdn",
},
nil,
)
return nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
// Get systeminfo for number of processors
systemInfo := sysinfoapi.GetSystemInfo()
// Get memory status for physical memory
mem, err := sysinfoapi.GlobalMemoryStatusEx()
if err != nil {
return err
}
ch <- prometheus.MustNewConstMetric(
c.logicalProcessors,
prometheus.GaugeValue,
float64(systemInfo.NumberOfProcessors),
)
ch <- prometheus.MustNewConstMetric(
c.physicalMemoryBytes,
prometheus.GaugeValue,
float64(mem.TotalPhys),
)
hostname, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSHostname)
if err != nil {
return err
}
domain, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSDomain)
if err != nil {
return err
}
fqdn, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSFullyQualified)
if err != nil {
return err
}
ch <- prometheus.MustNewConstMetric(
c.hostname,
prometheus.GaugeValue,
1.0,
hostname,
domain,
fqdn,
)
return nil
}


@@ -455,21 +455,21 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
var err error
if slices.Contains(c.config.CollectorsEnabled, "connection") {
c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll)
c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "folder") {
c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll)
c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
}
}
if slices.Contains(c.config.CollectorsEnabled, "volume") {
c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll)
c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
}


@@ -56,6 +56,8 @@ var ConfigDefaults = Config{
type Collector struct {
config Config
logger *slog.Logger
perfDataCollector *pdh.Collector
perfDataObject []perfDataCounterValues
@@ -147,7 +149,9 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
var err error
if slices.Contains(c.config.CollectorsEnabled, subCollectorScopeMetrics) {
@@ -374,7 +378,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
nil,
)
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil)
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](c.logger, pdh.CounterTypeRaw, "DHCP Server", nil)
if err != nil {
return fmt.Errorf("failed to create DHCP Server collector: %w", err)
}
@@ -405,6 +409,8 @@ func (c *Collector) collectServerMetrics(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect DHCP Server metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect DHCP Server metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(


@@ -38,7 +38,9 @@ var ConfigDefaults = Config{}
// A Collector is a Prometheus Collector for a few WMI metrics in Win32_DiskDrive.
type Collector struct {
config Config
config Config
logger *slog.Logger
miSession *mi.Session
miQuery mi.Query
@@ -73,7 +75,9 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
c.diskInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
"General drive information",
@@ -148,7 +152,7 @@ var (
"Error",
"Degraded",
"Unknown",
"Pred fail",
"Pred Fail",
"Starting",
"Stopping",
"Service",
@@ -241,6 +245,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
if availNum == int(disk.Availability) {
isCurrentState = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.availability,
prometheus.GaugeValue,


@@ -132,7 +132,7 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
for _, collector := range c.config.CollectorsEnabled {
if !slices.Contains([]string{subCollectorMetrics, subCollectorWMIStats}, collector) {
return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector,
@@ -142,7 +142,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
}
if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) {
if err := c.buildMetricsCollector(); err != nil {
if err := c.buildMetricsCollector(logger); err != nil {
return err
}
}
@@ -156,7 +156,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
return nil
}
func (c *Collector) buildMetricsCollector() error {
func (c *Collector) buildMetricsCollector(logger *slog.Logger) error {
c.zoneTransferRequestsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"),
"Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",
@@ -299,7 +299,7 @@ func (c *Collector) buildMetricsCollector() error {
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll)
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "DNS", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create DNS collector: %w", err)
}
@@ -347,6 +347,8 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
err := c.perfDataCollector.Collect(&c.perfDataObject)
if err != nil {
return fmt.Errorf("failed to collect DNS metrics: %w", err)
} else if len(c.perfDataObject) == 0 {
return fmt.Errorf("failed to collect DNS metrics: %w", types.ErrNoDataUnexpected)
}
ch <- prometheus.MustNewConstMetric(
@@ -355,6 +357,7 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].AxfrRequestReceived,
"full",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsReceived,
prometheus.CounterValue,
@@ -368,12 +371,14 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].AxfrRequestSent,
"full",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsSent,
prometheus.CounterValue,
c.perfDataObject[0].IxfrRequestSent,
"incremental",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferRequestsSent,
prometheus.CounterValue,
@@ -387,6 +392,7 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].AxfrResponseReceived,
"full",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferResponsesReceived,
prometheus.CounterValue,
@@ -401,6 +407,7 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
"full",
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessReceived,
prometheus.CounterValue,
@@ -408,6 +415,7 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
"incremental",
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessReceived,
prometheus.CounterValue,
@@ -422,6 +430,7 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].AxfrSuccessSent,
"full",
)
ch <- prometheus.MustNewConstMetric(
c.zoneTransferSuccessSent,
prometheus.CounterValue,
@@ -441,30 +450,35 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].CachingMemory,
"caching",
)
ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes,
prometheus.GaugeValue,
c.perfDataObject[0].DatabaseNodeMemory,
"database_node",
)
ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes,
prometheus.GaugeValue,
c.perfDataObject[0].NbStatMemory,
"nbstat",
)
ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes,
prometheus.GaugeValue,
c.perfDataObject[0].RecordFlowMemory,
"record_flow",
)
ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes,
prometheus.GaugeValue,
c.perfDataObject[0].TcpMessageMemory,
"tcp_message",
)
ch <- prometheus.MustNewConstMetric(
c.memoryUsedBytes,
prometheus.GaugeValue,
@@ -478,23 +492,27 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].DynamicUpdateNoOperation,
"noop",
)
ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesReceived,
prometheus.CounterValue,
c.perfDataObject[0].DynamicUpdateWrittenToDatabase,
"written",
)
ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesQueued,
prometheus.GaugeValue,
c.perfDataObject[0].DynamicUpdateQueued,
)
ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesFailures,
prometheus.CounterValue,
c.perfDataObject[0].DynamicUpdateRejected,
"rejected",
)
ch <- prometheus.MustNewConstMetric(
c.dynamicUpdatesFailures,
prometheus.CounterValue,
@@ -507,6 +525,7 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].NotifyReceived,
)
ch <- prometheus.MustNewConstMetric(
c.notifySent,
prometheus.CounterValue,
@@ -518,11 +537,13 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].RecursiveQueries,
)
ch <- prometheus.MustNewConstMetric(
c.recursiveQueryFailures,
prometheus.CounterValue,
c.perfDataObject[0].RecursiveQueryFailure,
)
ch <- prometheus.MustNewConstMetric(
c.recursiveQuerySendTimeouts,
prometheus.CounterValue,
@@ -535,6 +556,7 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].TcpQueryReceived,
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.queries,
prometheus.CounterValue,
@@ -548,6 +570,7 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].TcpResponseSent,
"tcp",
)
ch <- prometheus.MustNewConstMetric(
c.responses,
prometheus.CounterValue,
@@ -567,6 +590,7 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].WinsLookupReceived,
"forward",
)
ch <- prometheus.MustNewConstMetric(
c.winsQueries,
prometheus.CounterValue,
@@ -580,6 +604,7 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
c.perfDataObject[0].WinsResponseSent,
"forward",
)
ch <- prometheus.MustNewConstMetric(
c.winsResponses,
prometheus.CounterValue,
@@ -592,6 +617,7 @@ func (c *Collector) collectMetrics(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
c.perfDataObject[0].SecureUpdateFailure,
)
ch <- prometheus.MustNewConstMetric(
c.secureUpdateReceived,
prometheus.CounterValue,

View File

@@ -66,11 +66,6 @@ var ConfigDefaults = Config{
}
type Collector struct {
config Config
collectorFns []func(ch chan<- prometheus.Metric) error
closeFns []func()
collectorADAccessProcesses
collectorActiveSync
collectorAutoDiscover
@@ -81,6 +76,12 @@ type Collector struct {
collectorRpcClientAccess
collectorTransportQueues
collectorWorkloadManagementWorkloads
config Config
logger *slog.Logger
collectorFns []func(ch chan<- prometheus.Metric) error
closeFns []func()
}
func New(config *Config) *Collector {
@@ -170,7 +171,9 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
subCollectors := map[string]struct {
build func() error
collect func(ch chan<- prometheus.Metric) error


@@ -43,7 +43,7 @@ type perfDataCounterValuesActiveSync struct {
func (c *Collector) buildActiveSync() error {
var err error
c.perfDataCollectorActiveSync, err = pdh.NewCollector[perfDataCounterValuesActiveSync](pdh.CounterTypeRaw, "MSExchange ActiveSync", pdh.InstancesAll)
c.perfDataCollectorActiveSync, err = pdh.NewCollector[perfDataCounterValuesActiveSync](c.logger, pdh.CounterTypeRaw, "MSExchange ActiveSync", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange ActiveSync collector: %w", err)
}
@@ -82,11 +82,13 @@ func (c *Collector) collectActiveSync(ch chan<- prometheus.Metric) error {
prometheus.CounterValue,
data.RequestsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.pingCommandsPending,
prometheus.GaugeValue,
data.PingCommandsPending,
)
ch <- prometheus.MustNewConstMetric(
c.syncCommandsPerSec,
prometheus.CounterValue,


@@ -50,7 +50,7 @@ type perfDataCounterValuesADAccessProcesses struct {
func (c *Collector) buildADAccessProcesses() error {
var err error
c.perfDataCollectorADAccessProcesses, err = pdh.NewCollector[perfDataCounterValuesADAccessProcesses](pdh.CounterTypeRaw, "MSExchange ADAccess Processes", pdh.InstancesAll)
c.perfDataCollectorADAccessProcesses, err = pdh.NewCollector[perfDataCounterValuesADAccessProcesses](c.logger, pdh.CounterTypeRaw, "MSExchange ADAccess Processes", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange ADAccess Processes collector: %w", err)
}
@@ -113,24 +113,28 @@ func (c *Collector) collectADAccessProcesses(ch chan<- prometheus.Metric) error
utils.MilliSecToSec(data.LdapReadTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ldapSearchTime,
prometheus.CounterValue,
utils.MilliSecToSec(data.LdapSearchTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ldapWriteTime,
prometheus.CounterValue,
utils.MilliSecToSec(data.LdapWriteTime),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.ldapTimeoutErrorsPerSec,
prometheus.CounterValue,
data.LdapTimeoutErrorsPerSec,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.longRunningLDAPOperationsPerMin,
prometheus.CounterValue,


@@ -39,7 +39,7 @@ type perfDataCounterValuesAutoDiscover struct {
func (c *Collector) buildAutoDiscover() error {
var err error
c.perfDataCollectorAutoDiscover, err = pdh.NewCollector[perfDataCounterValuesAutoDiscover](pdh.CounterTypeRaw, "MSExchangeAutodiscover", nil)
c.perfDataCollectorAutoDiscover, err = pdh.NewCollector[perfDataCounterValuesAutoDiscover](c.logger, pdh.CounterTypeRaw, "MSExchangeAutodiscover", nil)
if err != nil {
return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err)
}


@@ -39,7 +39,7 @@ type perfDataCounterValuesAvailabilityService struct {
func (c *Collector) buildAvailabilityService() error {
var err error
c.perfDataCollectorAvailabilityService, err = pdh.NewCollector[perfDataCounterValuesAvailabilityService](pdh.CounterTypeRaw, "MSExchange Availability Service", pdh.InstancesAll)
c.perfDataCollectorAvailabilityService, err = pdh.NewCollector[perfDataCounterValuesAvailabilityService](c.logger, pdh.CounterTypeRaw, "MSExchange Availability Service", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange Availability Service collector: %w", err)
}


@@ -52,7 +52,7 @@ type perfDataCounterValuesHTTPProxy struct {
func (c *Collector) buildHTTPProxy() error {
var err error
c.perfDataCollectorHTTPProxy, err = pdh.NewCollector[perfDataCounterValuesHTTPProxy](pdh.CounterTypeRaw, "MSExchange HttpProxy", pdh.InstancesAll)
c.perfDataCollectorHTTPProxy, err = pdh.NewCollector[perfDataCounterValuesHTTPProxy](c.logger, pdh.CounterTypeRaw, "MSExchange HttpProxy", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange HttpProxy collector: %w", err)
}
@@ -111,30 +111,35 @@ func (c *Collector) collectHTTPProxy(ch chan<- prometheus.Metric) error {
utils.MilliSecToSec(data.MailboxServerLocatorAverageLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.averageAuthenticationLatency,
prometheus.GaugeValue,
data.AverageAuthenticationLatency,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.averageCASProcessingLatency,
prometheus.GaugeValue,
utils.MilliSecToSec(data.AverageCASProcessingLatency),
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.mailboxServerProxyFailureRate,
prometheus.GaugeValue,
data.MailboxServerProxyFailureRate,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.outstandingProxyRequests,
prometheus.GaugeValue,
data.OutstandingProxyRequests,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.proxyRequestsPerSec,
prometheus.CounterValue,


@@ -39,7 +39,7 @@ type perfDataCounterValuesMapiHTTPEmsMDB struct {
func (c *Collector) buildMapiHTTPEmsMDB() error {
var err error
c.perfDataCollectorMapiHTTPEmsMDB, err = pdh.NewCollector[perfDataCounterValuesMapiHTTPEmsMDB](pdh.CounterTypeRaw, "MSExchange MapiHttp Emsmdb", pdh.InstancesAll)
c.perfDataCollectorMapiHTTPEmsMDB, err = pdh.NewCollector[perfDataCounterValuesMapiHTTPEmsMDB](c.logger, pdh.CounterTypeRaw, "MSExchange MapiHttp Emsmdb", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange MapiHttp Emsmdb: %w", err)
}


@@ -41,7 +41,7 @@ type perfDataCounterValuesOWA struct {
func (c *Collector) buildOWA() error {
var err error
c.perfDataCollectorOWA, err = pdh.NewCollector[perfDataCounterValuesOWA](pdh.CounterTypeRaw, "MSExchange OWA", pdh.InstancesAll)
c.perfDataCollectorOWA, err = pdh.NewCollector[perfDataCounterValuesOWA](c.logger, pdh.CounterTypeRaw, "MSExchange OWA", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange OWA collector: %w", err)
}
@@ -74,6 +74,7 @@ func (c *Collector) collectOWA(ch chan<- prometheus.Metric) error {
prometheus.GaugeValue,
data.CurrentUniqueUsers,
)
ch <- prometheus.MustNewConstMetric(
c.owaRequestsPerSec,
prometheus.CounterValue,


@@ -50,7 +50,7 @@ type perfDataCounterValuesRpcClientAccess struct {
func (c *Collector) buildRpcClientAccess() error {
var err error
c.perfDataCollectorRpcClientAccess, err = pdh.NewCollector[perfDataCounterValuesRpcClientAccess](pdh.CounterTypeRaw, "MSExchange RpcClientAccess", pdh.InstancesAll)
c.perfDataCollectorRpcClientAccess, err = pdh.NewCollector[perfDataCounterValuesRpcClientAccess](c.logger, pdh.CounterTypeRaw, "MSExchange RpcClientAccess", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange RpcClientAccess collector: %w", err)
}
@@ -107,26 +107,31 @@ func (c *Collector) collectRpcClientAccess(ch chan<- prometheus.Metric) error {
prometheus.GaugeValue,
utils.MilliSecToSec(data.RpcAveragedLatency),
)
ch <- prometheus.MustNewConstMetric(
c.rpcRequests,
prometheus.GaugeValue,
data.RpcRequests,
)
ch <- prometheus.MustNewConstMetric(
c.activeUserCount,
prometheus.GaugeValue,
data.ActiveUserCount,
)
ch <- prometheus.MustNewConstMetric(
c.connectionCount,
prometheus.GaugeValue,
data.ConnectionCount,
)
ch <- prometheus.MustNewConstMetric(
c.rpcOperationsPerSec,
prometheus.CounterValue,
data.RpcOperationsPerSec,
)
ch <- prometheus.MustNewConstMetric(
c.userCount,
prometheus.GaugeValue,


@@ -77,7 +77,7 @@ type perfDataCounterValuesTransportQueues struct {
func (c *Collector) buildTransportQueues() error {
var err error
c.perfDataCollectorTransportQueues, err = pdh.NewCollector[perfDataCounterValuesTransportQueues](pdh.CounterTypeRaw, "MSExchangeTransport Queues", pdh.InstancesAll)
c.perfDataCollectorTransportQueues, err = pdh.NewCollector[perfDataCounterValuesTransportQueues](c.logger, pdh.CounterTypeRaw, "MSExchangeTransport Queues", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchangeTransport Queues collector: %w", err)
}
@@ -215,108 +215,126 @@ func (c *Collector) collectTransportQueues(ch chan<- prometheus.Metric) error {
data.ExternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.internalActiveRemoteDeliveryQueueLength,
prometheus.GaugeValue,
data.InternalActiveRemoteDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.activeMailboxDeliveryQueueLength,
prometheus.GaugeValue,
data.ActiveMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.retryMailboxDeliveryQueueLength,
prometheus.GaugeValue,
data.RetryMailboxDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.unreachableQueueLength,
prometheus.GaugeValue,
data.UnreachableQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.externalLargestDeliveryQueueLength,
prometheus.GaugeValue,
data.ExternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.internalLargestDeliveryQueueLength,
prometheus.GaugeValue,
data.InternalLargestDeliveryQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.poisonQueueLength,
prometheus.GaugeValue,
data.PoisonQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.messagesQueuedForDeliveryTotal,
prometheus.CounterValue,
data.MessagesQueuedForDeliveryTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.messagesSubmittedTotal,
prometheus.CounterValue,
data.MessagesSubmittedTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.messagesDelayedTotal,
prometheus.CounterValue,
data.MessagesDelayedTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.messagesCompletedDeliveryTotal,
prometheus.CounterValue,
data.MessagesCompletedDeliveryTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.aggregateShadowQueueLength,
prometheus.GaugeValue,
data.AggregateShadowQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.submissionQueueLength,
prometheus.GaugeValue,
data.SubmissionQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.delayQueueLength,
prometheus.GaugeValue,
data.DelayQueueLength,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.itemsCompletedDeliveryTotal,
prometheus.CounterValue,
data.ItemsCompletedDeliveryTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.itemsQueuedForDeliveryExpiredTotal,
prometheus.CounterValue,
data.ItemsQueuedForDeliveryExpiredTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.itemsQueuedForDeliveryTotal,
prometheus.CounterValue,
data.ItemsQueuedForDeliveryTotal,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.itemsResubmittedTotal,
prometheus.CounterValue,

View File

@@ -49,7 +49,7 @@ type perfDataCounterValuesWorkloadManagementWorkloads struct {
func (c *Collector) buildWorkloadManagementWorkloads() error {
var err error
c.perfDataCollectorWorkloadManagementWorkloads, err = pdh.NewCollector[perfDataCounterValuesWorkloadManagementWorkloads](pdh.CounterTypeRaw, "MSExchange WorkloadManagement Workloads", pdh.InstancesAll)
c.perfDataCollectorWorkloadManagementWorkloads, err = pdh.NewCollector[perfDataCounterValuesWorkloadManagementWorkloads](c.logger, pdh.CounterTypeRaw, "MSExchange WorkloadManagement Workloads", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSExchange WorkloadManagement Workloads collector: %w", err)
}
@@ -103,24 +103,28 @@ func (c *Collector) collectWorkloadManagementWorkloads(ch chan<- prometheus.Metr
data.ActiveTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.completedTasks,
prometheus.CounterValue,
data.CompletedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.queuedTasks,
prometheus.CounterValue,
data.QueuedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.yieldedTasks,
prometheus.CounterValue,
data.YieldedTasks,
labelName,
)
ch <- prometheus.MustNewConstMetric(
c.isActive,
prometheus.GaugeValue,

View File

@@ -15,10 +15,11 @@
//go:build windows
package filetime
package file
import (
"fmt"
"io/fs"
"log/slog"
"os"
"path/filepath"
@@ -32,7 +33,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
const Name = "filetime"
const Name = "file"
type Config struct {
FilePatterns []string `yaml:"file-patterns"`
@@ -49,6 +50,7 @@ type Collector struct {
logger *slog.Logger
fileMTime *prometheus.Desc
fileSize *prometheus.Desc
}
func New(config *Config) *Collector {
@@ -73,19 +75,10 @@ func NewWithFlags(app *kingpin.Application) *Collector {
}
c.config.FilePatterns = make([]string, 0)
var filePatterns string
app.Flag(
"collector.filetime.file-patterns",
"collector.file.file-patterns",
"Comma-separated list of file patterns. Each pattern is a glob pattern that can contain `*`, `?`, and `**` (recursive). See https://github.com/bmatcuk/doublestar#patterns",
).Default(strings.Join(ConfigDefaults.FilePatterns, ",")).StringVar(&filePatterns)
app.Action(func(*kingpin.ParseContext) error {
// doublestar.Glob() requires forward slashes
c.config.FilePatterns = strings.Split(filepath.ToSlash(filePatterns), ",")
return nil
})
).Default(strings.Join(ConfigDefaults.FilePatterns, ",")).StringsVar(&c.config.FilePatterns)
return c
}
@@ -101,7 +94,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
c.logger.Info("filetime collector is in an experimental state! It may subject to change.")
c.logger.Info("file collector is in an experimental state! It may subject to change.")
c.fileMTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mtime_timestamp_seconds"),
@@ -110,6 +103,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
nil,
)
c.fileSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "size_bytes"),
"File size",
[]string{"file"},
nil,
)
for _, filePattern := range c.config.FilePatterns {
basePath, pattern := doublestar.SplitPattern(filePattern)
@@ -148,16 +148,11 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}
func (c *Collector) collectGlobFilePath(ch chan<- prometheus.Metric, filePattern string) error {
basePath, pattern := doublestar.SplitPattern(filePattern)
basePath, pattern := doublestar.SplitPattern(filepath.ToSlash(filePattern))
basePathFS := os.DirFS(basePath)
matches, err := doublestar.Glob(basePathFS, pattern, doublestar.WithFilesOnly())
if err != nil {
return fmt.Errorf("failed to glob: %w", err)
}
for _, match := range matches {
filePath := filepath.Join(basePath, match)
err := doublestar.GlobWalk(basePathFS, pattern, func(path string, d fs.DirEntry) error {
filePath := filepath.Join(basePath, path)
fileInfo, err := os.Stat(filePath)
if err != nil {
@@ -166,15 +161,27 @@ func (c *Collector) collectGlobFilePath(ch chan<- prometheus.Metric, filePattern
slog.Any("err", err),
)
continue
return nil
}
ch <- prometheus.MustNewConstMetric(
c.fileMTime,
prometheus.GaugeValue,
float64(fileInfo.ModTime().UTC().Unix()),
float64(fileInfo.ModTime().UTC().UnixMicro())/1e6,
filePath,
)
ch <- prometheus.MustNewConstMetric(
c.fileSize,
prometheus.GaugeValue,
float64(fileInfo.Size()),
filePath,
)
return nil
}, doublestar.WithFilesOnly(), doublestar.WithCaseInsensitive())
if err != nil {
return fmt.Errorf("failed to glob: %w", err)
}
return nil
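The reworked file collector above replaces the two-step Glob-then-stat loop with doublestar.GlobWalk, keeps sub-second precision on the mtime metric, and emits a new size metric. A self-contained sketch of that globbing pattern follows; the pattern string is an assumption, not taken from the diff.

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	// doublestar needs forward slashes; ToSlash mirrors how the collector
	// normalises a Windows-style pattern. The pattern itself is made up.
	basePath, pattern := doublestar.SplitPattern(filepath.ToSlash(`C:\logs\**\*.log`))
	basePathFS := os.DirFS(basePath)

	err := doublestar.GlobWalk(basePathFS, pattern, func(path string, d fs.DirEntry) error {
		filePath := filepath.Join(basePath, path)

		fileInfo, err := os.Stat(filePath)
		if err != nil {
			// As in the collector: skip files that vanish between match and stat.
			return nil
		}

		// Modification time with sub-second precision, as the reworked metric emits it.
		mtime := float64(fileInfo.ModTime().UTC().UnixMicro()) / 1e6
		fmt.Printf("%s mtime=%.6f size=%d\n", filePath, mtime, fileInfo.Size())

		return nil
	}, doublestar.WithFilesOnly(), doublestar.WithCaseInsensitive())
	if err != nil {
		fmt.Println("failed to glob:", err)
	}
}

GlobWalk visits matches through the callback as it finds them, so the collector no longer needs to materialise the full match slice before stating each file; WithCaseInsensitive also matches NTFS's case-insensitive behaviour more naturally.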

View File

@@ -15,21 +15,21 @@
//go:build windows
package filetime_test
package file_test
import (
"testing"
"github.com/prometheus-community/windows_exporter/internal/collector/filetime"
"github.com/prometheus-community/windows_exporter/internal/collector/file"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils"
)
func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, filetime.Name, filetime.NewWithFlags)
testutils.FuncBenchmarkCollector(b, file.Name, file.NewWithFlags)
}
func TestCollector(t *testing.T) {
testutils.TestCollector(t, filetime.New, &filetime.Config{
testutils.TestCollector(t, file.New, &file.Config{
FilePatterns: []string{"*.*"},
})
}

View File

@@ -190,6 +190,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.size,
prometheus.GaugeValue,
@@ -197,6 +198,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.usage,
prometheus.GaugeValue,
@@ -204,12 +206,14 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.description,
prometheus.GaugeValue,
1.0,
path, template, Description,
)
ch <- prometheus.MustNewConstMetric(
c.disabled,
prometheus.GaugeValue,
@@ -217,6 +221,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.matchesTemplate,
prometheus.GaugeValue,
@@ -224,6 +229,7 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
path,
template,
)
ch <- prometheus.MustNewConstMetric(
c.softLimit,
prometheus.GaugeValue,

View File

@@ -21,10 +21,10 @@ import (
"errors"
"fmt"
"log/slog"
"strconv"
"github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/setupapi"
"github.com/prometheus-community/windows_exporter/internal/headers/cfgmgr32"
"github.com/prometheus-community/windows_exporter/internal/headers/gdi32"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
@@ -41,6 +41,8 @@ var ConfigDefaults = Config{}
type Collector struct {
config Config
gpuDeviceCache map[string]gpuDevice
// GPU Engine
gpuEnginePerfDataCollector *pdh.Collector
gpuEnginePerfDataObject []gpuEnginePerfDataCounterValues
@@ -48,6 +50,10 @@ type Collector struct {
gpuInfo *prometheus.Desc
gpuEngineRunningTime *prometheus.Desc
gpuSharedSystemMemorySize *prometheus.Desc
gpuDedicatedSystemMemorySize *prometheus.Desc
gpuDedicatedVideoMemorySize *prometheus.Desc
// GPU Adapter Memory
gpuAdapterMemoryPerfDataCollector *pdh.Collector
gpuAdapterMemoryPerfDataObject []gpuAdapterMemoryPerfDataCounterValues
@@ -79,6 +85,12 @@ type Collector struct {
gpuProcessMemoryTotalCommitted *prometheus.Desc
}
type gpuDevice struct {
gdi32 gdi32.GPUDevice
cfgmgr32 cfgmgr32.Device
ID string
}
func New(config *Config) *Collector {
if config == nil {
config = &ConfigDefaults
@@ -109,123 +121,203 @@ func (c *Collector) Close() error {
return nil
}
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
var err error
c.gpuInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
"A metric with a constant '1' value labeled with gpu device information.",
[]string{"phys", "physical_device_object_name", "hardware_id", "friendly_name", "description"},
[]string{"luid", "device_id", "name", "bus_number", "phys", "function_number"},
nil,
)
c.gpuSharedSystemMemorySize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "shared_system_memory_size_bytes"),
"The size, in bytes, of memory from system memory that can be shared by many users.",
[]string{"luid", "device_id"},
nil,
)
c.gpuDedicatedSystemMemorySize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dedicated_system_memory_size_bytes"),
"The size, in bytes, of memory that is dedicated from system memory.",
[]string{"luid", "device_id"},
nil,
)
c.gpuDedicatedVideoMemorySize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dedicated_video_memory_size_bytes"),
"The size, in bytes, of memory that is dedicated from video memory.",
[]string{"luid", "device_id"},
nil,
)
c.gpuEngineRunningTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "engine_time_seconds"),
"Total running time of the GPU in seconds.",
[]string{"process_id", "phys", "eng", "engtype"},
[]string{"process_id", "luid", "device_id", "phys", "eng", "engtype"},
nil,
)
c.gpuAdapterMemoryDedicatedUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "adapter_memory_dedicated_bytes"),
"Dedicated GPU memory usage in bytes.",
[]string{"phys"},
[]string{"luid", "device_id", "phys"},
nil,
)
c.gpuAdapterMemorySharedUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "adapter_memory_shared_bytes"),
"Shared GPU memory usage in bytes.",
[]string{"phys"},
[]string{"luid", "device_id", "phys"},
nil,
)
c.gpuAdapterMemoryTotalCommitted = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "adapter_memory_committed_bytes"),
"Total committed GPU memory in bytes.",
[]string{"phys"},
[]string{"luid", "device_id", "phys"},
nil,
)
c.gpuLocalAdapterMemoryUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "local_adapter_memory_bytes"),
"Local adapter memory usage in bytes.",
[]string{"phys"},
[]string{"luid", "device_id", "phys", "part"},
nil,
)
c.gpuNonLocalAdapterMemoryUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "non_local_adapter_memory_bytes"),
"Non-local adapter memory usage in bytes.",
[]string{"phys"},
[]string{"luid", "device_id", "phys", "part"},
nil,
)
c.gpuProcessMemoryDedicatedUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "process_memory_dedicated_bytes"),
"Dedicated process memory usage in bytes.",
[]string{"process_id", "phys"},
[]string{"process_id", "luid", "device_id", "phys"},
nil,
)
c.gpuProcessMemoryLocalUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "process_memory_local_bytes"),
"Local process memory usage in bytes.",
[]string{"process_id", "phys"},
[]string{"process_id", "luid", "device_id", "phys"},
nil,
)
c.gpuProcessMemoryNonLocalUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "process_memory_non_local_bytes"),
"Non-local process memory usage in bytes.",
[]string{"process_id", "phys"},
[]string{"process_id", "luid", "device_id", "phys"},
nil,
)
c.gpuProcessMemorySharedUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "process_memory_shared_bytes"),
"Shared process memory usage in bytes.",
[]string{"process_id", "phys"},
[]string{"process_id", "luid", "device_id", "phys"},
nil,
)
c.gpuProcessMemoryTotalCommitted = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "process_memory_committed_bytes"),
"Total committed process memory in bytes.",
[]string{"process_id", "phys"},
[]string{"process_id", "luid", "device_id", "phys"},
nil,
)
errs := make([]error, 0)
c.gpuEnginePerfDataCollector, err = pdh.NewCollector[gpuEnginePerfDataCounterValues](pdh.CounterTypeRaw, "GPU Engine", pdh.InstancesAll)
c.gpuEnginePerfDataCollector, err = pdh.NewCollector[gpuEnginePerfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "GPU Engine", pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create GPU Engine perf data collector: %w", err))
}
c.gpuAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuAdapterMemoryPerfDataCounterValues](pdh.CounterTypeRaw, "GPU Adapter Memory", pdh.InstancesAll)
c.gpuAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuAdapterMemoryPerfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "GPU Adapter Memory", pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create GPU Adapter Memory perf data collector: %w", err))
}
c.gpuLocalAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuLocalAdapterMemoryPerfDataCounterValues](pdh.CounterTypeRaw, "GPU Local Adapter Memory", pdh.InstancesAll)
c.gpuLocalAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuLocalAdapterMemoryPerfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "GPU Local Adapter Memory", pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create GPU Local Adapter Memory perf data collector: %w", err))
}
c.gpuNonLocalAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuNonLocalAdapterMemoryPerfDataCounterValues](pdh.CounterTypeRaw, "GPU Non Local Adapter Memory", pdh.InstancesAll)
c.gpuNonLocalAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuNonLocalAdapterMemoryPerfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "GPU Non Local Adapter Memory", pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create GPU Non Local Adapter Memory perf data collector: %w", err))
}
c.gpuProcessMemoryPerfDataCollector, err = pdh.NewCollector[gpuProcessMemoryPerfDataCounterValues](pdh.CounterTypeRaw, "GPU Process Memory", pdh.InstancesAll)
c.gpuProcessMemoryPerfDataCollector, err = pdh.NewCollector[gpuProcessMemoryPerfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "GPU Process Memory", pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create GPU Process Memory perf data collector: %w", err))
}
gpus, err := gdi32.GetGPUDevices()
if err != nil {
errs = append(errs, fmt.Errorf("failed to get GPU devices: %w", err))
}
for _, gpu := range gpus {
if gpu.AdapterString == "" {
continue
}
// Skip Microsoft Basic Render Driver
// https://devicehunt.com/view/type/pci/vendor/1414/device/008C
if gpu.DeviceID == `PCI\VEN_1414&DEV_008C&SUBSYS_00000000&REV_00` {
continue
}
if c.gpuDeviceCache == nil {
c.gpuDeviceCache = make(map[string]gpuDevice)
}
luidKey := fmt.Sprintf("0x%08X_0x%08X", gpu.LUID.HighPart, gpu.LUID.LowPart)
deviceID := gpu.DeviceID
cfgmgr32Devs, err := cfgmgr32.GetDevicesInstanceIDs(gpu.DeviceID)
if err != nil {
errs = append(errs, fmt.Errorf("failed to get device instance IDs for device ID %s: %w", gpu.DeviceID, err))
}
var cfgmgr32Dev cfgmgr32.Device
for _, dev := range cfgmgr32Devs {
if dev.BusNumber == gpu.BusNumber && dev.DeviceNumber == gpu.DeviceNumber && dev.FunctionNumber == gpu.FunctionNumber {
cfgmgr32Dev = dev
break
}
}
if cfgmgr32Dev.InstanceID == "" {
errs = append(errs, fmt.Errorf("failed to find matching device for device ID %s", gpu.DeviceID))
} else {
deviceID = cfgmgr32Dev.InstanceID
}
c.gpuDeviceCache[luidKey] = gpuDevice{
gdi32: gpu,
cfgmgr32: cfgmgr32Dev,
ID: deviceID,
}
logger.Debug("Found GPU device",
slog.String("collector", Name),
slog.String("name", gpu.AdapterString),
slog.String("luid", luidKey),
slog.String("device_id", deviceID),
slog.String("name", gpu.AdapterString),
slog.Uint64("bus_number", uint64(gpu.BusNumber)),
slog.Uint64("device_number", uint64(gpu.DeviceNumber)),
slog.Uint64("function_number", uint64(gpu.FunctionNumber)),
)
}
return errors.Join(errs...)
}
func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs := make([]error, 0)
if err := c.collectGpuInfo(ch); err != nil {
errs = append(errs, err)
}
c.collectGpuInfo(ch)
if err := c.collectGpuEngineMetrics(ch); err != nil {
errs = append(errs, err)
@@ -250,26 +342,41 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
return errors.Join(errs...)
}
func (c *Collector) collectGpuInfo(ch chan<- prometheus.Metric) error {
gpus, err := setupapi.GetGPUDevices()
if err != nil {
return fmt.Errorf("failed to get GPU devices: %w", err)
}
for i, gpu := range gpus {
func (c *Collector) collectGpuInfo(ch chan<- prometheus.Metric) {
for luid, gpu := range c.gpuDeviceCache {
ch <- prometheus.MustNewConstMetric(
c.gpuInfo,
prometheus.GaugeValue,
1.0,
strconv.Itoa(i),
gpu.PhysicalDeviceObjectName,
gpu.HardwareID,
gpu.FriendlyName,
gpu.DeviceDesc,
luid,
gpu.ID,
gpu.gdi32.AdapterString,
gpu.gdi32.BusNumber.String(),
gpu.gdi32.DeviceNumber.String(),
gpu.gdi32.FunctionNumber.String(),
)
ch <- prometheus.MustNewConstMetric(
c.gpuSharedSystemMemorySize,
prometheus.GaugeValue,
float64(gpu.gdi32.SharedSystemMemorySize),
luid, gpu.ID,
)
ch <- prometheus.MustNewConstMetric(
c.gpuDedicatedSystemMemorySize,
prometheus.GaugeValue,
float64(gpu.gdi32.DedicatedSystemMemorySize),
luid, gpu.ID,
)
ch <- prometheus.MustNewConstMetric(
c.gpuDedicatedVideoMemorySize,
prometheus.GaugeValue,
float64(gpu.gdi32.DedicatedVideoMemorySize),
luid, gpu.ID,
)
}
return nil
}
func (c *Collector) collectGpuEngineMetrics(ch chan<- prometheus.Metric) error {
@@ -278,26 +385,20 @@ func (c *Collector) collectGpuEngineMetrics(ch chan<- prometheus.Metric) error {
return fmt.Errorf("failed to collect GPU Engine perf data: %w", err)
}
runningTimeMap := make(map[PidPhysEngEngType]float64)
// Iterate over the GPU Engine perf data and aggregate the values.
for _, data := range c.gpuEnginePerfDataObject {
instance := parseGPUCounterInstanceString(data.Name)
key := PidPhysEngEngType{
Pid: instance.Pid,
Phys: instance.Phys,
Eng: instance.Eng,
Engtype: instance.Engtype,
device, ok := c.gpuDeviceCache[instance.Luid]
if !ok {
continue
}
runningTimeMap[key] += data.RunningTime / 10_000_000 // RunningTime is in 100ns units, convert to seconds.
}
for key, runningTime := range runningTimeMap {
ch <- prometheus.MustNewConstMetric(
c.gpuEngineRunningTime,
prometheus.CounterValue,
runningTime,
key.Pid, key.Phys, key.Eng, key.Engtype,
data.RunningTime/10_000_000,
instance.Pid, instance.Luid, device.ID, instance.Phys, instance.Eng, instance.Engtype,
)
}
@@ -310,42 +411,33 @@ func (c *Collector) collectGpuAdapterMemoryMetrics(ch chan<- prometheus.Metric)
return fmt.Errorf("failed to collect GPU Adapter Memory perf data: %w", err)
}
dedicatedUsageMap := make(map[PidPhysEngEngType]float64)
sharedUsageMap := make(map[PidPhysEngEngType]float64)
totalCommittedMap := make(map[PidPhysEngEngType]float64)
for _, data := range c.gpuAdapterMemoryPerfDataObject {
instance := parseGPUCounterInstanceString(data.Name)
key := PidPhysEngEngType{
Pid: instance.Pid,
Phys: instance.Phys,
Eng: instance.Eng,
Engtype: instance.Engtype,
device, ok := c.gpuDeviceCache[instance.Luid]
if !ok {
continue
}
dedicatedUsageMap[key] += data.DedicatedUsage
sharedUsageMap[key] += data.SharedUsage
totalCommittedMap[key] += data.TotalCommitted
}
for key, dedicatedUsage := range dedicatedUsageMap {
ch <- prometheus.MustNewConstMetric(
c.gpuAdapterMemoryDedicatedUsage,
prometheus.GaugeValue,
dedicatedUsage,
key.Phys,
data.DedicatedUsage,
instance.Luid, device.ID, instance.Phys,
)
ch <- prometheus.MustNewConstMetric(
c.gpuAdapterMemorySharedUsage,
prometheus.GaugeValue,
sharedUsageMap[key],
key.Phys,
data.SharedUsage,
instance.Luid, device.ID, instance.Phys,
)
ch <- prometheus.MustNewConstMetric(
c.gpuAdapterMemoryTotalCommitted,
prometheus.GaugeValue,
totalCommittedMap[key],
key.Phys,
data.TotalCommitted,
instance.Luid, device.ID, instance.Phys,
)
}
@@ -358,20 +450,19 @@ func (c *Collector) collectGpuLocalAdapterMemoryMetrics(ch chan<- prometheus.Met
return fmt.Errorf("failed to collect GPU Local Adapter Memory perf data: %w", err)
}
localAdapterMemoryMap := make(map[string]float64)
for _, data := range c.gpuLocalAdapterMemoryPerfDataObject {
instance := parseGPUCounterInstanceString(data.Name)
localAdapterMemoryMap[instance.Phys] += data.LocalUsage
}
device, ok := c.gpuDeviceCache[instance.Luid]
if !ok {
continue
}
for phys, localUsage := range localAdapterMemoryMap {
ch <- prometheus.MustNewConstMetric(
c.gpuLocalAdapterMemoryUsage,
prometheus.GaugeValue,
localUsage,
phys,
data.LocalUsage,
instance.Luid, device.ID, instance.Phys, instance.Part,
)
}
@@ -384,20 +475,19 @@ func (c *Collector) collectGpuNonLocalAdapterMemoryMetrics(ch chan<- prometheus.
return fmt.Errorf("failed to collect GPU Non Local Adapter Memory perf data: %w", err)
}
nonLocalAdapterMemoryMap := make(map[string]float64)
for _, data := range c.gpuNonLocalAdapterMemoryPerfDataObject {
instance := parseGPUCounterInstanceString(data.Name)
nonLocalAdapterMemoryMap[instance.Phys] += data.NonLocalUsage
}
device, ok := c.gpuDeviceCache[instance.Luid]
if !ok {
continue
}
for phys, nonLocalUsage := range nonLocalAdapterMemoryMap {
ch <- prometheus.MustNewConstMetric(
c.gpuNonLocalAdapterMemoryUsage,
prometheus.GaugeValue,
nonLocalUsage,
phys,
data.NonLocalUsage,
instance.Luid, device.ID, instance.Phys, instance.Part,
)
}
@@ -410,56 +500,47 @@ func (c *Collector) collectGpuProcessMemoryMetrics(ch chan<- prometheus.Metric)
return fmt.Errorf("failed to collect GPU Process Memory perf data: %w", err)
}
processDedicatedUsageMap := make(map[PidPhys]float64)
processLocalUsageMap := make(map[PidPhys]float64)
processNonLocalUsageMap := make(map[PidPhys]float64)
processSharedUsageMap := make(map[PidPhys]float64)
processTotalCommittedMap := make(map[PidPhys]float64)
for _, data := range c.gpuProcessMemoryPerfDataObject {
instance := parseGPUCounterInstanceString(data.Name)
key := PidPhys{
Pid: instance.Pid,
Phys: instance.Phys,
device, ok := c.gpuDeviceCache[instance.Luid]
if !ok {
continue
}
processDedicatedUsageMap[key] += data.DedicatedUsage
processLocalUsageMap[key] += data.LocalUsage
processNonLocalUsageMap[key] += data.NonLocalUsage
processSharedUsageMap[key] += data.SharedUsage
processTotalCommittedMap[key] += data.TotalCommitted
}
for key, dedicatedUsage := range processDedicatedUsageMap {
ch <- prometheus.MustNewConstMetric(
c.gpuProcessMemoryDedicatedUsage,
prometheus.GaugeValue,
dedicatedUsage,
key.Pid, key.Phys,
data.DedicatedUsage,
instance.Pid, instance.Luid, device.ID, instance.Phys,
)
ch <- prometheus.MustNewConstMetric(
c.gpuProcessMemoryLocalUsage,
prometheus.GaugeValue,
processLocalUsageMap[key],
key.Pid, key.Phys,
data.LocalUsage,
instance.Pid, instance.Luid, device.ID, instance.Phys,
)
ch <- prometheus.MustNewConstMetric(
c.gpuProcessMemoryNonLocalUsage,
prometheus.GaugeValue,
processNonLocalUsageMap[key],
key.Pid, key.Phys,
data.NonLocalUsage,
instance.Pid, instance.Luid, device.ID, instance.Phys,
)
ch <- prometheus.MustNewConstMetric(
c.gpuProcessMemorySharedUsage,
prometheus.GaugeValue,
processSharedUsageMap[key],
key.Pid, key.Phys,
data.SharedUsage,
instance.Pid, instance.Luid, device.ID, instance.Phys,
)
ch <- prometheus.MustNewConstMetric(
c.gpuProcessMemoryTotalCommitted,
prometheus.GaugeValue,
processTotalCommittedMap[key],
key.Pid, key.Phys,
data.TotalCommitted,
instance.Pid, instance.Luid, device.ID, instance.Phys,
)
}
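The GPU collector now resolves every perf-counter instance to a device through gpuDeviceCache, which Build() keys by a formatted LUID; instances whose LUID is not in the cache are skipped. Below is a small sketch of how that lookup is expected to line up, with a made-up instance name and a simplified stand-in for parseGPUCounterInstanceString.

package main

import (
	"fmt"
	"strings"
)

// parseLuid is a simplified stand-in for parseGPUCounterInstanceString: it
// extracts the two LUID tokens from a counter instance name and joins them the
// same way the collector does.
func parseLuid(instance string) string {
	parts := strings.Split(instance, "_")
	for i, part := range parts {
		if part == "luid" && i+2 < len(parts) {
			return fmt.Sprintf("%s_%s", parts[i+1], parts[i+2])
		}
	}

	return ""
}

func main() {
	// Hypothetical cache, keyed the way Build() keys gpuDeviceCache:
	// fmt.Sprintf("0x%08X_0x%08X", gpu.LUID.HighPart, gpu.LUID.LowPart).
	cache := map[string]string{
		"0x00000000_0x0000C86B": "example-device-instance-id",
	}

	// A made-up, but typically shaped, "GPU Engine" counter instance name.
	instance := "pid_4212_luid_0x00000000_0x0000C86B_phys_0_eng_0_engtype_3D"

	if deviceID, ok := cache[parseLuid(instance)]; ok {
		fmt.Println("instance belongs to device", deviceID)
	} else {
		fmt.Println("unknown LUID, sample would be skipped")
	}
}

The lookup assumes the counter instance names carry the LUID in the same zero-padded 0x%08X form that Build() uses for the cache key.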

View File

@@ -18,28 +18,34 @@
package gpu
import (
"fmt"
"strings"
)
type Instance struct {
Pid string
Luid [2]string
Phys string
Eng string
Engtype string
Part string
Pid string
Luid string
DeviceID string
Phys string
Eng string
Engtype string
Part string
}
type PidPhys struct {
Pid string
Phys string
Pid string
Luid string
DeviceID string
Phys string
}
type PidPhysEngEngType struct {
Pid string
Phys string
Eng string
Engtype string
Pid string
Luid string
DeviceID string
Phys string
Eng string
Engtype string
}
func parseGPUCounterInstanceString(s string) Instance {
@@ -58,8 +64,7 @@ func parseGPUCounterInstanceString(s string) Instance {
}
case "luid":
if i+2 < len(parts) {
instance.Luid[0] = parts[i+1]
instance.Luid[1] = parts[i+2]
instance.Luid = fmt.Sprintf("%s_%s", parts[i+1], parts[i+2])
}
case "phys":
if i+1 < len(parts) {

View File

@@ -78,11 +78,6 @@ var ConfigDefaults = Config{
// Collector is a Prometheus Collector for hyper-v.
type Collector struct {
config Config
collectorFns []func(ch chan<- prometheus.Metric) error
closeFns []func()
collectorDataStore
collectorDynamicMemoryBalancer
collectorDynamicMemoryVM
@@ -98,6 +93,12 @@ type Collector struct {
collectorVirtualSMB
collectorVirtualStorageDevice
collectorVirtualSwitch
config Config
logger *slog.Logger
collectorFns []func(ch chan<- prometheus.Metric) error
closeFns []func()
}
func New(config *Config) *Collector {
@@ -151,6 +152,7 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
c.collectorFns = make([]func(ch chan<- prometheus.Metric) error, 0, len(c.config.CollectorsEnabled))
c.closeFns = make([]func(), 0, len(c.config.CollectorsEnabled))
@@ -256,10 +258,10 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
}
if buildNumber < subCollectors[name].minBuildNumber {
logger.Warn(fmt.Sprintf(
c.logger.Warn(fmt.Sprintf(
"collector %s requires windows build version %d. Current build version: %d",
name, subCollectors[name].minBuildNumber, buildNumber,
), slog.String("collector", name))
))
continue
}

View File

@@ -132,7 +132,7 @@ type perfDataCounterValuesDataStore struct {
func (c *Collector) buildDataStore() error {
var err error
c.perfDataCollectorDataStore, err = pdh.NewCollector[perfDataCounterValuesDataStore](pdh.CounterTypeRaw, "Hyper-V DataStore", pdh.InstancesAll)
c.perfDataCollectorDataStore, err = pdh.NewCollector[perfDataCounterValuesDataStore](c.logger, pdh.CounterTypeRaw, "Hyper-V DataStore", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V DataStore collector: %w", err)
}

View File

@@ -52,7 +52,7 @@ func (c *Collector) buildDynamicMemoryBalancer() error {
var err error
// https://learn.microsoft.com/en-us/archive/blogs/chrisavis/monitoring-dynamic-memory-in-windows-server-hyper-v-2012
c.perfDataCollectorDynamicMemoryBalancer, err = pdh.NewCollector[perfDataCounterValuesDynamicMemoryBalancer](pdh.CounterTypeRaw, "Hyper-V Dynamic Memory Balancer", pdh.InstancesAll)
c.perfDataCollectorDynamicMemoryBalancer, err = pdh.NewCollector[perfDataCounterValuesDynamicMemoryBalancer](c.logger, pdh.CounterTypeRaw, "Hyper-V Dynamic Memory Balancer", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Machine Health Summary collector: %w", err)
}

View File

@@ -63,7 +63,7 @@ type perfDataCounterValuesDynamicMemoryVM struct {
func (c *Collector) buildDynamicMemoryVM() error {
var err error
c.perfDataCollectorDynamicMemoryVM, err = pdh.NewCollector[perfDataCounterValuesDynamicMemoryVM](pdh.CounterTypeRaw, "Hyper-V Dynamic Memory VM", pdh.InstancesAll)
c.perfDataCollectorDynamicMemoryVM, err = pdh.NewCollector[perfDataCounterValuesDynamicMemoryVM](c.logger, pdh.CounterTypeRaw, "Hyper-V Dynamic Memory VM", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Dynamic Memory VM collector: %w", err)
}

View File

@@ -52,7 +52,7 @@ type perfDataCounterValuesHypervisorLogicalProcessor struct {
func (c *Collector) buildHypervisorLogicalProcessor() error {
var err error
c.perfDataCollectorHypervisorLogicalProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorLogicalProcessor](pdh.CounterTypeRaw, "Hyper-V Hypervisor Logical Processor", pdh.InstancesAll)
c.perfDataCollectorHypervisorLogicalProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorLogicalProcessor](c.logger, pdh.CounterTypeRaw, "Hyper-V Hypervisor Logical Processor", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Logical Processor collector: %w", err)
}

View File

@@ -80,7 +80,7 @@ type perfDataCounterValuesHypervisorRootPartition struct {
func (c *Collector) buildHypervisorRootPartition() error {
var err error
c.perfDataCollectorHypervisorRootPartition, err = pdh.NewCollector[perfDataCounterValuesHypervisorRootPartition](pdh.CounterTypeRaw, "Hyper-V Hypervisor Root Partition", []string{"Root"})
c.perfDataCollectorHypervisorRootPartition, err = pdh.NewCollector[perfDataCounterValuesHypervisorRootPartition](c.logger, pdh.CounterTypeRaw, "Hyper-V Hypervisor Root Partition", []string{"Root"})
if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Root Partition collector: %w", err)
}
@@ -310,26 +310,31 @@ func (c *Collector) collectHypervisorRootPartition(ch chan<- prometheus.Metric)
prometheus.GaugeValue,
c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartition2MDevicePages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartition2MGPAPages,
prometheus.GaugeValue,
c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartition2MGPAPages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartition4KDevicePages,
prometheus.GaugeValue,
c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartition4KDevicePages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartition4KGPAPages,
prometheus.GaugeValue,
c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartition4KGPAPages,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionVirtualTLBFlushEntries,
prometheus.CounterValue,
c.perfDataObjectHypervisorRootPartition[0].HypervisorRootPartitionVirtualTLBFlushEntries,
)
ch <- prometheus.MustNewConstMetric(
c.hypervisorRootPartitionVirtualTLBPages,
prometheus.GaugeValue,

View File

@@ -53,7 +53,7 @@ type perfDataCounterValuesHypervisorRootVirtualProcessor struct {
func (c *Collector) buildHypervisorRootVirtualProcessor() error {
var err error
c.perfDataCollectorHypervisorRootVirtualProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorRootVirtualProcessor](pdh.CounterTypeRaw, "Hyper-V Hypervisor Root Virtual Processor", pdh.InstancesAll)
c.perfDataCollectorHypervisorRootVirtualProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorRootVirtualProcessor](c.logger, pdh.CounterTypeRaw, "Hyper-V Hypervisor Root Virtual Processor", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Root Virtual Processor collector: %w", err)
}

View File

@@ -52,7 +52,7 @@ type perfDataCounterValuesHypervisorVirtualProcessor struct {
func (c *Collector) buildHypervisorVirtualProcessor() error {
var err error
c.perfDataCollectorHypervisorVirtualProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorVirtualProcessor](pdh.CounterTypeRaw, "Hyper-V Hypervisor Virtual Processor", pdh.InstancesAll)
c.perfDataCollectorHypervisorVirtualProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorVirtualProcessor](c.logger, pdh.CounterTypeRaw, "Hyper-V Hypervisor Virtual Processor", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Virtual Processor collector: %w", err)
}

View File

@@ -52,7 +52,7 @@ type perfDataCounterValuesLegacyNetworkAdapter struct {
func (c *Collector) buildLegacyNetworkAdapter() error {
var err error
c.perfDataCollectorLegacyNetworkAdapter, err = pdh.NewCollector[perfDataCounterValuesLegacyNetworkAdapter](pdh.CounterTypeRaw, "Hyper-V Legacy Network Adapter", pdh.InstancesAll)
c.perfDataCollectorLegacyNetworkAdapter, err = pdh.NewCollector[perfDataCounterValuesLegacyNetworkAdapter](c.logger, pdh.CounterTypeRaw, "Hyper-V Legacy Network Adapter", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Legacy Network Adapter collector: %w", err)
}

View File

@@ -44,7 +44,7 @@ type perfDataCounterValuesVirtualMachineHealthSummary struct {
func (c *Collector) buildVirtualMachineHealthSummary() error {
var err error
c.perfDataCollectorVirtualMachineHealthSummary, err = pdh.NewCollector[perfDataCounterValuesVirtualMachineHealthSummary](pdh.CounterTypeRaw, "Hyper-V Virtual Machine Health Summary", nil)
c.perfDataCollectorVirtualMachineHealthSummary, err = pdh.NewCollector[perfDataCounterValuesVirtualMachineHealthSummary](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual Machine Health Summary", nil)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Machine Health Summary collector: %w", err)
}

View File

@@ -46,7 +46,7 @@ type perfDataCounterValuesVirtualMachineVidPartition struct {
func (c *Collector) buildVirtualMachineVidPartition() error {
var err error
c.perfDataCollectorVirtualMachineVidPartition, err = pdh.NewCollector[perfDataCounterValuesVirtualMachineVidPartition](pdh.CounterTypeRaw, "Hyper-V VM Vid Partition", pdh.InstancesAll)
c.perfDataCollectorVirtualMachineVidPartition, err = pdh.NewCollector[perfDataCounterValuesVirtualMachineVidPartition](c.logger, pdh.CounterTypeRaw, "Hyper-V VM Vid Partition", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V VM Vid Partition collector: %w", err)
}

View File

@@ -52,7 +52,7 @@ type perfDataCounterValuesVirtualNetworkAdapter struct {
func (c *Collector) buildVirtualNetworkAdapter() error {
var err error
c.perfDataCollectorVirtualNetworkAdapter, err = pdh.NewCollector[perfDataCounterValuesVirtualNetworkAdapter](pdh.CounterTypeRaw, "Hyper-V Virtual Network Adapter", pdh.InstancesAll)
c.perfDataCollectorVirtualNetworkAdapter, err = pdh.NewCollector[perfDataCounterValuesVirtualNetworkAdapter](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual Network Adapter", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Network Adapter collector: %w", err)
}

View File

@@ -167,7 +167,7 @@ type perfDataCounterValuesVirtualNetworkAdapterDropReasons struct {
func (c *Collector) buildVirtualNetworkAdapterDropReasons() error {
var err error
c.perfDataCollectorVirtualNetworkAdapterDropReasons, err = pdh.NewCollector[perfDataCounterValuesVirtualNetworkAdapterDropReasons](pdh.CounterTypeRaw, "Hyper-V Virtual Network Adapter Drop Reasons", pdh.InstancesAll)
c.perfDataCollectorVirtualNetworkAdapterDropReasons, err = pdh.NewCollector[perfDataCounterValuesVirtualNetworkAdapterDropReasons](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual Network Adapter Drop Reasons", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Network Adapter Drop Reasons collector: %w", err)
}
@@ -195,294 +195,343 @@ func (c *Collector) collectVirtualNetworkAdapterDropReasons(ch chan<- prometheus
data.VirtualNetworkAdapterDropReasonsOutgoingNativeFwdingReq,
data.Name, "NativeFwdingReq", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingNativeFwdingReq,
data.Name, "NativeFwdingReq", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingMTUMismatch,
data.Name, "MTUMismatch", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingMTUMismatch,
data.Name, "MTUMismatch", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingInvalidConfig,
data.Name, "InvalidConfig", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingInvalidConfig,
data.Name, "InvalidConfig", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingRequiredExtensionMissing,
data.Name, "RequiredExtensionMissing", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingRequiredExtensionMissing,
data.Name, "RequiredExtensionMissing", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingVirtualSubnetId,
data.Name, "VirtualSubnetId", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingVirtualSubnetId,
data.Name, "VirtualSubnetId", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingBridgeReserved,
data.Name, "BridgeReserved", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingBridgeReserved,
data.Name, "BridgeReserved", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingRouterGuard,
data.Name, "RouterGuard", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingRouterGuard,
data.Name, "RouterGuard", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingDhcpGuard,
data.Name, "DhcpGuard", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingDhcpGuard,
data.Name, "DhcpGuard", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingMacSpoofing,
data.Name, "MacSpoofing", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingMacSpoofing,
data.Name, "MacSpoofing", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingIpsec,
data.Name, "Ipsec", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingIpsec,
data.Name, "Ipsec", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingQos,
data.Name, "Qos", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingQos,
data.Name, "Qos", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingFailedPvlanSetting,
data.Name, "FailedPvlanSetting", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingFailedPvlanSetting,
data.Name, "FailedPvlanSetting", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingFailedSecurityPolicy,
data.Name, "FailedSecurityPolicy", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingFailedSecurityPolicy,
data.Name, "FailedSecurityPolicy", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingUnauthorizedMAC,
data.Name, "UnauthorizedMAC", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingUnauthorizedMAC,
data.Name, "UnauthorizedMAC", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingUnauthorizedVLAN,
data.Name, "UnauthorizedVLAN", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingUnauthorizedVLAN,
data.Name, "UnauthorizedVLAN", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingFilteredVLAN,
data.Name, "FilteredVLAN", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingFilteredVLAN,
data.Name, "FilteredVLAN", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingFiltered,
data.Name, "Filtered", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingFiltered,
data.Name, "Filtered", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingBusy,
data.Name, "Busy", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingBusy,
data.Name, "Busy", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingNotAccepted,
data.Name, "NotAccepted", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingNotAccepted,
data.Name, "NotAccepted", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingDisconnected,
data.Name, "Disconnected", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingDisconnected,
data.Name, "Disconnected", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingNotReady,
data.Name, "NotReady", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingNotReady,
data.Name, "NotReady", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingResources,
data.Name, "Resources", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingResources,
data.Name, "Resources", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingInvalidPacket,
data.Name, "InvalidPacket", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingInvalidPacket,
data.Name, "InvalidPacket", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingInvalidData,
data.Name, "InvalidData", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsIncomingInvalidData,
data.Name, "InvalidData", "incoming",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,
data.VirtualNetworkAdapterDropReasonsOutgoingUnknown,
data.Name, "Unknown", "outgoing",
)
ch <- prometheus.MustNewConstMetric(
c.virtualNetworkAdapterDropReasons,
prometheus.CounterValue,

View File

@@ -74,7 +74,7 @@ type perfDataCounterValuesVirtualSMB struct {
func (c *Collector) buildVirtualSMB() error {
var err error
c.perfDataCollectorVirtualSMB, err = pdh.NewCollector[perfDataCounterValuesVirtualSMB](pdh.CounterTypeRaw, "Hyper-V Virtual SMB", pdh.InstancesAll)
c.perfDataCollectorVirtualSMB, err = pdh.NewCollector[perfDataCounterValuesVirtualSMB](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual SMB", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual SMB collector: %w", err)
}

View File

@@ -64,7 +64,7 @@ type perfDataCounterValuesVirtualStorageDevice struct {
func (c *Collector) buildVirtualStorageDevice() error {
var err error
c.perfDataCollectorVirtualStorageDevice, err = pdh.NewCollector[perfDataCounterValuesVirtualStorageDevice](pdh.CounterTypeRaw, "Hyper-V Virtual Storage Device", pdh.InstancesAll)
c.perfDataCollectorVirtualStorageDevice, err = pdh.NewCollector[perfDataCounterValuesVirtualStorageDevice](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual Storage Device", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Storage Device collector: %w", err)
}

View File

@@ -82,7 +82,7 @@ type perfDataCounterValuesVirtualSwitch struct {
func (c *Collector) buildVirtualSwitch() error {
var err error
c.perfDataCollectorVirtualSwitch, err = pdh.NewCollector[perfDataCounterValuesVirtualSwitch](pdh.CounterTypeRaw, "Hyper-V Virtual Switch", pdh.InstancesAll)
c.perfDataCollectorVirtualSwitch, err = pdh.NewCollector[perfDataCounterValuesVirtualSwitch](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual Switch", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Switch collector: %w", err)
}
@@ -265,6 +265,7 @@ func (c *Collector) collectVirtualSwitch(ch chan<- prometheus.Metric) error {
data.VirtualSwitchDirectedPacketsReceived,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchDirectedPacketsSent,
prometheus.CounterValue,
@@ -278,18 +279,21 @@ func (c *Collector) collectVirtualSwitch(ch chan<- prometheus.Metric) error {
data.VirtualSwitchDroppedPacketsIncoming,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchDroppedPacketsOutgoing,
prometheus.CounterValue,
data.VirtualSwitchDroppedPacketsOutgoing,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchExtensionsDroppedPacketsIncoming,
prometheus.CounterValue,
data.VirtualSwitchExtensionsDroppedPacketsIncoming,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchExtensionsDroppedPacketsOutgoing,
prometheus.CounterValue,
@@ -303,24 +307,28 @@ func (c *Collector) collectVirtualSwitch(ch chan<- prometheus.Metric) error {
data.VirtualSwitchLearnedMacAddresses,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchMulticastPacketsReceived,
prometheus.CounterValue,
data.VirtualSwitchMulticastPacketsReceived,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchMulticastPacketsSent,
prometheus.CounterValue,
data.VirtualSwitchMulticastPacketsSent,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchNumberOfSendChannelMoves,
prometheus.CounterValue,
data.VirtualSwitchNumberOfSendChannelMoves,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchNumberOfVMQMoves,
prometheus.CounterValue,
@@ -348,12 +356,14 @@ func (c *Collector) collectVirtualSwitch(ch chan<- prometheus.Metric) error {
data.VirtualSwitchPacketsReceived,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchPacketsSent,
prometheus.CounterValue,
data.VirtualSwitchPacketsSent,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.virtualSwitchPurgedMacAddresses,
prometheus.CounterValue,

View File

@@ -50,14 +50,18 @@ var ConfigDefaults = Config{
}
type Collector struct {
config Config
iisVersion simpleVersion
info *prometheus.Desc
collectorWebService
collectorHttpServiceRequestQueues
collectorAppPoolWAS
collectorW3SVCW3WP
collectorWebServiceCache
config Config
iisVersion simpleVersion
logger *slog.Logger
info *prometheus.Desc
}
func New(config *Config) *Collector {
@@ -150,6 +154,7 @@ func (c *Collector) GetName() string {
func (c *Collector) Close() error {
c.perfDataCollectorWebService.Close()
c.perfDataCollectorHttpServiceRequestQueues.Close()
c.perfDataCollectorAppPoolWAS.Close()
c.w3SVCW3WPPerfDataCollector.Close()
c.serviceCachePerfDataCollector.Close()
@@ -158,9 +163,9 @@ func (c *Collector) Close() error {
}
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
logger = logger.With(slog.String("collector", Name))
c.logger = logger.With(slog.String("collector", Name))
c.iisVersion = c.getIISVersion(logger)
c.iisVersion = c.getIISVersion()
c.info = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
@@ -175,6 +180,10 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
errs = append(errs, fmt.Errorf("failed to build Web Service collector: %w", err))
}
if err := c.buildHttpServiceRequestQueues(); err != nil {
errs = append(errs, fmt.Errorf("failed to build Http Service collector: %w", err))
}
if err := c.buildAppPoolWAS(); err != nil {
errs = append(errs, fmt.Errorf("failed to build APP_POOL_WAS collector: %w", err))
}
@@ -195,10 +204,10 @@ type simpleVersion struct {
minor uint64
}
func (c *Collector) getIISVersion(logger *slog.Logger) simpleVersion {
func (c *Collector) getIISVersion() simpleVersion {
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\InetStp\`, registry.QUERY_VALUE)
if err != nil {
logger.Warn("couldn't open registry to determine IIS version",
c.logger.Warn("couldn't open registry to determine IIS version",
slog.Any("err", err),
)
@@ -208,7 +217,7 @@ func (c *Collector) getIISVersion(logger *slog.Logger) simpleVersion {
defer func() {
err = k.Close()
if err != nil {
logger.Warn("Failed to close registry key",
c.logger.Warn("Failed to close registry key",
slog.Any("err", err),
)
}
@@ -216,7 +225,7 @@ func (c *Collector) getIISVersion(logger *slog.Logger) simpleVersion {
major, _, err := k.GetIntegerValue("MajorVersion")
if err != nil {
logger.Warn("Couldn't open registry to determine IIS version",
c.logger.Warn("Couldn't open registry to determine IIS version",
slog.Any("err", err),
)
@@ -225,14 +234,14 @@ func (c *Collector) getIISVersion(logger *slog.Logger) simpleVersion {
minor, _, err := k.GetIntegerValue("MinorVersion")
if err != nil {
logger.Warn("Couldn't open registry to determine IIS version",
c.logger.Warn("Couldn't open registry to determine IIS version",
slog.Any("err", err),
)
return simpleVersion{}
}
logger.Debug(fmt.Sprintf("Detected IIS %d.%d\n", major, minor))
c.logger.Debug(fmt.Sprintf("Detected IIS %d.%d\n", major, minor))
return simpleVersion{
major: major,
@@ -255,6 +264,10 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
errs = append(errs, fmt.Errorf("failed to collect Web Service metrics: %w", err))
}
if err := c.collectHttpServiceRequestQueues(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect Http Service Request Queues metrics: %w", err))
}
if err := c.collectAppPoolWAS(ch); err != nil {
errs = append(errs, fmt.Errorf("failed to collect APP_POOL_WAS metrics: %w", err))
}
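getIISVersion now logs through c.logger but still reads the version out of the InetStp registry key. A standalone sketch of that lookup, assuming the registry package above is golang.org/x/sys/windows/registry and reporting the version instead of returning the collector's simpleVersion type:

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows/registry"
)

func main() {
	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\InetStp\`, registry.QUERY_VALUE)
	if err != nil {
		fmt.Println("couldn't open registry to determine IIS version:", err)

		return
	}
	defer k.Close()

	major, _, err := k.GetIntegerValue("MajorVersion")
	if err != nil {
		fmt.Println("couldn't read MajorVersion:", err)

		return
	}

	minor, _, err := k.GetIntegerValue("MinorVersion")
	if err != nil {
		fmt.Println("couldn't read MinorVersion:", err)

		return
	}

	fmt.Printf("Detected IIS %d.%d\n", major, minor)
}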

View File

@@ -79,7 +79,7 @@ var applicationStates = map[uint32]string{
func (c *Collector) buildAppPoolWAS() error {
var err error
c.perfDataCollectorAppPoolWAS, err = pdh.NewCollector[perfDataCounterValuesAppPoolWAS](pdh.CounterTypeRaw, "APP_POOL_WAS", pdh.InstancesAll)
c.perfDataCollectorAppPoolWAS, err = pdh.NewCollector[perfDataCounterValuesAppPoolWAS](c.logger, pdh.CounterTypeRaw, "APP_POOL_WAS", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create APP_POOL_WAS collector: %w", err)
}
@@ -201,66 +201,77 @@ func (c *Collector) collectAppPoolWAS(ch chan<- prometheus.Metric) error {
data.CurrentApplicationPoolUptime,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.currentWorkerProcesses,
prometheus.GaugeValue,
data.CurrentWorkerProcesses,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.maximumWorkerProcesses,
prometheus.GaugeValue,
data.MaximumWorkerProcesses,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.recentWorkerProcessFailures,
prometheus.GaugeValue,
data.RecentWorkerProcessFailures,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.timeSinceLastWorkerProcessFailure,
prometheus.GaugeValue,
data.TimeSinceLastWorkerProcessFailure,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalApplicationPoolRecycles,
prometheus.CounterValue,
data.TotalApplicationPoolRecycles,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalApplicationPoolUptime,
prometheus.CounterValue,
data.TotalApplicationPoolUptime,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessesCreated,
prometheus.CounterValue,
data.TotalWorkerProcessesCreated,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessFailures,
prometheus.CounterValue,
data.TotalWorkerProcessFailures,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessPingFailures,
prometheus.CounterValue,
data.TotalWorkerProcessPingFailures,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessShutdownFailures,
prometheus.CounterValue,
data.TotalWorkerProcessShutdownFailures,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalWorkerProcessStartupFailures,
prometheus.CounterValue,

View File

@@ -0,0 +1,137 @@
// SPDX-License-Identifier: Apache-2.0
//
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package iis
import (
"fmt"
"strings"
"github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
type collectorHttpServiceRequestQueues struct {
perfDataCollectorHttpServiceRequestQueues *pdh.Collector
perfDataObjectHttpServiceRequestQueues []perfDataCounterValuesHttpServiceRequestQueues
httpRequestQueuesCurrentQueueSize *prometheus.Desc
httpRequestQueuesTotalRejectedRequest *prometheus.Desc
httpRequestQueuesMaxQueueItemAge *prometheus.Desc
httpRequestQueuesArrivalRate *prometheus.Desc
}
type perfDataCounterValuesHttpServiceRequestQueues struct {
Name string
HttpRequestQueuesCurrentQueueSize float64 `perfdata:"CurrentQueueSize"`
HttpRequestQueuesTotalRejectedRequests float64 `perfdata:"RejectedRequests"`
HttpRequestQueuesMaxQueueItemAge float64 `perfdata:"MaxQueueItemAge"`
HttpRequestQueuesArrivalRate float64 `perfdata:"ArrivalRate"`
}
func (p perfDataCounterValuesHttpServiceRequestQueues) GetName() string {
return p.Name
}
func (c *Collector) buildHttpServiceRequestQueues() error {
var err error
c.logger.Info("IIS/HttpServiceRequestQueues collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.")
c.perfDataCollectorHttpServiceRequestQueues, err = pdh.NewCollector[perfDataCounterValuesHttpServiceRequestQueues](c.logger, pdh.CounterTypeRaw, "HTTP Service Request Queues", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Http Service collector: %w", err)
}
c.httpRequestQueuesCurrentQueueSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "http_requests_current_queue_size"),
"Http Request Current Queue Size",
[]string{"site"},
nil,
)
c.httpRequestQueuesTotalRejectedRequest = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "http_request_total_rejected_request"),
"Http Request Total Rejected Request",
[]string{"site"},
nil,
)
c.httpRequestQueuesMaxQueueItemAge = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "http_requests_max_queue_item_age"),
"Http Request Max Queue Item Age. The values might be bogus if the queue is empty.",
[]string{"site"},
nil,
)
c.httpRequestQueuesArrivalRate = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "http_requests_arrival_rate"),
"Http Request Arrival Rate",
[]string{"site"},
nil,
)
return nil
}
func (c *Collector) collectHttpServiceRequestQueues(ch chan<- prometheus.Metric) error {
err := c.perfDataCollectorHttpServiceRequestQueues.Collect(&c.perfDataObjectHttpServiceRequestQueues)
if err != nil {
return fmt.Errorf("failed to collect Http Service Request Queues metrics: %w", err)
}
deduplicateIISNames(c.perfDataObjectHttpServiceRequestQueues)
for _, data := range c.perfDataObjectHttpServiceRequestQueues {
if strings.HasPrefix(data.Name, "---") {
continue
}
if c.config.SiteExclude.MatchString(data.Name) || !c.config.SiteInclude.MatchString(data.Name) {
continue
}
ch <- prometheus.MustNewConstMetric(
c.httpRequestQueuesCurrentQueueSize,
prometheus.GaugeValue,
data.HttpRequestQueuesCurrentQueueSize,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.httpRequestQueuesTotalRejectedRequest,
prometheus.GaugeValue,
data.HttpRequestQueuesTotalRejectedRequests,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.httpRequestQueuesMaxQueueItemAge,
prometheus.GaugeValue,
data.HttpRequestQueuesMaxQueueItemAge,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.httpRequestQueuesArrivalRate,
prometheus.GaugeValue,
data.HttpRequestQueuesArrivalRate,
data.Name,
)
}
return nil
}
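
The new collector above is a compact instance of the perfdata pattern this changeset touches everywhere: a value struct whose perfdata struct tags name the Windows performance counters, a generic pdh.NewCollector constructor that now takes the collector's logger as its first argument, and a Collect call that fills a slice of those structs before each value is re-emitted as a Prometheus metric. The condensed sketch below restates that flow; the names perfDataExampleValues, buildExample and collectExample (and the matching Collector fields) are hypothetical, and the internal pdh API is assumed to behave exactly as it appears in this diff.

// Sketch only: type, field, and metric names below are illustrative and do
// not exist in the repository.
type perfDataExampleValues struct {
	Name string

	CurrentQueueSize float64 `perfdata:"CurrentQueueSize"`
}

func (p perfDataExampleValues) GetName() string { return p.Name }

func (c *Collector) buildExample() error {
	var err error

	// Logger-first constructor, as introduced throughout this changeset.
	c.perfDataCollectorExample, err = pdh.NewCollector[perfDataExampleValues](c.logger, pdh.CounterTypeRaw, "HTTP Service Request Queues", pdh.InstancesAll)
	if err != nil {
		return fmt.Errorf("failed to create example collector: %w", err)
	}

	c.exampleCurrentQueueSize = prometheus.NewDesc(
		prometheus.BuildFQName(types.Namespace, Name, "example_current_queue_size"),
		"Example queue size metric",
		[]string{"site"},
		nil,
	)

	return nil
}

func (c *Collector) collectExample(ch chan<- prometheus.Metric) error {
	if err := c.perfDataCollectorExample.Collect(&c.perfDataObjectExample); err != nil {
		return fmt.Errorf("failed to collect example metrics: %w", err)
	}

	for _, data := range c.perfDataObjectExample {
		ch <- prometheus.MustNewConstMetric(c.exampleCurrentQueueSize, prometheus.GaugeValue, data.CurrentQueueSize, data.Name)
	}

	return nil
}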

View File

@@ -152,13 +152,13 @@ func (p perfDataCounterValuesW3SVCW3WPV8) GetName() string {
func (c *Collector) buildW3SVCW3WP() error {
var err error
c.w3SVCW3WPPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesW3SVCW3WP](pdh.CounterTypeRaw, "W3SVC_W3WP", pdh.InstancesAll)
c.w3SVCW3WPPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesW3SVCW3WP](c.logger, pdh.CounterTypeRaw, "W3SVC_W3WP", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create W3SVC_W3WP collector: %w", err)
}
if c.iisVersion.major >= 8 {
c.w3SVCW3WPPerfDataCollectorV8, err = pdh.NewCollector[perfDataCounterValuesW3SVCW3WPV8](pdh.CounterTypeRaw, "W3SVC_W3WP", pdh.InstancesAll)
c.w3SVCW3WPPerfDataCollectorV8, err = pdh.NewCollector[perfDataCounterValuesW3SVCW3WPV8](c.logger, pdh.CounterTypeRaw, "W3SVC_W3WP", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create W3SVC_W3WP collector: %w", err)
}
@@ -441,6 +441,7 @@ func (c *Collector) collectW3SVCW3WPv8(ch chan<- prometheus.Metric) error {
pid,
"401",
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPRequestErrorsTotal,
prometheus.CounterValue,
@@ -449,6 +450,7 @@ func (c *Collector) collectW3SVCW3WPv8(ch chan<- prometheus.Metric) error {
pid,
"403",
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPRequestErrorsTotal,
prometheus.CounterValue,
@@ -457,6 +459,7 @@ func (c *Collector) collectW3SVCW3WPv8(ch chan<- prometheus.Metric) error {
pid,
"404",
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPRequestErrorsTotal,
prometheus.CounterValue,
@@ -465,6 +468,7 @@ func (c *Collector) collectW3SVCW3WPv8(ch chan<- prometheus.Metric) error {
pid,
"500",
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPWebSocketRequestsActive,
prometheus.CounterValue,
@@ -472,6 +476,7 @@ func (c *Collector) collectW3SVCW3WPv8(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPWebSocketConnectionAttempts,
prometheus.CounterValue,
@@ -479,6 +484,7 @@ func (c *Collector) collectW3SVCW3WPv8(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPWebSocketConnectionsAccepted,
prometheus.CounterValue,
@@ -486,6 +492,7 @@ func (c *Collector) collectW3SVCW3WPv8(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPWebSocketConnectionsRejected,
prometheus.CounterValue,
@@ -507,10 +514,6 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
deduplicateIISNames(c.perfDataObjectW3SVCW3WP)
for _, data := range c.perfDataObjectW3SVCW3WP {
if c.config.AppExclude.MatchString(data.Name) || !c.config.AppInclude.MatchString(data.Name) {
continue
}
// Extract the apppool name from the format <PID>_<NAME>
pid := workerProcessNameExtractor.ReplaceAllString(data.Name, "$1")
@@ -533,6 +536,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
pid,
"busy",
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMaximumThreads,
prometheus.CounterValue,
@@ -540,6 +544,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPRequestsTotal,
prometheus.CounterValue,
@@ -547,6 +552,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPRequestsActive,
prometheus.CounterValue,
@@ -554,6 +560,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPActiveFlushedEntries,
prometheus.GaugeValue,
@@ -561,6 +568,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPCurrentFileCacheMemoryUsage,
prometheus.GaugeValue,
@@ -568,6 +576,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMaximumFileCacheMemoryUsage,
prometheus.CounterValue,
@@ -575,6 +584,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFileCacheFlushesTotal,
prometheus.CounterValue,
@@ -582,6 +592,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFileCacheQueriesTotal,
prometheus.CounterValue,
@@ -589,6 +600,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFileCacheHitsTotal,
prometheus.CounterValue,
@@ -596,6 +608,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFilesCached,
prometheus.GaugeValue,
@@ -603,6 +616,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFilesCachedTotal,
prometheus.CounterValue,
@@ -610,6 +624,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPFilesFlushedTotal,
prometheus.CounterValue,
@@ -617,6 +632,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURICacheFlushesTotal,
prometheus.CounterValue,
@@ -624,6 +640,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURICacheQueriesTotal,
prometheus.CounterValue,
@@ -631,6 +648,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURICacheHitsTotal,
prometheus.CounterValue,
@@ -638,6 +656,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURIsCached,
prometheus.GaugeValue,
@@ -645,6 +664,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURIsCachedTotal,
prometheus.CounterValue,
@@ -652,6 +672,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPURIsFlushedTotal,
prometheus.CounterValue,
@@ -659,6 +680,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataCached,
prometheus.GaugeValue,
@@ -666,6 +688,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataCacheFlushes,
prometheus.CounterValue,
@@ -673,6 +696,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataCacheQueriesTotal,
prometheus.CounterValue,
@@ -680,6 +704,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataCacheHitsTotal,
prometheus.CounterValue,
@@ -687,6 +712,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataCachedTotal,
prometheus.CounterValue,
@@ -694,6 +720,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPMetadataFlushedTotal,
prometheus.CounterValue,
@@ -701,6 +728,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheActiveFlushedItems,
prometheus.CounterValue,
@@ -708,6 +736,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheItems,
prometheus.CounterValue,
@@ -715,6 +744,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheMemoryUsage,
prometheus.CounterValue,
@@ -722,6 +752,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheQueriesTotal,
prometheus.CounterValue,
@@ -729,6 +760,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheHitsTotal,
prometheus.CounterValue,
@@ -736,6 +768,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheFlushedItemsTotal,
prometheus.CounterValue,
@@ -743,6 +776,7 @@ func (c *Collector) collectW3SVCW3WPv7(ch chan<- prometheus.Metric) error {
name,
pid,
)
ch <- prometheus.MustNewConstMetric(
c.w3SVCW3WPOutputCacheFlushesTotal,
prometheus.CounterValue,

View File

@@ -102,7 +102,7 @@ func (p perfDataCounterValuesWebService) GetName() string {
func (c *Collector) buildWebService() error {
var err error
c.perfDataCollectorWebService, err = pdh.NewCollector[perfDataCounterValuesWebService](pdh.CounterTypeRaw, "Web Service", pdh.InstancesAll)
c.perfDataCollectorWebService, err = pdh.NewCollector[perfDataCounterValuesWebService](c.logger, pdh.CounterTypeRaw, "Web Service", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Web Service collector: %w", err)
}
@@ -262,126 +262,147 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.WebServiceCurrentAnonymousUsers,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceCurrentBlockedAsyncIORequests,
prometheus.GaugeValue,
data.WebServiceCurrentBlockedAsyncIORequests,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceCurrentCGIRequests,
prometheus.GaugeValue,
data.WebServiceCurrentCGIRequests,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceCurrentConnections,
prometheus.GaugeValue,
data.WebServiceCurrentConnections,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceCurrentISAPIExtensionRequests,
prometheus.GaugeValue,
data.WebServiceCurrentISAPIExtensionRequests,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceCurrentNonAnonymousUsers,
prometheus.GaugeValue,
data.WebServiceCurrentNonAnonymousUsers,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceServiceUptime,
prometheus.GaugeValue,
data.WebServiceServiceUptime,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalBytesReceived,
prometheus.CounterValue,
data.WebServiceTotalBytesReceived,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalBytesSent,
prometheus.CounterValue,
data.WebServiceTotalBytesSent,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalAnonymousUsers,
prometheus.CounterValue,
data.WebServiceTotalAnonymousUsers,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalBlockedAsyncIORequests,
prometheus.CounterValue,
data.WebServiceTotalBlockedAsyncIORequests,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalCGIRequests,
prometheus.CounterValue,
data.WebServiceTotalCGIRequests,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalConnectionAttemptsAllInstances,
prometheus.CounterValue,
data.WebServiceTotalConnectionAttemptsAllInstances,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalFilesReceived,
prometheus.CounterValue,
data.WebServiceTotalFilesReceived,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalFilesSent,
prometheus.CounterValue,
data.WebServiceTotalFilesSent,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalISAPIExtensionRequests,
prometheus.CounterValue,
data.WebServiceTotalISAPIExtensionRequests,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalLockedErrors,
prometheus.CounterValue,
data.WebServiceTotalLockedErrors,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalLogonAttempts,
prometheus.CounterValue,
data.WebServiceTotalLogonAttempts,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalNonAnonymousUsers,
prometheus.CounterValue,
data.WebServiceTotalNonAnonymousUsers,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalNotFoundErrors,
prometheus.CounterValue,
data.WebServiceTotalNotFoundErrors,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRejectedAsyncIORequests,
prometheus.CounterValue,
data.WebServiceTotalRejectedAsyncIORequests,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -389,6 +410,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"other",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -396,6 +418,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"COPY",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -403,6 +426,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"DELETE",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -410,6 +434,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"GET",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -417,6 +442,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"HEAD",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -424,6 +450,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"LOCK",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -431,6 +458,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"MKCOL",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -438,6 +466,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"MOVE",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -445,6 +474,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"OPTIONS",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -452,6 +482,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"POST",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -459,6 +490,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"PROPFIND",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -466,6 +498,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"PROPPATCH",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -473,6 +506,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"PUT",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -480,6 +514,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"SEARCH",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,
@@ -487,6 +522,7 @@ func (c *Collector) collectWebService(ch chan<- prometheus.Metric) error {
data.Name,
"TRACE",
)
ch <- prometheus.MustNewConstMetric(
c.webServiceTotalRequests,
prometheus.CounterValue,

View File

@@ -64,8 +64,6 @@ type collectorWebServiceCache struct {
}
type perfDataCounterServiceCache struct {
Name string
ServiceCacheActiveFlushedEntries float64 `perfdata:"Active Flushed Entries"`
ServiceCacheCurrentFileCacheMemoryUsage float64 `perfdata:"Current File Cache Memory Usage"`
ServiceCacheMaximumFileCacheMemoryUsage float64 `perfdata:"Maximum File Cache Memory Usage"`
@@ -102,14 +100,10 @@ type perfDataCounterServiceCache struct {
ServiceCacheOutputCacheFlushesTotal float64 `perfdata:"Output Cache Total Flushes"`
}
func (p perfDataCounterServiceCache) GetName() string {
return p.Name
}
func (c *Collector) buildWebServiceCache() error {
var err error
c.serviceCachePerfDataCollector, err = pdh.NewCollector[perfDataCounterServiceCache](pdh.CounterTypeRaw, "Web Service Cache", pdh.InstancesAll)
c.serviceCachePerfDataCollector, err = pdh.NewCollector[perfDataCounterServiceCache](c.logger, pdh.CounterTypeRaw, "Web Service Cache", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create Web Service Cache collector: %w", err)
}
@@ -293,190 +287,217 @@ func (c *Collector) collectWebServiceCache(ch chan<- prometheus.Metric) error {
return fmt.Errorf("failed to collect Web Service Cache metrics: %w", err)
}
deduplicateIISNames(c.perfDataObjectServiceCache)
for _, data := range c.perfDataObjectServiceCache {
if c.config.SiteExclude.MatchString(data.Name) || !c.config.SiteInclude.MatchString(data.Name) {
continue
}
ch <- prometheus.MustNewConstMetric(
c.serviceCacheActiveFlushedEntries,
prometheus.GaugeValue,
data.ServiceCacheActiveFlushedEntries,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheCurrentFileCacheMemoryUsage,
prometheus.GaugeValue,
data.ServiceCacheCurrentFileCacheMemoryUsage,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMaximumFileCacheMemoryUsage,
prometheus.CounterValue,
data.ServiceCacheMaximumFileCacheMemoryUsage,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheFlushesTotal,
prometheus.CounterValue,
data.ServiceCacheFileCacheFlushesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheQueriesTotal,
prometheus.CounterValue,
data.ServiceCacheFileCacheHitsTotal+data.ServiceCacheFileCacheMissesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFileCacheHitsTotal,
prometheus.CounterValue,
data.ServiceCacheFileCacheHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesCached,
prometheus.GaugeValue,
data.ServiceCacheFilesCached,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesCachedTotal,
prometheus.CounterValue,
data.ServiceCacheFilesCachedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheFilesFlushedTotal,
prometheus.CounterValue,
data.ServiceCacheFilesFlushedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheFlushesTotal,
prometheus.CounterValue,
data.ServiceCacheURICacheFlushesTotal,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheFlushesTotal,
prometheus.CounterValue,
data.ServiceCacheURICacheFlushesTotalKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheQueriesTotal,
prometheus.CounterValue,
data.ServiceCacheURICacheHitsTotal+data.ServiceCacheURICacheMissesTotal,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheQueriesTotal,
prometheus.CounterValue,
data.ServiceCacheURICacheHitsTotalKernel+data.ServiceCacheURICacheMissesTotalKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheHitsTotal,
prometheus.CounterValue,
data.ServiceCacheURICacheHitsTotal,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURICacheHitsTotal,
prometheus.CounterValue,
data.ServiceCacheURICacheHitsTotalKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCached,
prometheus.GaugeValue,
data.ServiceCacheURIsCached,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCached,
prometheus.GaugeValue,
data.ServiceCacheURIsCachedKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCachedTotal,
prometheus.CounterValue,
data.ServiceCacheURIsCachedTotal,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsCachedTotal,
prometheus.CounterValue,
data.ServiceCacheURIsCachedTotalKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsFlushedTotal,
prometheus.CounterValue,
data.ServiceCacheURIsFlushedTotal,
"user",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheURIsFlushedTotal,
prometheus.CounterValue,
data.ServiceCacheURIsFlushedTotalKernel,
"kernel",
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCached,
prometheus.GaugeValue,
data.ServiceCacheMetadataCached,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheFlushes,
prometheus.CounterValue,
data.ServiceCacheMetadataCacheFlushes,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheQueriesTotal,
prometheus.CounterValue,
data.ServiceCacheMetaDataCacheHits+data.ServiceCacheMetaDataCacheMisses,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCacheHitsTotal,
prometheus.CounterValue,
0, // data.ServiceCacheMetadataCacheHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataCachedTotal,
prometheus.CounterValue,
data.ServiceCacheMetadataCachedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheMetadataFlushedTotal,
prometheus.CounterValue,
data.ServiceCacheMetadataFlushedTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheActiveFlushedItems,
prometheus.CounterValue,
data.ServiceCacheOutputCacheActiveFlushedItems,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheItems,
prometheus.CounterValue,
data.ServiceCacheOutputCacheItems,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheMemoryUsage,
prometheus.CounterValue,
data.ServiceCacheOutputCacheMemoryUsage,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheQueriesTotal,
prometheus.CounterValue,
data.ServiceCacheOutputCacheHitsTotal+data.ServiceCacheOutputCacheMissesTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheHitsTotal,
prometheus.CounterValue,
data.ServiceCacheOutputCacheHitsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheFlushedItemsTotal,
prometheus.CounterValue,
data.ServiceCacheOutputCacheFlushedItemsTotal,
)
ch <- prometheus.MustNewConstMetric(
c.serviceCacheOutputCacheFlushesTotal,
prometheus.CounterValue,

View File

@@ -18,16 +18,22 @@
package logical_disk
import (
"context"
"encoding/binary"
"errors"
"fmt"
"log/slog"
"regexp"
"runtime"
"runtime/debug"
"slices"
"strconv"
"strings"
"github.com/alecthomas/kingpin/v2"
"github.com/go-ole/go-ole"
"github.com/prometheus-community/windows_exporter/internal/headers/propsys"
"github.com/prometheus-community/windows_exporter/internal/headers/shell32"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/pdh"
"github.com/prometheus-community/windows_exporter/internal/types"
@@ -35,15 +41,23 @@ import (
"golang.org/x/sys/windows"
)
const Name = "logical_disk"
const (
Name = "logical_disk"
subCollectorMetrics = "metrics"
subCollectorBitlocker = "bitlocker_status"
)
type Config struct {
VolumeInclude *regexp.Regexp `yaml:"volume-include"`
VolumeExclude *regexp.Regexp `yaml:"volume-exclude"`
CollectorsEnabled []string `yaml:"enabled"`
VolumeInclude *regexp.Regexp `yaml:"volume-include"`
VolumeExclude *regexp.Regexp `yaml:"volume-exclude"`
}
//nolint:gochecknoglobals
var ConfigDefaults = Config{
CollectorsEnabled: []string{
subCollectorMetrics,
},
VolumeInclude: types.RegExpAny,
VolumeExclude: types.RegExpEmpty,
}
@@ -56,6 +70,14 @@ type Collector struct {
perfDataCollector *pdh.Collector
perfDataObject []perfDataCounterValues
bitlockerReqCh chan string
bitlockerResCh chan struct {
err error
status int
}
ctxCancelFunc context.CancelFunc
avgReadQueue *prometheus.Desc
avgWriteQueue *prometheus.Desc
freeSpace *prometheus.Desc
@@ -74,6 +96,8 @@ type Collector struct {
writeLatency *prometheus.Desc
writesTotal *prometheus.Desc
writeTime *prometheus.Desc
bitlockerStatus *prometheus.Desc
}
type volumeInfo struct {
@@ -109,8 +133,9 @@ func NewWithFlags(app *kingpin.Application) *Collector {
c := &Collector{
config: ConfigDefaults,
}
c.config.CollectorsEnabled = make([]string, 0)
var volumeExclude, volumeInclude string
var collectorsEnabled, volumeExclude, volumeInclude string
app.Flag(
"collector.logical_disk.volume-exclude",
@@ -122,7 +147,17 @@ func NewWithFlags(app *kingpin.Application) *Collector {
"Regexp of volumes to include. Volume name must both match include and not match exclude to be included.",
).Default(".+").StringVar(&volumeInclude)
app.Flag(
"collector.logical_disk.enabled",
fmt.Sprintf("Comma-separated list of collectors to use. Available collectors: %s, %s. Defaults to metrics, if not specified.",
subCollectorMetrics,
subCollectorBitlocker,
),
).Default(strings.Join(ConfigDefaults.CollectorsEnabled, ",")).StringVar(&collectorsEnabled)
app.Action(func(*kingpin.ParseContext) error {
c.config.CollectorsEnabled = strings.Split(collectorsEnabled, ",")
var err error
c.config.VolumeExclude, err = regexp.Compile(fmt.Sprintf("^(?:%s)$", volumeExclude))
@@ -146,12 +181,24 @@ func (c *Collector) GetName() string {
}
func (c *Collector) Close() error {
if slices.Contains(c.config.CollectorsEnabled, subCollectorBitlocker) {
c.ctxCancelFunc()
}
return nil
}
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
for _, collector := range c.config.CollectorsEnabled {
if !slices.Contains([]string{subCollectorMetrics, subCollectorBitlocker}, collector) {
return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector,
strings.Join([]string{subCollectorMetrics, subCollectorBitlocker}, ", "),
)
}
}
c.information = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"),
"A metric with a constant '1' value labeled with logical disk information",
@@ -276,13 +323,39 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
nil,
)
c.bitlockerStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bitlocker_status"),
"BitLocker status for the logical disk",
[]string{"volume", "status"},
nil,
)
var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll)
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
}
if slices.Contains(c.config.CollectorsEnabled, subCollectorBitlocker) {
initErrCh := make(chan error)
c.bitlockerReqCh = make(chan string, 1)
c.bitlockerResCh = make(chan struct {
err error
status int
}, 1)
ctx, cancel := context.WithCancel(context.Background())
c.ctxCancelFunc = cancel
go c.workerBitlocker(ctx, initErrCh)
if err = <-initErrCh; err != nil {
return fmt.Errorf("failed to initialize BitLocker worker: %w", err)
}
}
return nil
}
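
The hunks above introduce a --collector.logical_disk.enabled flag whose comma-separated value selects the sub-collectors to run (metrics, bitlocker_status), validate those names in Build, and only start the BitLocker worker when that sub-collector is requested; the Collect hunk below then gates each group of metrics with slices.Contains. A minimal, runnable sketch of that gating, reusing the constant values from this diff but with everything else illustrative:

package main

import (
	"fmt"
	"slices"
	"strings"
)

const (
	subCollectorMetrics   = "metrics"
	subCollectorBitlocker = "bitlocker_status"
)

func main() {
	// Value as it might arrive from --collector.logical_disk.enabled.
	enabled := strings.Split("metrics,bitlocker_status", ",")

	// Reject unknown sub-collector names, as Build does above.
	for _, name := range enabled {
		if !slices.Contains([]string{subCollectorMetrics, subCollectorBitlocker}, name) {
			fmt.Printf("unknown sub collector: %s\n", name)
			return
		}
	}

	if slices.Contains(enabled, subCollectorMetrics) {
		fmt.Println("would emit the perfdata-based disk metrics")
	}

	if slices.Contains(enabled, subCollectorBitlocker) {
		fmt.Println("would query the BitLocker worker for each volume")
	}
}

On the command line this corresponds to, for example, --collector.logical_disk.enabled=metrics,bitlocker_status; leaving the flag unset keeps only the default metrics sub-collector.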
@@ -325,117 +398,156 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
info.serialNumber,
)
ch <- prometheus.MustNewConstMetric(
c.requestsQueued,
prometheus.GaugeValue,
data.CurrentDiskQueueLength,
data.Name,
)
if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) {
ch <- prometheus.MustNewConstMetric(
c.requestsQueued,
prometheus.GaugeValue,
data.CurrentDiskQueueLength,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.avgReadQueue,
prometheus.GaugeValue,
data.AvgDiskReadQueueLength*pdh.TicksToSecondScaleFactor,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.avgReadQueue,
prometheus.GaugeValue,
data.AvgDiskReadQueueLength*pdh.TicksToSecondScaleFactor,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.avgWriteQueue,
prometheus.GaugeValue,
data.AvgDiskWriteQueueLength*pdh.TicksToSecondScaleFactor,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.avgWriteQueue,
prometheus.GaugeValue,
data.AvgDiskWriteQueueLength*pdh.TicksToSecondScaleFactor,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readBytesTotal,
prometheus.CounterValue,
data.DiskReadBytesPerSec,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readBytesTotal,
prometheus.CounterValue,
data.DiskReadBytesPerSec,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readsTotal,
prometheus.CounterValue,
data.DiskReadsPerSec,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readsTotal,
prometheus.CounterValue,
data.DiskReadsPerSec,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeBytesTotal,
prometheus.CounterValue,
data.DiskWriteBytesPerSec,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeBytesTotal,
prometheus.CounterValue,
data.DiskWriteBytesPerSec,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writesTotal,
prometheus.CounterValue,
data.DiskWritesPerSec,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writesTotal,
prometheus.CounterValue,
data.DiskWritesPerSec,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readTime,
prometheus.CounterValue,
data.PercentDiskReadTime,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readTime,
prometheus.CounterValue,
data.PercentDiskReadTime,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeTime,
prometheus.CounterValue,
data.PercentDiskWriteTime,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeTime,
prometheus.CounterValue,
data.PercentDiskWriteTime,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.freeSpace,
prometheus.GaugeValue,
data.FreeSpace*1024*1024,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.freeSpace,
prometheus.GaugeValue,
data.FreeSpace*1024*1024,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalSpace,
prometheus.GaugeValue,
data.PercentFreeSpace*1024*1024,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.totalSpace,
prometheus.GaugeValue,
data.PercentFreeSpace*1024*1024,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.idleTime,
prometheus.CounterValue,
data.PercentIdleTime,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.idleTime,
prometheus.CounterValue,
data.PercentIdleTime,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.splitIOs,
prometheus.CounterValue,
data.SplitIOPerSec,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.splitIOs,
prometheus.CounterValue,
data.SplitIOPerSec,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readLatency,
prometheus.CounterValue,
data.AvgDiskSecPerRead*pdh.TicksToSecondScaleFactor,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readLatency,
prometheus.CounterValue,
data.AvgDiskSecPerRead*pdh.TicksToSecondScaleFactor,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeLatency,
prometheus.CounterValue,
data.AvgDiskSecPerWrite*pdh.TicksToSecondScaleFactor,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.writeLatency,
prometheus.CounterValue,
data.AvgDiskSecPerWrite*pdh.TicksToSecondScaleFactor,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readWriteLatency,
prometheus.CounterValue,
data.AvgDiskSecPerTransfer*pdh.TicksToSecondScaleFactor,
data.Name,
)
ch <- prometheus.MustNewConstMetric(
c.readWriteLatency,
prometheus.CounterValue,
data.AvgDiskSecPerTransfer*pdh.TicksToSecondScaleFactor,
data.Name,
)
}
if slices.Contains(c.config.CollectorsEnabled, subCollectorBitlocker) {
c.bitlockerReqCh <- data.Name
bitlockerStatus := <-c.bitlockerResCh
if bitlockerStatus.err != nil {
c.logger.Warn("failed to get BitLocker status for "+data.Name,
slog.Any("err", bitlockerStatus.err),
)
continue
}
if bitlockerStatus.status == -1 {
c.logger.Debug("BitLocker status for "+data.Name+" is unknown",
slog.Int("status", bitlockerStatus.status),
)
continue
}
for i, status := range []string{"disabled", "on", "off", "encrypting", "decrypting", "suspended", "locked", "unknown", "waiting_for_activation"} {
val := 0.0
if bitlockerStatus.status == i {
val = 1.0
}
ch <- prometheus.MustNewConstMetric(
c.bitlockerStatus,
prometheus.GaugeValue,
val,
data.Name,
status,
)
}
}
}
return nil
@@ -590,6 +702,11 @@ func getAllMountedVolumes() (map[string]string, error) {
break
}
if errors.Is(err, windows.ERROR_FILE_NOT_FOUND) {
// the volume is not mounted
break
}
if errors.Is(err, windows.ERROR_NO_MORE_FILES) {
rootPathBuf = make([]uint16, (rootPathLen+1)/2)
@@ -609,3 +726,134 @@ func getAllMountedVolumes() (map[string]string, error) {
volumes[strings.TrimSuffix(mountPoint, `\`)] = strings.TrimSuffix(windows.UTF16ToString(guidBuf), `\`)
}
}
/*
++ References
| System.Volume. | Control Panel | manage-bde conversion | manage-bde | Get-BitlockerVolume | Get-BitlockerVolume |
| BitLockerProtection | | | protection | VolumeStatus | ProtectionStatus |
| ------------------- | -------------------------------- | ------------------------- | -------------- | ---------------------------- | ------------------- |
| 1 | BitLocker on | Used Space Only Encrypted | Protection On | FullyEncrypted | On |
| 1 | BitLocker on | Fully Encrypted | Protection On | FullyEncrypted | On |
| 1 | BitLocker on | Fully Encrypted | Protection On | FullyEncryptedWipeInProgress | On |
| 2 | BitLocker off | Fully Decrypted | Protection Off | FullyDecrypted | Off |
| 3 | BitLocker Encrypting | Encryption In Progress | Protection Off | EncryptionInProgress | Off |
| 3 | BitLocker Encryption Paused | Encryption Paused | Protection Off | EncryptionSuspended | Off |
| 4 | BitLocker Decrypting | Decryption in progress | Protection Off | DecryptionInProgress | Off |
| 4 | BitLocker Decryption Paused | Decryption Paused | Protection Off | DecryptionSuspended | Off |
| 5 | BitLocker suspended | Used Space Only Encrypted | Protection Off | FullyEncrypted | Off |
| 5 | BitLocker suspended | Fully Encrypted | Protection Off | FullyEncrypted | Off |
| 6 | BitLocker on (Locked) | Unknown | Unknown | $null | Unknown |
| 7 | | | | | |
| 8 | BitLocker waiting for activation | Used Space Only Encrypted | Protection Off | FullyEncrypted | Off |
--
*/
func (c *Collector) workerBitlocker(ctx context.Context, initErrCh chan<- error) {
defer func() {
if r := recover(); r != nil {
c.logger.Error("workerBitlocker panic",
slog.Any("panic", r),
slog.String("stack", string(debug.Stack())),
)
// Restart the workerBitlocker
initErrCh := make(chan error)
go c.workerBitlocker(ctx, initErrCh)
if err := <-initErrCh; err != nil {
c.logger.Error("workerBitlocker restart failed",
slog.Any("err", err),
)
}
}
}()
// The only way to run WMI queries in parallel while being thread-safe is to
// ensure the CoInitialize[Ex]() call is bound to its current OS thread.
// Otherwise, attempting to initialize and run parallel queries across
// goroutines will result in protected memory errors.
runtime.LockOSThread()
defer runtime.UnlockOSThread()
if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {
var oleCode *ole.OleError
if errors.As(err, &oleCode) && oleCode.Code() != ole.S_OK && oleCode.Code() != 0x00000001 {
initErrCh <- fmt.Errorf("CoInitializeEx: %w", err)
return
}
}
defer ole.CoUninitialize()
var pkey propsys.PROPERTYKEY
// The ideal solution to check the disk encryption (BitLocker) status is to
// use the WMI APIs (Win32_EncryptableVolume). However, only programs running
// with elevated privileges can access those APIs.
//
// Our alternative solution is based on the value of the undocumented (shell)
// property: "System.Volume.BitLockerProtection". That property is essentially
// an enum containing the current BitLocker status for a given volume. This
// approach was suggested here:
// https://stackoverflow.com/questions/41308245/detect-bitlocker-programmatically-from-c-sharp-without-admin/41310139
//
// Note that the link above doesn't give any explanation / meaning for the
// enum values, it simply says that 1, 3 or 5 means the disk is encrypted.
//
// I directly tested and validated this strategy on a Windows 10 machine.
// The values given in the BitLockerStatus enum contain the relevant values
// for the shell property. I also directly validated them.
if err := propsys.PSGetPropertyKeyFromName("System.Volume.BitLockerProtection", &pkey); err != nil {
initErrCh <- fmt.Errorf("PSGetPropertyKeyFromName failed: %w", err)
return
}
close(initErrCh)
for {
select {
case <-ctx.Done():
return
case path, ok := <-c.bitlockerReqCh:
if !ok {
return
}
if !strings.Contains(path, `:`) {
c.bitlockerResCh <- struct {
err error
status int
}{err: nil, status: -1}
continue
}
status, err := func(path string) (int, error) {
item, err := shell32.SHCreateItemFromParsingName(path)
if err != nil {
return -1, fmt.Errorf("SHCreateItemFromParsingName failed: %w", err)
}
defer item.Release()
var v ole.VARIANT
if err := item.GetProperty(&pkey, &v); err != nil {
return -1, fmt.Errorf("GetProperty failed: %w", err)
}
return int(v.Val), v.Clear()
}(path)
c.bitlockerResCh <- struct {
err error
status int
}{err: err, status: status}
}
}
}
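
workerBitlocker above keeps all COM and shell-property access pinned to a single OS thread (as CoInitializeEx requires) and serves the Collect loop one request at a time over a pair of channels; the integer it returns is the System.Volume.BitLockerProtection value, which Collect then fans out into one gauge per status label from the table above. The following is a minimal, self-contained sketch of that request/response worker shape only, with the Windows-specific COM calls replaced by a placeholder; the worker and statusResult names are illustrative.

package main

import (
	"context"
	"fmt"
	"runtime"
)

type statusResult struct {
	err    error
	status int
}

// worker pins itself to one OS thread for the lifetime of the goroutine,
// mirroring the constraint described in the comments above, and answers
// exactly one response per request.
func worker(ctx context.Context, req <-chan string, res chan<- statusResult) {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	for {
		select {
		case <-ctx.Done():
			return
		case volume, ok := <-req:
			if !ok {
				return
			}

			// Placeholder for the shell-property lookup; every volume here
			// reports status 1, i.e. "BitLocker on" in the table above.
			_ = volume
			res <- statusResult{status: 1}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	req := make(chan string, 1)
	res := make(chan statusResult, 1)

	go worker(ctx, req, res)

	req <- `C:\`
	fmt.Println(<-res)
}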

Some files were not shown because too many files have changed in this diff.