Compare commits

...

99 Commits

Author SHA1 Message Date
Dominik Eisenberg
56c29a6280 mscluster: Add virtual disk metrics sub-collector (#2296)
Signed-off-by: Dominik Eisenberg <d.business@outlook.de>
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
Signed-off-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
Co-authored-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
2026-02-08 21:01:53 +01:00
Dominik Eisenberg
78395afc67 mscluster: add shared volumes collector and update documentation (#2301)
Signed-off-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
Signed-off-by: Jan-Otto Kröpke <mail@jkroepke.de>
Co-authored-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2026-02-08 12:53:40 +00:00
Dominik Eisenberg
e951e516de docs: add alerting examples for CPU and CSV (#2317)
Signed-off-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
Co-authored-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
2026-02-08 13:46:02 +01:00
Dominik Eisenberg
ec6f705410 os: rename install_time_timestamp to install_time_timestamp_seconds (#2315)
Co-authored-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
2026-02-04 13:23:28 +01:00
renovate[bot]
7119da5cf5 fix(deps): update golang.org/x/ (#2311)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-30 16:14:03 +00:00
renovate[bot]
856c108e7f fix(deps): update module github.com/bmatcuk/doublestar/v4 to v4.10.0 (#2312)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-30 16:10:39 +00:00
renovate[bot]
eaecea7797 chore(deps): update module github.com/coreos/go-systemd/v22 to v22.7.0 (#2310)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-30 17:07:14 +01:00
renovate[bot]
a95df3d6f2 chore(deps): update module github.com/golang-jwt/jwt/v5 to v5.3.1 (#2308)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-30 16:54:30 +01:00
renovate[bot]
3fcdaca34a chore(deps): update github actions (#2307)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-30 16:54:18 +01:00
Calle Pettersson
2a3f0ff9d9 Formalize retirement (#2303) 2026-01-27 12:05:07 +01:00
Jan-Otto Kröpke
7dcf0d3137 gpu: skip Microsoft Basic Render Driver metrics (#2275)
Co-authored-by: Elliot Nevills <elliotnev27@users.noreply.github.com>
2026-01-17 12:32:31 +01:00
renovate[bot]
b584539387 chore(deps): update actions/setup-go action to v6.2.0 (#2298)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-16 20:33:48 +01:00
renovate[bot]
7cec563af2 fix(deps): update module github.com/bmatcuk/doublestar/v4 to v4.9.2 (#2297)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-16 20:33:36 +01:00
renovate[bot]
f0d5fd9ba0 fix(deps): update module github.com/prometheus/exporter-toolkit to v0.15.1 (#2295)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-10 20:03:30 +01:00
renovate[bot]
2efe98d8ec fix(deps): update module github.com/prometheus/common to v0.67.5 (#2294)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-10 13:31:12 +01:00
Corporte Gadfly
5ab9019509 chore: Add sample dashboard (#2255) 2026-01-06 13:20:13 +00:00
renovate[bot]
2b4576ecd4 chore(deps): update github actions (major) (#2289)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2026-01-02 21:16:51 +01:00
Dominik Eisenberg
9a666ace81 cpu: add example query for Task Manager-style CPU utilization (#2286)
Co-authored-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
2025-12-30 16:00:24 +01:00
Dominik Eisenberg
27186f7e78 os: add system installation date to metrics (#2284)
Co-authored-by: EisenbergD <dominik.eisenberg@beiersdorf.com>
2025-12-29 20:23:02 +01:00
PrometheusBot
0c1336b845 Synchronize common files from prometheus/prometheus (#2279) 2025-12-24 09:40:03 +00:00
PrometheusBot
f3e50f4db4 Synchronize common files from prometheus/prometheus (#2278) 2025-12-21 20:52:20 +00:00
renovate[bot]
04714a3fbd chore(deps): update docker/setup-buildx-action action to v3.12.0 (#2276)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-19 18:02:20 +01:00
renovate[bot]
43bd352cfd fix(deps): update golang.org/x/ (#2277)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-19 18:01:20 +01:00
renovate[bot]
6e831b0176 chore(deps): update module google.golang.org/protobuf to v1.36.11 (#2273)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-17 20:59:05 +01:00
renovate[bot]
84dc1977de chore(deps): update dependency golangci/golangci-lint to v2.7.2 (#2272)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-17 20:59:00 +01:00
renovate[bot]
a944cd02a8 chore(deps): update dependency golangci/golangci-lint to v2.7.1 (#2269)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-06 10:04:32 +01:00
renovate[bot]
65ac3585a3 chore(deps): update github actions (#2268)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-12-06 10:04:25 +01:00
renovate[bot]
0dbea50704 chore(deps): update docker/metadata-action action to v5.10.0 (#2264)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-28 18:27:51 +01:00
renovate[bot]
7585044277 chore(deps): update github actions (major) (#2265)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-28 18:27:40 +01:00
renovate[bot]
aaf22e7322 chore(deps): update module golang.org/x/oauth2 to v0.33.0 (#2262)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-21 15:34:20 +00:00
renovate[bot]
7671e42c70 fix(deps): update module github.com/prometheus/common to v0.67.4 (#2261)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-21 16:33:50 +01:00
renovate[bot]
1e16767afb chore(deps): update module golang.org/x/crypto to v0.45.0 [security] (#2257)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-21 16:30:00 +01:00
renovate[bot]
81ff006f08 chore(deps): update dependency golangci/golangci-lint to v2.6.2 (#2259)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-21 16:29:56 +01:00
renovate[bot]
7c586b204d chore(deps): update github actions (#2260)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-21 16:29:50 +01:00
buckleyGI
5351641287 docs: Fix metric name for Windows Disk Alerts (#2254) 2025-11-12 14:54:33 +01:00
renovate[bot]
462a495514 chore(deps): update docker/metadata-action action to v5.9.0 (#2253)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-07 18:59:17 +01:00
renovate[bot]
16ee024a1b chore(deps): update dependency golangci/golangci-lint to v2.6.1 (#2252)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-07 18:58:28 +01:00
PrometheusBot
afe1bfc29d Synchronize common files from prometheus/prometheus (#2251) 2025-11-06 18:35:21 +00:00
Szilard Parrag
ebbad8943b collector: fix race condition in Build() (#2250)
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-11-06 02:33:05 +00:00
renovate[bot]
3d0587d28c chore(deps): update dependency golangci/golangci-lint to v2.6.0 (#2246)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-11-02 19:41:27 +01:00
renovate[bot]
a523ef69fd chore(deps): update module github.com/prometheus/procfs to v0.19.2 (#2247)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-02 18:50:59 +01:00
Jan-Otto Kröpke
cd088325ef installer: remove repair option (#2243) 2025-11-02 17:40:39 +00:00
renovate[bot]
8fe118bff9 chore(deps): update github actions (major) (#2249)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-02 18:38:10 +01:00
Jan-Otto Kröpke
81051791e2 filetime: replace collector with file (#2244) 2025-11-02 18:38:01 +01:00
renovate[bot]
4fc7402985 chore(deps): update module golang.org/x/time to v0.14.0 (#2248)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-02 18:37:49 +01:00
renovate[bot]
52efb1c61c fix(deps): update module github.com/prometheus/common to v0.67.2 (#2245)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-11-02 18:37:38 +01:00
xieshujian
3a9e227bd9 file: add file collector to scrape file size and file modify time which can replace filetime collector (#2205)
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-11-02 18:27:34 +01:00
Johan Thomsen
ed11d8e8fa netframework: add process_id label to clrmemory (#2242) 2025-11-02 18:26:55 +01:00
renovate[bot]
402eb6ef4e chore(deps): update module github.com/prometheus/procfs to v0.18.0 (#2238)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-10-24 13:24:57 +00:00
renovate[bot]
767367edc4 fix(deps): update module github.com/prometheus/exporter-toolkit to v0.15.0 (#2239)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-10-24 15:20:30 +02:00
Jan-Otto Kröpke
370a42b79a Update documentation for collector update flags (#2235) 2025-10-17 22:46:45 +02:00
renovate[bot]
ccd977177c fix(deps): update module github.com/prometheus/common to v0.67.1 (#2228)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-10-10 15:34:29 +02:00
renovate[bot]
33fe157545 fix(deps): update golang.org/x/ (#2227)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-10-10 15:34:22 +02:00
renovate[bot]
9e32f62ca4 chore(deps): update dependency golangci/golangci-lint to v2.5.0 (#2220)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-10-06 03:30:11 +02:00
Jan-Otto Kröpke
c26c27a7f4 Fix collector flag names in documentation (#2225) 2025-10-05 15:26:29 +00:00
renovate[bot]
1f43ca4d8f chore(deps): update module google.golang.org/protobuf to v1.36.10 (#2223)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-10-05 17:01:42 +02:00
renovate[bot]
d818d96e07 chore(deps): update github actions (#2224) 2025-10-04 20:39:49 +02:00
Jan-Otto Kröpke
7c108ea5be thermalzone: deprecate collector (#2201) 2025-09-26 10:29:55 +00:00
Jan-Otto Kröpke
bbe0d1aba7 os: include installation type in Windows version retrieval (#2217) 2025-09-26 10:27:31 +00:00
Jan-Otto Kröpke
1394f2399d Update renovate.json (#2219) 2025-09-20 20:41:05 +02:00
PrometheusBot
f4d77803ea Synchronize common files from prometheus/prometheus (#2218) 2025-09-20 20:40:16 +02:00
renovate[bot]
a19af1b695 chore(deps): update github actions (major) (#2215)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 20:39:10 +02:00
renovate[bot]
95bf157049 fix(deps): update golang.org/x/ (#2214)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 20:39:02 +02:00
Jan-Otto Kröpke
9969618026 Update renovate.json (#2213) 2025-09-19 19:55:26 +02:00
PrometheusBot
f2e62c6f53 Synchronize common files from prometheus/prometheus (#2212) 2025-09-19 19:53:50 +02:00
renovate[bot]
a9b42ab3a4 fix(deps): update module github.com/prometheus/exporter-toolkit to v0.14.1 (#2210)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 14:23:10 +00:00
renovate[bot]
e778eea250 chore(deps): update actions/checkout action to v4.3.0 (#2211)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 14:22:56 +00:00
renovate[bot]
c0eb53812d chore(deps): update module google.golang.org/protobuf to v1.36.9 (#2209)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 16:19:18 +02:00
renovate[bot]
21d9fb057d chore(deps): update module go.yaml.in/yaml/v2 to v2.4.3 (#2208)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-19 16:19:10 +02:00
Jan-Otto Kröpke
f1772a742f pdh: added logging, if PDH CStatus is not valid (#2203) 2025-09-07 13:31:29 +02:00
renovate[bot]
fcf21bb600 fix(deps): update module github.com/prometheus/client_golang to v1.23.1 (#2199)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-05 21:37:06 +00:00
renovate[bot]
cd5f136079 chore(deps): update module google.golang.org/protobuf to v1.36.8 (#2198)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-09-05 18:52:32 +02:00
Jan-Otto Kröpke
4171ec17a5 chore: switch to go.yaml.in/yaml/v3 (#2196) 2025-09-04 23:50:27 +02:00
Karthik Panjaje
6289499dee docs: Fixed HTTP request metrics documentation (#2192) 2025-08-31 14:41:40 +00:00
Jan-Otto Kröpke
79917893d1 installer: set failureflag for Windows service (#2191) 2025-08-29 21:57:28 +02:00
Jan-Otto Kröpke
0b8a257b31 gpu: add device id label (#2186) 2025-08-28 06:36:10 +02:00
Jan-Otto Kröpke
71cedbc4d0 mi: remove callbacks (#2188) 2025-08-26 21:04:56 +02:00
Jan-Otto Kröpke
c8a4cb3806 mssql: expose correct patch level without restart (#2187) 2025-08-26 20:52:09 +02:00
Jan-Otto Kröpke
558629dff5 chore: update to go 1.25 (#2185) 2025-08-24 14:27:00 +02:00
Jan-Otto Kröpke
5a8ebf0c44 collector: support sub-second timeout values. (#2181) 2025-08-15 23:55:24 +02:00
PrometheusBot
acbabb926d Synchronize common files from prometheus/prometheus (#2180) 2025-08-15 20:34:45 +02:00
renovate[bot]
e37392c00b chore(deps): update dependency golangci/golangci-lint to v2.4.0 (#2179)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-15 16:43:04 +02:00
renovate[bot]
00d86ba792 chore(deps): update actions/checkout action to v4.3.0 (#2178)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-15 16:42:52 +02:00
renovate[bot]
691f64f5cc fix(deps): update golang.org/x/ (#2170)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-10 15:39:40 +02:00
renovate[bot]
19999dea49 chore(deps): update docker/login-action action to v3.5.0 (#2169)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-10 15:39:24 +02:00
renovate[bot]
c2df4d7514 chore(deps): update actions/download-artifact action to v5 (#2171)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-08 15:48:04 +02:00
renovate[bot]
8937a5ac91 chore(deps): update module google.golang.org/protobuf to v1.36.7 (#2168)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-08 15:30:46 +02:00
Jan-Otto Kröpke
930130f58a collector: Add disable flag (#2165)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-07 08:58:29 +02:00
Jan-Otto Kröpke
0e85959a4d installer: do not fail, if service can't be started. (#2163) 2025-08-03 20:11:57 +02:00
Jan-Otto Kröpke
6253bf812d process: Add flag to control the export of the process cmdline (#2153) 2025-08-03 20:09:03 +02:00
Jan-Otto Kröpke
6c2380bd04 installer: disable config file creation, if CONFIG_FILE is set to a non default location. (#2162) 2025-08-03 20:08:13 +02:00
Jan-Otto Kröpke
5266f9ebfe installer: add quote to avoid argument splitting (#2161) 2025-08-03 19:39:59 +02:00
renovate[bot]
6c9a5b66e2 chore(deps): update dependency golangci/golangci-lint to v2.3.1 (#2158)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Jan-Otto Kröpke <mail@jkroepke.de>
2025-08-03 04:27:21 +02:00
renovate[bot]
c4ab8cb8a5 chore(deps): update docker/metadata-action action to v5.8.0 (#2159)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-01 21:01:20 +02:00
renovate[bot]
7bcaf81d26 fix(deps): update module github.com/prometheus/client_golang to v1.23.0 (#2160)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-01 20:49:03 +02:00
renovate[bot]
5f6ba2c6e7 fix(deps): update module github.com/bmatcuk/doublestar/v4 to v4.9.1 (#2157)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-08-01 15:47:11 +02:00
Jan-Otto Kröpke
75c85fbde1 docs: add note about property preferences (#2155) 2025-07-29 20:53:50 +02:00
Jan-Otto Kröpke
120c244313 docs: Update example_config.yml (#2152) 2025-07-28 22:53:13 +02:00
Jan-Otto Kröpke
0e2d78affe docs: allow backport PR title prefix. (#2142) 2025-07-20 02:30:42 +02:00
158 changed files with 6347 additions and 879 deletions

View File

@@ -18,7 +18,7 @@ jobs:
  if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
  steps:
  - name: git checkout
-   uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
  with:
  persist-credentials: false
  - name: Set docker hub repo name
@@ -42,7 +42,7 @@ jobs:
  if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
  steps:
  - name: git checkout
-   uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
  with:
  persist-credentials: false
  - name: Set quay.io org name

View File

@@ -20,8 +20,8 @@ jobs:
  test:
  runs-on: windows-2025
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
-   - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+   - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
  with:
  go-version-file: 'go.mod'
@@ -43,8 +43,8 @@ jobs:
  promtool:
  runs-on: windows-2025
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
-   - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+   - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
  with:
  go-version-file: 'go.mod'
@@ -65,7 +65,7 @@ jobs:
  run: make promtool
  - name: Upload windows_exporter.exe
-   uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+   uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
  if: always()
  with:
  name: windows_exporter.amd64.exe
@@ -82,14 +82,14 @@ jobs:
  git config --global core.autocrlf false
  git config --global core.eol lf
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
-   - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+   - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
  with:
  go-version-file: 'go.mod'
  - name: golangci-lint
-   uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
+   uses: golangci/golangci-lint-action@1e7e51e771db61008b38414a730f564565cf7c20 # v9.2.0
  with:
  # renovate: github=golangci/golangci-lint
-   version: v2.2.2
+   version: v2.7.2
  args: "--max-same-issues=0"

View File

@@ -33,11 +33,11 @@ jobs:
  name: check title prefix
  runs-on: ubuntu-latest
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
  - name: check
  run: |
  PR_TITLE_PREFIX=$(echo "$PR_TITLE" | cut -d':' -f1)
-   if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "fix(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Release"* ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]]; then
+   if [[ -d "internal/collector/$PR_TITLE_PREFIX" ]] || [[ -d "internal/$PR_TITLE_PREFIX" ]] || [[ -d "pkg/$PR_TITLE_PREFIX" ]] || [[ -d "$PR_TITLE_PREFIX" ]] || [[ "$PR_TITLE_PREFIX" == "docs" ]] || [[ "$PR_TITLE_PREFIX" == "ci" ]] || [[ "$PR_TITLE_PREFIX" == "revert" ]] || [[ "$PR_TITLE_PREFIX" == "fix" ]] || [[ "$PR_TITLE_PREFIX" == "fix(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "feat" ]] || [[ "$PR_TITLE_PREFIX" == "chore" ]] || [[ "$PR_TITLE_PREFIX" == "chore(docs)" ]] || [[ "$PR_TITLE_PREFIX" == "chore(deps)" ]] || [[ "$PR_TITLE_PREFIX" == "*" ]] || [[ "$PR_TITLE_PREFIX" == "Release"* ]] || [[ "$PR_TITLE_PREFIX" == "Synchronize common files from prometheus/prometheus" ]] || [[ "$PR_TITLE_PREFIX" == "[0."* ]] || [[ "$PR_TITLE_PREFIX" == "[1."* ]]; then
  exit 0
  fi

View File

@@ -24,11 +24,11 @@ jobs:
  runs-on: windows-2025
  environment: build
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
  with:
  fetch-depth: '0'
-   - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+   - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
  with:
  go-version-file: 'go.mod'
@@ -157,7 +157,7 @@ jobs:
  cat output\sha256sums.txt
  - name: Upload Artifacts
-   uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+   uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
  with:
  name: windows_exporter_binaries
  path: |
@@ -180,25 +180,25 @@ jobs:
  DOCKER_BUILD_SUMMARY: false
  DOCKER_BUILD_RECORD_UPLOAD: false
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
  with:
  fetch-depth: '0'
  - name: Download Artifacts
-   uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
+   uses: actions/download-artifact@37930b1c2abaa49bbe596cd826c3c89aef350131 # v7.0.0
  with:
  name: windows_exporter_binaries
  - name: Login to Docker Hub
  if: ${{ github.event_name != 'pull_request' }}
-   uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+   uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
  with:
  username: ${{ secrets.DOCKER_HUB_LOGIN }}
  password: ${{ secrets.DOCKER_HUB_PASSWORD }}
  - name: Login to quay.io
  if: ${{ github.event_name != 'pull_request' }}
-   uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+   uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
  with:
  registry: quay.io
  username: ${{ secrets.QUAY_IO_LOGIN }}
@@ -206,7 +206,7 @@ jobs:
  - name: Login to GitHub container registry
  if: ${{ github.event_name != 'pull_request' }}
-   uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+   uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
  with:
  registry: ghcr.io
  username: ${{ github.repository_owner }}
@@ -214,7 +214,7 @@ jobs:
  - name: Docker meta
  id: meta
-   uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
+   uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
  with:
  images: |
  ghcr.io/prometheus-community/windows-exporter
@@ -231,7 +231,7 @@ jobs:
  org.opencontainers.image.licenses=MIT
  - name: Set up Docker Buildx
-   uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
+   uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
  - name: Build and push
  uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0

View File

@@ -17,7 +17,7 @@
  name: Check for spelling errors
  runs-on: ubuntu-latest
  steps:
-   - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+   - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
  - uses: codespell-project/actions-codespell@master
  with:
  check_filenames: true

View File

@@ -11,7 +11,7 @@ jobs:
  if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
  runs-on: ubuntu-latest
  steps:
-   - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
+   - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
  with:
  repo-token: ${{ secrets.GITHUB_TOKEN }}
  # opt out of defaults to avoid marking issues as stale and closing them

View File

@@ -11,7 +11,7 @@ jobs:
  if: github.repository_owner == 'prometheus' || github.repository_owner == 'prometheus-community' # Don't run this workflow on forks.
  runs-on: ubuntu-latest
  steps:
-   - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0
+   - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
  with:
  repo-token: ${{ secrets.GITHUB_TOKEN }}
  # opt out of defaults to avoid marking issues as stale and closing them

.gitignore (vendored, 1 change)
View File

@@ -6,6 +6,7 @@ output/
  .vscode
  *.syso
  installer/*.msi
+ installer/*.log
  installer/*.wixpdb
  local/

View File

@@ -15,6 +15,7 @@ linters:
  - gocognit
  - goconst
  - gocyclo
+ - godoclint
  - godot
  - lll
  - maintidx
@@ -23,6 +24,7 @@ linters:
  - paralleltest
  - tagliatelle
  - testpackage
+ - unqueryvet
  - varnamelen
  - wrapcheck
  - wsl

View File

@@ -4,9 +4,12 @@
  <w>containerd</w>
  <w>endpointstats</w>
  <w>gochecknoglobals</w>
+ <w>lpwstr</w>
  <w>luid</w>
+ <w>operationoptions</w>
  <w>setupapi</w>
  <w>spdx</w>
+ <w>textfile</w>
  <w>vmcompute</w>
  </words>
  </dictionary>

View File

@@ -19,7 +19,7 @@
  <configuration default="false" name="all" type="GoApplicationRunConfiguration" factoryName="Go Application" folderName="run">
  <module name="windows_exporter" />
  <working_directory value="$PROJECT_DIR$" />
- <parameters value="--web.listen-address=127.0.0.1:9182 --log.level=info --collectors.enabled=ad,adcs,adfs,cache,container,cpu,cpu_info,dfsr,dhcp,diskdrive,dns,exchange,filetime,fsrmquota,hyperv,iis,license,logical_disk,memory,mscluster,msmq,mssql,net,netframework,nps,os,pagefile,performancecounter,physical_disk,printer,process,remote_fx,scheduled_task,service,smb,smbclient,smtp,system,tcp,terminal_services,thermalzone,time,udp,update,vmware,performancecounter --debug.enabled --collector.performancecounter.objects='[{ &quot;name&quot;: &quot;memory&quot;, &quot;type&quot;: &quot;formatted&quot;, &quot;object&quot;: &quot;Memory&quot;, &quot;counters&quot;: [{ &quot;name&quot;:&quot;Cache Faults/sec&quot;, &quot;type&quot;:&quot;counter&quot; }]}]'" />
+ <parameters value="--web.listen-address=127.0.0.1:9182 --log.level=info --collectors.enabled=ad,adcs,adfs,cache,container,cpu,cpu_info,dfsr,dhcp,diskdrive,dns,exchange,file,fsrmquota,hyperv,iis,license,logical_disk,memory,mscluster,msmq,mssql,net,netframework,nps,os,pagefile,performancecounter,physical_disk,printer,process,remote_fx,scheduled_task,service,smb,smbclient,smtp,system,tcp,terminal_services,thermalzone,time,udp,update,vmware,performancecounter --debug.enabled --collector.performancecounter.objects='[{ &quot;name&quot;: &quot;memory&quot;, &quot;type&quot;: &quot;formatted&quot;, &quot;object&quot;: &quot;Memory&quot;, &quot;counters&quot;: [{ &quot;name&quot;:&quot;Cache Faults/sec&quot;, &quot;type&quot;:&quot;counter&quot; }]}]'" />
  <sudo value="true" />
  <kind value="PACKAGE" />
  <package value="github.com/prometheus-community/windows_exporter/cmd/windows_exporter" />

View File

@@ -1,10 +1,11 @@
  Maintainers in alphabetical order
  * [Ben Reedy](https://github.com/breed808) - breed808@breed808.com
- * [Calle Pettersson](https://github.com/carlpett) - calle@cape.nu
  * [Jan-Otto Kröpke](https://github.com/jkroepke) - github@jkroepke.de
  Alumni
  * [Brian Brazil](https://github.com/brian-brazil)
+ * [Calle Pettersson](https://github.com/carlpett)
  * [Martin Lindhe](https://github.com/martinlindhe)

View File

@@ -26,7 +26,7 @@ A Prometheus exporter for Windows machines.
  | [dhcp](docs/collector.dhcp.md) | DHCP Server | |
  | [dns](docs/collector.dns.md) | DNS Server | |
  | [exchange](docs/collector.exchange.md) | Exchange metrics | |
- | [filetime](docs/collector.filetime.md) | FileTime metrics | |
+ | [file](docs/collector.file.md) | File metrics | |
  | [fsrmquota](docs/collector.fsrmquota.md) | Microsoft File Server Resource Manager (FSRM) Quotas collector | |
  | [gpu](docs/collector.gpu.md) | GPU metrics | |
  | [hyperv](docs/collector.hyperv.md) | Hyper-V hosts | |
@@ -55,7 +55,6 @@ A Prometheus exporter for Windows machines.
  | [tcp](docs/collector.tcp.md) | TCP connections | |
  | [terminal_services](docs/collector.terminal_services.md) | Terminal services (RDS) | |
  | [textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | |
- | [thermalzone](docs/collector.thermalzone.md) | Thermal information | |
  | [time](docs/collector.time.md) | Windows Time Service | |
  | [udp](docs/collector.udp.md) | UDP connections | |
  | [update](docs/collector.update.md) | Windows Update Service | |
@@ -82,15 +81,15 @@ This can be useful for having different Prometheus servers collect specific metr
  windows_exporter accepts flags to configure certain behaviours. The ones configuring the global behaviour of the exporter are listed below, while collector-specific ones are documented in the respective collector documentation above.
  | Flag | Description | Default value |
  |---------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------|---------------|
  | `--web.listen-address` | host:port for exporter. | `:9182` |
  | `--telemetry.path` | URL path for surfacing collected metrics. | `/metrics` |
  | `--collectors.enabled` | Comma-separated list of collectors to use. Use `[defaults]` as a placeholder which gets expanded containing all the collectors enabled by default. | `[defaults]` |
  | `--scrape.timeout-margin` | Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads. | `0.5` |
  | `--web.config.file` | A [web config][web_config] for setting up TLS and Auth | None |
- | `--config.file` | [Using a config file](#using-a-configuration-file) from path or URL | None |
+ | `--config.file` | [Using a config file](#using-a-configuration-file) from path | None |
  | `--log.file` | Output file of log messages. One of [stdout, stderr, eventlog, \<path to log file>]<br>**NOTE:** The MSI installer will add a default argument to the installed service setting this to eventlog | stderr |
  ## Installation
@@ -112,20 +111,22 @@ The configuration file
  The following parameters are available:
  | Name | Description |
  |----------------------|-------------|
  | `ENABLED_COLLECTORS` | As the `--collectors.enabled` flag, provide a comma-separated list of enabled collectors |
- | `CONFIG_FILE` | Use the `--config.file` flag to specify a config file. If empty, no config file will be set. The special value `config.yaml` set the path to the config.yaml at install dir |
+ | `CONFIG_FILE` | Use the `--config.file` flag to specify a config file. If empty, default config file at install dir will be used. If set, the config file must be exist before the installation is started. |
  | `LISTEN_ADDR` | The IP address to bind to. Defaults to an empty string. (any local address) |
  | `LISTEN_PORT` | The port to bind to. Defaults to `9182`. |
  | `METRICS_PATH` | The path at which to serve metrics. Defaults to `/metrics` |
  | `TEXTFILE_DIRS` | Use the `--collector.textfile.directories` flag to specify one or more directories, separated by commas, where the collector should read text files containing metrics |
  | `REMOTE_ADDR` | Allows setting comma separated remote IP addresses for the Windows Firewall exception (allow list). Defaults to an empty string (any remote address). |
  | `EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string. For `--collectors.enabled` and `--config.file`, use the specialized properties `ENABLED_COLLECTORS` and `CONFIG_FILE` |
  | `ADDLOCAL` | Enables features within the windows_exporter installer. Supported values: `FirewallException` |
  | `REMOVE` | Disables features within the windows_exporter installer. Supported values: `FirewallException` |
  | `APPLICATIONFOLDER` | Directory to install windows_exporter. Defaults to `C:\Program Files\windows_exporter` |
+ > [!NOTE]
+ > The installer properties are always preferred over the values defined in the config file. If you prefer to configure via the config file, avoid using any of the properties listed above.
  Parameters are sent to the installer via `msiexec`.
  On PowerShell, the `--%` should be passed before defining properties.
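As a hedged illustration of the two lines above (the MSI filename and the property values here are placeholders, not taken from this changeset), an installation from PowerShell that sets a couple of the documented properties could look like:

```powershell
# PowerShell stop-parsing token --% passes the MSI properties through without re-parsing them
msiexec /i windows_exporter.msi --% ENABLED_COLLECTORS=os,cpu,logical_disk LISTEN_PORT=9182
```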

View File

@@ -89,6 +89,10 @@ func run(ctx context.Context, args []string) int {
  "collectors.enabled",
  "Comma-separated list of collectors to use. Use '[defaults]' as a placeholder for all the collectors enabled by default.").
  Default(collector.DefaultCollectors).String()
+ disabledCollectors = app.Flag(
+ "collectors.disabled",
+ "Comma-separated list of collectors to exclude. Can be used to disable collector from the defaults.").
+ Default("").String()
  timeoutMargin = app.Flag(
  "scrape.timeout-margin",
  "Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads.",
@@ -166,6 +170,10 @@ func run(ctx context.Context, args []string) int {
  return 1
  }
+ if *disabledCollectors != "" {
+ collectors.Disable(slices.Compact(strings.Split(*disabledCollectors, ",")))
+ }
  // Initialize collectors before loading
  if err = collectors.Build(ctx, logger); err != nil {
  for _, err := range utils.SplitError(err) {
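For context, the added flag subtracts collectors from whatever `--collectors.enabled` resolves to. A sketch of the intended invocation, with collector names chosen purely as examples, would be:

```powershell
windows_exporter.exe --collectors.enabled="[defaults]" --collectors.disabled=printer,mssql
```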

View File

@@ -171,8 +171,10 @@ func waitUntilListening(tb testing.TB, network, address string) error {
  err error
  )
+ dialer := &net.Dialer{Timeout: 100 * time.Millisecond}
  for range 20 {
- conn, err = net.DialTimeout(network, address, 100*time.Millisecond)
+ conn, err = dialer.DialContext(tb.Context(), network, address)
  if err == nil {
  _ = conn.Close()

dashboard/README.md (new file, 13 changes)
View File

@@ -0,0 +1,13 @@
## Sample dashboard for Windows Exporter
This sample dashboard is heavily inspired by [this dashboard in Chinese](https://grafana.com/grafana/dashboards/10467-windows-exporter-for-prometheus-dashboard-cn-v20230531/).
First row shows an Overview of your Windows landscape.
<br/>
![Screenshot of overview row.](dashboard-overview.png)
Second row provides resource details about specific Windows VM picked from the variables at the top.
<br/>
![Screenshot of resource details (part 1).](resource-details-part1.png)
<br/>
![Screenshot of resource details (part 2).](resource-details-part2.png)

Three binary image files added (not shown; sizes 1.1 MiB, 649 KiB, and 511 KiB), presumably the dashboard screenshots referenced in dashboard/README.md above.

File diff suppressed because it is too large.

View File

@@ -14,6 +14,7 @@ This directory contains documentation of the collectors in the windows_exporter,
  - [`diskdrive`](collector.diskdrive.md)
  - [`dns`](collector.dns.md)
  - [`exchange`](collector.exchange.md)
+ - [`file`](collector.file.md)
  - [`fsrmquota`](collector.fsrmquota.md)
  - [`hyperv`](collector.hyperv.md)
  - [`iis`](collector.iis.md)
@@ -42,7 +43,6 @@ This directory contains documentation of the collectors in the windows_exporter,
  - [`tcp`](collector.tcp.md)
  - [`terminal_services`](collector.terminal_services.md)
  - [`textfile`](collector.textfile.md)
- - [`thermalzone`](collector.thermalzone.md)
  - [`time`](collector.time.md)
  - [`udp`](collector.udp.md)
  - [`update`](collector.update.md)

View File

@@ -48,6 +48,27 @@ Show per-cpu utilisation using the processor utility metrics
  ```
  rate(windows_cpu_processor_utility_total{instance="localhost"}[5m]) / rate(windows_cpu_processor_rtc_total{instance="localhost"}[5m])
  ```
Show average CPU utilization percentage (like Windows Task Manager)
```
sum by (instance) (
clamp_max(
(
rate(windows_cpu_processor_utility_total{
job=~"$job",
}[1m])
/
rate(windows_cpu_processor_rtc_total{
job=~"$job",
}[1m])
), 100
)
) /
count by (instance) (
windows_cpu_processor_utility_total{
job=~"$job"
}
)
```
  Show actual average CPU frequency in Hz
  ```
  avg by(instance) (
@@ -59,7 +80,36 @@ avg by(instance) (
  ## Alerting examples
- **prometheus.rules**
+ #### Average CPU utilization over 1 hour exceeds 80% (New CPU metric)
```yaml
# Alert on hosts with 1h avg CPU more than 80%
- alert: HighCPUUtilization
expr: |
avg_over_time(
(
sum by (instance) (
(
rate(windows_cpu_processor_utility_total{}[1m])
/
rate(windows_cpu_processor_rtc_total{}[1m])
)
) /
count by (instance) (
windows_cpu_processor_utility_total{}
)
)[1h:]
) > 80
for: 1m
labels:
severity: warning
metric_name: CPUUtilization
annotations:
summary: "High CPU utilization on {{ $labels.instance }}"
description: |
CPU utilization on {{ $labels.instance }} has averaged more than 80% over the last hour (current value: {{ printf "%.2f" $value }})
```
#### Average CPU utilization over 1 hour exceeds 80% (Old CPU metric)
  ```yaml
  # Alert on hosts with more than 80% CPU usage over a 10 minute period
  - alert: CpuUsage
@@ -70,6 +120,10 @@ avg by(instance) (
  annotations:
  summary: "CPU Usage (instance {{ $labels.instance }})"
  description: "CPU Usage is more than 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
```
#### CPU not using boost frequencies
```yaml
  # Alert on hosts which are not boosting their CPU frequencies
  - alert: NoCpuTurbo
  expr: |

View File

@@ -30,7 +30,7 @@ groups:
  rules:
  - alert: Drive_Status
- expr: windows_disk_drive_status{status="OK"} != 1
+ expr: windows_diskdrive_status{status="OK"} != 1
  for: 10m
  labels:
  severity: high

docs/collector.file.md (new file, 40 changes)
View File

@@ -0,0 +1,40 @@
# file collector
The file collector exposes modified timestamps and file size of files in the filesystem.
The collector
|||
-|-
Metric name prefix | `file`
Enabled by default? | No
## Flags
### `--collector.file.file-patterns`
Comma-separated list of file patterns. Each pattern is a glob pattern that can contain `*`, `?`, and `**` (recursive).
See https://github.com/bmatcuk/doublestar#patterns for an extended description of the pattern syntax.
## Metrics
| Name | Description | Type | Labels |
|----------------------------------------|------------------------|-------|--------|
| `windows_file_mtime_timestamp_seconds` | File modification time | gauge | `file` |
| `windows_file_size_bytes` | File size | gauge | `file` |
### Example metric
```
# HELP windows_file_mtime_timestamp_seconds File modification time
# TYPE windows_file_mtime_timestamp_seconds gauge
windows_file_mtime_timestamp_seconds{file="C:\\Users\\admin\\Desktop\\Dashboard.lnk"} 1.726434517e+09
# HELP windows_file_size_bytes File size
# TYPE windows_file_size_bytes gauge
windows_file_size_bytes{file="C:\\Users\\admin\\Desktop\\Dashboard.lnk"} 123
```
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
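To make the new collector concrete, here is a sketch of enabling it from the command line; the flag names come from the documentation above, while the glob patterns themselves are only illustrative:

```powershell
windows_exporter.exe --collectors.enabled="[defaults],file" --collector.file.file-patterns="C:\inetpub\logs\**\*.log,C:\backup\*.bak"
```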

View File

@@ -1,36 +0,0 @@
# filetime collector
The filetime collector exposes modified timestamps of files in the filesystem.
The collector
|||
-|-
Metric name prefix | `filetime`
Enabled by default? | No
## Flags
### `--collectors.filetime.file-patterns`
Comma-separated list of file patterns. Each pattern is a glob pattern that can contain `*`, `?`, and `**` (recursive).
See https://github.com/bmatcuk/doublestar#patterns for an extended description of the pattern syntax.
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_filetime_mtime_timestamp_seconds` | File modification time | gauge | `file`
### Example metric
```
# HELP windows_filetime_mtime_timestamp_seconds File modification time
# TYPE windows_filetime_mtime_timestamp_seconds gauge
windows_filetime_mtime_timestamp_seconds{file="C:\\Users\\admin\\Desktop\\Dashboard.lnk"} 1.726434517e+09
```
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -20,28 +20,28 @@ These metrics are available on supported versions of Windows with compatible GPU
  ### Adapter-level Metrics
  | Name | Description | Type | Labels |
  |------|-------------|------|--------|
- | `windows_gpu_info` | A metric with a constant '1' value labeled with gpu device information. | gauge | `luid`,`name`,`bus_number`,`phys`,`function_number` |
+ | `windows_gpu_info` | A metric with a constant '1' value labeled with gpu device information. | gauge | `bus_number`,`device_id`,`function_number`,`luid`,`name`,`phys` |
- | `windows_gpu_dedicated_system_memory_size_bytes` | The size, in bytes, of memory that is dedicated from system memory. | gauge | `luid` |
+ | `windows_gpu_dedicated_system_memory_size_bytes` | The size, in bytes, of memory that is dedicated from system memory. | gauge | `device_id`,`luid` |
- | `windows_gpu_dedicated_video_memory_size_bytes` | The size, in bytes, of memory that is dedicated from video memory. | gauge | `luid` |
+ | `windows_gpu_dedicated_video_memory_size_bytes` | The size, in bytes, of memory that is dedicated from video memory. | gauge | `device_id`,`luid` |
- | `windows_gpu_shared_system_memory_size_bytes` | The size, in bytes, of memory from system memory that can be shared by many users. | gauge | `luid` |
+ | `windows_gpu_shared_system_memory_size_bytes` | The size, in bytes, of memory from system memory that can be shared by many users. | gauge | `device_id`,`luid` |
- | `windows_gpu_adapter_memory_committed_bytes` | Total committed GPU memory in bytes per physical GPU | gauge | `luid`,`phys` |
+ | `windows_gpu_adapter_memory_committed_bytes` | Total committed GPU memory in bytes per physical GPU | gauge | `device_id`,`luid`,`phys` |
- | `windows_gpu_adapter_memory_dedicated_bytes` | Dedicated GPU memory usage in bytes per physical GPU | gauge | `luid`,`phys` |
+ | `windows_gpu_adapter_memory_dedicated_bytes` | Dedicated GPU memory usage in bytes per physical GPU | gauge | `device_id`,`luid`,`phys` |
- | `windows_gpu_adapter_memory_shared_bytes` | Shared GPU memory usage in bytes per physical GPU | gauge | `luid`,`phys` |
+ | `windows_gpu_adapter_memory_shared_bytes` | Shared GPU memory usage in bytes per physical GPU | gauge | `device_id`,`luid`,`phys` |
- | `windows_gpu_local_adapter_memory_bytes` | Local adapter memory usage in bytes per physical GPU | gauge | `luid`,`phys` |
+ | `windows_gpu_local_adapter_memory_bytes` | Local adapter memory usage in bytes per physical GPU | gauge | `device_id`,`luid`,`phys`,`part` |
- | `windows_gpu_non_local_adapter_memory_bytes` | Non-local adapter memory usage in bytes per physical GPU | gauge | `luid`,`phys` |
+ | `windows_gpu_non_local_adapter_memory_bytes` | Non-local adapter memory usage in bytes per physical GPU | gauge | `device_id`,`luid`,`phys`,`part` |
  ### Per-process Metrics
  | Name | Description | Type | Labels |
  |------|-------------|------|--------|
- | `windows_gpu_engine_time_seconds` | Total running time of the GPU engine in seconds | counter | `luid`,`phys`, `eng`, `engtype`, `process_id` |
+ | `windows_gpu_engine_time_seconds` | Total running time of the GPU engine in seconds | counter | `device_id`,`luid`,`phys`, `eng`, `engtype`, `process_id` |
- | `windows_gpu_process_memory_committed_bytes` | Total committed GPU memory in bytes per process | gauge | `luid`,`phys`,`process_id` |
+ | `windows_gpu_process_memory_committed_bytes` | Total committed GPU memory in bytes per process | gauge | `device_id`,`luid`,`phys`,`process_id` |
- | `windows_gpu_process_memory_dedicated_bytes` | Dedicated GPU memory usage in bytes per process | gauge | `luid`,`phys`,`process_id` |
+ | `windows_gpu_process_memory_dedicated_bytes` | Dedicated GPU memory usage in bytes per process | gauge | `device_id`,`luid`,`phys`,`process_id` |
- | `windows_gpu_process_memory_local_bytes` | Local GPU memory usage in bytes per process | gauge | `luid`,`phys`,`process_id` |
+ | `windows_gpu_process_memory_local_bytes` | Local GPU memory usage in bytes per process | gauge | `device_id`,`luid`,`phys`,`process_id` |
- | `windows_gpu_process_memory_non_local_bytes` | Non-local GPU memory usage in bytes per process | gauge | `luid`,`phys`,`process_id` |
+ | `windows_gpu_process_memory_non_local_bytes` | Non-local GPU memory usage in bytes per process | gauge | `device_id`,`luid`,`phys`,`process_id` |
- | `windows_gpu_process_memory_shared_bytes` | Shared GPU memory usage in bytes per process | gauge | `luid`,`phys`,`process_id` |
+ | `windows_gpu_process_memory_shared_bytes` | Shared GPU memory usage in bytes per process | gauge | `device_id`,`luid`,`phys`,`process_id` |
  ## Metric Labels
@@ -57,7 +57,7 @@ These are basic queries to help you get started with GPU monitoring on Windows u
  **Show GPU information for a specific physical GPU (0):**
  ```promql
- windows_gpu_info{description="NVIDIA GeForce GTX 1070",friendly_name="",hardware_id="PCI\\VEN_10DE&DEV_1B81&SUBSYS_61733842&REV_A1",phys="0",physical_device_object_name="\\Device\\NTPNP_PCI0027"} 1
+ windows_gpu_info{bus_number="8",device_id="PCI\\VEN_10DE&DEV_1B81&SUBSYS_61733842&REV_A1",function_number="0",luid="0x00000000_0x00010F8A",name="NVIDIA GeForce GTX 1070",phys="0"} 1
  ```
  **Show total dedicated GPU memory (in bytes) usage on GPU 0:**

View File

@@ -130,10 +130,10 @@ If given, an application needs to *not* match the exclude regexp in order for th
| `windows_iis_server_output_cache_hits_total` | Total number of successful lookups in output cache (since service startup) | counter | None | | `windows_iis_server_output_cache_hits_total` | Total number of successful lookups in output cache (since service startup) | counter | None |
| `windows_iis_server_output_cache_items_flushed_total` | Total number of items flushed from output cache (since service startup) | counter | None | | `windows_iis_server_output_cache_items_flushed_total` | Total number of items flushed from output cache (since service startup) | counter | None |
| `windows_iis_server_output_cache_flushes_total` | Total number of flushes of output cache (since service startup) | counter | None | | `windows_iis_server_output_cache_flushes_total` | Total number of flushes of output cache (since service startup) | counter | None |
| `http_requests_current_queue_size` | Http Request Current queue size | counter | None | | `windows_iis_http_requests_current_queue_size` | Http Request Current queue size | counter | None |
| `http_request_total_rejected_request` | Http Request total rejected request | counter | None | | `windows_iis_http_request_total_rejected_request` | Http Request total rejected request | counter | None |
| `http_requests_max_queue_item_age` | Http Request Max queue Item age | counter | None | | `windows_iis_http_requests_max_queue_item_age` | Http Request Max queue Item age | counter | None |
| `http_requests_arrival_rate` | Http requests Arrival Rate | counter | None | | `windows_iis_http_requests_arrival_rate` | Http requests Arrival Rate | counter | None |
### Example metric ### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_ _This collector does not yet have explained examples, we would appreciate your help adding them!_
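As a minimal sketch using the renamed metrics above (the `5m` window is an arbitrary choice), the rate of rejected HTTP requests can be graphed with:

```promql
# Rejected HTTP requests per second, using the new windows_iis_ prefix
rate(windows_iis_http_request_total_rejected_request[5m])
```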

View File

@@ -5,14 +5,14 @@ The MSCluster_Cluster class is a dynamic WMI class that represents a cluster.
||| |||
-|- -|-
Metric name prefix | `mscluster` Metric name prefix | `mscluster`
Classes | `MSCluster_Cluster`,`MSCluster_Network`,`MSCluster_Node`,`MSCluster_Resource`,`MSCluster_ResourceGroup` Classes | `MSCluster_Cluster`,`MSCluster_Network`,`MSCluster_Node`,`MSCluster_Resource`,`MSCluster_ResourceGroup`,`MSCluster_DiskPartition`,`MSFT_VirtualDisk`
Enabled by default? | No Enabled by default? | No
## Flags ## Flags
### `--collectors.mscluster.enabled` ### `--collectors.mscluster.enabled`
Comma-separated list of collectors to use, for example: Comma-separated list of collectors to use, for example:
`--collectors.mscluster.enabled=cluster,network,node,resource,resourcegroup`. `--collectors.mscluster.enabled=cluster,network,node,resource,resourcegroup,shared_volumes,virtualdisk`.
Matching is case-sensitive. Matching is case-sensitive.
## Metrics ## Metrics
@@ -170,17 +170,68 @@ Matching is case-sensitive.
| `mscluster_resourcegroup_State` | The current state of the resource group. -1: Unknown; 0: Online; 1: Offline; 2: Failed; 3: Partial Online; 4: Pending | gauge | `name` | | `mscluster_resourcegroup_State` | The current state of the resource group. -1: Unknown; 0: Online; 1: Offline; 2: Failed; 3: Partial Online; 4: Pending | gauge | `name` |
| `mscluster_resourcegroup_UpdateDomain` | | gauge | `name` | | `mscluster_resourcegroup_UpdateDomain` | | gauge | `name` |
### Shared Volumes
| Name | Description | Type | Labels |
|------------------------------------------|----------------------------------------------------------------|-------|-----------------------------|
| `mscluster_shared_volumes_info` | Cluster Shared Volumes information (value is always 1) | gauge | `name`,`path`,`volume_guid` |
| `mscluster_shared_volumes_total_bytes` | Total size of the Cluster Shared Volume in bytes | gauge | `name`,`volume_guid` |
| `mscluster_shared_volumes_free_bytes` | Free space on the Cluster Shared Volume in bytes | gauge | `name`,`volume_guid` |
### Virtual Disk
| Name | Description | Type | Labels |
|-----------------------------------------------------------|------------------------------------------------------------------------------------------------|-------|--------|
| `mscluster_virtualdisk_info` | Virtual disk information (value is always 1) | gauge | `name`, `unique_id` |
| `mscluster_virtualdisk_health_status` | Health status of the virtual disk. 0: Healthy, 1: Warning, 2: Unhealthy, 5: Unknown | gauge | `name`, `unique_id` |
| `mscluster_virtualdisk_size_bytes` | Total size of the virtual disk in bytes | gauge | `name`, `unique_id` |
| `mscluster_virtualdisk_footprint_on_pool_bytes` | Physical storage consumed by the virtual disk on the storage pool in bytes | gauge | `name`, `unique_id` |
| `mscluster_virtualdisk_storage_efficiency_percent` | Storage efficiency percentage (Size / FootprintOnPool * 100) | gauge | `name`, `unique_id` |
### Example metric ### Example metric
Query the state of all cluster resource owned by node1 Query the state of all cluster resource owned by node1
``` ```
windows_mscluster_resource_owner_node{node_name="node1"} windows_mscluster_resource_owner_node{node_name="node1"}
``` ```
Query virtual disk storage efficiency for thin provisioned disks
```
windows_mscluster_virtualdisk_storage_efficiency_percent
```
## Useful queries ## Useful queries
Counts the number of Network Name cluster resource Counts the number of Network Name cluster resource
``` ```
count(windows_mscluster_resource_state{type="Network Name"}) count(windows_mscluster_resource_state{type="Network Name"})
``` ```
Find virtual disks with low storage efficiency (over-provisioned)
```
windows_mscluster_virtualdisk_storage_efficiency_percent < 50
```
Calculate total virtual disk capacity vs physical usage
```
sum(windows_mscluster_virtualdisk_size_bytes) / sum(windows_mscluster_virtualdisk_footprint_on_pool_bytes) * 100
```
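Find virtual disks that are not reporting as healthy (a minimal sketch based on the health status values listed above)
```
windows_mscluster_virtualdisk_health_status != 0
```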
## Alerting examples ## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
#### Low free space on cluster shared volume
```yaml
# Alerts if a volume has less than 20% free space
- alert: LowCSVFreeSpace
  expr: |
    (
      max by (name, cluster) (windows_mscluster_shared_volumes_free_bytes{name!="ClusterPerformanceHistory"})
      /
      max by (name, cluster) (windows_mscluster_shared_volumes_total_bytes{name!="ClusterPerformanceHistory"})
    ) * 100 < 20
  for: 10m
  labels:
    severity: warning
  annotations:
    summary: "Low CSV free space on {{ $labels.name }}"
    description: |
      Cluster Shared Volume {{ $labels.name }} on cluster {{ $labels.cluster }} has less than 20% free space (current: {{ printf "%.2f" $value }}%)
```

View File

@@ -72,18 +72,18 @@ Comma-separated list of collectors to use. Defaults to all, if not specified.
| Name | Description | Type | Labels | | Name | Description | Type | Labels |
|----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|-----------| |----------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|-----------|
| `windows_netframework_clrmemory_allocated_bytes_total` | Displays the total number of bytes allocated on the garbage collection heap. | counter | `process` | | `windows_netframework_clrmemory_allocated_bytes_total` | Displays the total number of bytes allocated on the garbage collection heap. | counter | `process`, `process_id` |
| `windows_netframework_clrmemory_finalization_survivors` | Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized. | gauge | `process` | | `windows_netframework_clrmemory_finalization_survivors` | Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_heap_size_bytes` | Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated. | gauge | `process` | | `windows_netframework_clrmemory_heap_size_bytes` | Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_promoted_bytes` | Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection. | gauge | `process` | | `windows_netframework_clrmemory_promoted_bytes` | Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_number_gc_handles` | Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment. | gauge | `process` | | `windows_netframework_clrmemory_number_gc_handles` | Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_collections_total` | Displays the number of times the generation objects are garbage collected since the application started. | counter | `process` | | `windows_netframework_clrmemory_collections_total` | Displays the number of times the generation objects are garbage collected since the application started. | counter | `process`, `process_id` |
| `windows_netframework_clrmemory_induced_gc_total` | Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect. | counter | `process` | | `windows_netframework_clrmemory_induced_gc_total` | Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect. | counter | `process`, `process_id` |
| `windows_netframework_clrmemory_number_pinned_objects` | Displays the number of pinned objects encountered in the last garbage collection. | gauge | `process` | | `windows_netframework_clrmemory_number_pinned_objects` | Displays the number of pinned objects encountered in the last garbage collection. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_number_sink_blocksinuse` | Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector. | gauge | `process` | | `windows_netframework_clrmemory_number_sink_blocksinuse` | Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_committed_bytes` | Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file. | gauge | `process` | | `windows_netframework_clrmemory_committed_bytes` | Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_reserved_bytes` | Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used. | gauge | `process` | | `windows_netframework_clrmemory_reserved_bytes` | Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used. | gauge | `process`, `process_id` |
| `windows_netframework_clrmemory_gc_time_percent` | Displays the percentage of time that was spent performing a garbage collection in the last sample. | gauge | `process` | | `windows_netframework_clrmemory_gc_time_percent` | Displays the percentage of time that was spent performing a garbage collection in the last sample. | gauge | `process`, `process_id` |
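Because the new `process_id` label yields one series per process instance, a quick sketch for aggregating committed GC memory back to the process name is:

```promql
# Committed GC heap memory per process name, summed across process IDs
sum by (process) (windows_netframework_clrmemory_committed_bytes)
```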
### CLR Remoting ### CLR Remoting

View File

@@ -14,10 +14,11 @@ None
## Metrics ## Metrics
| Name | Description | Type | Labels | | Name | Description | Type | Labels |
|-----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|-------|------------------------------------------------------------------------| |----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------|-------|-----------------------------------------------------------------------------------------------------------------|
| `windows_os_hostname` | Labelled system hostname information as provided by ComputerSystem.DNSHostName and ComputerSystem.Domain | gauge | `domain`, `fqdn`, `hostname` | | `windows_os_hostname` | Labelled system hostname information as provided by ComputerSystem.DNSHostName and ComputerSystem.Domain | gauge | `domain`, `fqdn`, `hostname` |
| `windows_os_info` | Contains full product name & version in labels. Note that the `major_version` for Windows 11 is "10"; a build number greater than 22000 represents Windows 11. | gauge | `product`, `version`, `major_version`, `minor_version`, `build_number` | | `windows_os_info` | Contains full product name & version in labels. Note that the `major_version` for Windows 11 is "10"; a build number greater than 22000 represents Windows 11. | gauge | `product`, `version`, `major_version`, `minor_version`, `build_number`, `revision`, `installation_type` |
| `windows_os_install_time_timestamp_seconds` | Unix timestamp of OS installation time | gauge | None |
### Example metric ### Example metric
@@ -27,11 +28,29 @@ None
windows_os_hostname{domain="",fqdn="PC",hostname="PC"} 1 windows_os_hostname{domain="",fqdn="PC",hostname="PC"} 1
# HELP windows_os_info Contains full product name & version in labels. Note that the "major_version" for Windows 11 is \\"10\\"; a build number greater than 22000 represents Windows 11. # HELP windows_os_info Contains full product name & version in labels. Note that the "major_version" for Windows 11 is \\"10\\"; a build number greater than 22000 represents Windows 11.
# TYPE windows_os_info gauge # TYPE windows_os_info gauge
windows_os_info{build_number="19045",major_version="10",minor_version="0",product="Windows 10 Pro",revision="4842",version="10.0.19045"} 1 windows_os_info{build_number="19045",installation_type="Client",major_version="10",minor_version="0",product="Windows 10 Pro",revision="4842",version="10.0.19045"} 1
# HELP windows_os_install_time_timestamp_seconds Unix timestamp of OS installation time
# TYPE windows_os_install_time_timestamp_seconds gauge
windows_os_install_time_timestamp_seconds 1.6725312e+09
``` ```
## Useful queries ## Useful queries
_This collector does not yet have useful queries, we would appreciate your help adding them!_ _This collector does not yet have useful queries, we would appreciate your help adding them!_
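As an illustrative sketch, the new install-time metric can be used to compute the age of the OS installation in days:
```
(time() - windows_os_install_time_timestamp_seconds) / 86400
```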
## Alerting examples ## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
#### Hyper-V host down
```yaml
# Alerts if Agent/Host is down for 5min
- alert: HypervHostDown
  expr: up{app="hyper-v"} == 0
  for: 5m
  labels:
    severity: critical
  annotations:
    summary: Hyper-V host {{ $labels.instance }} is down
    description: |
      Hyper-V host {{ $labels.instance }} has been unreachable for more than 5 minutes.
      Job: {{ $labels.job }}
```

View File

@@ -260,3 +260,27 @@ collector:
The perfdata collector returns metrics based on the user configuration. The perfdata collector returns metrics based on the user configuration.
The metrics are named based on the object name and the counter name. The metrics are named based on the object name and the counter name.
The instance name is added as a label to the metric. The instance name is added as a label to the metric.
# Examples
## thermalzone collector
```yaml
collector:
  performancecounter:
    objects: |-
      - name: thermalzone
        object: "Thermal Zone Information"
        instances: ["*"]
        type: formatted
        counters:
          - name: "Temperature"
            type: "gauge"
            metric: windows_thermalzone_temperature_celsius
          - name: "% Passive Limit"
            type: "gauge"
            metric: windows_thermalzone_percent_passive_limit
          - name: "Throttle Reasons"
            type: "gauge"
            metric: windows_thermalzone_throttle_reasons
```
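With this configuration, the resulting series can be queried directly; for example, a minimal sketch that reports the hottest thermal zone on the host:

```promql
max(windows_thermalzone_temperature_celsius)
```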

View File

@@ -42,6 +42,11 @@ Disabled by default, and can be enabled with `--collector.process.iis`. NOTE: Ju
Version of the process collector to use. 1 for Process V1, 2 for Process V2. Version of the process collector to use. 1 for Process V1, 2 for Process V2.
Defaults to 0 which will use the latest version available. Defaults to 0 which will use the latest version available.
### `--collector.process.cmdline`
Enables the `cmdline` label for the process metrics.
This label contains the command line used to start the process.
Enabled by default, and can be turned off with `--no-collector.process.cmdline`.
### Example ### Example
To match all firefox processes: `--collector.process.include="firefox.*"`. To match all firefox processes: `--collector.process.include="firefox.*"`.

View File

@@ -1,32 +0,0 @@
# thermalzone collector
The thermalzone collector exposes metrics about system temps. Note that temperature is given in Kelvin
|||
-|-
Metric name prefix | `thermalzone`
Classes | [`Win32_PerfRawData_Counters_ThermalZoneInformation`](https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_thermalzoneinformation/#temperature_properties)
Enabled by default? | No
## Flags
None
## Metrics
Name | Description | Type | Labels
-----|-------------|------|-------
`windows_thermalzone_percent_passive_limit` | % Passive Limit is the current limit this thermal zone is placing on the devices it controls. A limit of 100% indicates the devices are unconstrained. A limit of 0% indicates the devices are fully constrained. | gauge | None
`windows_thermalzone_temperature_celsius ` | Temperature of the thermal zone, in degrees Celsius. | gauge | None
`windows_thermalzone_throttle_reasons ` | Throttle Reasons indicate reasons why the thermal zone is limiting performance of the devices it controls. 0x0 - The zone is not throttled. 0x1 - The zone is throttled for thermal reasons. 0x2 - The zone is throttled to limit electrical current. | gauge | None
[`Throttle reasons` source](https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/examples--requirements-and-diagnostics)
### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_
## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

View File

@@ -14,14 +14,11 @@ The Windows Update service is responsible for managing the installation of updat
## Flags ## Flags
> [!NOTE] ### `--collector.update.online`
> The collector name used in the CLI flags is `updates`, while the metric prefix is `update`. This naming mismatch is known and intentional for compatibility reasons. Whether to search for updates online. If set to `false` via `--no-collector.update.online`, the collector will only list updates that are already found by the Windows Update service.
Set to `true` via `--collector.update.online` to search for updates online, which will take longer to complete.
### `--collector.updates.online` ### `--collector.update.scrape-interval`
Whether to search for updates online. If set to `false`, the collector will only list updates that are already found by the Windows Update service.
Set to `true` to search for updates online, which will take longer to complete.
### `--collector.updates.scrape-interval`
Define the interval of scraping Windows Update information Define the interval of scraping Windows Update information
## Metrics ## Metrics

View File

@@ -13,6 +13,5 @@ scrape:
timeout-margin: 0.5 timeout-margin: 0.5
telemetry: telemetry:
path: /metrics path: /metrics
max-requests: 5
web: web:
listen-address: ":9182" listen-address: ":9182"

go.mod
View File

@@ -1,40 +1,44 @@
module github.com/prometheus-community/windows_exporter module github.com/prometheus-community/windows_exporter
go 1.24 go 1.25
require ( require (
github.com/alecthomas/kingpin/v2 v2.4.0 github.com/alecthomas/kingpin/v2 v2.4.0
github.com/bmatcuk/doublestar/v4 v4.9.0 github.com/bmatcuk/doublestar/v4 v4.10.0
github.com/dimchansky/utfbom v1.1.1 github.com/dimchansky/utfbom v1.1.1
github.com/go-ole/go-ole v1.3.0 github.com/go-ole/go-ole v1.3.0
github.com/prometheus/client_golang v1.22.0 github.com/prometheus/client_golang v1.23.2
github.com/prometheus/client_model v0.6.2 github.com/prometheus/client_model v0.6.2
github.com/prometheus/common v0.65.0 github.com/prometheus/common v0.67.5
github.com/prometheus/exporter-toolkit v0.14.0 github.com/prometheus/exporter-toolkit v0.15.1
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.11.1
golang.org/x/sys v0.34.0 go.yaml.in/yaml/v3 v3.0.4
gopkg.in/yaml.v3 v3.0.1 golang.org/x/sys v0.40.0
) )
require ( require (
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/coreos/go-systemd/v22 v22.7.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect
github.com/mdlayher/socket v0.5.1 // indirect github.com/mdlayher/socket v0.5.1 // indirect
github.com/mdlayher/vsock v1.2.1 // indirect github.com/mdlayher/vsock v1.2.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/procfs v0.17.0 // indirect github.com/prometheus/procfs v0.19.2 // indirect
github.com/xhit/go-str2duration/v2 v2.1.0 // indirect github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
golang.org/x/crypto v0.40.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect
golang.org/x/net v0.42.0 // indirect golang.org/x/crypto v0.47.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/net v0.49.0 // indirect
golang.org/x/sync v0.16.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/text v0.27.0 // indirect golang.org/x/sync v0.19.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect golang.org/x/text v0.33.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect golang.org/x/time v0.14.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
) )

go.sum
View File

@@ -4,12 +4,12 @@ github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vS
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmatcuk/doublestar/v4 v4.9.0 h1:DBvuZxjdKkRP/dr4GVV4w2fnmrk5Hxc90T51LZjv0JA= github.com/bmatcuk/doublestar/v4 v4.10.0 h1:zU9WiOla1YA122oLM6i4EXvGW62DvKZVxIe6TYWexEs=
github.com/bmatcuk/doublestar/v4 v4.9.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmatcuk/doublestar/v4 v4.10.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.7.0 h1:LAEzFkke61DFROc7zNLX/WA2i5J8gYqe0rSj9KI28KA=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.7.0/go.mod h1:xNUYtjHu2EDXbsxz1i41wouACIwT7Ybq9o0BQhMwD0w=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -17,9 +17,12 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
@@ -41,16 +44,16 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg= github.com/prometheus/exporter-toolkit v0.15.1 h1:XrGGr/qWl8Gd+pqJqTkNLww9eG8vR/CoRk0FubOKfLE=
github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA= github.com/prometheus/exporter-toolkit v0.15.1/go.mod h1:P/NR9qFRGbCFgpklyhix9F6v6fFr/VQB/CVsrMDGKo4=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -61,30 +64,36 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

View File

@@ -21,6 +21,14 @@
<DirectoryRef Id="APPLICATIONFOLDER"> <DirectoryRef Id="APPLICATIONFOLDER">
<Component Transitive="yes"> <Component Transitive="yes">
<File Id="windows_exporter.exe" Name="windows_exporter.exe" Source="Work\windows_exporter.exe" KeyPath="yes" Vital="yes" Checksum="yes"/> <File Id="windows_exporter.exe" Name="windows_exporter.exe" Source="Work\windows_exporter.exe" KeyPath="yes" Vital="yes" Checksum="yes"/>
<!-- The "Name" field must match the argument to eventlog.Open() -->
<util:EventSource Log="Application" Name="windows_exporter"
EventMessageFile="%SystemRoot%\System32\EventCreate.exe"
SupportsErrors="yes"
SupportsInformationals="yes"
SupportsWarnings="yes"/>
<ServiceInstall <ServiceInstall
Id="InstallExporterService" Id="InstallExporterService"
Name="windows_exporter" Name="windows_exporter"
@@ -45,13 +53,8 @@
/> />
<ServiceDependency Id="wmiApSrv" /> <ServiceDependency Id="wmiApSrv" />
</ServiceInstall> </ServiceInstall>
<ServiceControl Id="ServiceStateControl" Name="windows_exporter" Remove="uninstall" Start="install" Stop="both"/> <ServiceControl Id="StartService" Name="windows_exporter" Start="install" Wait="no" />
<!-- The "Name" field must match the argument to eventlog.Open() --> <ServiceControl Id="StopService" Name="windows_exporter" Remove="uninstall" Stop="both" Wait="yes" />
<util:EventSource Log="Application" Name="windows_exporter"
EventMessageFile="%SystemRoot%\System32\EventCreate.exe"
SupportsErrors="yes"
SupportsInformationals="yes"
SupportsWarnings="yes"/>
</Component> </Component>
<Component Id="CreateTextfileDirectory" Directory="textfile_inputs" Guid="d03ef58a-9cbf-4165-ad39-d143e9b27e14"> <Component Id="CreateTextfileDirectory" Directory="textfile_inputs" Guid="d03ef58a-9cbf-4165-ad39-d143e9b27e14">
<CreateFolder /> <CreateFolder />

installer/icon.ico (new binary file, 108 KiB, not shown)

View File

@@ -45,6 +45,45 @@
Property="OLDERVERSIONBEINGUPGRADED" /> Property="OLDERVERSIONBEINGUPGRADED" />
</Upgrade> </Upgrade>
<Media Id="1" Cabinet="windows_exporter.cab" EmbedCab="yes" />
<MajorUpgrade Schedule="afterInstallInitialize" DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit." AllowSameVersionUpgrades="yes" />
<Property Id="ENABLED_COLLECTORS" Secure="yes" />
<SetProperty Id="CollectorsFlag" After="InstallFiles" Sequence="execute" Value="--collectors.enabled [ENABLED_COLLECTORS]" Condition="ENABLED_COLLECTORS" />
<Property Id="EXTRA_FLAGS" Secure="yes" />
<SetProperty Id="ExtraFlags" After="InstallFiles" Sequence="execute" Value="[EXTRA_FLAGS]" Condition="EXTRA_FLAGS" />
<Property Id="CONFIG_FILE" Secure="yes" Value="config.yaml" />
<SetProperty Id="ConfigFile_NonDefault" After="InstallFiles" Sequence="execute" Value="[CONFIG_FILE]" Condition="CONFIG_FILE AND CONFIG_FILE&lt;&gt;&quot;config.yaml&quot;" />
<SetProperty Id="ConfigFile_Default" After="InstallFiles" Sequence="execute" Value="[APPLICATIONFOLDER]config.yaml" Condition="CONFIG_FILE=&quot;config.yaml&quot;" />
<SetProperty Id="ConfigFileFlag" After="InstallFiles" Sequence="execute" Value="--config.file=&quot;[ConfigFile_NonDefault][ConfigFile_Default]&quot;" Condition="ConfigFile_NonDefault OR ConfigFile_Default" />
<Property Id="LISTEN_PORT" Secure="yes" Value="9182" />
<SetProperty Id="ListenFlag" After="InstallFiles" Sequence="execute" Value="--web.listen-address=&quot;[LISTEN_ADDR]:[LISTEN_PORT]&quot;" Condition="LISTEN_ADDR&lt;&gt;&quot;&quot; OR LISTEN_PORT&lt;&gt;9182" />
<Property Id="METRICS_PATH" Secure="yes" />
<SetProperty Id="MetricsPathFlag" After="InstallFiles" Sequence="execute" Value="--telemetry.path=&quot;[METRICS_PATH]&quot;" Condition="METRICS_PATH" />
<Property Id="REMOTE_ADDR" Secure="yes" />
<SetProperty Id="RemoteAddressFlag" After="InstallFiles" Sequence="execute" Value="[REMOTE_ADDR]" Condition="REMOTE_ADDR" />
<Property Id="TEXTFILE_DIRS" Secure="yes" />
<SetProperty Id="TextfileDirsFlag" After="InstallFiles" Sequence="execute" Value="--collector.textfile.directories=&quot;[TEXTFILE_DIRS]&quot;" Condition="TEXTFILE_DIRS" />
<!-- Configuration for how the installer shows in Add/Remove Programs. -->
<Icon Id="icon.ico" SourceFile=".\icon.ico"/>
<Property Id="ARPPRODUCTICON" Value="icon.ico" />
<Property Id="ARPHELPLINK" Value="https://github.com/prometheus-community/windows_exporter/issues" />
<Property Id="ARPURLINFOABOUT" Value="https://github.com/prometheus-community/windows_exporter" />
<Property Id="ARPCOMMENTS" Value="Prometheus exporter for Windows machines" />
<!-- Disable the repair option; the functionality is available through Change instead. -->
<Property Id="ARPNOREPAIR" Value="1" />
<Property Id="ARPSIZE" Value="10000" />
<Property Id="START_MENU_FOLDER" Value="0" />
<Property Id="NOSTART" Value="0" />
<CustomAction Id="CheckExtraFlags" <CustomAction Id="CheckExtraFlags"
Error="The parameter '--config.file' must not be included in EXTRA_FLAGS. Use CONFIG_FILE instead. Please remove it and try again." /> Error="The parameter '--config.file' must not be included in EXTRA_FLAGS. Use CONFIG_FILE instead. Please remove it and try again." />
@@ -93,6 +132,23 @@
/> />
<!-- END CUSTOM ACTION FOR KILLING THE PROCESS --> <!-- END CUSTOM ACTION FOR KILLING THE PROCESS -->
<!-- START CUSTOM ACTION FOR SET SERVICE FAILUREFLAG -->
<SetProperty
Id="ConfigureServiceRecovery"
Value="&quot;[WindowsFolder]System32\sc.exe&quot; failureflag &quot;windows_exporter&quot; 1"
Before="ConfigureServiceRecovery"
Sequence="execute"
/>
<CustomAction
Id="ConfigureServiceRecovery"
BinaryRef="Wix4UtilCA_$(sys.BUILDARCHSHORT)"
DllEntry="WixQuietExec"
Execute="deferred"
Return="ignore"
Impersonate="no"
/>
<!-- END CUSTOM ACTION FOR SET SERVICE FAILUREFLAG -->
<InstallExecuteSequence> <InstallExecuteSequence>
<!-- Set REINSTALL=all and REINSTALLMODE=amus if the user reruns the <!-- Set REINSTALL=all and REINSTALLMODE=amus if the user reruns the
MSI, which will force reinstalling all files and services. --> MSI, which will force reinstalling all files and services. -->
@@ -100,47 +156,14 @@
Condition="Installed AND (NOT REMOVE) AND (NOT UPGRADINGPRODUCTCODE)"/> Condition="Installed AND (NOT REMOVE) AND (NOT UPGRADINGPRODUCTCODE)"/>
<Custom Action="set_reinstall_all_property" Before="set_reinstallmode_property" Condition="MAINTENANCE"/> <Custom Action="set_reinstall_all_property" Before="set_reinstallmode_property" Condition="MAINTENANCE"/>
<Custom Action="set_reinstallmode_property" Before="LaunchConditions" Condition="MAINTENANCE"/> <Custom Action="set_reinstallmode_property" Before="LaunchConditions" Condition="MAINTENANCE"/>
<Custom Action="CreateConfigFile" Before="InstallServices" Condition="ConfigFile_NonDefault OR ConfigFile_Default" /> <Custom Action="CreateConfigFile" Before="InstallServices" Condition="ConfigFile_Default" />
<Custom Action="ConfigureServiceRecovery" After="InstallServices" Condition="NOT REMOVE" />
<Custom Action="KillProcess" Before="RemoveFiles" /> <Custom Action="KillProcess" Before="RemoveFiles" />
<Custom Action="CheckExtraFlags" Before="InstallInitialize" <Custom Action="CheckExtraFlags" Before="InstallInitialize"
Condition="EXTRA_FLAGS AND (EXTRA_FLAGS&gt;&lt;&quot;--config.file&quot;)" /> Condition="EXTRA_FLAGS AND (EXTRA_FLAGS&gt;&lt;&quot;--config.file&quot;)" />
</InstallExecuteSequence> </InstallExecuteSequence>
<Media Id="1" Cabinet="windows_exporter.cab" EmbedCab="yes" />
<MajorUpgrade Schedule="afterInstallInitialize" DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit." AllowSameVersionUpgrades="yes" />
<Property Id="ENABLED_COLLECTORS" Secure="yes" />
<SetProperty Id="CollectorsFlag" After="InstallFiles" Sequence="execute" Value="--collectors.enabled [ENABLED_COLLECTORS]" Condition="ENABLED_COLLECTORS" />
<Property Id="EXTRA_FLAGS" Secure="yes" />
<SetProperty Id="ExtraFlags" After="InstallFiles" Sequence="execute" Value="[EXTRA_FLAGS]" Condition="EXTRA_FLAGS" />
<Property Id="CONFIG_FILE" Secure="yes" Value="config.yaml" />
<SetProperty Id="ConfigFile_NonDefault" After="InstallFiles" Sequence="execute" Value="[CONFIG_FILE]" Condition="CONFIG_FILE AND CONFIG_FILE&lt;&gt;&quot;config.yaml&quot;" />
<SetProperty Id="ConfigFile_Default" After="InstallFiles" Sequence="execute" Value="[APPLICATIONFOLDER]config.yaml" Condition="CONFIG_FILE=&quot;config.yaml&quot;" />
<SetProperty Id="ConfigFileFlag" After="InstallFiles" Sequence="execute" Value="--config.file=&quot;[ConfigFile_NonDefault][ConfigFile_Default]&quot;" Condition="ConfigFile_NonDefault OR ConfigFile_Default" />
<Property Id="LISTEN_PORT" Secure="yes" Value="9182" />
<SetProperty Id="ListenFlag" After="InstallFiles" Sequence="execute" Value="--web.listen-address [LISTEN_ADDR]:[LISTEN_PORT]" Condition="LISTEN_ADDR&lt;&gt;&quot;&quot; OR LISTEN_PORT&lt;&gt;9182" />
<Property Id="METRICS_PATH" Secure="yes" />
<SetProperty Id="MetricsPathFlag" After="InstallFiles" Sequence="execute" Value="--telemetry.path [METRICS_PATH]" Condition="METRICS_PATH" />
<Property Id="REMOTE_ADDR" Secure="yes" />
<SetProperty Id="RemoteAddressFlag" After="InstallFiles" Sequence="execute" Value="[REMOTE_ADDR]" Condition="REMOTE_ADDR" />
<Property Id="TEXTFILE_DIRS" Secure="yes" />
<SetProperty Id="TextfileDirsFlag" After="InstallFiles" Sequence="execute" Value="--collector.textfile.directories [TEXTFILE_DIRS]" Condition="TEXTFILE_DIRS" />
<Property Id="ARPHELPLINK" Value="https://github.com/prometheus-community/windows_exporter/issues" />
<Property Id="ARPSIZE" Value="9000" />
<Property Id="ARPURLINFOABOUT" Value="https://github.com/prometheus-community/windows_exporter" />
<!--<Property Id="ARPNOMODIFY" Value="0" />-->
<!--<Property Id="ARPNOREPAIR" Value="1" />-->
<Property Id="START_MENU_FOLDER" Value="0" />
<Property Id="NOSTART" Value="0" />
<Feature <Feature
Id="DefaultFeature" Id="DefaultFeature"
Level="1" Level="1"

View File

@@ -131,7 +131,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.addressBookOperationsTotal = prometheus.NewDesc( c.addressBookOperationsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "address_book_operations_total"), prometheus.BuildFQName(types.Namespace, Name, "address_book_operations_total"),
"", "",
@@ -508,7 +508,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll) c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "DirectoryServices", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DirectoryServices collector: %w", err) return fmt.Errorf("failed to create DirectoryServices collector: %w", err)
} }

View File

@@ -83,7 +83,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.requestsPerSecond = prometheus.NewDesc( c.requestsPerSecond = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "requests_total"), prometheus.BuildFQName(types.Namespace, Name, "requests_total"),
"Total certificate requests processed", "Total certificate requests processed",
@@ -165,7 +165,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll) c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "Certification Authority", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Certification Authority collector: %w", err) return fmt.Errorf("failed to create Certification Authority collector: %w", err)
} }

View File

@@ -113,7 +113,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.adLoginConnectionFailures = prometheus.NewDesc( c.adLoginConnectionFailures = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"), prometheus.BuildFQName(types.Namespace, Name, "ad_login_connection_failures_total"),
"Total number of connection failures to an Active Directory domain controller", "Total number of connection failures to an Active Directory domain controller",
@@ -375,7 +375,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "AD FS", nil) c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "AD FS", nil)
if err != nil { if err != nil {
return fmt.Errorf("failed to create AD FS collector: %w", err) return fmt.Errorf("failed to create AD FS collector: %w", err)
} }

View File

@@ -99,7 +99,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.asyncCopyReadsTotal = prometheus.NewDesc( c.asyncCopyReadsTotal = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"), prometheus.BuildFQName(types.Namespace, Name, "async_copy_reads_total"),
"(AsyncCopyReadsTotal)", "(AsyncCopyReadsTotal)",
@@ -277,7 +277,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Cache", pdh.InstancesAll) c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "Cache", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Cache collector: %w", err) return fmt.Errorf("failed to create Cache collector: %w", err)
} }

View File

@@ -90,7 +90,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.mu = sync.Mutex{} c.mu = sync.Mutex{}
c.logicalProcessors = prometheus.NewDesc( c.logicalProcessors = prometheus.NewDesc(
@@ -183,7 +183,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll) c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "Processor Information", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Processor Information collector: %w", err) return fmt.Errorf("failed to create Processor Information collector: %w", err)
} }

View File

@@ -455,21 +455,21 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
var err error var err error
if slices.Contains(c.config.CollectorsEnabled, "connection") { if slices.Contains(c.config.CollectorsEnabled, "connection") {
c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll) c.perfDataCollectorConnection, err = pdh.NewCollector[perfDataCounterValuesConnection](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "DFS Replication Connections", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err) return fmt.Errorf("failed to create DFS Replication Connections collector: %w", err)
} }
} }
if slices.Contains(c.config.CollectorsEnabled, "folder") { if slices.Contains(c.config.CollectorsEnabled, "folder") {
c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll) c.perfDataCollectorFolder, err = pdh.NewCollector[perfDataCounterValuesFolder](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "DFS Replicated Folders", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err) return fmt.Errorf("failed to create DFS Replicated Folders collector: %w", err)
} }
} }
if slices.Contains(c.config.CollectorsEnabled, "volume") { if slices.Contains(c.config.CollectorsEnabled, "volume") {
c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll) c.perfDataCollectorVolume, err = pdh.NewCollector[perfDataCounterValuesVolume](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "DFS Replication Service Volumes", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err) return fmt.Errorf("failed to create DFS Replication Service Volumes collector: %w", err)
} }

View File

@@ -378,7 +378,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
nil, nil,
) )
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DHCP Server", nil) c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](c.logger, pdh.CounterTypeRaw, "DHCP Server", nil)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DHCP Server collector: %w", err) return fmt.Errorf("failed to create DHCP Server collector: %w", err)
} }

View File

@@ -132,7 +132,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, miSession *mi.Session) error {
for _, collector := range c.config.CollectorsEnabled { for _, collector := range c.config.CollectorsEnabled {
if !slices.Contains([]string{subCollectorMetrics, subCollectorWMIStats}, collector) { if !slices.Contains([]string{subCollectorMetrics, subCollectorWMIStats}, collector) {
return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector, return fmt.Errorf("unknown sub collector: %s. Possible values: %s", collector,
@@ -142,7 +142,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
} }
if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) { if slices.Contains(c.config.CollectorsEnabled, subCollectorMetrics) {
if err := c.buildMetricsCollector(); err != nil { if err := c.buildMetricsCollector(logger); err != nil {
return err return err
} }
} }
@@ -156,7 +156,7 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
return nil return nil
} }
func (c *Collector) buildMetricsCollector() error { func (c *Collector) buildMetricsCollector(logger *slog.Logger) error {
c.zoneTransferRequestsReceived = prometheus.NewDesc( c.zoneTransferRequestsReceived = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"), prometheus.BuildFQName(types.Namespace, Name, "zone_transfer_requests_received_total"),
"Number of zone transfer requests (AXFR/IXFR) received by the master DNS server", "Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",
@@ -299,7 +299,7 @@ func (c *Collector) buildMetricsCollector() error {
var err error var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "DNS", pdh.InstancesAll) c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "DNS", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create DNS collector: %w", err) return fmt.Errorf("failed to create DNS collector: %w", err)
} }

View File

@@ -78,6 +78,7 @@ type Collector struct {
collectorWorkloadManagementWorkloads collectorWorkloadManagementWorkloads
config Config config Config
logger *slog.Logger
collectorFns []func(ch chan<- prometheus.Metric) error collectorFns []func(ch chan<- prometheus.Metric) error
closeFns []func() closeFns []func()
@@ -170,7 +171,9 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
subCollectors := map[string]struct { subCollectors := map[string]struct {
build func() error build func() error
collect func(ch chan<- prometheus.Metric) error collect func(ch chan<- prometheus.Metric) error
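Editor's note: collectors with many sub-builders (Exchange, Hyper-V) apply the same change by caching the scoped logger on the struct in Build, so each build*() helper can hand c.logger to pdh.NewCollector. A short sketch of that pattern, with illustrative field and helper names:

```go
type Collector struct {
	config Config
	logger *slog.Logger

	perfDataCollectorActiveSync *pdh.Collector
}

func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
	// Cache the scoped logger once; every sub-collector builder reuses it.
	c.logger = logger.With(slog.String("collector", Name))

	return c.buildActiveSync()
}

func (c *Collector) buildActiveSync() error {
	var err error

	c.perfDataCollectorActiveSync, err = pdh.NewCollector[perfDataCounterValuesActiveSync](
		c.logger, pdh.CounterTypeRaw, "MSExchange ActiveSync", pdh.InstancesAll,
	)

	return err
}
```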

View File

@@ -43,7 +43,7 @@ type perfDataCounterValuesActiveSync struct {
func (c *Collector) buildActiveSync() error { func (c *Collector) buildActiveSync() error {
var err error var err error
c.perfDataCollectorActiveSync, err = pdh.NewCollector[perfDataCounterValuesActiveSync](pdh.CounterTypeRaw, "MSExchange ActiveSync", pdh.InstancesAll) c.perfDataCollectorActiveSync, err = pdh.NewCollector[perfDataCounterValuesActiveSync](c.logger, pdh.CounterTypeRaw, "MSExchange ActiveSync", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange ActiveSync collector: %w", err) return fmt.Errorf("failed to create MSExchange ActiveSync collector: %w", err)
} }

View File

@@ -50,7 +50,7 @@ type perfDataCounterValuesADAccessProcesses struct {
func (c *Collector) buildADAccessProcesses() error { func (c *Collector) buildADAccessProcesses() error {
var err error var err error
c.perfDataCollectorADAccessProcesses, err = pdh.NewCollector[perfDataCounterValuesADAccessProcesses](pdh.CounterTypeRaw, "MSExchange ADAccess Processes", pdh.InstancesAll) c.perfDataCollectorADAccessProcesses, err = pdh.NewCollector[perfDataCounterValuesADAccessProcesses](c.logger, pdh.CounterTypeRaw, "MSExchange ADAccess Processes", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange ADAccess Processes collector: %w", err) return fmt.Errorf("failed to create MSExchange ADAccess Processes collector: %w", err)
} }

View File

@@ -39,7 +39,7 @@ type perfDataCounterValuesAutoDiscover struct {
func (c *Collector) buildAutoDiscover() error { func (c *Collector) buildAutoDiscover() error {
var err error var err error
c.perfDataCollectorAutoDiscover, err = pdh.NewCollector[perfDataCounterValuesAutoDiscover](pdh.CounterTypeRaw, "MSExchangeAutodiscover", nil) c.perfDataCollectorAutoDiscover, err = pdh.NewCollector[perfDataCounterValuesAutoDiscover](c.logger, pdh.CounterTypeRaw, "MSExchangeAutodiscover", nil)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err) return fmt.Errorf("failed to create MSExchange Autodiscover collector: %w", err)
} }

View File

@@ -39,7 +39,7 @@ type perfDataCounterValuesAvailabilityService struct {
func (c *Collector) buildAvailabilityService() error { func (c *Collector) buildAvailabilityService() error {
var err error var err error
c.perfDataCollectorAvailabilityService, err = pdh.NewCollector[perfDataCounterValuesAvailabilityService](pdh.CounterTypeRaw, "MSExchange Availability Service", pdh.InstancesAll) c.perfDataCollectorAvailabilityService, err = pdh.NewCollector[perfDataCounterValuesAvailabilityService](c.logger, pdh.CounterTypeRaw, "MSExchange Availability Service", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange Availability Service collector: %w", err) return fmt.Errorf("failed to create MSExchange Availability Service collector: %w", err)
} }

View File

@@ -52,7 +52,7 @@ type perfDataCounterValuesHTTPProxy struct {
func (c *Collector) buildHTTPProxy() error { func (c *Collector) buildHTTPProxy() error {
var err error var err error
c.perfDataCollectorHTTPProxy, err = pdh.NewCollector[perfDataCounterValuesHTTPProxy](pdh.CounterTypeRaw, "MSExchange HttpProxy", pdh.InstancesAll) c.perfDataCollectorHTTPProxy, err = pdh.NewCollector[perfDataCounterValuesHTTPProxy](c.logger, pdh.CounterTypeRaw, "MSExchange HttpProxy", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange HttpProxy collector: %w", err) return fmt.Errorf("failed to create MSExchange HttpProxy collector: %w", err)
} }

View File

@@ -39,7 +39,7 @@ type perfDataCounterValuesMapiHTTPEmsMDB struct {
func (c *Collector) buildMapiHTTPEmsMDB() error { func (c *Collector) buildMapiHTTPEmsMDB() error {
var err error var err error
c.perfDataCollectorMapiHTTPEmsMDB, err = pdh.NewCollector[perfDataCounterValuesMapiHTTPEmsMDB](pdh.CounterTypeRaw, "MSExchange MapiHttp Emsmdb", pdh.InstancesAll) c.perfDataCollectorMapiHTTPEmsMDB, err = pdh.NewCollector[perfDataCounterValuesMapiHTTPEmsMDB](c.logger, pdh.CounterTypeRaw, "MSExchange MapiHttp Emsmdb", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange MapiHttp Emsmdb: %w", err) return fmt.Errorf("failed to create MSExchange MapiHttp Emsmdb: %w", err)
} }

View File

@@ -41,7 +41,7 @@ type perfDataCounterValuesOWA struct {
func (c *Collector) buildOWA() error { func (c *Collector) buildOWA() error {
var err error var err error
c.perfDataCollectorOWA, err = pdh.NewCollector[perfDataCounterValuesOWA](pdh.CounterTypeRaw, "MSExchange OWA", pdh.InstancesAll) c.perfDataCollectorOWA, err = pdh.NewCollector[perfDataCounterValuesOWA](c.logger, pdh.CounterTypeRaw, "MSExchange OWA", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange OWA collector: %w", err) return fmt.Errorf("failed to create MSExchange OWA collector: %w", err)
} }

View File

@@ -50,7 +50,7 @@ type perfDataCounterValuesRpcClientAccess struct {
func (c *Collector) buildRpcClientAccess() error { func (c *Collector) buildRpcClientAccess() error {
var err error var err error
c.perfDataCollectorRpcClientAccess, err = pdh.NewCollector[perfDataCounterValuesRpcClientAccess](pdh.CounterTypeRaw, "MSExchange RpcClientAccess", pdh.InstancesAll) c.perfDataCollectorRpcClientAccess, err = pdh.NewCollector[perfDataCounterValuesRpcClientAccess](c.logger, pdh.CounterTypeRaw, "MSExchange RpcClientAccess", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange RpcClientAccess collector: %w", err) return fmt.Errorf("failed to create MSExchange RpcClientAccess collector: %w", err)
} }

View File

@@ -77,7 +77,7 @@ type perfDataCounterValuesTransportQueues struct {
func (c *Collector) buildTransportQueues() error { func (c *Collector) buildTransportQueues() error {
var err error var err error
c.perfDataCollectorTransportQueues, err = pdh.NewCollector[perfDataCounterValuesTransportQueues](pdh.CounterTypeRaw, "MSExchangeTransport Queues", pdh.InstancesAll) c.perfDataCollectorTransportQueues, err = pdh.NewCollector[perfDataCounterValuesTransportQueues](c.logger, pdh.CounterTypeRaw, "MSExchangeTransport Queues", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchangeTransport Queues collector: %w", err) return fmt.Errorf("failed to create MSExchangeTransport Queues collector: %w", err)
} }

View File

@@ -49,7 +49,7 @@ type perfDataCounterValuesWorkloadManagementWorkloads struct {
func (c *Collector) buildWorkloadManagementWorkloads() error { func (c *Collector) buildWorkloadManagementWorkloads() error {
var err error var err error
c.perfDataCollectorWorkloadManagementWorkloads, err = pdh.NewCollector[perfDataCounterValuesWorkloadManagementWorkloads](pdh.CounterTypeRaw, "MSExchange WorkloadManagement Workloads", pdh.InstancesAll) c.perfDataCollectorWorkloadManagementWorkloads, err = pdh.NewCollector[perfDataCounterValuesWorkloadManagementWorkloads](c.logger, pdh.CounterTypeRaw, "MSExchange WorkloadManagement Workloads", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MSExchange WorkloadManagement Workloads collector: %w", err) return fmt.Errorf("failed to create MSExchange WorkloadManagement Workloads collector: %w", err)
} }

View File

@@ -15,7 +15,7 @@
//go:build windows //go:build windows
package filetime package file
import ( import (
"fmt" "fmt"
@@ -33,7 +33,7 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
) )
const Name = "filetime" const Name = "file"
type Config struct { type Config struct {
FilePatterns []string `yaml:"file-patterns"` FilePatterns []string `yaml:"file-patterns"`
@@ -50,6 +50,7 @@ type Collector struct {
logger *slog.Logger logger *slog.Logger
fileMTime *prometheus.Desc fileMTime *prometheus.Desc
fileSize *prometheus.Desc
} }
func New(config *Config) *Collector { func New(config *Config) *Collector {
@@ -75,7 +76,7 @@ func NewWithFlags(app *kingpin.Application) *Collector {
c.config.FilePatterns = make([]string, 0) c.config.FilePatterns = make([]string, 0)
app.Flag( app.Flag(
"collector.filetime.file-patterns", "collector.file.file-patterns",
"Comma-separated list of file patterns. Each pattern is a glob pattern that can contain `*`, `?`, and `**` (recursive). See https://github.com/bmatcuk/doublestar#patterns", "Comma-separated list of file patterns. Each pattern is a glob pattern that can contain `*`, `?`, and `**` (recursive). See https://github.com/bmatcuk/doublestar#patterns",
).Default(strings.Join(ConfigDefaults.FilePatterns, ",")).StringsVar(&c.config.FilePatterns) ).Default(strings.Join(ConfigDefaults.FilePatterns, ",")).StringsVar(&c.config.FilePatterns)
@@ -93,7 +94,7 @@ func (c *Collector) Close() error {
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name)) c.logger = logger.With(slog.String("collector", Name))
c.logger.Info("filetime collector is in an experimental state! It may subject to change.") c.logger.Info("file collector is in an experimental state! It may subject to change.")
c.fileMTime = prometheus.NewDesc( c.fileMTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "mtime_timestamp_seconds"), prometheus.BuildFQName(types.Namespace, Name, "mtime_timestamp_seconds"),
@@ -102,6 +103,13 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
nil, nil,
) )
c.fileSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "size_bytes"),
"File size",
[]string{"file"},
nil,
)
for _, filePattern := range c.config.FilePatterns { for _, filePattern := range c.config.FilePatterns {
basePath, pattern := doublestar.SplitPattern(filePattern) basePath, pattern := doublestar.SplitPattern(filePattern)
@@ -163,6 +171,13 @@ func (c *Collector) collectGlobFilePath(ch chan<- prometheus.Metric, filePattern
filePath, filePath,
) )
ch <- prometheus.MustNewConstMetric(
c.fileSize,
prometheus.GaugeValue,
float64(fileInfo.Size()),
filePath,
)
return nil return nil
}, doublestar.WithFilesOnly(), doublestar.WithCaseInsensitive()) }, doublestar.WithFilesOnly(), doublestar.WithCaseInsensitive())
if err != nil { if err != nil {
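Editor's note: the renamed file collector now exports a size metric next to mtime. A standalone sketch of the underlying doublestar walk, mirroring the SplitPattern/GlobWalk/WithFilesOnly calls in the hunk above; the pattern and print statements are illustrative only.

```go
package main

import (
	"fmt"
	"os"

	"github.com/bmatcuk/doublestar/v4"
)

func main() {
	// Split an absolute glob into a base directory and a relative pattern,
	// then walk every matching file, reporting mtime and size.
	basePath, pattern := doublestar.SplitPattern(`C:/logs/**/*.log`)

	err := doublestar.GlobWalk(os.DirFS(basePath), pattern, func(path string, d os.DirEntry) error {
		info, err := d.Info()
		if err != nil {
			return err
		}

		fmt.Printf("%s mtime=%d size=%d\n", path, info.ModTime().Unix(), info.Size())

		return nil
	}, doublestar.WithFilesOnly(), doublestar.WithCaseInsensitive())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```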

View File

@@ -15,21 +15,21 @@
//go:build windows //go:build windows
package filetime_test package file_test
import ( import (
"testing" "testing"
"github.com/prometheus-community/windows_exporter/internal/collector/filetime" "github.com/prometheus-community/windows_exporter/internal/collector/file"
"github.com/prometheus-community/windows_exporter/internal/utils/testutils" "github.com/prometheus-community/windows_exporter/internal/utils/testutils"
) )
func BenchmarkCollector(b *testing.B) { func BenchmarkCollector(b *testing.B) {
testutils.FuncBenchmarkCollector(b, filetime.Name, filetime.NewWithFlags) testutils.FuncBenchmarkCollector(b, file.Name, file.NewWithFlags)
} }
func TestCollector(t *testing.T) { func TestCollector(t *testing.T) {
testutils.TestCollector(t, filetime.New, &filetime.Config{ testutils.TestCollector(t, file.New, &file.Config{
FilePatterns: []string{"*.*"}, FilePatterns: []string{"*.*"},
}) })
} }

View File

@@ -21,9 +21,9 @@ import (
"errors" "errors"
"fmt" "fmt"
"log/slog" "log/slog"
"strconv"
"github.com/alecthomas/kingpin/v2" "github.com/alecthomas/kingpin/v2"
"github.com/prometheus-community/windows_exporter/internal/headers/cfgmgr32"
"github.com/prometheus-community/windows_exporter/internal/headers/gdi32" "github.com/prometheus-community/windows_exporter/internal/headers/gdi32"
"github.com/prometheus-community/windows_exporter/internal/mi" "github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/pdh" "github.com/prometheus-community/windows_exporter/internal/pdh"
@@ -41,7 +41,7 @@ var ConfigDefaults = Config{}
type Collector struct { type Collector struct {
config Config config Config
gpuDeviceCache map[string]gdi32.GPUDevice gpuDeviceCache map[string]gpuDevice
// GPU Engine // GPU Engine
gpuEnginePerfDataCollector *pdh.Collector gpuEnginePerfDataCollector *pdh.Collector
@@ -85,6 +85,12 @@ type Collector struct {
gpuProcessMemoryTotalCommitted *prometheus.Desc gpuProcessMemoryTotalCommitted *prometheus.Desc
} }
type gpuDevice struct {
gdi32 gdi32.GPUDevice
cfgmgr32 cfgmgr32.Device
ID string
}
func New(config *Config) *Collector { func New(config *Config) *Collector {
if config == nil { if config == nil {
config = &ConfigDefaults config = &ConfigDefaults
@@ -115,129 +121,129 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
var err error var err error
c.gpuInfo = prometheus.NewDesc( c.gpuInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "info"), prometheus.BuildFQName(types.Namespace, Name, "info"),
"A metric with a constant '1' value labeled with gpu device information.", "A metric with a constant '1' value labeled with gpu device information.",
[]string{"luid", "name", "bus_number", "phys", "function_number"}, []string{"luid", "device_id", "name", "bus_number", "phys", "function_number"},
nil, nil,
) )
c.gpuSharedSystemMemorySize = prometheus.NewDesc( c.gpuSharedSystemMemorySize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "shared_system_memory_size_bytes"), prometheus.BuildFQName(types.Namespace, Name, "shared_system_memory_size_bytes"),
"The size, in bytes, of memory from system memory that can be shared by many users.", "The size, in bytes, of memory from system memory that can be shared by many users.",
[]string{"luid"}, []string{"luid", "device_id"},
nil, nil,
) )
c.gpuDedicatedSystemMemorySize = prometheus.NewDesc( c.gpuDedicatedSystemMemorySize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dedicated_system_memory_size_bytes"), prometheus.BuildFQName(types.Namespace, Name, "dedicated_system_memory_size_bytes"),
"The size, in bytes, of memory that is dedicated from system memory.", "The size, in bytes, of memory that is dedicated from system memory.",
[]string{"luid"}, []string{"luid", "device_id"},
nil, nil,
) )
c.gpuDedicatedVideoMemorySize = prometheus.NewDesc( c.gpuDedicatedVideoMemorySize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "dedicated_video_memory_size_bytes"), prometheus.BuildFQName(types.Namespace, Name, "dedicated_video_memory_size_bytes"),
"The size, in bytes, of memory that is dedicated from video memory.", "The size, in bytes, of memory that is dedicated from video memory.",
[]string{"luid"}, []string{"luid", "device_id"},
nil, nil,
) )
c.gpuEngineRunningTime = prometheus.NewDesc( c.gpuEngineRunningTime = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "engine_time_seconds"), prometheus.BuildFQName(types.Namespace, Name, "engine_time_seconds"),
"Total running time of the GPU in seconds.", "Total running time of the GPU in seconds.",
[]string{"process_id", "luid", "phys", "eng", "engtype"}, []string{"process_id", "luid", "device_id", "phys", "eng", "engtype"},
nil, nil,
) )
c.gpuAdapterMemoryDedicatedUsage = prometheus.NewDesc( c.gpuAdapterMemoryDedicatedUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "adapter_memory_dedicated_bytes"), prometheus.BuildFQName(types.Namespace, Name, "adapter_memory_dedicated_bytes"),
"Dedicated GPU memory usage in bytes.", "Dedicated GPU memory usage in bytes.",
[]string{"luid", "phys"}, []string{"luid", "device_id", "phys"},
nil, nil,
) )
c.gpuAdapterMemorySharedUsage = prometheus.NewDesc( c.gpuAdapterMemorySharedUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "adapter_memory_shared_bytes"), prometheus.BuildFQName(types.Namespace, Name, "adapter_memory_shared_bytes"),
"Shared GPU memory usage in bytes.", "Shared GPU memory usage in bytes.",
[]string{"luid", "phys"}, []string{"luid", "device_id", "phys"},
nil, nil,
) )
c.gpuAdapterMemoryTotalCommitted = prometheus.NewDesc( c.gpuAdapterMemoryTotalCommitted = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "adapter_memory_committed_bytes"), prometheus.BuildFQName(types.Namespace, Name, "adapter_memory_committed_bytes"),
"Total committed GPU memory in bytes.", "Total committed GPU memory in bytes.",
[]string{"luid", "phys"}, []string{"luid", "device_id", "phys"},
nil, nil,
) )
c.gpuLocalAdapterMemoryUsage = prometheus.NewDesc( c.gpuLocalAdapterMemoryUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "local_adapter_memory_bytes"), prometheus.BuildFQName(types.Namespace, Name, "local_adapter_memory_bytes"),
"Local adapter memory usage in bytes.", "Local adapter memory usage in bytes.",
[]string{"luid", "phys"}, []string{"luid", "device_id", "phys", "part"},
nil, nil,
) )
c.gpuNonLocalAdapterMemoryUsage = prometheus.NewDesc( c.gpuNonLocalAdapterMemoryUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "non_local_adapter_memory_bytes"), prometheus.BuildFQName(types.Namespace, Name, "non_local_adapter_memory_bytes"),
"Non-local adapter memory usage in bytes.", "Non-local adapter memory usage in bytes.",
[]string{"luid", "phys"}, []string{"luid", "device_id", "phys", "part"},
nil, nil,
) )
c.gpuProcessMemoryDedicatedUsage = prometheus.NewDesc( c.gpuProcessMemoryDedicatedUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "process_memory_dedicated_bytes"), prometheus.BuildFQName(types.Namespace, Name, "process_memory_dedicated_bytes"),
"Dedicated process memory usage in bytes.", "Dedicated process memory usage in bytes.",
[]string{"process_id", "luid", "phys"}, []string{"process_id", "luid", "device_id", "phys"},
nil, nil,
) )
c.gpuProcessMemoryLocalUsage = prometheus.NewDesc( c.gpuProcessMemoryLocalUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "process_memory_local_bytes"), prometheus.BuildFQName(types.Namespace, Name, "process_memory_local_bytes"),
"Local process memory usage in bytes.", "Local process memory usage in bytes.",
[]string{"process_id", "luid", "phys"}, []string{"process_id", "luid", "device_id", "phys"},
nil, nil,
) )
c.gpuProcessMemoryNonLocalUsage = prometheus.NewDesc( c.gpuProcessMemoryNonLocalUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "process_memory_non_local_bytes"), prometheus.BuildFQName(types.Namespace, Name, "process_memory_non_local_bytes"),
"Non-local process memory usage in bytes.", "Non-local process memory usage in bytes.",
[]string{"process_id", "luid", "phys"}, []string{"process_id", "luid", "device_id", "phys"},
nil, nil,
) )
c.gpuProcessMemorySharedUsage = prometheus.NewDesc( c.gpuProcessMemorySharedUsage = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "process_memory_shared_bytes"), prometheus.BuildFQName(types.Namespace, Name, "process_memory_shared_bytes"),
"Shared process memory usage in bytes.", "Shared process memory usage in bytes.",
[]string{"process_id", "luid", "phys"}, []string{"process_id", "luid", "device_id", "phys"},
nil, nil,
) )
c.gpuProcessMemoryTotalCommitted = prometheus.NewDesc( c.gpuProcessMemoryTotalCommitted = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "process_memory_committed_bytes"), prometheus.BuildFQName(types.Namespace, Name, "process_memory_committed_bytes"),
"Total committed process memory in bytes.", "Total committed process memory in bytes.",
[]string{"process_id", "luid", "phys"}, []string{"process_id", "luid", "device_id", "phys"},
nil, nil,
) )
errs := make([]error, 0) errs := make([]error, 0)
c.gpuEnginePerfDataCollector, err = pdh.NewCollector[gpuEnginePerfDataCounterValues](pdh.CounterTypeRaw, "GPU Engine", pdh.InstancesAll) c.gpuEnginePerfDataCollector, err = pdh.NewCollector[gpuEnginePerfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "GPU Engine", pdh.InstancesAll)
if err != nil { if err != nil {
errs = append(errs, fmt.Errorf("failed to create GPU Engine perf data collector: %w", err)) errs = append(errs, fmt.Errorf("failed to create GPU Engine perf data collector: %w", err))
} }
c.gpuAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuAdapterMemoryPerfDataCounterValues](pdh.CounterTypeRaw, "GPU Adapter Memory", pdh.InstancesAll) c.gpuAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuAdapterMemoryPerfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "GPU Adapter Memory", pdh.InstancesAll)
if err != nil { if err != nil {
errs = append(errs, fmt.Errorf("failed to create GPU Adapter Memory perf data collector: %w", err)) errs = append(errs, fmt.Errorf("failed to create GPU Adapter Memory perf data collector: %w", err))
} }
c.gpuLocalAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuLocalAdapterMemoryPerfDataCounterValues](pdh.CounterTypeRaw, "GPU Local Adapter Memory", pdh.InstancesAll) c.gpuLocalAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuLocalAdapterMemoryPerfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "GPU Local Adapter Memory", pdh.InstancesAll)
if err != nil { if err != nil {
errs = append(errs, fmt.Errorf("failed to create GPU Local Adapter Memory perf data collector: %w", err)) errs = append(errs, fmt.Errorf("failed to create GPU Local Adapter Memory perf data collector: %w", err))
} }
c.gpuNonLocalAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuNonLocalAdapterMemoryPerfDataCounterValues](pdh.CounterTypeRaw, "GPU Non Local Adapter Memory", pdh.InstancesAll) c.gpuNonLocalAdapterMemoryPerfDataCollector, err = pdh.NewCollector[gpuNonLocalAdapterMemoryPerfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "GPU Non Local Adapter Memory", pdh.InstancesAll)
if err != nil { if err != nil {
errs = append(errs, fmt.Errorf("failed to create GPU Non Local Adapter Memory perf data collector: %w", err)) errs = append(errs, fmt.Errorf("failed to create GPU Non Local Adapter Memory perf data collector: %w", err))
} }
c.gpuProcessMemoryPerfDataCollector, err = pdh.NewCollector[gpuProcessMemoryPerfDataCounterValues](pdh.CounterTypeRaw, "GPU Process Memory", pdh.InstancesAll) c.gpuProcessMemoryPerfDataCollector, err = pdh.NewCollector[gpuProcessMemoryPerfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "GPU Process Memory", pdh.InstancesAll)
if err != nil { if err != nil {
errs = append(errs, fmt.Errorf("failed to create GPU Process Memory perf data collector: %w", err)) errs = append(errs, fmt.Errorf("failed to create GPU Process Memory perf data collector: %w", err))
} }
@@ -252,12 +258,57 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
continue continue
} }
// Skip Microsoft Basic Render Driver
// https://devicehunt.com/view/type/pci/vendor/1414/device/008C
if gpu.DeviceID == `PCI\VEN_1414&DEV_008C&SUBSYS_00000000&REV_00` {
continue
}
if c.gpuDeviceCache == nil { if c.gpuDeviceCache == nil {
c.gpuDeviceCache = make(map[string]gdi32.GPUDevice) c.gpuDeviceCache = make(map[string]gpuDevice)
} }
luidKey := fmt.Sprintf("0x%08X_0x%08X", gpu.LUID.HighPart, gpu.LUID.LowPart) luidKey := fmt.Sprintf("0x%08X_0x%08X", gpu.LUID.HighPart, gpu.LUID.LowPart)
c.gpuDeviceCache[luidKey] = gpu
deviceID := gpu.DeviceID
cfgmgr32Devs, err := cfgmgr32.GetDevicesInstanceIDs(gpu.DeviceID)
if err != nil {
errs = append(errs, fmt.Errorf("failed to get device instance IDs for device ID %s: %w", gpu.DeviceID, err))
}
var cfgmgr32Dev cfgmgr32.Device
for _, dev := range cfgmgr32Devs {
if dev.BusNumber == gpu.BusNumber && dev.DeviceNumber == gpu.DeviceNumber && dev.FunctionNumber == gpu.FunctionNumber {
cfgmgr32Dev = dev
break
}
}
if cfgmgr32Dev.InstanceID == "" {
errs = append(errs, fmt.Errorf("failed to find matching device for device ID %s", gpu.DeviceID))
} else {
deviceID = cfgmgr32Dev.InstanceID
}
c.gpuDeviceCache[luidKey] = gpuDevice{
gdi32: gpu,
cfgmgr32: cfgmgr32Dev,
ID: deviceID,
}
logger.Debug("Found GPU device",
slog.String("collector", Name),
slog.String("name", gpu.AdapterString),
slog.String("luid", luidKey),
slog.String("device_id", deviceID),
slog.String("name", gpu.AdapterString),
slog.Uint64("bus_number", uint64(gpu.BusNumber)),
slog.Uint64("device_number", uint64(gpu.DeviceNumber)),
slog.Uint64("function_number", uint64(gpu.FunctionNumber)),
)
} }
return errors.Join(errs...) return errors.Join(errs...)
@@ -298,31 +349,32 @@ func (c *Collector) collectGpuInfo(ch chan<- prometheus.Metric) {
prometheus.GaugeValue, prometheus.GaugeValue,
1.0, 1.0,
luid, luid,
gpu.AdapterString, gpu.ID,
strconv.FormatInt(int64(gpu.BusNumber), 10), gpu.gdi32.AdapterString,
strconv.FormatInt(int64(gpu.DeviceNumber), 10), gpu.gdi32.BusNumber.String(),
strconv.FormatInt(int64(gpu.FunctionNumber), 10), gpu.gdi32.DeviceNumber.String(),
gpu.gdi32.FunctionNumber.String(),
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuSharedSystemMemorySize, c.gpuSharedSystemMemorySize,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(gpu.SharedSystemMemorySize), float64(gpu.gdi32.SharedSystemMemorySize),
luid, luid, gpu.ID,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuDedicatedSystemMemorySize, c.gpuDedicatedSystemMemorySize,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(gpu.DedicatedSystemMemorySize), float64(gpu.gdi32.DedicatedSystemMemorySize),
luid, luid, gpu.ID,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuDedicatedVideoMemorySize, c.gpuDedicatedVideoMemorySize,
prometheus.GaugeValue, prometheus.GaugeValue,
float64(gpu.DedicatedVideoMemorySize), float64(gpu.gdi32.DedicatedVideoMemorySize),
luid, luid, gpu.ID,
) )
} }
} }
@@ -333,31 +385,20 @@ func (c *Collector) collectGpuEngineMetrics(ch chan<- prometheus.Metric) error {
return fmt.Errorf("failed to collect GPU Engine perf data: %w", err) return fmt.Errorf("failed to collect GPU Engine perf data: %w", err)
} }
runningTimeMap := make(map[PidPhysEngEngType]float64)
// Iterate over the GPU Engine perf data and aggregate the values. // Iterate over the GPU Engine perf data and aggregate the values.
for _, data := range c.gpuEnginePerfDataObject { for _, data := range c.gpuEnginePerfDataObject {
instance := parseGPUCounterInstanceString(data.Name) instance := parseGPUCounterInstanceString(data.Name)
if _, ok := c.gpuDeviceCache[instance.Luid]; !ok { device, ok := c.gpuDeviceCache[instance.Luid]
if !ok {
continue continue
} }
key := PidPhysEngEngType{
Pid: instance.Pid,
Phys: instance.Phys,
Luid: instance.Luid,
Eng: instance.Eng,
Engtype: instance.Engtype,
}
runningTimeMap[key] += data.RunningTime / 10_000_000 // RunningTime is in 100ns units, convert to seconds.
}
for key, runningTime := range runningTimeMap {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuEngineRunningTime, c.gpuEngineRunningTime,
prometheus.CounterValue, prometheus.CounterValue,
runningTime, data.RunningTime/10_000_000,
key.Pid, key.Luid, key.Phys, key.Eng, key.Engtype, instance.Pid, instance.Luid, device.ID, instance.Phys, instance.Eng, instance.Engtype,
) )
} }
@@ -370,49 +411,33 @@ func (c *Collector) collectGpuAdapterMemoryMetrics(ch chan<- prometheus.Metric)
return fmt.Errorf("failed to collect GPU Adapter Memory perf data: %w", err) return fmt.Errorf("failed to collect GPU Adapter Memory perf data: %w", err)
} }
dedicatedUsageMap := make(map[PidPhysEngEngType]float64)
sharedUsageMap := make(map[PidPhysEngEngType]float64)
totalCommittedMap := make(map[PidPhysEngEngType]float64)
for _, data := range c.gpuAdapterMemoryPerfDataObject { for _, data := range c.gpuAdapterMemoryPerfDataObject {
instance := parseGPUCounterInstanceString(data.Name) instance := parseGPUCounterInstanceString(data.Name)
if _, ok := c.gpuDeviceCache[instance.Luid]; !ok { device, ok := c.gpuDeviceCache[instance.Luid]
if !ok {
continue continue
} }
key := PidPhysEngEngType{
Pid: instance.Pid,
Luid: instance.Luid,
Phys: instance.Phys,
Eng: instance.Eng,
Engtype: instance.Engtype,
}
dedicatedUsageMap[key] += data.DedicatedUsage
sharedUsageMap[key] += data.SharedUsage
totalCommittedMap[key] += data.TotalCommitted
}
for key, dedicatedUsage := range dedicatedUsageMap {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuAdapterMemoryDedicatedUsage, c.gpuAdapterMemoryDedicatedUsage,
prometheus.GaugeValue, prometheus.GaugeValue,
dedicatedUsage, data.DedicatedUsage,
key.Luid, key.Phys, instance.Luid, device.ID, instance.Phys,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuAdapterMemorySharedUsage, c.gpuAdapterMemorySharedUsage,
prometheus.GaugeValue, prometheus.GaugeValue,
sharedUsageMap[key], data.SharedUsage,
key.Luid, key.Phys, instance.Luid, device.ID, instance.Phys,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuAdapterMemoryTotalCommitted, c.gpuAdapterMemoryTotalCommitted,
prometheus.GaugeValue, prometheus.GaugeValue,
totalCommittedMap[key], data.TotalCommitted,
key.Luid, key.Phys, instance.Luid, device.ID, instance.Phys,
) )
} }
@@ -425,29 +450,19 @@ func (c *Collector) collectGpuLocalAdapterMemoryMetrics(ch chan<- prometheus.Met
return fmt.Errorf("failed to collect GPU Local Adapter Memory perf data: %w", err) return fmt.Errorf("failed to collect GPU Local Adapter Memory perf data: %w", err)
} }
localAdapterMemoryMap := make(map[PidPhysEngEngType]float64)
for _, data := range c.gpuLocalAdapterMemoryPerfDataObject { for _, data := range c.gpuLocalAdapterMemoryPerfDataObject {
instance := parseGPUCounterInstanceString(data.Name) instance := parseGPUCounterInstanceString(data.Name)
if _, ok := c.gpuDeviceCache[instance.Luid]; !ok { device, ok := c.gpuDeviceCache[instance.Luid]
if !ok {
continue continue
} }
key := PidPhysEngEngType{
Luid: instance.Luid,
Phys: instance.Phys,
}
localAdapterMemoryMap[key] += data.LocalUsage
}
for key, localUsage := range localAdapterMemoryMap {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuLocalAdapterMemoryUsage, c.gpuLocalAdapterMemoryUsage,
prometheus.GaugeValue, prometheus.GaugeValue,
localUsage, data.LocalUsage,
key.Luid, key.Phys, instance.Luid, device.ID, instance.Phys, instance.Part,
) )
} }
@@ -460,28 +475,19 @@ func (c *Collector) collectGpuNonLocalAdapterMemoryMetrics(ch chan<- prometheus.
return fmt.Errorf("failed to collect GPU Non Local Adapter Memory perf data: %w", err) return fmt.Errorf("failed to collect GPU Non Local Adapter Memory perf data: %w", err)
} }
nonLocalAdapterMemoryMap := make(map[PidPhysEngEngType]float64)
for _, data := range c.gpuNonLocalAdapterMemoryPerfDataObject { for _, data := range c.gpuNonLocalAdapterMemoryPerfDataObject {
instance := parseGPUCounterInstanceString(data.Name) instance := parseGPUCounterInstanceString(data.Name)
if _, ok := c.gpuDeviceCache[instance.Luid]; !ok { device, ok := c.gpuDeviceCache[instance.Luid]
if !ok {
continue continue
} }
key := PidPhysEngEngType{
Luid: instance.Luid,
Phys: instance.Phys,
}
nonLocalAdapterMemoryMap[key] += data.NonLocalUsage
}
for key, nonLocalUsage := range nonLocalAdapterMemoryMap {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuNonLocalAdapterMemoryUsage, c.gpuNonLocalAdapterMemoryUsage,
prometheus.GaugeValue, prometheus.GaugeValue,
nonLocalUsage, data.NonLocalUsage,
key.Luid, key.Phys, instance.Luid, device.ID, instance.Phys, instance.Part,
) )
} }
@@ -494,65 +500,47 @@ func (c *Collector) collectGpuProcessMemoryMetrics(ch chan<- prometheus.Metric)
return fmt.Errorf("failed to collect GPU Process Memory perf data: %w", err) return fmt.Errorf("failed to collect GPU Process Memory perf data: %w", err)
} }
processDedicatedUsageMap := make(map[PidPhys]float64)
processLocalUsageMap := make(map[PidPhys]float64)
processNonLocalUsageMap := make(map[PidPhys]float64)
processSharedUsageMap := make(map[PidPhys]float64)
processTotalCommittedMap := make(map[PidPhys]float64)
for _, data := range c.gpuProcessMemoryPerfDataObject { for _, data := range c.gpuProcessMemoryPerfDataObject {
instance := parseGPUCounterInstanceString(data.Name) instance := parseGPUCounterInstanceString(data.Name)
if _, ok := c.gpuDeviceCache[instance.Luid]; !ok { device, ok := c.gpuDeviceCache[instance.Luid]
if !ok {
continue continue
} }
key := PidPhys{
Pid: instance.Pid,
Luid: instance.Luid,
Phys: instance.Phys,
}
processDedicatedUsageMap[key] += data.DedicatedUsage
processLocalUsageMap[key] += data.LocalUsage
processNonLocalUsageMap[key] += data.NonLocalUsage
processSharedUsageMap[key] += data.SharedUsage
processTotalCommittedMap[key] += data.TotalCommitted
}
for key, dedicatedUsage := range processDedicatedUsageMap {
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuProcessMemoryDedicatedUsage, c.gpuProcessMemoryDedicatedUsage,
prometheus.GaugeValue, prometheus.GaugeValue,
dedicatedUsage, data.DedicatedUsage,
key.Pid, key.Luid, key.Phys, instance.Pid, instance.Luid, device.ID, instance.Phys,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuProcessMemoryLocalUsage, c.gpuProcessMemoryLocalUsage,
prometheus.GaugeValue, prometheus.GaugeValue,
processLocalUsageMap[key], data.LocalUsage,
key.Pid, key.Luid, key.Phys, instance.Pid, instance.Luid, device.ID, instance.Phys,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuProcessMemoryNonLocalUsage, c.gpuProcessMemoryNonLocalUsage,
prometheus.GaugeValue, prometheus.GaugeValue,
processNonLocalUsageMap[key], data.NonLocalUsage,
key.Pid, key.Luid, key.Phys, instance.Pid, instance.Luid, device.ID, instance.Phys,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuProcessMemorySharedUsage, c.gpuProcessMemorySharedUsage,
prometheus.GaugeValue, prometheus.GaugeValue,
processSharedUsageMap[key], data.SharedUsage,
key.Pid, key.Luid, key.Phys, instance.Pid, instance.Luid, device.ID, instance.Phys,
) )
ch <- prometheus.MustNewConstMetric( ch <- prometheus.MustNewConstMetric(
c.gpuProcessMemoryTotalCommitted, c.gpuProcessMemoryTotalCommitted,
prometheus.GaugeValue, prometheus.GaugeValue,
processTotalCommittedMap[key], data.TotalCommitted,
key.Pid, key.Luid, key.Phys, instance.Pid, instance.Luid, device.ID, instance.Phys,
) )
} }

View File

@@ -23,26 +23,29 @@ import (
) )
type Instance struct { type Instance struct {
Pid string Pid string
Luid string Luid string
Phys string DeviceID string
Eng string Phys string
Engtype string Eng string
Part string Engtype string
Part string
} }
type PidPhys struct { type PidPhys struct {
Pid string Pid string
Luid string Luid string
Phys string DeviceID string
Phys string
} }
type PidPhysEngEngType struct { type PidPhysEngEngType struct {
Pid string Pid string
Luid string Luid string
Phys string DeviceID string
Eng string Phys string
Engtype string Eng string
Engtype string
} }
func parseGPUCounterInstanceString(s string) Instance { func parseGPUCounterInstanceString(s string) Instance {
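Editor's note: the struct changes above add a DeviceID field alongside the fields parsed out of the raw counter instance name. For orientation, GPU counter instances typically look like the string below; this is a hedged sketch of splitting out the pid/luid/phys/eng/engtype tokens with plain string handling — the exporter's own parser may differ.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative instance name in the shape exposed by the "GPU Engine" counter set.
	instance := "pid_4972_luid_0x00000000_0x0000C9D3_phys_0_eng_0_engtype_3D"

	fields := map[string]string{}
	parts := strings.Split(instance, "_")

	for i := 0; i+1 < len(parts); i += 2 {
		// The luid spans two tokens (high and low part), so stitch them back together.
		if parts[i] == "luid" && i+2 < len(parts) {
			fields["luid"] = parts[i+1] + "_" + parts[i+2]
			i++ // consumed one extra token

			continue
		}

		fields[parts[i]] = parts[i+1]
	}

	fmt.Println(fields["pid"], fields["luid"], fields["phys"], fields["eng"], fields["engtype"])
}
```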

View File

@@ -95,6 +95,7 @@ type Collector struct {
collectorVirtualSwitch collectorVirtualSwitch
config Config config Config
logger *slog.Logger
collectorFns []func(ch chan<- prometheus.Metric) error collectorFns []func(ch chan<- prometheus.Metric) error
closeFns []func() closeFns []func()
@@ -151,6 +152,7 @@ func (c *Collector) Close() error {
} }
func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.logger = logger.With(slog.String("collector", Name))
c.collectorFns = make([]func(ch chan<- prometheus.Metric) error, 0, len(c.config.CollectorsEnabled)) c.collectorFns = make([]func(ch chan<- prometheus.Metric) error, 0, len(c.config.CollectorsEnabled))
c.closeFns = make([]func(), 0, len(c.config.CollectorsEnabled)) c.closeFns = make([]func(), 0, len(c.config.CollectorsEnabled))
@@ -256,10 +258,10 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
} }
if buildNumber < subCollectors[name].minBuildNumber { if buildNumber < subCollectors[name].minBuildNumber {
logger.Warn(fmt.Sprintf( c.logger.Warn(fmt.Sprintf(
"collector %s requires windows build version %d. Current build version: %d", "collector %s requires windows build version %d. Current build version: %d",
name, subCollectors[name].minBuildNumber, buildNumber, name, subCollectors[name].minBuildNumber, buildNumber,
), slog.String("collector", name)) ))
continue continue
} }

View File

@@ -132,7 +132,7 @@ type perfDataCounterValuesDataStore struct {
func (c *Collector) buildDataStore() error { func (c *Collector) buildDataStore() error {
var err error var err error
c.perfDataCollectorDataStore, err = pdh.NewCollector[perfDataCounterValuesDataStore](pdh.CounterTypeRaw, "Hyper-V DataStore", pdh.InstancesAll) c.perfDataCollectorDataStore, err = pdh.NewCollector[perfDataCounterValuesDataStore](c.logger, pdh.CounterTypeRaw, "Hyper-V DataStore", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V DataStore collector: %w", err) return fmt.Errorf("failed to create Hyper-V DataStore collector: %w", err)
} }

View File

@@ -52,7 +52,7 @@ func (c *Collector) buildDynamicMemoryBalancer() error {
var err error var err error
// https://learn.microsoft.com/en-us/archive/blogs/chrisavis/monitoring-dynamic-memory-in-windows-server-hyper-v-2012 // https://learn.microsoft.com/en-us/archive/blogs/chrisavis/monitoring-dynamic-memory-in-windows-server-hyper-v-2012
c.perfDataCollectorDynamicMemoryBalancer, err = pdh.NewCollector[perfDataCounterValuesDynamicMemoryBalancer](pdh.CounterTypeRaw, "Hyper-V Dynamic Memory Balancer", pdh.InstancesAll) c.perfDataCollectorDynamicMemoryBalancer, err = pdh.NewCollector[perfDataCounterValuesDynamicMemoryBalancer](c.logger, pdh.CounterTypeRaw, "Hyper-V Dynamic Memory Balancer", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Machine Health Summary collector: %w", err) return fmt.Errorf("failed to create Hyper-V Virtual Machine Health Summary collector: %w", err)
} }

View File

@@ -63,7 +63,7 @@ type perfDataCounterValuesDynamicMemoryVM struct {
func (c *Collector) buildDynamicMemoryVM() error { func (c *Collector) buildDynamicMemoryVM() error {
var err error var err error
c.perfDataCollectorDynamicMemoryVM, err = pdh.NewCollector[perfDataCounterValuesDynamicMemoryVM](pdh.CounterTypeRaw, "Hyper-V Dynamic Memory VM", pdh.InstancesAll) c.perfDataCollectorDynamicMemoryVM, err = pdh.NewCollector[perfDataCounterValuesDynamicMemoryVM](c.logger, pdh.CounterTypeRaw, "Hyper-V Dynamic Memory VM", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Dynamic Memory VM collector: %w", err) return fmt.Errorf("failed to create Hyper-V Dynamic Memory VM collector: %w", err)
} }

View File

@@ -52,7 +52,7 @@ type perfDataCounterValuesHypervisorLogicalProcessor struct {
func (c *Collector) buildHypervisorLogicalProcessor() error { func (c *Collector) buildHypervisorLogicalProcessor() error {
var err error var err error
c.perfDataCollectorHypervisorLogicalProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorLogicalProcessor](pdh.CounterTypeRaw, "Hyper-V Hypervisor Logical Processor", pdh.InstancesAll) c.perfDataCollectorHypervisorLogicalProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorLogicalProcessor](c.logger, pdh.CounterTypeRaw, "Hyper-V Hypervisor Logical Processor", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Logical Processor collector: %w", err) return fmt.Errorf("failed to create Hyper-V Hypervisor Logical Processor collector: %w", err)
} }

View File

@@ -80,7 +80,7 @@ type perfDataCounterValuesHypervisorRootPartition struct {
func (c *Collector) buildHypervisorRootPartition() error { func (c *Collector) buildHypervisorRootPartition() error {
var err error var err error
c.perfDataCollectorHypervisorRootPartition, err = pdh.NewCollector[perfDataCounterValuesHypervisorRootPartition](pdh.CounterTypeRaw, "Hyper-V Hypervisor Root Partition", []string{"Root"}) c.perfDataCollectorHypervisorRootPartition, err = pdh.NewCollector[perfDataCounterValuesHypervisorRootPartition](c.logger, pdh.CounterTypeRaw, "Hyper-V Hypervisor Root Partition", []string{"Root"})
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Root Partition collector: %w", err) return fmt.Errorf("failed to create Hyper-V Hypervisor Root Partition collector: %w", err)
} }

View File

@@ -53,7 +53,7 @@ type perfDataCounterValuesHypervisorRootVirtualProcessor struct {
func (c *Collector) buildHypervisorRootVirtualProcessor() error { func (c *Collector) buildHypervisorRootVirtualProcessor() error {
var err error var err error
c.perfDataCollectorHypervisorRootVirtualProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorRootVirtualProcessor](pdh.CounterTypeRaw, "Hyper-V Hypervisor Root Virtual Processor", pdh.InstancesAll) c.perfDataCollectorHypervisorRootVirtualProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorRootVirtualProcessor](c.logger, pdh.CounterTypeRaw, "Hyper-V Hypervisor Root Virtual Processor", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Root Virtual Processor collector: %w", err) return fmt.Errorf("failed to create Hyper-V Hypervisor Root Virtual Processor collector: %w", err)
} }

View File

@@ -52,7 +52,7 @@ type perfDataCounterValuesHypervisorVirtualProcessor struct {
func (c *Collector) buildHypervisorVirtualProcessor() error { func (c *Collector) buildHypervisorVirtualProcessor() error {
var err error var err error
c.perfDataCollectorHypervisorVirtualProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorVirtualProcessor](pdh.CounterTypeRaw, "Hyper-V Hypervisor Virtual Processor", pdh.InstancesAll) c.perfDataCollectorHypervisorVirtualProcessor, err = pdh.NewCollector[perfDataCounterValuesHypervisorVirtualProcessor](c.logger, pdh.CounterTypeRaw, "Hyper-V Hypervisor Virtual Processor", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Hypervisor Virtual Processor collector: %w", err) return fmt.Errorf("failed to create Hyper-V Hypervisor Virtual Processor collector: %w", err)
} }

View File

@@ -52,7 +52,7 @@ type perfDataCounterValuesLegacyNetworkAdapter struct {
func (c *Collector) buildLegacyNetworkAdapter() error { func (c *Collector) buildLegacyNetworkAdapter() error {
var err error var err error
c.perfDataCollectorLegacyNetworkAdapter, err = pdh.NewCollector[perfDataCounterValuesLegacyNetworkAdapter](pdh.CounterTypeRaw, "Hyper-V Legacy Network Adapter", pdh.InstancesAll) c.perfDataCollectorLegacyNetworkAdapter, err = pdh.NewCollector[perfDataCounterValuesLegacyNetworkAdapter](c.logger, pdh.CounterTypeRaw, "Hyper-V Legacy Network Adapter", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Legacy Network Adapter collector: %w", err) return fmt.Errorf("failed to create Hyper-V Legacy Network Adapter collector: %w", err)
} }

View File

@@ -44,7 +44,7 @@ type perfDataCounterValuesVirtualMachineHealthSummary struct {
func (c *Collector) buildVirtualMachineHealthSummary() error { func (c *Collector) buildVirtualMachineHealthSummary() error {
var err error var err error
c.perfDataCollectorVirtualMachineHealthSummary, err = pdh.NewCollector[perfDataCounterValuesVirtualMachineHealthSummary](pdh.CounterTypeRaw, "Hyper-V Virtual Machine Health Summary", nil) c.perfDataCollectorVirtualMachineHealthSummary, err = pdh.NewCollector[perfDataCounterValuesVirtualMachineHealthSummary](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual Machine Health Summary", nil)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Machine Health Summary collector: %w", err) return fmt.Errorf("failed to create Hyper-V Virtual Machine Health Summary collector: %w", err)
} }

View File

@@ -46,7 +46,7 @@ type perfDataCounterValuesVirtualMachineVidPartition struct {
func (c *Collector) buildVirtualMachineVidPartition() error { func (c *Collector) buildVirtualMachineVidPartition() error {
var err error var err error
c.perfDataCollectorVirtualMachineVidPartition, err = pdh.NewCollector[perfDataCounterValuesVirtualMachineVidPartition](pdh.CounterTypeRaw, "Hyper-V VM Vid Partition", pdh.InstancesAll) c.perfDataCollectorVirtualMachineVidPartition, err = pdh.NewCollector[perfDataCounterValuesVirtualMachineVidPartition](c.logger, pdh.CounterTypeRaw, "Hyper-V VM Vid Partition", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V VM Vid Partition collector: %w", err) return fmt.Errorf("failed to create Hyper-V VM Vid Partition collector: %w", err)
} }

View File

@@ -52,7 +52,7 @@ type perfDataCounterValuesVirtualNetworkAdapter struct {
func (c *Collector) buildVirtualNetworkAdapter() error { func (c *Collector) buildVirtualNetworkAdapter() error {
var err error var err error
c.perfDataCollectorVirtualNetworkAdapter, err = pdh.NewCollector[perfDataCounterValuesVirtualNetworkAdapter](pdh.CounterTypeRaw, "Hyper-V Virtual Network Adapter", pdh.InstancesAll) c.perfDataCollectorVirtualNetworkAdapter, err = pdh.NewCollector[perfDataCounterValuesVirtualNetworkAdapter](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual Network Adapter", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Network Adapter collector: %w", err) return fmt.Errorf("failed to create Hyper-V Virtual Network Adapter collector: %w", err)
} }

View File

@@ -167,7 +167,7 @@ type perfDataCounterValuesVirtualNetworkAdapterDropReasons struct {
func (c *Collector) buildVirtualNetworkAdapterDropReasons() error { func (c *Collector) buildVirtualNetworkAdapterDropReasons() error {
var err error var err error
c.perfDataCollectorVirtualNetworkAdapterDropReasons, err = pdh.NewCollector[perfDataCounterValuesVirtualNetworkAdapterDropReasons](pdh.CounterTypeRaw, "Hyper-V Virtual Network Adapter Drop Reasons", pdh.InstancesAll) c.perfDataCollectorVirtualNetworkAdapterDropReasons, err = pdh.NewCollector[perfDataCounterValuesVirtualNetworkAdapterDropReasons](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual Network Adapter Drop Reasons", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Network Adapter Drop Reasons collector: %w", err) return fmt.Errorf("failed to create Hyper-V Virtual Network Adapter Drop Reasons collector: %w", err)
} }

View File

@@ -74,7 +74,7 @@ type perfDataCounterValuesVirtualSMB struct {
func (c *Collector) buildVirtualSMB() error { func (c *Collector) buildVirtualSMB() error {
var err error var err error
c.perfDataCollectorVirtualSMB, err = pdh.NewCollector[perfDataCounterValuesVirtualSMB](pdh.CounterTypeRaw, "Hyper-V Virtual SMB", pdh.InstancesAll) c.perfDataCollectorVirtualSMB, err = pdh.NewCollector[perfDataCounterValuesVirtualSMB](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual SMB", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual SMB collector: %w", err) return fmt.Errorf("failed to create Hyper-V Virtual SMB collector: %w", err)
} }

View File

@@ -64,7 +64,7 @@ type perfDataCounterValuesVirtualStorageDevice struct {
func (c *Collector) buildVirtualStorageDevice() error { func (c *Collector) buildVirtualStorageDevice() error {
var err error var err error
c.perfDataCollectorVirtualStorageDevice, err = pdh.NewCollector[perfDataCounterValuesVirtualStorageDevice](pdh.CounterTypeRaw, "Hyper-V Virtual Storage Device", pdh.InstancesAll) c.perfDataCollectorVirtualStorageDevice, err = pdh.NewCollector[perfDataCounterValuesVirtualStorageDevice](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual Storage Device", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Storage Device collector: %w", err) return fmt.Errorf("failed to create Hyper-V Virtual Storage Device collector: %w", err)
} }

View File

@@ -82,7 +82,7 @@ type perfDataCounterValuesVirtualSwitch struct {
func (c *Collector) buildVirtualSwitch() error { func (c *Collector) buildVirtualSwitch() error {
var err error var err error
c.perfDataCollectorVirtualSwitch, err = pdh.NewCollector[perfDataCounterValuesVirtualSwitch](pdh.CounterTypeRaw, "Hyper-V Virtual Switch", pdh.InstancesAll) c.perfDataCollectorVirtualSwitch, err = pdh.NewCollector[perfDataCounterValuesVirtualSwitch](c.logger, pdh.CounterTypeRaw, "Hyper-V Virtual Switch", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Hyper-V Virtual Switch collector: %w", err) return fmt.Errorf("failed to create Hyper-V Virtual Switch collector: %w", err)
} }

View File

@@ -79,7 +79,7 @@ var applicationStates = map[uint32]string{
func (c *Collector) buildAppPoolWAS() error { func (c *Collector) buildAppPoolWAS() error {
var err error var err error
c.perfDataCollectorAppPoolWAS, err = pdh.NewCollector[perfDataCounterValuesAppPoolWAS](pdh.CounterTypeRaw, "APP_POOL_WAS", pdh.InstancesAll) c.perfDataCollectorAppPoolWAS, err = pdh.NewCollector[perfDataCounterValuesAppPoolWAS](c.logger, pdh.CounterTypeRaw, "APP_POOL_WAS", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create APP_POOL_WAS collector: %w", err) return fmt.Errorf("failed to create APP_POOL_WAS collector: %w", err)
} }

View File

@@ -54,7 +54,7 @@ func (c *Collector) buildHttpServiceRequestQueues() error {
c.logger.Info("IIS/HttpServiceRequestQueues collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.") c.logger.Info("IIS/HttpServiceRequestQueues collector is in an experimental state! The configuration and metrics may change in future. Please report any issues.")
c.perfDataCollectorHttpServiceRequestQueues, err = pdh.NewCollector[perfDataCounterValuesHttpServiceRequestQueues](pdh.CounterTypeRaw, "HTTP Service Request Queues", pdh.InstancesAll) c.perfDataCollectorHttpServiceRequestQueues, err = pdh.NewCollector[perfDataCounterValuesHttpServiceRequestQueues](c.logger, pdh.CounterTypeRaw, "HTTP Service Request Queues", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Http Service collector: %w", err) return fmt.Errorf("failed to create Http Service collector: %w", err)
} }

View File

@@ -152,13 +152,13 @@ func (p perfDataCounterValuesW3SVCW3WPV8) GetName() string {
func (c *Collector) buildW3SVCW3WP() error { func (c *Collector) buildW3SVCW3WP() error {
var err error var err error
c.w3SVCW3WPPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesW3SVCW3WP](pdh.CounterTypeRaw, "W3SVC_W3WP", pdh.InstancesAll) c.w3SVCW3WPPerfDataCollector, err = pdh.NewCollector[perfDataCounterValuesW3SVCW3WP](c.logger, pdh.CounterTypeRaw, "W3SVC_W3WP", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create W3SVC_W3WP collector: %w", err) return fmt.Errorf("failed to create W3SVC_W3WP collector: %w", err)
} }
if c.iisVersion.major >= 8 { if c.iisVersion.major >= 8 {
c.w3SVCW3WPPerfDataCollectorV8, err = pdh.NewCollector[perfDataCounterValuesW3SVCW3WPV8](pdh.CounterTypeRaw, "W3SVC_W3WP", pdh.InstancesAll) c.w3SVCW3WPPerfDataCollectorV8, err = pdh.NewCollector[perfDataCounterValuesW3SVCW3WPV8](c.logger, pdh.CounterTypeRaw, "W3SVC_W3WP", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create W3SVC_W3WP collector: %w", err) return fmt.Errorf("failed to create W3SVC_W3WP collector: %w", err)
} }

View File

@@ -102,7 +102,7 @@ func (p perfDataCounterValuesWebService) GetName() string {
func (c *Collector) buildWebService() error { func (c *Collector) buildWebService() error {
var err error var err error
c.perfDataCollectorWebService, err = pdh.NewCollector[perfDataCounterValuesWebService](pdh.CounterTypeRaw, "Web Service", pdh.InstancesAll) c.perfDataCollectorWebService, err = pdh.NewCollector[perfDataCounterValuesWebService](c.logger, pdh.CounterTypeRaw, "Web Service", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Web Service collector: %w", err) return fmt.Errorf("failed to create Web Service collector: %w", err)
} }

View File

@@ -103,7 +103,7 @@ type perfDataCounterServiceCache struct {
func (c *Collector) buildWebServiceCache() error { func (c *Collector) buildWebServiceCache() error {
var err error var err error
c.serviceCachePerfDataCollector, err = pdh.NewCollector[perfDataCounterServiceCache](pdh.CounterTypeRaw, "Web Service Cache", pdh.InstancesAll) c.serviceCachePerfDataCollector, err = pdh.NewCollector[perfDataCounterServiceCache](c.logger, pdh.CounterTypeRaw, "Web Service Cache", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Web Service Cache collector: %w", err) return fmt.Errorf("failed to create Web Service Cache collector: %w", err)
} }

View File

@@ -332,7 +332,7 @@ func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll) c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "LogicalDisk", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create LogicalDisk collector: %w", err) return fmt.Errorf("failed to create LogicalDisk collector: %w", err)
} }
@@ -775,6 +775,7 @@ func (c *Collector) workerBitlocker(ctx context.Context, initErrCh chan<- error)
// Otherwise, attempting to initialize and run parallel queries across // Otherwise, attempting to initialize and run parallel queries across
// goroutines will result in protected memory errors. // goroutines will result in protected memory errors.
runtime.LockOSThread() runtime.LockOSThread()
defer runtime.UnlockOSThread() defer runtime.UnlockOSThread()
if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil { if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {
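Editor's note: the bitlocker worker above pins its goroutine to one OS thread before initializing COM, because apartment-threaded COM objects must be created and used from the thread that called CoInitializeEx. A minimal sketch of that pattern with go-ole; the worker body and channel wiring are illustrative, and the actual WMI queries are omitted.

```go
package main

import (
	"fmt"
	"runtime"

	"github.com/go-ole/go-ole"
)

func comWorker(done chan<- error) {
	// COM apartment threading is per OS thread: the goroutine must stay on the
	// thread that called CoInitializeEx for as long as it uses COM objects.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	if err := ole.CoInitializeEx(0, ole.COINIT_APARTMENTTHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {
		done <- err

		return
	}
	defer ole.CoUninitialize()

	// ... create and query COM/WMI objects on this thread only ...

	done <- nil
}

func main() {
	done := make(chan error, 1)
	go comWorker(done)
	fmt.Println("worker finished, err:", <-done)
}
```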

View File

@@ -111,7 +111,7 @@ func (c *Collector) Close() error {
return nil return nil
} }
func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error { func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.availableBytes = prometheus.NewDesc( c.availableBytes = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "available_bytes"), prometheus.BuildFQName(types.Namespace, Name, "available_bytes"),
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+ "The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
@@ -337,7 +337,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error var err error
c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "Memory", pdh.InstancesAll) c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "Memory", pdh.InstancesAll)
if err != nil { if err != nil {
return fmt.Errorf("failed to create Memory collector: %w", err) return fmt.Errorf("failed to create Memory collector: %w", err)
} }

View File

@@ -38,6 +38,8 @@ const (
subCollectorNode = "node"
subCollectorResource = "resource"
subCollectorResourceGroup = "resourcegroup"
+subCollectorSharedVolumes = "shared_volumes"
+subCollectorVirtualDisk = "virtualdisk"
)
type Config struct {
@@ -52,6 +54,8 @@ var ConfigDefaults = Config{
subCollectorNode,
subCollectorResource,
subCollectorResourceGroup,
+subCollectorSharedVolumes,
+subCollectorVirtualDisk,
},
}
@@ -62,6 +66,8 @@ type Collector struct {
collectorNode
collectorResource
collectorResourceGroup
+collectorSharedVolumes
+collectorVirtualDisk
config Config
miSession *mi.Session
@@ -156,6 +162,18 @@ func (c *Collector) Build(_ *slog.Logger, miSession *mi.Session) error {
}
}
+if slices.Contains(c.config.CollectorsEnabled, subCollectorSharedVolumes) {
+if err := c.buildSharedVolumes(); err != nil {
+errs = append(errs, fmt.Errorf("failed to build shared_volumes collector: %w", err))
+}
+}
+if slices.Contains(c.config.CollectorsEnabled, subCollectorVirtualDisk) {
+if err := c.buildVirtualDisk(); err != nil {
+errs = append(errs, fmt.Errorf("failed to build virtualdisk collector: %w", err))
+}
+}
return errors.Join(errs...)
}
@@ -166,10 +184,10 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
return nil
}
-errCh := make(chan error, 5)
+errCh := make(chan error, 6)
wg := sync.WaitGroup{}
-wg.Add(5)
+wg.Add(6)
go func() {
defer wg.Done()
@@ -226,6 +244,22 @@ func (c *Collector) Collect(ch chan<- prometheus.Metric) error {
}()
}()
+go func() {
+defer wg.Done()
+if slices.Contains(c.config.CollectorsEnabled, subCollectorSharedVolumes) {
+if err := c.collectSharedVolumes(ch); err != nil {
+errCh <- fmt.Errorf("failed to collect shared_volumes metrics: %w", err)
+}
+}
+if slices.Contains(c.config.CollectorsEnabled, subCollectorVirtualDisk) {
+if err := c.collectVirtualDisk(ch); err != nil {
+errCh <- fmt.Errorf("failed to collect virtualdisk metrics: %w", err)
+}
+}
+}()
wg.Wait()
close(errCh)
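The error channel and WaitGroup both grow from 5 to 6 because the two new sub-collectors run inside one additional goroutine. A minimal sketch of the fan-out/fan-in shape used here, assuming hypothetical collectA/collectB functions and the usual errors, fmt, sync, and Prometheus client imports:

    // Sketch only: collectA and collectB stand in for the per-sub-collector functions.
    func collectAll(ch chan<- prometheus.Metric) error {
        errCh := make(chan error, 2) // sized so each goroutine can report an error without blocking
        wg := sync.WaitGroup{}
        wg.Add(2)

        go func() {
            defer wg.Done()
            if err := collectA(ch); err != nil {
                errCh <- fmt.Errorf("failed to collect A metrics: %w", err)
            }
        }()
        go func() {
            defer wg.Done()
            if err := collectB(ch); err != nil {
                errCh <- fmt.Errorf("failed to collect B metrics: %w", err)
            }
        }()

        wg.Wait()
        close(errCh) // safe once all senders have finished

        errs := make([]error, 0, len(errCh))
        for err := range errCh {
            errs = append(errs, err)
        }

        return errors.Join(errs...)
    }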

View File

@@ -0,0 +1,122 @@
// SPDX-License-Identifier: Apache-2.0
//
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package mscluster
import (
"fmt"
"strings"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
const nameSharedVolumes = Name + "_shared_volumes"
type collectorSharedVolumes struct {
sharedVolumesMIQuery mi.Query
sharedVolumesInfo *prometheus.Desc
sharedVolumesTotalSize *prometheus.Desc
sharedVolumesFreeSpace *prometheus.Desc
}
// msClusterDiskPartition represents the MSCluster_DiskPartition WMI class
type msClusterDiskPartition struct {
Name string `mi:"Name"`
Path string `mi:"Path"`
TotalSize uint64 `mi:"TotalSize"`
FreeSpace uint64 `mi:"FreeSpace"`
Volume string `mi:"VolumeLabel"`
VolumeGuid string `mi:"VolumeGuid"`
}
func (c *Collector) buildSharedVolumes() error {
sharedVolumesMIQuery, err := mi.NewQuery("SELECT Name, Path, TotalSize, FreeSpace, VolumeLabel, VolumeGuid FROM MSCluster_DiskPartition")
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.sharedVolumesMIQuery = sharedVolumesMIQuery
c.sharedVolumesInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameSharedVolumes, "info"),
"Cluster Shared Volumes information (value is always 1)",
[]string{"name", "path", "volume_guid"},
nil,
)
c.sharedVolumesTotalSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameSharedVolumes, "total_bytes"),
"Total size of the Cluster Shared Volume in bytes",
[]string{"name", "volume_guid"},
nil,
)
c.sharedVolumesFreeSpace = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameSharedVolumes, "free_bytes"),
"Free space on the Cluster Shared Volume in bytes",
[]string{"name", "volume_guid"},
nil,
)
var dst []msClusterDiskPartition
if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, c.sharedVolumesMIQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}
return nil
}
func (c *Collector) collectSharedVolumes(ch chan<- prometheus.Metric) error {
var dst []msClusterDiskPartition
if err := c.miSession.Query(&dst, mi.NamespaceRootMSCluster, c.sharedVolumesMIQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}
for _, partition := range dst {
volume := strings.TrimRight(partition.Volume, " ")
ch <- prometheus.MustNewConstMetric(
c.sharedVolumesInfo,
prometheus.GaugeValue,
1.0,
volume,
partition.Path,
partition.VolumeGuid,
)
ch <- prometheus.MustNewConstMetric(
c.sharedVolumesTotalSize,
prometheus.GaugeValue,
float64(partition.TotalSize)*1024*1024, // Convert from MB to bytes
volume,
partition.VolumeGuid,
)
ch <- prometheus.MustNewConstMetric(
c.sharedVolumesFreeSpace,
prometheus.GaugeValue,
float64(partition.FreeSpace)*1024*1024, // Convert from MB to bytes
volume,
partition.VolumeGuid,
)
}
return nil
}
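The ×1024×1024 factor assumes MSCluster_DiskPartition reports TotalSize and FreeSpace in megabytes. A worked example with made-up numbers; the metric names below assume the exporter's usual windows_ prefix and the mscluster collector name:

    // Hedged worked example, not exporter code: a partition reported with
    // TotalSize=512000 and FreeSpace=128000 (megabytes) would be exposed roughly as
    //   windows_mscluster_shared_volumes_total_bytes = 512000 * 1024 * 1024 = 536870912000
    //   windows_mscluster_shared_volumes_free_bytes  = 128000 * 1024 * 1024 = 134217728000
    func exampleSharedVolumeBytes(totalSizeMB, freeSpaceMB uint64) (totalBytes, freeBytes float64) {
        totalBytes = float64(totalSizeMB) * 1024 * 1024
        freeBytes = float64(freeSpaceMB) * 1024 * 1024
        return totalBytes, freeBytes
    }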

View File

@@ -0,0 +1,156 @@
// SPDX-License-Identifier: Apache-2.0
//
// Copyright The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
package mscluster
import (
"fmt"
"github.com/prometheus-community/windows_exporter/internal/mi"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
)
const nameVirtualDisk = Name + "_virtualdisk"
type collectorVirtualDisk struct {
virtualDiskMIQuery mi.Query
virtualDiskInfo *prometheus.Desc
virtualDiskHealthStatus *prometheus.Desc
virtualDiskSize *prometheus.Desc
virtualDiskFootprintOnPool *prometheus.Desc
virtualDiskStorageEfficiency *prometheus.Desc
}
// msftVirtualDisk represents the MSFT_VirtualDisk WMI class
type msftVirtualDisk struct {
FriendlyName string `mi:"FriendlyName"`
UniqueId string `mi:"UniqueId"`
HealthStatus uint16 `mi:"HealthStatus"`
Size uint64 `mi:"Size"`
FootprintOnPool uint64 `mi:"FootprintOnPool"`
// OperationalStatus []uint16 `mi:"OperationalStatus"` Not supported by mi query: https://github.com/prometheus-community/windows_exporter/pull/2296#issuecomment-3736584632
}
func (c *Collector) buildVirtualDisk() error {
wmiSelect := "FriendlyName,UniqueId,HealthStatus,Size,FootprintOnPool"
virtualDiskMIQuery, err := mi.NewQuery(fmt.Sprintf("SELECT %s FROM MSFT_VirtualDisk", wmiSelect))
if err != nil {
return fmt.Errorf("failed to create WMI query: %w", err)
}
c.virtualDiskMIQuery = virtualDiskMIQuery
c.virtualDiskInfo = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameVirtualDisk, "info"),
"Virtual Disk information (value is always 1)",
[]string{"name", "unique_id"},
nil,
)
c.virtualDiskHealthStatus = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameVirtualDisk, "health_status"),
"Health status of the virtual disk. 0: Healthy, 1: Warning, 2: Unhealthy, 5: Unknown",
[]string{"name", "unique_id"},
nil,
)
c.virtualDiskSize = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameVirtualDisk, "size_bytes"),
"Total size of the virtual disk in bytes",
[]string{"name", "unique_id"},
nil,
)
c.virtualDiskFootprintOnPool = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameVirtualDisk, "footprint_on_pool_bytes"),
"Physical storage consumed by the virtual disk on the storage pool in bytes",
[]string{"name", "unique_id"},
nil,
)
c.virtualDiskStorageEfficiency = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, nameVirtualDisk, "storage_efficiency_percent"),
"Storage efficiency percentage (Size / FootprintOnPool * 100)",
[]string{"name", "unique_id"},
nil,
)
return nil
}
func (c *Collector) collectVirtualDisk(ch chan<- prometheus.Metric) error {
var dst []msftVirtualDisk
if err := c.miSession.Query(&dst, mi.NamespaceRootStorage, c.virtualDiskMIQuery); err != nil {
return fmt.Errorf("WMI query failed: %w", err)
}
for _, vdisk := range dst {
ch <- prometheus.MustNewConstMetric(
c.virtualDiskInfo,
prometheus.GaugeValue,
1.0,
vdisk.FriendlyName,
vdisk.UniqueId,
)
ch <- prometheus.MustNewConstMetric(
c.virtualDiskHealthStatus,
prometheus.GaugeValue,
float64(vdisk.HealthStatus),
vdisk.FriendlyName,
vdisk.UniqueId,
)
ch <- prometheus.MustNewConstMetric(
c.virtualDiskSize,
prometheus.GaugeValue,
float64(vdisk.Size),
vdisk.FriendlyName,
vdisk.UniqueId,
)
ch <- prometheus.MustNewConstMetric(
c.virtualDiskFootprintOnPool,
prometheus.GaugeValue,
float64(vdisk.FootprintOnPool),
vdisk.FriendlyName,
vdisk.UniqueId,
)
// Calculate storage efficiency (avoid division by zero)
var storageEfficiency float64
if vdisk.FootprintOnPool > 0 {
storageEfficiency = float64(vdisk.Size) / float64(vdisk.FootprintOnPool) * 100
} else {
storageEfficiency = 0
}
ch <- prometheus.MustNewConstMetric(
c.virtualDiskStorageEfficiency,
prometheus.GaugeValue,
storageEfficiency,
vdisk.FriendlyName,
vdisk.UniqueId,
)
}
return nil
}
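Storage efficiency compares the usable size with the physical capacity the virtual disk consumes on the pool, so resiliency overhead pushes the gauge below 100% while thin provisioning can push it above. A worked example with made-up values:

    // Hedged worked example, not exporter code: a 1 TiB two-way mirror typically
    // consumes about 2 TiB on the pool, so the efficiency gauge reports ~50%.
    func exampleStorageEfficiency() float64 {
        size := float64(uint64(1) << 40)      // 1 TiB usable capacity
        footprint := float64(uint64(2) << 40) // ~2 TiB footprint on the pool
        return size / footprint * 100         // 50.0
    }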

View File

@@ -75,7 +75,7 @@ func (c *Collector) Close() error {
return nil
}
-func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
+func (c *Collector) Build(logger *slog.Logger, _ *mi.Session) error {
c.bytesInJournalQueue = prometheus.NewDesc(
prometheus.BuildFQName(types.Namespace, Name, "bytes_in_journal_queue"),
"Size of queue journal in bytes",
@@ -103,7 +103,7 @@ func (c *Collector) Build(_ *slog.Logger, _ *mi.Session) error {
var err error
-c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll)
+c.perfDataCollector, err = pdh.NewCollector[perfDataCounterValues](logger.With(slog.String("collector", Name)), pdh.CounterTypeRaw, "MSMQ Queue", pdh.InstancesAll)
if err != nil {
return fmt.Errorf("failed to create MSMQ Queue collector: %w", err)
}

View File

@@ -130,7 +130,7 @@ func (c *Collector) buildAccessMethods() error {
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
-c.accessMethodsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAccessMethods](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Access Methods"), nil)
+c.accessMethodsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAccessMethods](c.logger, pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Access Methods"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create AccessMethods collector for instance %s: %w", sqlInstance.name, err))
}

View File

@@ -63,7 +63,7 @@ func (c *Collector) buildAvailabilityReplica() error {
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
-c.availabilityReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAvailabilityReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), pdh.InstancesAll)
+c.availabilityReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesAvailabilityReplica](c.logger, pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Availability Replica"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Availability Replica collector for instance %s: %w", sqlInstance.name, err))
}

View File

@@ -88,7 +88,7 @@ func (c *Collector) buildBufferManager() error {
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
-c.bufManPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesBufMan](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), nil)
+c.bufManPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesBufMan](c.logger, pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Buffer Manager"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Buffer Manager collector for instance %s: %w", sqlInstance.name, err))
}

View File

@@ -148,13 +148,13 @@ func (c *Collector) buildDatabases() error {
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
-c.databasesPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll)
+c.databasesPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases](c.logger, pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Databases collector for instance %s: %w", sqlInstance.name, err))
}
if sqlInstance.isVersionGreaterOrEqualThan(serverVersion2019) {
-c.databasesPerfDataCollectors2019[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases2019](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll)
+c.databasesPerfDataCollectors2019[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDatabases2019](c.logger, pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Databases"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Databases 2019 collector for instance %s: %w", sqlInstance.name, err))
}

View File

@@ -92,7 +92,7 @@ func (c *Collector) buildDatabaseReplica() error {
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
-c.dbReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDBReplica](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), pdh.InstancesAll)
+c.dbReplicaPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesDBReplica](c.logger, pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Database Replica"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Database Replica collector for instance %s: %w", sqlInstance.name, err))
}

View File

@@ -90,7 +90,7 @@ func (c *Collector) buildGeneralStatistics() error {
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
-c.genStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesGenStats](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), nil)
+c.genStatsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesGenStats](c.logger, pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "General Statistics"), nil)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create General Statistics collector for instance %s: %w", sqlInstance.name, err))
}

View File

@@ -18,34 +18,63 @@
package mssql
import (
+"fmt"
+"log/slog"
"github.com/prometheus-community/windows_exporter/internal/types"
"github.com/prometheus/client_golang/prometheus"
+"golang.org/x/sys/windows/registry"
)
type collectorInstance struct {
-instances *prometheus.GaugeVec
+instances *prometheus.Desc
}
func (c *Collector) buildInstance() error {
-c.instances = prometheus.NewGaugeVec(
-prometheus.GaugeOpts{
-Namespace: types.Namespace,
-Subsystem: Name,
-Name: "instance_info",
-Help: "A metric with a constant '1' value labeled with mssql instance information",
-},
+c.instances = prometheus.NewDesc(
+prometheus.BuildFQName(types.Namespace, Name, "instance_info"),
+"A metric with a constant '1' value labeled with mssql instance information",
[]string{"edition", "mssql_instance", "patch", "version"},
+nil,
)
-for _, instance := range c.mssqlInstances {
-c.instances.WithLabelValues(instance.edition, instance.name, instance.patchVersion, instance.majorVersion.String()).Set(1)
-}
return nil
}
func (c *Collector) collectInstance(ch chan<- prometheus.Metric) error {
-c.instances.Collect(ch)
+for _, instance := range c.mssqlInstances {
+regKeyName := fmt.Sprintf(`Software\Microsoft\Microsoft SQL Server\%s\Setup`, instance.instanceName)
+regKey, err := registry.OpenKey(registry.LOCAL_MACHINE, regKeyName, registry.QUERY_VALUE)
+if err != nil {
+c.logger.Debug(fmt.Sprintf("couldn't open registry %s:", regKeyName),
+slog.Any("err", err),
+)
+continue
+}
+patchVersion, _, err := regKey.GetStringValue("PatchLevel")
+_ = regKey.Close()
+if err != nil {
+c.logger.Debug("couldn't get version from registry",
+slog.Any("err", err),
+)
+continue
+}
+ch <- prometheus.MustNewConstMetric(
+c.instances,
+prometheus.GaugeValue,
+1,
+instance.edition,
+instance.name,
+patchVersion,
+instance.majorVersion.String(),
+)
+}
return nil
}
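This diff replaces a GaugeVec populated once at build time with a Desc plus a const metric emitted on every scrape, so the patch level read from the registry reflects the state at collection time. A minimal sketch of that pattern with hypothetical names (the metric name and labels below are illustrative, not the exporter's):

    // Sketch only: infoDesc and collectInfo are placeholders for the pattern above.
    var infoDesc = prometheus.NewDesc(
        "windows_example_instance_info", // hypothetical metric name
        "Constant '1' metric labeled with instance metadata.",
        []string{"mssql_instance", "patch"},
        nil,
    )

    func collectInfo(ch chan<- prometheus.Metric, instance, patch string) {
        // Emitting a const metric per scrape means label values (such as the patch
        // level read from the registry) are re-resolved on every collection.
        ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, 1, instance, patch)
    }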

View File

@@ -61,7 +61,7 @@ func (c *Collector) buildLocks() error {
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
-c.locksPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesLocks](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Locks"), pdh.InstancesAll)
+c.locksPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesLocks](c.logger, pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Locks"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Locks collector for instance %s: %w", sqlInstance.name, err))
}

View File

@@ -82,7 +82,7 @@ func (c *Collector) buildMemoryManager() error {
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
-c.memMgrPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesMemMgr](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), pdh.InstancesAll)
+c.memMgrPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesMemMgr](c.logger, pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "Memory Manager"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create Memory Manager collector for instance %s: %w", sqlInstance.name, err))
}

View File

@@ -47,7 +47,7 @@ func (c *Collector) buildSQLErrors() error {
errs := make([]error, 0, len(c.mssqlInstances))
for _, sqlInstance := range c.mssqlInstances {
-c.sqlErrorsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesSqlErrors](pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), pdh.InstancesAll)
+c.sqlErrorsPerfDataCollectors[sqlInstance], err = pdh.NewCollector[perfDataCounterValuesSqlErrors](c.logger, pdh.CounterTypeRaw, c.mssqlGetPerfObjectName(sqlInstance, "SQL Errors"), pdh.InstancesAll)
if err != nil {
errs = append(errs, fmt.Errorf("failed to create SQL Errors collector for instance %s: %w", sqlInstance.name, err))
}

Some files were not shown because too many files have changed in this diff.