mirror of
https://github.com/prometheus-community/windows_exporter.git
synced 2026-02-08 05:56:37 +00:00
Compare commits
767 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f5a3cf7839 | ||
|
|
72995f4bb1 | ||
|
|
1db0dd909b | ||
|
|
cd23833ff2 | ||
|
|
20d7048478 | ||
|
|
a8fad055ac | ||
|
|
484385d387 | ||
|
|
bf181eee38 | ||
|
|
260e34394a | ||
|
|
d0c6d13e3b | ||
|
|
f10a06f908 | ||
|
|
7479946385 | ||
|
|
66a1e18c32 | ||
|
|
10078892c2 | ||
|
|
f6912cc645 | ||
|
|
c791e5212c | ||
|
|
09bc4e9e1b | ||
|
|
015b7c0666 | ||
|
|
5dda46de98 | ||
|
|
25c1449b91 | ||
|
|
29086368ac | ||
|
|
1f9d29470c | ||
|
|
1493a20262 | ||
|
|
f8c98e60db | ||
|
|
f32342400f | ||
|
|
9a73630b48 | ||
|
|
5da47255e2 | ||
|
|
eb102fd37f | ||
|
|
3aa409590f | ||
|
|
e8991095d4 | ||
|
|
3dbb37deb5 | ||
|
|
53b6816612 | ||
|
|
44b63dc23c | ||
|
|
8b8fd2d3a6 | ||
|
|
134f62511f | ||
|
|
09a83cf01f | ||
|
|
46ed47c585 | ||
|
|
187dbfc4ae | ||
|
|
6ddab61fa5 | ||
|
|
e4b8b5718f | ||
|
|
86f12b9e55 | ||
|
|
48cef00491 | ||
|
|
e9c594473c | ||
|
|
63fb570b23 | ||
|
|
cff611b60c | ||
|
|
9a262579ce | ||
|
|
25d23852d4 | ||
|
|
70dcd80f3d | ||
|
|
c3fa0039ba | ||
|
|
7a6e2d7456 | ||
|
|
68f7efec32 | ||
|
|
15b8276cf3 | ||
|
|
e01dd5e334 | ||
|
|
8f8369a356 | ||
|
|
347759933f | ||
|
|
eb73859393 | ||
|
|
3e2357fd68 | ||
|
|
677a7c8d67 | ||
|
|
8f05e77b0a | ||
|
|
8efca83ac4 | ||
|
|
fb38512f38 | ||
|
|
63800b5c6a | ||
|
|
c1b7ca42c5 | ||
|
|
f8abca5292 | ||
|
|
2ef7c5604a | ||
|
|
a0a81c4a9f | ||
|
|
8e27a9983f | ||
|
|
6d506887cd | ||
|
|
15be1c1bd6 | ||
|
|
7bd2ebc6d0 | ||
|
|
690fe8de86 | ||
|
|
46fa84f9b0 | ||
|
|
4b226cde40 | ||
|
|
cdeceaeca5 | ||
|
|
8061c4e5fa | ||
|
|
981d687e60 | ||
|
|
37ea988125 | ||
|
|
a722cee322 | ||
|
|
b43978eeb4 | ||
|
|
f02f51aceb | ||
|
|
a5f22ebb04 | ||
|
|
1c199e6c0e | ||
|
|
306197fe93 | ||
|
|
45fac2a618 | ||
|
|
716707cd06 | ||
|
|
4b0bcb46d0 | ||
|
|
55312ebdca | ||
|
|
3df660799c | ||
|
|
8ef590ee3a | ||
|
|
c9e28c4c00 | ||
|
|
c5ec339750 | ||
|
|
2602ca04f6 | ||
|
|
8fe8e85559 | ||
|
|
38cfae3e66 | ||
|
|
b2ed5f61b4 | ||
|
|
752d467b12 | ||
|
|
1fab2621eb | ||
|
|
739642bd6a | ||
|
|
a4cf96d94d | ||
|
|
1dd7b58bdf | ||
|
|
c696fb40cf | ||
|
|
ae4bc822e8 | ||
|
|
096949c682 | ||
|
|
da15c9659d | ||
|
|
a12b3e0ea0 | ||
|
|
7e7bdc104f | ||
|
|
682e6967b0 | ||
|
|
93dcdf95f7 | ||
|
|
3619d14844 | ||
|
|
9712fa3bad | ||
|
|
fe7730a51b | ||
|
|
61ea9d049c | ||
|
|
a50fe95370 | ||
|
|
33615c8b58 | ||
|
|
313ffb73bd | ||
|
|
48e54e8513 | ||
|
|
bf5177ed12 | ||
|
|
00f79ebaf4 | ||
|
|
3d50cf4309 | ||
|
|
c7cbc48afc | ||
|
|
0f304413b5 | ||
|
|
b6f12aeb9f | ||
|
|
af523f13bc | ||
|
|
8c7dd7fd5f | ||
|
|
6a186f26f0 | ||
|
|
740e277cf6 | ||
|
|
96e081c7d4 | ||
|
|
3cf0fa347f | ||
|
|
ebabddf558 | ||
|
|
4e76e6938a | ||
|
|
5f9759586e | ||
|
|
7f69cc0acf | ||
|
|
820c6bdb93 | ||
|
|
6042ea3e51 | ||
|
|
641f3222c7 | ||
|
|
9241df5528 | ||
|
|
c443eafe2b | ||
|
|
0cd6fd3f3c | ||
|
|
79a3a1ccec | ||
|
|
603cb54e8f | ||
|
|
329a96de44 | ||
|
|
fd36eae54d | ||
|
|
0dc32bf434 | ||
|
|
306e63a240 | ||
|
|
42c05df272 | ||
|
|
f471cc0e4e | ||
|
|
3c4ae95a8d | ||
|
|
21f4757b63 | ||
|
|
4aba3e1222 | ||
|
|
8f6204f960 | ||
|
|
dd494b11bb | ||
|
|
9f384e3db1 | ||
|
|
68c338b479 | ||
|
|
a01f72a8b0 | ||
|
|
09ec6e68ad | ||
|
|
5dc10096f9 | ||
|
|
1aa00ebca8 | ||
|
|
fb11263c3e | ||
|
|
9df59c75cd | ||
|
|
e112446ce9 | ||
|
|
4f82e02d8d | ||
|
|
c3ed036402 | ||
|
|
393546fe01 | ||
|
|
ca645edde1 | ||
|
|
69d4043ce4 | ||
|
|
fd5135f5ff | ||
|
|
8f08f55cf2 | ||
|
|
92d53d07b2 | ||
|
|
02e2e257c9 | ||
|
|
791df8009c | ||
|
|
aaf202236d | ||
|
|
e9ba751c82 | ||
|
|
ed83cb1c3e | ||
|
|
ac447e5b15 | ||
|
|
4d2a247e50 | ||
|
|
54f86001ad | ||
|
|
bccb3b3296 | ||
|
|
c6285cdf9d | ||
|
|
cde750f76d | ||
|
|
dedd60f02b | ||
|
|
d0c9fc6dbb | ||
|
|
f728224ef2 | ||
|
|
fba7682f01 | ||
|
|
79ecaf6a99 | ||
|
|
6941ee7ab2 | ||
|
|
5a9711dc90 | ||
|
|
5106b829c3 | ||
|
|
e1796c3d42 | ||
|
|
14d3e4ea28 | ||
|
|
36f033ae57 | ||
|
|
0de2fc4af7 | ||
|
|
b450a50103 | ||
|
|
e07b2053af | ||
|
|
7d3c0d3b76 | ||
|
|
27b2ca0b76 | ||
|
|
803a0a9a70 | ||
|
|
4891acba2d | ||
|
|
fa51270218 | ||
|
|
a68e6af15a | ||
|
|
7ad9b6d74a | ||
|
|
9acd5e695e | ||
|
|
d4be3b9f31 | ||
|
|
277f141587 | ||
|
|
2a5c51a236 | ||
|
|
ce205d4c4d | ||
|
|
2ed0ae837c | ||
|
|
57c7911c91 | ||
|
|
a56ec9166b | ||
|
|
e03432a22d | ||
|
|
be004b8423 | ||
|
|
e08a0411d6 | ||
|
|
3d7894049f | ||
|
|
de664d4b93 | ||
|
|
78e026b6ee | ||
|
|
9eba8dd024 | ||
|
|
01100d3e6e | ||
|
|
0f1eb4a936 | ||
|
|
a8eefae123 | ||
|
|
746158d354 | ||
|
|
d9f4264fc4 | ||
|
|
a89b53779d | ||
|
|
27ceeecff3 | ||
|
|
1ba5835af6 | ||
|
|
0db956aa4d | ||
|
|
9d1628a329 | ||
|
|
fc33fa320b | ||
|
|
b6f88cbbdd | ||
|
|
4b9b9e97cb | ||
|
|
3ebe0e937e | ||
|
|
4d771d2bce | ||
|
|
919f90a571 | ||
|
|
c7d07a37ea | ||
|
|
87c21bfa50 | ||
|
|
df4f6b206b | ||
|
|
9e3c585a28 | ||
|
|
e4a43c539b | ||
|
|
03e15a0f80 | ||
|
|
b98a956d51 | ||
|
|
524bfde5a3 | ||
|
|
963cee0a13 | ||
|
|
45e9357ad9 | ||
|
|
6120ea9be1 | ||
|
|
376060b053 | ||
|
|
e04c4aab29 | ||
|
|
479e6b1381 | ||
|
|
f6f7dc96e9 | ||
|
|
f84f54afda | ||
|
|
e22ef6e3cc | ||
|
|
02b69afe8b | ||
|
|
b7a0a09e58 | ||
|
|
6105792f29 | ||
|
|
1fbc626ee2 | ||
|
|
ca07abc1cd | ||
|
|
60583c3366 | ||
|
|
a7dcf5896c | ||
|
|
438cb87fc7 | ||
|
|
f8b6260ab5 | ||
|
|
d2b3f0f94b | ||
|
|
d6b4466bc3 | ||
|
|
ce3d517cb3 | ||
|
|
a6ea021468 | ||
|
|
b58dfdf4f3 | ||
|
|
676eb55f99 | ||
|
|
121d9980c1 | ||
|
|
947d8473e0 | ||
|
|
c1569686f7 | ||
|
|
75966fd37c | ||
|
|
d0cfc14af9 | ||
|
|
941b66d342 | ||
|
|
388195be97 | ||
|
|
bbefd8ac97 | ||
|
|
5b92e1bd3d | ||
|
|
82f17fd607 | ||
|
|
3e37b7b6f0 | ||
|
|
5d29ff6497 | ||
|
|
f4f5aaf146 | ||
|
|
5931604b58 | ||
|
|
67ca5e5ef2 | ||
|
|
384183120f | ||
|
|
a9ac2d4672 | ||
|
|
1b96bb6d08 | ||
|
|
cc45eeb90b | ||
|
|
4b2cd0a024 | ||
|
|
ad447a6b08 | ||
|
|
e4d7604193 | ||
|
|
757f88be04 | ||
|
|
cff484b5e1 | ||
|
|
2dc568b5cd | ||
|
|
448f505729 | ||
|
|
6d1ba11a8e | ||
|
|
0f5a232142 | ||
|
|
bbab591570 | ||
|
|
2bc3c1859a | ||
|
|
7c61a4dc25 | ||
|
|
5a57da53be | ||
|
|
72c46664db | ||
|
|
8689c41c68 | ||
|
|
74eac8f29b | ||
|
|
bb48f1caac | ||
|
|
068d03bd01 | ||
|
|
5072879dca | ||
|
|
0fb7eec670 | ||
|
|
4293497b29 | ||
|
|
95f10f19cb | ||
|
|
288f2a60e7 | ||
|
|
2e32b0e2b1 | ||
|
|
09759a4e8c | ||
|
|
dfd42a6c0c | ||
|
|
576c3bf918 | ||
|
|
19fee044bf | ||
|
|
45a74fdb7f | ||
|
|
db00553ca6 | ||
|
|
a2c4bf6a2d | ||
|
|
7adcac8f39 | ||
|
|
863b7d8ab4 | ||
|
|
33c6b2c6a5 | ||
|
|
6dee2422e1 | ||
|
|
5d224b43ca | ||
|
|
3f2a143104 | ||
|
|
ee3848141c | ||
|
|
df2a7a9ec0 | ||
|
|
05f0f6f688 | ||
|
|
d947d0f6db | ||
|
|
d063bc0842 | ||
|
|
dd473c4807 | ||
|
|
7bd58abd27 | ||
|
|
6f941044c7 | ||
|
|
3da11645cf | ||
|
|
048bff919e | ||
|
|
f76334213d | ||
|
|
71054ac429 | ||
|
|
248b7214e3 | ||
|
|
094558b1f1 | ||
|
|
18495abb69 | ||
|
|
f316d81d50 | ||
|
|
cc709ac380 | ||
|
|
2262b88fac | ||
|
|
795cc5ca85 | ||
|
|
ce0513f69d | ||
|
|
ee146b3710 | ||
|
|
a9752ebc1e | ||
|
|
d54aa033b1 | ||
|
|
3682c1b9af | ||
|
|
5af2a781cb | ||
|
|
7086e0f627 | ||
|
|
682378e170 | ||
|
|
c1fff498c6 | ||
|
|
648b6e0ab5 | ||
|
|
e9abe4d5f5 | ||
|
|
0af38ddbcf | ||
|
|
b615301efc | ||
|
|
25eb64bb3d | ||
|
|
19fbd57f60 | ||
|
|
b9b60f1ea0 | ||
|
|
eaa003f5af | ||
|
|
c5a545540d | ||
|
|
054cf5c5f5 | ||
|
|
cdc81b03d5 | ||
|
|
e141e531ed | ||
|
|
b44d855fe0 | ||
|
|
4b66473d2e | ||
|
|
556138189a | ||
|
|
7456afecae | ||
|
|
8407f4aeb8 | ||
|
|
6b8e9bee3f | ||
|
|
eb15f8ee80 | ||
|
|
2c7bea1892 | ||
|
|
59ba77b87f | ||
|
|
9723aa2218 | ||
|
|
9d03debcb6 | ||
|
|
2837bdfb50 | ||
|
|
a1a986f4d0 | ||
|
|
769b15eb86 | ||
|
|
b5ce53fdac | ||
|
|
ccac306c2d | ||
|
|
df0618e64d | ||
|
|
433e00a20b | ||
|
|
e8ffeaa0d7 | ||
|
|
c93b709f96 | ||
|
|
b300998b4b | ||
|
|
6e0ac6a1fc | ||
|
|
ec6b7210e3 | ||
|
|
704f6e2fe4 | ||
|
|
7a16d111b0 | ||
|
|
82471f39cd | ||
|
|
23dafc93ed | ||
|
|
cdbb27d0b4 | ||
|
|
2fbd0464dc | ||
|
|
f616589c5f | ||
|
|
f623c0ed89 | ||
|
|
ce5c6eed72 | ||
|
|
d7122930d0 | ||
|
|
96aa2cf095 | ||
|
|
6231eb43e8 | ||
|
|
0880ec6a1a | ||
|
|
8f85475725 | ||
|
|
a4aef9b3c7 | ||
|
|
637fc246af | ||
|
|
6b141a128c | ||
|
|
e97a04ed65 | ||
|
|
cdfe3cf258 | ||
|
|
24fe6813b2 | ||
|
|
7eab1fc411 | ||
|
|
78918f7034 | ||
|
|
59e72c7016 | ||
|
|
49c082d594 | ||
|
|
b7b19aafa0 | ||
|
|
3624ea3bba | ||
|
|
898c17e657 | ||
|
|
f9790f03fb | ||
|
|
3708c85611 | ||
|
|
7a5dc3c6f5 | ||
|
|
8f2f9d83f9 | ||
|
|
c5ea575fb1 | ||
|
|
be39c1126a | ||
|
|
6765935d17 | ||
|
|
51dd61beeb | ||
|
|
a30422c31c | ||
|
|
332a903757 | ||
|
|
6e518f21bb | ||
|
|
3bf94cdaf6 | ||
|
|
94bda6aa79 | ||
|
|
380eff24c9 | ||
|
|
2ebea42de5 | ||
|
|
d39d5230ab | ||
|
|
922c08b85b | ||
|
|
a3867b8dbf | ||
|
|
3b2ef6287c | ||
|
|
8d0d7b31b1 | ||
|
|
24470eb17e | ||
|
|
6bcaee5885 | ||
|
|
ea557547dd | ||
|
|
3c90b96cc6 | ||
|
|
a03f9ef01b | ||
|
|
391335a91f | ||
|
|
6367863c43 | ||
|
|
5c7c0aaa69 | ||
|
|
51f1f884e7 | ||
|
|
7d2511b7ab | ||
|
|
f1384759cb | ||
|
|
3316dc502d | ||
|
|
c9f1e5068a | ||
|
|
86dc495aeb | ||
|
|
418b5b3ca9 | ||
|
|
f2d8418e9f | ||
|
|
0fbdfae85c | ||
|
|
501ac3da4c | ||
|
|
c62fe4477f | ||
|
|
eff5f24153 | ||
|
|
e24f2d2d06 | ||
|
|
dd6bfbe963 | ||
|
|
a4f815b5fd | ||
|
|
44d419e8fa | ||
|
|
db19d46eb1 | ||
|
|
cd8d676443 | ||
|
|
a1db538118 | ||
|
|
c1a137f7a8 | ||
|
|
22672fc15f | ||
|
|
10caabe0b1 | ||
|
|
6efa6bed42 | ||
|
|
eaa23705d0 | ||
|
|
6f90d83b83 | ||
|
|
f8f34ab983 | ||
|
|
5052f4d00e | ||
|
|
3c98edaa2c | ||
|
|
2ea20ff628 | ||
|
|
245b2b4d28 | ||
|
|
7742ff6a1c | ||
|
|
99ed969bf7 | ||
|
|
54d94c261b | ||
|
|
05d1e4bde9 | ||
|
|
17324b9fc6 | ||
|
|
7890c9ce91 | ||
|
|
bcb6f2b218 | ||
|
|
31605b5096 | ||
|
|
91a64fecb8 | ||
|
|
9148728b87 | ||
|
|
2290969596 | ||
|
|
1d7747b4d1 | ||
|
|
cba42d24c1 | ||
|
|
58d259a2b6 | ||
|
|
4f89133893 | ||
|
|
af250824f7 | ||
|
|
7f57491fac | ||
|
|
890fdc2996 | ||
|
|
d1a807840c | ||
|
|
74d7332b47 | ||
|
|
22d4f50c83 | ||
|
|
df954ddf9d | ||
|
|
34996b206a | ||
|
|
6dad58fc8f | ||
|
|
8231bc4395 | ||
|
|
baba51bc6a | ||
|
|
b64ccbe683 | ||
|
|
21a02c4fbe | ||
|
|
089bc3b2d4 | ||
|
|
285a165eba | ||
|
|
90b197450e | ||
|
|
0865061210 | ||
|
|
2e50f515d8 | ||
|
|
8be7dc7e83 | ||
|
|
0d4f747f8f | ||
|
|
de285e1043 | ||
|
|
7fde426e88 | ||
|
|
fa12d1476f | ||
|
|
92d0a1d8f0 | ||
|
|
2f46a088de | ||
|
|
1cc4df2bd7 | ||
|
|
feb2b18e6a | ||
|
|
012b938b54 | ||
|
|
a0e5baa171 | ||
|
|
7611e33bc7 | ||
|
|
2aafa9ebf3 | ||
|
|
f9f27b0b97 | ||
|
|
18128f48f5 | ||
|
|
2688847c2e | ||
|
|
1c605adb5e | ||
|
|
d0877d0dc0 | ||
|
|
2cd630fb2f | ||
|
|
b210986181 | ||
|
|
375a74f1e8 | ||
|
|
abd5a53045 | ||
|
|
aa394d1d8e | ||
|
|
bdcc7b0913 | ||
|
|
d7a908e6c0 | ||
|
|
c23a98ae90 | ||
|
|
f8a7c99092 | ||
|
|
29b020999d | ||
|
|
2f0a57898f | ||
|
|
1ad20d6eb8 | ||
|
|
de000b74c8 | ||
|
|
d860d92dc8 | ||
|
|
3a19fe4e7d | ||
|
|
26a468f17a | ||
|
|
a6f3b33928 | ||
|
|
8ef215cc7e | ||
|
|
2c155a12bd | ||
|
|
e1141c3ec0 | ||
|
|
b635ecc6c1 | ||
|
|
a7b5cf7aa6 | ||
|
|
719ccd4f7f | ||
|
|
7ab8c7dde4 | ||
|
|
eb002eb667 | ||
|
|
a1638cdf4c | ||
|
|
091406877a | ||
|
|
84970ac086 | ||
|
|
d86f318010 | ||
|
|
853d615673 | ||
|
|
cd9a740e2b | ||
|
|
c70e7674a5 | ||
|
|
d3e3835c29 | ||
|
|
592c8a8d69 | ||
|
|
6f6a479535 | ||
|
|
d01c66986c | ||
|
|
823ffb7597 | ||
|
|
a90f9cda0f | ||
|
|
31d4c28124 | ||
|
|
e880889f07 | ||
|
|
a283608812 | ||
|
|
8251ddd176 | ||
|
|
4f0a3a89ab | ||
|
|
27cc1072fe | ||
|
|
eb9cf56dee | ||
|
|
3c20887433 | ||
|
|
37d1c4e958 | ||
|
|
33b6e17b2d | ||
|
|
1a9d4afdd6 | ||
|
|
9e198c55a4 | ||
|
|
b309a05bde | ||
|
|
123a055242 | ||
|
|
9308108284 | ||
|
|
0ecf3cd792 | ||
|
|
801444b35b | ||
|
|
f4ab322e5b | ||
|
|
72de199528 | ||
|
|
304972580d | ||
|
|
6322bb124f | ||
|
|
cb6a91b705 | ||
|
|
4d9fb1be72 | ||
|
|
27e26037e3 | ||
|
|
e09497116f | ||
|
|
3099e10555 | ||
|
|
3900504504 | ||
|
|
2c5e30d920 | ||
|
|
b348c245e8 | ||
|
|
578bcc4959 | ||
|
|
31a30474f1 | ||
|
|
ce1005add8 | ||
|
|
6107a59306 | ||
|
|
47656b16bd | ||
|
|
8fc47669be | ||
|
|
1a67ca54b6 | ||
|
|
c73f52338d | ||
|
|
c5f23b4e64 | ||
|
|
411954cf9d | ||
|
|
56be7c63d5 | ||
|
|
6ffe504f7e | ||
|
|
daa6f3d111 | ||
|
|
85fdfb44b8 | ||
|
|
33879449a2 | ||
|
|
462a136673 | ||
|
|
d5e39892cf | ||
|
|
ec0d863c29 | ||
|
|
afc3655a41 | ||
|
|
e25e96a62e | ||
|
|
23d92cfcae | ||
|
|
1258703f23 | ||
|
|
8841091f9c | ||
|
|
517cd3b04b | ||
|
|
9daa8c8775 | ||
|
|
e04d3f414d | ||
|
|
4c69ed1610 | ||
|
|
a171401f57 | ||
|
|
e24e0dc9f5 | ||
|
|
0eab86c731 | ||
|
|
13c68634ce | ||
|
|
73ad1ba960 | ||
|
|
0121fd6471 | ||
|
|
93904954f4 | ||
|
|
f2462b26c8 | ||
|
|
7e05621b26 | ||
|
|
76ddad34b8 | ||
|
|
2053dea3ac | ||
|
|
35b81dcdd0 | ||
|
|
39b0000514 | ||
|
|
76ec763c42 | ||
|
|
7ccc47cc51 | ||
|
|
ad29ac0792 | ||
|
|
d58ce114d9 | ||
|
|
5f9dfcc378 | ||
|
|
f4e5bc3d29 | ||
|
|
f4362c5987 | ||
|
|
f691b48304 | ||
|
|
d12d31a17f | ||
|
|
48d23cfb12 | ||
|
|
17039b8206 | ||
|
|
2993552e19 | ||
|
|
5d4cafc0a1 | ||
|
|
a70c57ffd1 | ||
|
|
b2cb04834a | ||
|
|
f27fdbbbf5 | ||
|
|
7dda8eba03 | ||
|
|
080f80eb26 | ||
|
|
2766f0e3af | ||
|
|
939f4832ee | ||
|
|
9e1d4bbaed | ||
|
|
8ef341a51c | ||
|
|
1fde8bae5b | ||
|
|
48220d825e | ||
|
|
9ed68ae86c | ||
|
|
700bbb37c5 | ||
|
|
cb9da1ae22 | ||
|
|
7de316af9f | ||
|
|
263ab8c444 | ||
|
|
57449c4768 | ||
|
|
fe7e5cb4d8 | ||
|
|
c156f2bcbe | ||
|
|
832771b4a2 | ||
|
|
16fecfbc67 | ||
|
|
bad1e7f7b0 | ||
|
|
c868c00e89 | ||
|
|
5035e97369 | ||
|
|
144715e3d2 | ||
|
|
a20cf1274a | ||
|
|
626a25cd00 | ||
|
|
96dd456bb1 | ||
|
|
af1b8bf4d0 | ||
|
|
d83615a818 | ||
|
|
fe4c61a70e | ||
|
|
143705bbf6 | ||
|
|
e8ffb736d0 | ||
|
|
21e0f926a3 | ||
|
|
8ea862a3da | ||
|
|
bb67658853 | ||
|
|
aecd90dcf1 | ||
|
|
cd365c6a3b | ||
|
|
667d06116d | ||
|
|
f3072bb4f3 | ||
|
|
17072bf257 | ||
|
|
d3d8537201 | ||
|
|
2951a9ef80 | ||
|
|
3141fc3ed3 | ||
|
|
a0333ee256 | ||
|
|
c9fc76de4c | ||
|
|
3752a547d5 | ||
|
|
0ab6c191be | ||
|
|
467e83722a | ||
|
|
7fe8ca8554 | ||
|
|
4b3d1d60d9 | ||
|
|
1358123482 | ||
|
|
ec79488478 | ||
|
|
c3b227a4f2 | ||
|
|
c241513d56 | ||
|
|
5a538d7682 | ||
|
|
a0ec1e2da6 | ||
|
|
353de09798 | ||
|
|
de4838454a | ||
|
|
86d2b8bdc3 | ||
|
|
041ff0351d | ||
|
|
076af99418 | ||
|
|
76bb06b32f | ||
|
|
be5ac7b440 | ||
|
|
63e51a554b | ||
|
|
cf792394f3 | ||
|
|
5db7c0a936 | ||
|
|
2461407277 | ||
|
|
33c5e99e0f | ||
|
|
5ecdfe9498 | ||
|
|
caa46799f8 | ||
|
|
aee1e4b1fd | ||
|
|
69c83b6a39 | ||
|
|
cede267565 | ||
|
|
191debeed6 | ||
|
|
df0db7a54f | ||
|
|
bda7dd18cf | ||
|
|
617d795383 | ||
|
|
b9b8cfd1ca | ||
|
|
6b98771187 | ||
|
|
afa17b2a1b | ||
|
|
3b88460eb5 | ||
|
|
a52df7696a | ||
|
|
105a1c866b | ||
|
|
69c1d0faad | ||
|
|
809fe9becf | ||
|
|
7b6974e595 | ||
|
|
caf8742dcd | ||
|
|
94caf8ee61 | ||
|
|
2f7a372429 | ||
|
|
e547c13716 | ||
|
|
44c39405c7 | ||
|
|
96faedf481 | ||
|
|
3cfc11c6d2 | ||
|
|
80f1cf0546 | ||
|
|
691b672a1e | ||
|
|
7206f020cb | ||
|
|
2e0842573d | ||
|
|
7537c9896e | ||
|
|
88271ddf14 | ||
|
|
b0f1e1843b | ||
|
|
00f57c183d | ||
|
|
dc76c4227d | ||
|
|
f4195aa435 | ||
|
|
1ebee26c30 | ||
|
|
1db60e22b9 | ||
|
|
e8cfeef26c | ||
|
|
0c4c15c4ce | ||
|
|
175c54acf1 | ||
|
|
22a9c96ffd | ||
|
|
95b04ec0a1 | ||
|
|
2c84c5ad8a | ||
|
|
d163a30e15 | ||
|
|
ec6b786608 | ||
|
|
781ab7ca20 | ||
|
|
6bb522b6c3 | ||
|
|
9d515255a6 | ||
|
|
012e143601 | ||
|
|
880ad4d07c | ||
|
|
b6d5367093 | ||
|
|
4895e707ef | ||
|
|
0616dd6690 | ||
|
|
53048a42fd | ||
|
|
a41b9de37c | ||
|
|
745f0a6f61 | ||
|
|
8ca60af43a | ||
|
|
a66f0b5475 | ||
|
|
25b8dba6df | ||
|
|
f5365c96f6 |
1
.github/CODEOWNERS
vendored
Normal file
1
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1 @@
|
||||
* @prometheus-community/windows_exporter-reviewers
|
||||
6
.github/dependabot.yml
vendored
Normal file
6
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
105
.github/workflows/lint.yml
vendored
Normal file
105
.github/workflows/lint.yml
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
name: Linting
|
||||
|
||||
# Trigger on pull requests and pushes to master branch where Go-related files
|
||||
# have been changed.
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- "go.mod"
|
||||
- "go.sum"
|
||||
- "**.go"
|
||||
- ".github/workflows/lint.yml"
|
||||
- "tools/e2e-output.txt"
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
paths:
|
||||
- "go.mod"
|
||||
- "go.sum"
|
||||
- "**.go"
|
||||
- ".github/workflows/lint.yml"
|
||||
- "tools/e2e-output.txt"
|
||||
branches:
|
||||
- master
|
||||
|
||||
env:
|
||||
PROMU_VER: '0.13.0'
|
||||
PROMTOOL_VER: '2.32.1'
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: windows-2019
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '^1.17.5'
|
||||
|
||||
- name: Test
|
||||
run: make test
|
||||
|
||||
- name: Install e2e deps
|
||||
run: |
|
||||
Invoke-WebRequest -Uri https://github.com/prometheus/promu/releases/download/v$($Env:PROMU_VER)/promu-$($Env:PROMU_VER).windows-amd64.zip -OutFile promu-$($Env:PROMU_VER).windows-amd64.zip
|
||||
Expand-Archive -Path promu-$($Env:PROMU_VER).windows-amd64.zip -DestinationPath .
|
||||
Copy-Item -Path promu-$($Env:PROMU_VER).windows-amd64\promu.exe -Destination "$(go env GOPATH)\bin"
|
||||
|
||||
go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.2.0
|
||||
# GOPATH\bin dir must be appended to PATH else the `promu` command won't be found
|
||||
echo "$(go env GOPATH)\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
||||
|
||||
- name: e2e Test
|
||||
run: make e2e-test
|
||||
|
||||
promtool:
|
||||
runs-on: windows-2019
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '^1.17.5'
|
||||
|
||||
- name: Install promtool
|
||||
run: |
|
||||
Invoke-WebRequest -Uri https://github.com/prometheus/prometheus/releases/download/v$($Env:PROMTOOL_VER)/prometheus-$($Env:PROMTOOL_VER).windows-amd64.zip -OutFile prometheus-$($Env:PROMTOOL_VER).windows-amd64.zip
|
||||
Expand-Archive -Path prometheus-$($Env:PROMTOOL_VER).windows-amd64.zip -DestinationPath .
|
||||
Copy-Item -Path prometheus-$($Env:PROMTOOL_VER).windows-amd64\promtool.exe -Destination "$(go env GOPATH)\bin"
|
||||
|
||||
Invoke-WebRequest -Uri https://github.com/prometheus/promu/releases/download/v$($Env:PROMU_VER)/promu-$($Env:PROMU_VER).windows-amd64.zip -OutFile promu-$($Env:PROMU_VER).windows-amd64.zip
|
||||
Expand-Archive -Path promu-$($Env:PROMU_VER).windows-amd64.zip -DestinationPath .
|
||||
Copy-Item -Path promu-$($Env:PROMU_VER).windows-amd64\promu.exe -Destination "$(go env GOPATH)\bin"
|
||||
|
||||
# No binaries available so build from source
|
||||
go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.2.0
|
||||
# GOPATH\bin dir must be appended to PATH else the `promu` command won't be found
|
||||
echo "$(go env GOPATH)\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
||||
|
||||
- name: Promtool
|
||||
run: make promtool
|
||||
|
||||
lint:
|
||||
runs-on: windows-2022
|
||||
steps:
|
||||
# `gofmt` linter run by golangci-lint fails on CRLF line endings (the default for Windows)
|
||||
- name: Set git to use LF
|
||||
run: |
|
||||
git config --global core.autocrlf false
|
||||
git config --global core.eol lf
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '^1.17.5'
|
||||
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@v3
|
||||
with:
|
||||
version: v1.45.2
|
||||
args: "--timeout=5m"
|
||||
|
||||
# golangci-lint action doesn't always provide helpful output, so re-run without the action for
|
||||
# better output of the problem.
|
||||
# The cache from the golangci-lint step is re-used here, so this step should finish quickly.
|
||||
- name: errors
|
||||
if: ${{ failure() }}
|
||||
run: golangci-lint run --timeout=5m -c .golangci.yaml
|
||||
106
.github/workflows/release.yml
vendored
Normal file
106
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
name: Releases
|
||||
|
||||
# Trigger on releases.
|
||||
on:
|
||||
release:
|
||||
types:
|
||||
- published
|
||||
- edited
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
packages: write
|
||||
|
||||
env:
|
||||
PROMU_VER: '0.13.0'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: windows-2022
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
# fetch-depth required for gitversion in `Build` step
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '^1.17.5'
|
||||
|
||||
- name: Install Build deps
|
||||
run: |
|
||||
dotnet tool install --global GitVersion.Tool --version 5.*
|
||||
Invoke-WebRequest -Uri https://github.com/prometheus/promu/releases/download/v$($Env:PROMU_VER)/promu-$($Env:PROMU_VER).windows-amd64.zip -OutFile promu-$($Env:PROMU_VER).windows-amd64.zip
|
||||
Expand-Archive -Path promu-$($Env:PROMU_VER).windows-amd64.zip -DestinationPath .
|
||||
Copy-Item -Path promu-$($Env:PROMU_VER).windows-amd64\promu.exe -Destination "$(go env GOPATH)\bin"
|
||||
|
||||
# No binaries available so build from source
|
||||
go install github.com/josephspurrier/goversioninfo/cmd/goversioninfo@v1.4.0
|
||||
# GOPATH\bin dir must be added to PATH else the `promu` and `goversioninfo` commands won't be found
|
||||
echo "$(go env GOPATH)\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
dotnet-gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
|
||||
$Version = Get-Content VERSION
|
||||
# Windows versioninfo resources need the file version by parts (but product version is free text)
|
||||
$VersionParts = ($Version -replace '^v?([0-9\.]+).*$','$1').Split(".")
|
||||
goversioninfo.exe -ver-major $VersionParts[0] -ver-minor $VersionParts[1] -ver-patch $VersionParts[2] -product-version $Version -platform-specific
|
||||
|
||||
make crossbuild
|
||||
# '+' symbols are invalid characters in image tags
|
||||
(Get-Content -Path VERSION) -replace '\+', '_' | Set-Content -Path VERSION
|
||||
make build-all
|
||||
# GH requires all files to have different names, so add version/arch to differentiate
|
||||
foreach($Arch in "amd64", "arm64","386") {
|
||||
Move-Item output\$Arch\windows_exporter.exe output\windows_exporter-$Version-$Arch.exe
|
||||
}
|
||||
|
||||
- name: Upload Artifacts
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: windows_exporter_binaries
|
||||
path: output\windows_exporter-*.exe
|
||||
|
||||
- name: Build Release Artifacts
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
$BuildVersion = Get-Content VERSION
|
||||
$TagName = $env:GITHUB_REF -replace 'refs/tags/', ''
|
||||
# The MSI version is not semver compliant, so just take the numerical parts
|
||||
$MSIVersion = $TagName -replace '^v?([0-9\.]+).*$','$1'
|
||||
foreach($Arch in "amd64", "386") {
|
||||
Write-Verbose "Building windows_exporter $MSIVersion msi for $Arch"
|
||||
.\installer\build.ps1 -PathToExecutable .\output\windows_exporter-$BuildVersion-$Arch.exe -Version $MSIVersion -Arch "$Arch"
|
||||
Move-Item installer\Output\windows_exporter-$MSIVersion-$Arch.msi output\
|
||||
}
|
||||
|
||||
promu checksum output\
|
||||
|
||||
- name: Login to GitHub container registry
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Push Latest image
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
$Env:VERSION = 'latest'
|
||||
make push-all
|
||||
|
||||
- name: Release
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
$TagName = $env:GITHUB_REF -replace 'refs/tags/', ''
|
||||
Get-ChildItem -Path output\* -Include @('windows_exporter*.msi', 'windows_exporter*.exe', 'sha256sums.txt') | Foreach-Object {gh release upload $TagName $_}
|
||||
make push-all
|
||||
26
.github/workflows/spelling.yml
vendored
Normal file
26
.github/workflows/spelling.yml
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
name: Spell checking
|
||||
|
||||
# Trigger on pull requests, and pushes to master branch.
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
branches:
|
||||
- master
|
||||
|
||||
env:
|
||||
PROMU_VER: 'v0.13.0'
|
||||
|
||||
jobs:
|
||||
codespell:
|
||||
name: Check for spelling errors
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: codespell-project/actions-codespell@master
|
||||
with:
|
||||
check_filenames: true
|
||||
# When using this Action in other repos, the --skip option below can be removed
|
||||
skip: ./.git,go.mod,go.sum
|
||||
ignore_words_list: calle
|
||||
8
.gitignore
vendored
8
.gitignore
vendored
@@ -1,2 +1,8 @@
|
||||
*.exe
|
||||
VERSION
|
||||
VERSION
|
||||
*.swp
|
||||
*.un~
|
||||
output/
|
||||
.vscode
|
||||
.idea
|
||||
*.syso
|
||||
|
||||
25
.golangci.yaml
Normal file
25
.golangci.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- deadcode
|
||||
- errcheck
|
||||
- revive
|
||||
- govet
|
||||
- gofmt
|
||||
- ineffassign
|
||||
- structcheck
|
||||
- unconvert
|
||||
- varcheck
|
||||
|
||||
issues:
|
||||
exclude:
|
||||
- don't use underscores in Go names
|
||||
- exported type .+ should have comment or be unexported
|
||||
exclude-rules:
|
||||
- # Golint has many capitalisation complaints on WMI class names
|
||||
text: "`?\\w+`? should be `?\\w+`?"
|
||||
linters:
|
||||
- revive
|
||||
- text: "don't use ALL_CAPS in Go names; use CamelCase"
|
||||
linters:
|
||||
- revive
|
||||
18
.promu.yml
18
.promu.yml
@@ -1,12 +1,18 @@
|
||||
repository:
|
||||
path: github.com/martinlindhe/wmi_exporter
|
||||
path: github.com/prometheus-community/windows_exporter
|
||||
build:
|
||||
binaries:
|
||||
- name: windows_exporter
|
||||
ldflags: |
|
||||
-X {{repoPath}}/vendor/github.com/prometheus/common/version.Version={{.Version}}
|
||||
-X {{repoPath}}/vendor/github.com/prometheus/common/version.Revision={{.Revision}}
|
||||
-X {{repoPath}}/vendor/github.com/prometheus/common/version.Branch={{.Branch}}
|
||||
-X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
|
||||
-X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
|
||||
-X github.com/prometheus/common/version.Version={{.Version}}
|
||||
-X github.com/prometheus/common/version.Revision={{.Revision}}
|
||||
-X github.com/prometheus/common/version.Branch={{.Branch}}
|
||||
-X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
|
||||
-X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
|
||||
tarball:
|
||||
files:
|
||||
- LICENSE
|
||||
crossbuild:
|
||||
platforms:
|
||||
- windows/amd64
|
||||
- windows/386
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
Contributors in alphabetical order
|
||||
|
||||
* [Martin Lindhe](https://github.com/martinlindhe)
|
||||
* [Calle Pettersson](https://github.com/carlpett)
|
||||
3
CODE_OF_CONDUCT.md
Normal file
3
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,3 @@
|
||||
## Prometheus Community Code of Conduct
|
||||
|
||||
Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
|
||||
9
Dockerfile
Normal file
9
Dockerfile
Normal file
@@ -0,0 +1,9 @@
|
||||
# Note this image doesn't really matter for hostprocess but it is good to build per OS version
|
||||
# the files in the image are copied to $env:CONTAINER_SANDBOX_MOUNT_POINT on the host
|
||||
# but the file system is the Host NOT the container
|
||||
ARG BASE="mcr.microsoft.com/windows/nanoserver:1809"
|
||||
FROM $BASE
|
||||
|
||||
ENV PATH="C:\Windows\system32;C:\Windows;"
|
||||
COPY output/amd64/windows_exporter.exe /windows_exporter.exe
|
||||
ENTRYPOINT ["windows_exporter.exe"]
|
||||
9
MAINTAINERS.md
Normal file
9
MAINTAINERS.md
Normal file
@@ -0,0 +1,9 @@
|
||||
Maintainers in alphabetical order
|
||||
|
||||
* [Ben Reedy](https://github.com/breed808) - breed808@breed808.com
|
||||
* [Calle Pettersson](https://github.com/carlpett) - calle@cape.nu
|
||||
|
||||
Alumni
|
||||
|
||||
* [Brian Brazil](https://github.com/brian-brazil)
|
||||
* [Martin Lindhe](https://github.com/martinlindhe)
|
||||
64
Makefile
Normal file
64
Makefile
Normal file
@@ -0,0 +1,64 @@
|
||||
export GOOS=windows
|
||||
export DOCKER_IMAGE_NAME ?= windows-exporter
|
||||
export DOCKER_REPO ?= ghcr.io/prometheus-community
|
||||
|
||||
VERSION?=$(shell cat VERSION)
|
||||
DOCKER?=docker
|
||||
|
||||
# Image Variables for Hostprocess Container
|
||||
# Windows image build is heavily influenced by https://github.com/kubernetes/kubernetes/blob/master/cluster/images/etcd/Makefile
|
||||
OS=1809
|
||||
ALL_OS:= 1809 ltsc2022
|
||||
BASE_IMAGE=mcr.microsoft.com/windows/nanoserver
|
||||
|
||||
.PHONY: build
|
||||
build: windows_exporter.exe
|
||||
windows_exporter.exe: **/*.go
|
||||
promu build -v
|
||||
|
||||
test:
|
||||
go test -v ./...
|
||||
|
||||
bench:
|
||||
go test -v -bench='benchmark(cpu|logicaldisk|logon|memory|net|process|service|system|tcp|time)collector' ./...
|
||||
|
||||
lint:
|
||||
golangci-lint -c .golangci.yaml run
|
||||
|
||||
.PHONY: e2e-test
|
||||
e2e-test: windows_exporter.exe
|
||||
pwsh -NonInteractive -ExecutionPolicy Bypass -File .\tools\end-to-end-test.ps1
|
||||
|
||||
.PHONY: promtool
|
||||
promtool: windows_exporter.exe
|
||||
pwsh -NonInteractive -ExecutionPolicy Bypass -File .\tools\promtool.ps1
|
||||
|
||||
fmt:
|
||||
gofmt -l -w -s .
|
||||
|
||||
crossbuild:
|
||||
# The prometheus/golang-builder image for promu crossbuild doesn't exist
|
||||
# on Windows, so for now, we'll just build twice
|
||||
GOARCH=amd64 promu build --prefix=output/amd64
|
||||
GOARCH=arm64 promu build --prefix=output/arm64
|
||||
GOARCH=386 promu build --prefix=output/386
|
||||
|
||||
build-image: crossbuild
|
||||
$(DOCKER) build --build-arg=BASE=$(BASE_IMAGE):$(OS) -f Dockerfile -t $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$(OS) .
|
||||
|
||||
sub-build-%:
|
||||
$(MAKE) OS=$* build-image
|
||||
|
||||
build-all: $(addprefix sub-build-,$(ALL_OS))
|
||||
|
||||
push:
|
||||
set -x; \
|
||||
for osversion in ${ALL_OS}; do \
|
||||
$(DOCKER) push $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
|
||||
$(DOCKER) manifest create --amend $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION) $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
|
||||
full_version=`$(DOCKER) manifest inspect $(BASE_IMAGE):$${osversion} | grep "os.version" | head -n 1 | awk -F\" '{print $$4}'` || true; \
|
||||
$(DOCKER) manifest annotate --os windows --arch amd64 --os-version $${full_version} $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION) $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)-$${osversion}; \
|
||||
done
|
||||
$(DOCKER) manifest push --purge $(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(VERSION)
|
||||
|
||||
push-all: build-all push
|
||||
182
README.md
182
README.md
@@ -1,60 +1,196 @@
|
||||
# WMI exporter
|
||||
# windows_exporter
|
||||
|
||||
[](https://ci.appveyor.com/project/martinlindhe/wmi-exporter)
|
||||

|
||||
|
||||
Prometheus exporter for Windows machines, using the WMI (Windows Management Instrumentation).
|
||||
A Prometheus exporter for Windows machines.
|
||||
|
||||
|
||||
## Collectors
|
||||
|
||||
Name | Description | Enabled by default
|
||||
---------|-------------|--------------------
|
||||
cpu | [Win32_PerfRawData_PerfOS_Processor](https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx) metrics (cpu usage) | ✓
|
||||
cs | [Win32_ComputerSystem](https://msdn.microsoft.com/en-us/library/aa394102) metrics (system properties, num cpus/total memory) | ✓
|
||||
dns | [Win32_PerfRawData_DNS_DNS](https://technet.microsoft.com/en-us/library/cc977686.aspx) metrics (DNS Server) |
|
||||
iis | [Win32_PerfRawData_W3SVC_WebService](https://msdn.microsoft.com/en-us/library/aa394345) IIS metrics |
|
||||
logical_disk | [Win32_PerfRawData_PerfDisk_LogicalDisk](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71)) metrics (disk I/O) | ✓
|
||||
net | [Win32_PerfRawData_Tcpip_NetworkInterface](https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)) metrics (network interface I/O) | ✓
|
||||
os | [Win32_OperatingSystem](https://msdn.microsoft.com/en-us/library/aa394239) metrics (memory, processes, users) | ✓
|
||||
service | [Win32_Service](https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx) metrics (service states) | ✓
|
||||
system | Win32_PerfRawData_PerfOS_System metrics (system calls) | ✓
|
||||
[ad](docs/collector.ad.md) | Active Directory Domain Services |
|
||||
[adcs](docs/collector.adcs.md) | Active Directory Certificate Services |
|
||||
[adfs](docs/collector.adfs.md) | Active Directory Federation Services |
|
||||
[cache](docs/collector.cache.md) | Cache metrics |
|
||||
[cpu](docs/collector.cpu.md) | CPU usage | ✓
|
||||
[cpu_info](docs/collector.cpu_info.md) | CPU Information |
|
||||
[cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) | ✓
|
||||
[container](docs/collector.container.md) | Container metrics |
|
||||
[dfsr](docs/collector.dfsr.md) | DFSR metrics |
|
||||
[dhcp](docs/collector.dhcp.md) | DHCP Server |
|
||||
[dns](docs/collector.dns.md) | DNS Server |
|
||||
[exchange](docs/collector.exchange.md) | Exchange metrics |
|
||||
[fsrmquota](docs/collector.fsrmquota.md) | Microsoft File Server Resource Manager (FSRM) Quotas collector |
|
||||
[hyperv](docs/collector.hyperv.md) | Hyper-V hosts |
|
||||
[iis](docs/collector.iis.md) | IIS sites and applications |
|
||||
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | ✓
|
||||
[logon](docs/collector.logon.md) | User logon sessions |
|
||||
[memory](docs/collector.memory.md) | Memory usage metrics |
|
||||
[mscluster_cluster](docs/collector.mscluster_cluster.md) | MSCluster cluster metrics |
|
||||
[mscluster_network](docs/collector.mscluster_network.md) | MSCluster network metrics |
|
||||
[mscluster_node](docs/collector.mscluster_node.md) | MSCluster Node metrics |
|
||||
[mscluster_resource](docs/collector.mscluster_resource.md) | MSCluster Resource metrics |
|
||||
[mscluster_resourcegroup](docs/collector.mscluster_resourcegroup.md) | MSCluster ResourceGroup metrics |
|
||||
[msmq](docs/collector.msmq.md) | MSMQ queues |
|
||||
[mssql](docs/collector.mssql.md) | [SQL Server Performance Objects](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects#SQLServerPOs) metrics |
|
||||
[netframework_clrexceptions](docs/collector.netframework_clrexceptions.md) | .NET Framework CLR Exceptions |
|
||||
[netframework_clrinterop](docs/collector.netframework_clrinterop.md) | .NET Framework Interop Metrics |
|
||||
[netframework_clrjit](docs/collector.netframework_clrjit.md) | .NET Framework JIT metrics |
|
||||
[netframework_clrloading](docs/collector.netframework_clrloading.md) | .NET Framework CLR Loading metrics |
|
||||
[netframework_clrlocksandthreads](docs/collector.netframework_clrlocksandthreads.md) | .NET Framework locks and metrics threads |
|
||||
[netframework_clrmemory](docs/collector.netframework_clrmemory.md) | .NET Framework Memory metrics |
|
||||
[netframework_clrremoting](docs/collector.netframework_clrremoting.md) | .NET Framework Remoting metrics |
|
||||
[netframework_clrsecurity](docs/collector.netframework_clrsecurity.md) | .NET Framework Security Check metrics |
|
||||
[net](docs/collector.net.md) | Network interface I/O | ✓
|
||||
[os](docs/collector.os.md) | OS metrics (memory, processes, users) | ✓
|
||||
[process](docs/collector.process.md) | Per-process metrics |
|
||||
[remote_fx](docs/collector.remote_fx.md) | RemoteFX protocol (RDP) metrics |
|
||||
[scheduled_task](docs/collector.scheduled_task.md) | Scheduled Tasks metrics |
|
||||
[service](docs/collector.service.md) | Service state metrics | ✓
|
||||
[smtp](docs/collector.smtp.md) | IIS SMTP Server |
|
||||
[system](docs/collector.system.md) | System calls | ✓
|
||||
[tcp](docs/collector.tcp.md) | TCP connections |
|
||||
[time](docs/collector.time.md) | Windows Time Service |
|
||||
[thermalzone](docs/collector.thermalzone.md) | Thermal information
|
||||
[terminal_services](docs/collector.terminal_services.md) | Terminal services (RDS)
|
||||
[textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | ✓
|
||||
[vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent |
|
||||
|
||||
The HELP texts shows the WMI data source, please see MSDN documentation for details.
|
||||
See the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples.
|
||||
|
||||
### Filtering enabled collectors
|
||||
|
||||
The `windows_exporter` will expose all metrics from enabled collectors by default. This is the recommended way to collect metrics to avoid errors when comparing metrics of different families.
|
||||
|
||||
For advanced use the `windows_exporter` can be passed an optional list of collectors to filter metrics. The `collect[]` parameter may be used multiple times. In Prometheus configuration you can use this syntax under the [scrape config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>).
|
||||
|
||||
```
|
||||
params:
|
||||
collect[]:
|
||||
- foo
|
||||
- bar
|
||||
```
|
||||
|
||||
This can be useful for having different Prometheus servers collect specific metrics from nodes.
|
||||
|
||||
## Flags
|
||||
|
||||
windows_exporter accepts flags to configure certain behaviours. The ones configuring the global behaviour of the exporter are listed below, while collector-specific ones are documented in the respective collector documentation above.
|
||||
|
||||
Flag | Description | Default value
|
||||
---------|-------------|--------------------
|
||||
`--telemetry.addr` | host:port for exporter. | `:9182`
|
||||
`--telemetry.path` | URL path for surfacing collected metrics. | `/metrics`
|
||||
`--telemetry.max-requests` | Maximum number of concurrent requests. 0 to disable. | `5`
|
||||
`--collectors.enabled` | Comma-separated list of collectors to use. Use `[defaults]` as a placeholder which gets expanded containing all the collectors enabled by default." | `[defaults]`
|
||||
`--collectors.print` | If true, print available collectors and exit. |
|
||||
`--scrape.timeout-margin` | Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads. | `0.5`
|
||||
`--web.config.file` | A [web config][web_config] for setting up TLS and Auth | None
|
||||
|
||||
## Installation
|
||||
Each release provides a .msi installer. The installer will setup the WMI Exporter as a Windows service, as well as create an exception in the Windows Firewall.
|
||||
The latest release can be downloaded from the [releases page](https://github.com/prometheus-community/windows_exporter/releases).
|
||||
|
||||
Each release provides a .msi installer. The installer will setup the windows_exporter as a Windows service, as well as create an exception in the Windows Firewall.
|
||||
|
||||
If the installer is run without any parameters, the exporter will run with default settings for enabled collectors, ports, etc. The following parameters are available:
|
||||
|
||||
Name | Description
|
||||
-----|------------
|
||||
`ENABLED_COLLECTORS` | As the `-collectors.enabled` flag, provide a comma-separated list of enabled collectors
|
||||
`ENABLED_COLLECTORS` | As the `--collectors.enabled` flag, provide a comma-separated list of enabled collectors
|
||||
`LISTEN_ADDR` | The IP address to bind to. Defaults to 0.0.0.0
|
||||
`LISTEN_PORT` | The port to bind to. Defaults to 9182.
|
||||
`METRICS_PATH` | The path at which to serve metrics. Defaults to `/metrics`
|
||||
`TEXTFILE_DIR` | As the `--collector.textfile.directory` flag, provide a directory to read text files with metrics from
|
||||
`REMOTE_ADDR` | Allows setting comma separated remote IP addresses for the Windows Firewall exception (whitelist). Defaults to an empty string (any remote address).
|
||||
`EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string.
|
||||
|
||||
Parameters are sent to the installer via `msiexec`. Example invocation:
|
||||
Parameters are sent to the installer via `msiexec`. Example invocations:
|
||||
|
||||
```powershell
|
||||
msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,iis LISTEN_PORT=5000
|
||||
```
|
||||
|
||||
## Roadmap
|
||||
Example service collector with a custom query.
|
||||
```powershell
|
||||
msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,service --% EXTRA_FLAGS="--collector.service.services-where ""Name LIKE 'sql%'"""
|
||||
```
|
||||
|
||||
See [open issues](https://github.com/martinlindhe/wmi_exporter/issues)
|
||||
On some older versions of Windows you may need to surround parameter values with double quotes to get the install command parsing properly:
|
||||
```powershell
|
||||
msiexec /i C:\Users\Administrator\Downloads\windows_exporter.msi ENABLED_COLLECTORS="ad,iis,logon,memory,process,tcp,thermalzone" TEXTFILE_DIR="C:\custom_metrics\"
|
||||
```
|
||||
|
||||
|
||||
## Kubernetes Implementation
|
||||
|
||||
See detailed steps to install on Windows Kubernetes [here](./kubernetes/kubernetes.md).
|
||||
|
||||
## Supported versions
|
||||
|
||||
windows_exporter supports Windows Server versions 2008R2 and later, and desktop Windows version 7 and later.
|
||||
|
||||
## Usage
|
||||
|
||||
go get -u github.com/kardianos/govendor
|
||||
go get -u github.com/martinlindhe/wmi_exporter
|
||||
cd $env:GOPATH/src/github.com/martinlindhe/wmi_exporter
|
||||
govendor build +local
|
||||
.\wmi_exporter.exe
|
||||
go get -u github.com/prometheus/promu
|
||||
go get -u github.com/prometheus-community/windows_exporter
|
||||
cd $env:GOPATH/src/github.com/prometheus-community/windows_exporter
|
||||
promu build -v
|
||||
.\windows_exporter.exe
|
||||
|
||||
The prometheus metrics will be exposed on [localhost:9182](http://localhost:9182)
|
||||
|
||||
## Examples
|
||||
|
||||
### Enable only service collector and specify a custom query
|
||||
|
||||
.\windows_exporter.exe --collectors.enabled "service" --collector.service.services-where "Name='windows_exporter'"
|
||||
|
||||
### Enable only process collector and specify a custom query
|
||||
|
||||
.\windows_exporter.exe --collectors.enabled "process" --collector.process.whitelist="firefox.+"
|
||||
|
||||
When there are multiple processes with the same name, WMI represents those after the first instance as `process-name#index`. So to get them all, rather than just the first one, the [regular expression](https://en.wikipedia.org/wiki/Regular_expression) must use `.+`. See [process](docs/collector.process.md) for more information.
|
||||
|
||||
### Using [defaults] with `--collectors.enabled` argument
|
||||
|
||||
Using `[defaults]` with `--collectors.enabled` argument which gets expanded with all default collectors.
|
||||
|
||||
.\windows_exporter.exe --collectors.enabled "[defaults],process,container"
|
||||
|
||||
This enables the additional process and container collectors on top of the defaults.
|
||||
|
||||
### Using a configuration file
|
||||
|
||||
YAML configuration files can be specified with the `--config.file` flag. E.G. `.\windows_exporter.exe --config.file=config.yml`
|
||||
|
||||
```yaml
|
||||
collectors:
|
||||
enabled: cpu,cs,net,service
|
||||
collector:
|
||||
service:
|
||||
services-where: "Name='windows_exporter'"
|
||||
log:
|
||||
level: warn
|
||||
```
|
||||
|
||||
An example configuration file can be found [here](docs/example_config.yml).
|
||||
|
||||
#### Configuration file notes
|
||||
|
||||
Configuration file values can be mixed with CLI flags. E.G.
|
||||
|
||||
`.\windows_exporter.exe --collectors.enabled=cpu,logon`
|
||||
|
||||
```yaml
|
||||
log:
|
||||
level: debug
|
||||
```
|
||||
|
||||
CLI flags enjoy a higher priority over values specified in the configuration file.
|
||||
|
||||
## License
|
||||
|
||||
Under [MIT](LICENSE)
|
||||
|
||||
[web_config]: https://github.com/prometheus/exporter-toolkit/blob/master/docs/web-configuration.md
|
||||
|
||||
6
SECURITY.md
Normal file
6
SECURITY.md
Normal file
@@ -0,0 +1,6 @@
|
||||
# Reporting a security issue
|
||||
|
||||
The Prometheus security policy, including how to report vulnerabilities, can be
|
||||
found here:
|
||||
|
||||
https://prometheus.io/docs/operating/security/
|
||||
52
appveyor.yml
52
appveyor.yml
@@ -1,52 +0,0 @@
|
||||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
matrix:
|
||||
- MSI_ARCH: amd64
|
||||
GOARCH: amd64
|
||||
- MSI_ARCH: 386
|
||||
GOARCH: 386
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter
|
||||
|
||||
install:
|
||||
- go version
|
||||
- set PATH=%GOPATH%\bin;c:\go\bin;%GOPATH%\bin\windows_%GOARCH%;%PATH%
|
||||
- go get -u github.com/kardianos/govendor
|
||||
- go get -u github.com/prometheus/promu
|
||||
- choco install gitversion.portable -y
|
||||
|
||||
build_script:
|
||||
- ps: gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
|
||||
- govendor test -v +local
|
||||
- promu build -v .
|
||||
- ps: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
if($env:APPVEYOR_REPO_TAG -eq "True") {
|
||||
# The MSI version is not semver compliant, so just take the numerical parts
|
||||
$Version = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
|
||||
Write-Verbose "Setting msi version to $Version"
|
||||
.\installer\build.ps1 -PathToExecutable .\wmi_exporter.exe -Version $Version -Arch "$env:MSI_ARCH"
|
||||
Push-AppveyorArtifact installer\Output\wmi_exporter-$Version-$env:MSI_ARCH.msi -DeploymentName Installer
|
||||
}
|
||||
|
||||
after_build:
|
||||
- 7z a wmi_exporter-%MSI_ARCH%.zip wmi_exporter.exe
|
||||
|
||||
artifacts:
|
||||
- name: Executable
|
||||
path: 'wmi_exporter-*.zip'
|
||||
|
||||
deploy:
|
||||
- provider: GitHub
|
||||
description: WMI Exporter version $(appveyor_build_version)
|
||||
artifact: Executable,Installer
|
||||
auth_token:
|
||||
secure: 'CrXWeTf7qONUOEki5olFfGEUPMLDeHj61koDXV3OVEaLgtACmnVHsKUub9POflda'
|
||||
draft: false
|
||||
prerelease: false
|
||||
on:
|
||||
appveyor_repo_tag: true
|
||||
1315
collector/ad.go
Normal file
1315
collector/ad.go
Normal file
File diff suppressed because it is too large
Load Diff
9
collector/ad_test.go
Normal file
9
collector/ad_test.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkADCollector(b *testing.B) {
|
||||
benchmarkCollector(b, "ad", NewADCollector)
|
||||
}
|
||||
242
collector/adcs.go
Normal file
242
collector/adcs.go
Normal file
@@ -0,0 +1,242 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("adcs", adcsCollectorMethod, "Certification Authority")
|
||||
}
|
||||
|
||||
type adcsCollector struct {
|
||||
RequestsPerSecond *prometheus.Desc
|
||||
RequestProcessingTime *prometheus.Desc
|
||||
RetrievalsPerSecond *prometheus.Desc
|
||||
RetrievalProcessingTime *prometheus.Desc
|
||||
FailedRequestsPerSecond *prometheus.Desc
|
||||
IssuedRequestsPerSecond *prometheus.Desc
|
||||
PendingRequestsPerSecond *prometheus.Desc
|
||||
RequestCryptographicSigningTime *prometheus.Desc
|
||||
RequestPolicyModuleProcessingTime *prometheus.Desc
|
||||
ChallengeResponsesPerSecond *prometheus.Desc
|
||||
ChallengeResponseProcessingTime *prometheus.Desc
|
||||
SignedCertificateTimestampListsPerSecond *prometheus.Desc
|
||||
SignedCertificateTimestampListProcessingTime *prometheus.Desc
|
||||
}
|
||||
|
||||
// ADCSCollectorMethod ...
|
||||
func adcsCollectorMethod() (Collector, error) {
|
||||
const subsystem = "adcs"
|
||||
return &adcsCollector{
|
||||
RequestsPerSecond: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "requests_total"),
|
||||
"Total certificate requests processed",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
RequestProcessingTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "request_processing_time_seconds"),
|
||||
"Last time elapsed for certificate requests",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
RetrievalsPerSecond: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "retrievals_total"),
|
||||
"Total certificate retrieval requests processed",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
RetrievalProcessingTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "retrievals_processing_time_seconds"),
|
||||
"Last time elapsed for certificate retrieval request",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
FailedRequestsPerSecond: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failed_requests_total"),
|
||||
"Total failed certificate requests processed",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
IssuedRequestsPerSecond: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "issued_requests_total"),
|
||||
"Total issued certificate requests processed",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
PendingRequestsPerSecond: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pending_requests_total"),
|
||||
"Total pending certificate requests processed",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
RequestCryptographicSigningTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "request_cryptographic_signing_time_seconds"),
|
||||
"Last time elapsed for signing operation request",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
RequestPolicyModuleProcessingTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "request_policy_module_processing_time_seconds"),
|
||||
"Last time elapsed for policy module processing request",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
ChallengeResponsesPerSecond: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "challenge_responses_total"),
|
||||
"Total certificate challenge responses processed",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
ChallengeResponseProcessingTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "challenge_response_processing_time_seconds"),
|
||||
"Last time elapsed for challenge response",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
SignedCertificateTimestampListsPerSecond: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "signed_certificate_timestamp_lists_total"),
|
||||
"Total Signed Certificate Timestamp Lists processed",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
SignedCertificateTimestampListProcessingTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "signed_certificate_timestamp_list_processing_time_seconds"),
|
||||
"Last time elapsed for Signed Certificate Timestamp List",
|
||||
[]string{"cert_template"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *adcsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectADCSCounters(ctx, ch); err != nil {
|
||||
log.Error("Failed collecting ADCS Metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type perflibADCS struct {
|
||||
Name string
|
||||
RequestsPerSecond float64 `perflib:"Requests/sec"`
|
||||
RequestProcessingTime float64 `perflib:"Request processing time (ms)"`
|
||||
RetrievalsPerSecond float64 `perflib:"Retrievals/sec"`
|
||||
RetrievalProcessingTime float64 `perflib:"Retrieval processing time (ms)"`
|
||||
FailedRequestsPerSecond float64 `perflib:"Failed Requests/sec"`
|
||||
IssuedRequestsPerSecond float64 `perflib:"Issued Requests/sec"`
|
||||
PendingRequestsPerSecond float64 `perflib:"Pending Requests/sec"`
|
||||
RequestCryptographicSigningTime float64 `perflib:"Request cryptographic signing time (ms)"`
|
||||
RequestPolicyModuleProcessingTime float64 `perflib:"Request policy module processing time (ms)"`
|
||||
ChallengeResponsesPerSecond float64 `perflib:"Challenge Responses/sec"`
|
||||
ChallengeResponseProcessingTime float64 `perflib:"Challenge Response processing time (ms)"`
|
||||
SignedCertificateTimestampListsPerSecond float64 `perflib:"Signed Certificate Timestamp Lists/sec"`
|
||||
SignedCertificateTimestampListProcessingTime float64 `perflib:"Signed Certificate Timestamp List processing time (ms)"`
|
||||
}
|
||||
|
||||
func (c *adcsCollector) collectADCSCounters(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
dst := make([]perflibADCS, 0)
|
||||
if _, ok := ctx.perfObjects["Certification Authority"]; !ok {
|
||||
return nil, errors.New("Perflib did not contain an entry for Certification Authority")
|
||||
}
|
||||
err := unmarshalObject(ctx.perfObjects["Certification Authority"], &dst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("Perflib query for Certification Authority (ADCS) returned empty result set")
|
||||
}
|
||||
|
||||
for _, d := range dst {
|
||||
n := strings.ToLower(d.Name)
|
||||
if n == "" {
|
||||
continue
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RequestsPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.RequestsPerSecond,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RequestProcessingTime,
|
||||
prometheus.GaugeValue,
|
||||
milliSecToSec(d.RequestProcessingTime),
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RetrievalsPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.RetrievalsPerSecond,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RetrievalProcessingTime,
|
||||
prometheus.GaugeValue,
|
||||
milliSecToSec(d.RetrievalProcessingTime),
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailedRequestsPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.FailedRequestsPerSecond,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IssuedRequestsPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.IssuedRequestsPerSecond,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PendingRequestsPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.PendingRequestsPerSecond,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RequestCryptographicSigningTime,
|
||||
prometheus.GaugeValue,
|
||||
milliSecToSec(d.RequestCryptographicSigningTime),
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RequestPolicyModuleProcessingTime,
|
||||
prometheus.GaugeValue,
|
||||
milliSecToSec(d.RequestPolicyModuleProcessingTime),
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ChallengeResponsesPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.ChallengeResponsesPerSecond,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ChallengeResponseProcessingTime,
|
||||
prometheus.GaugeValue,
|
||||
milliSecToSec(d.ChallengeResponseProcessingTime),
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SignedCertificateTimestampListsPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.SignedCertificateTimestampListsPerSecond,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SignedCertificateTimestampListProcessingTime,
|
||||
prometheus.GaugeValue,
|
||||
milliSecToSec(d.SignedCertificateTimestampListProcessingTime),
|
||||
d.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
9
collector/adcs_test.go
Normal file
9
collector/adcs_test.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkADCSCollector(b *testing.B) {
|
||||
benchmarkCollector(b, "adcs", adcsCollectorMethod)
|
||||
}
|
||||
638
collector/adfs.go
Normal file
638
collector/adfs.go
Normal file
@@ -0,0 +1,638 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"math"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("adfs", newADFSCollector, "AD FS")
|
||||
}
|
||||
|
||||
type adfsCollector struct {
|
||||
adLoginConnectionFailures *prometheus.Desc
|
||||
certificateAuthentications *prometheus.Desc
|
||||
deviceAuthentications *prometheus.Desc
|
||||
extranetAccountLockouts *prometheus.Desc
|
||||
federatedAuthentications *prometheus.Desc
|
||||
passportAuthentications *prometheus.Desc
|
||||
passiveRequests *prometheus.Desc
|
||||
passwordChangeFailed *prometheus.Desc
|
||||
passwordChangeSucceeded *prometheus.Desc
|
||||
tokenRequests *prometheus.Desc
|
||||
windowsIntegratedAuthentications *prometheus.Desc
|
||||
oAuthAuthZRequests *prometheus.Desc
|
||||
oAuthClientAuthentications *prometheus.Desc
|
||||
oAuthClientAuthenticationsFailures *prometheus.Desc
|
||||
oAuthClientCredentialsRequestFailures *prometheus.Desc
|
||||
oAuthClientCredentialsRequests *prometheus.Desc
|
||||
oAuthClientPrivateKeyJwtAuthenticationFailures *prometheus.Desc
|
||||
oAuthClientPrivateKeyJwtAuthentications *prometheus.Desc
|
||||
oAuthClientSecretBasicAuthenticationFailures *prometheus.Desc
|
||||
oAuthClientSecretBasicAuthentications *prometheus.Desc
|
||||
oAuthClientSecretPostAuthenticationFailures *prometheus.Desc
|
||||
oAuthClientSecretPostAuthentications *prometheus.Desc
|
||||
oAuthClientWindowsIntegratedAuthenticationFailures *prometheus.Desc
|
||||
oAuthClientWindowsIntegratedAuthentications *prometheus.Desc
|
||||
oAuthLogonCertificateRequestFailures *prometheus.Desc
|
||||
oAuthLogonCertificateTokenRequests *prometheus.Desc
|
||||
oAuthPasswordGrantRequestFailures *prometheus.Desc
|
||||
oAuthPasswordGrantRequests *prometheus.Desc
|
||||
oAuthTokenRequests *prometheus.Desc
|
||||
samlPTokenRequests *prometheus.Desc
|
||||
ssoAuthenticationFailures *prometheus.Desc
|
||||
ssoAuthentications *prometheus.Desc
|
||||
wsfedTokenRequests *prometheus.Desc
|
||||
wstrustTokenRequests *prometheus.Desc
|
||||
upAuthenticationFailures *prometheus.Desc
|
||||
upAuthentications *prometheus.Desc
|
||||
externalAuthenticationFailures *prometheus.Desc
|
||||
externalAuthentications *prometheus.Desc
|
||||
artifactDBFailures *prometheus.Desc
|
||||
avgArtifactDBQueryTime *prometheus.Desc
|
||||
configDBFailures *prometheus.Desc
|
||||
avgConfigDBQueryTime *prometheus.Desc
|
||||
federationMetadataRequests *prometheus.Desc
|
||||
}
|
||||
|
||||
// newADFSCollector constructs a new adfsCollector
|
||||
func newADFSCollector() (Collector, error) {
|
||||
const subsystem = "adfs"
|
||||
|
||||
return &adfsCollector{
|
||||
adLoginConnectionFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "ad_login_connection_failures_total"),
|
||||
"Total number of connection failures to an Active Directory domain controller",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
certificateAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "certificate_authentications_total"),
|
||||
"Total number of User Certificate authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
deviceAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "device_authentications_total"),
|
||||
"Total number of Device authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
extranetAccountLockouts: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "extranet_account_lockouts_total"),
|
||||
"Total number of Extranet Account Lockouts",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
federatedAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "federated_authentications_total"),
|
||||
"Total number of authentications from a federated source",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passportAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "passport_authentications_total"),
|
||||
"Total number of Microsoft Passport SSO authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passiveRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "passive_requests_total"),
|
||||
"Total number of passive (browser-based) requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passwordChangeFailed: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "password_change_failed_total"),
|
||||
"Total number of failed password changes",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passwordChangeSucceeded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "password_change_succeeded_total"),
|
||||
"Total number of successful password changes",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
tokenRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "token_requests_total"),
|
||||
"Total number of token requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
windowsIntegratedAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "windows_integrated_authentications_total"),
|
||||
"Total number of Windows integrated authentications (Kerberos/NTLM)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthAuthZRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_authorization_requests_total"),
|
||||
"Total number of incoming requests to the OAuth Authorization endpoint",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_authentication_success_total"),
|
||||
"Total number of successful OAuth client Authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientAuthenticationsFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_authentication_failure_total"),
|
||||
"Total number of failed OAuth client Authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientCredentialsRequestFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_credentials_failure_total"),
|
||||
"Total number of failed OAuth Client Credentials Requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientCredentialsRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_credentials_success_total"),
|
||||
"Total number of successful RP tokens issued for OAuth Client Credentials Requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientPrivateKeyJwtAuthenticationFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_privkey_jtw_authentication_failure_total"),
|
||||
"Total number of failed OAuth Client Private Key Jwt Authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientPrivateKeyJwtAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_privkey_jwt_authentications_success_total"),
|
||||
"Total number of successful OAuth Client Private Key Jwt Authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientSecretBasicAuthenticationFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_secret_basic_authentications_failure_total"),
|
||||
"Total number of failed OAuth Client Secret Basic Authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientSecretBasicAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_secret_basic_authentications_success_total"),
|
||||
"Total number of successful OAuth Client Secret Basic Authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientSecretPostAuthenticationFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_secret_post_authentications_failure_total"),
|
||||
"Total number of failed OAuth Client Secret Post Authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientSecretPostAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_secret_post_authentications_success_total"),
|
||||
"Total number of successful OAuth Client Secret Post Authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientWindowsIntegratedAuthenticationFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_windows_authentications_failure_total"),
|
||||
"Total number of failed OAuth Client Windows Integrated Authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthClientWindowsIntegratedAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_client_windows_authentications_success_total"),
|
||||
"Total number of successful OAuth Client Windows Integrated Authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthLogonCertificateRequestFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_logon_certificate_requests_failure_total"),
|
||||
"Total number of failed OAuth Logon Certificate Requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthLogonCertificateTokenRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_logon_certificate_token_requests_success_total"),
|
||||
"Total number of successful RP tokens issued for OAuth Logon Certificate Requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthPasswordGrantRequestFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_password_grant_requests_failure_total"),
|
||||
"Total number of failed OAuth Password Grant Requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthPasswordGrantRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_password_grant_requests_success_total"),
|
||||
"Total number of successful OAuth Password Grant Requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
oAuthTokenRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "oauth_token_requests_success_total"),
|
||||
"Total number of successful RP tokens issued over OAuth protocol",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
samlPTokenRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "samlp_token_requests_success_total"),
|
||||
"Total number of successful RP tokens issued over SAML-P protocol",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ssoAuthenticationFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "sso_authentications_failure_total"),
|
||||
"Total number of failed SSO authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ssoAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "sso_authentications_success_total"),
|
||||
"Total number of successful SSO authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
wsfedTokenRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "wsfed_token_requests_success_total"),
|
||||
"Total number of successful RP tokens issued over WS-Fed protocol",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
wstrustTokenRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "wstrust_token_requests_success_total"),
|
||||
"Total number of successful RP tokens issued over WS-Trust protocol",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
upAuthenticationFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "userpassword_authentications_failure_total"),
|
||||
"Total number of failed AD U/P authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
upAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "userpassword_authentications_success_total"),
|
||||
"Total number of successful AD U/P authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
externalAuthenticationFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "external_authentications_failure_total"),
|
||||
"Total number of failed authentications from external MFA providers",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
externalAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "external_authentications_success_total"),
|
||||
"Total number of successful authentications from external MFA providers",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
artifactDBFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "db_artifact_failure_total"),
|
||||
"Total number of failures connecting to the artifact database",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
avgArtifactDBQueryTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "db_artifact_query_time_seconds_total"),
|
||||
"Accumulator of time taken for an artifact database query",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
configDBFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "db_config_failure_total"),
|
||||
"Total number of failures connecting to the configuration database",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
avgConfigDBQueryTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "db_config_query_time_seconds_total"),
|
||||
"Accumulator of time taken for a configuration database query",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
federationMetadataRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "federation_metadata_requests_total"),
|
||||
"Total number of Federation Metadata requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type perflibADFS struct {
|
||||
AdLoginConnectionFailures float64 `perflib:"AD Login Connection Failures"`
|
||||
CertificateAuthentications float64 `perflib:"Certificate Authentications"`
|
||||
DeviceAuthentications float64 `perflib:"Device Authentications"`
|
||||
ExtranetAccountLockouts float64 `perflib:"Extranet Account Lockouts"`
|
||||
FederatedAuthentications float64 `perflib:"Federated Authentications"`
|
||||
PassportAuthentications float64 `perflib:"Microsoft Passport Authentications"`
|
||||
PassiveRequests float64 `perflib:"Passive Requests"`
|
||||
PasswordChangeFailed float64 `perflib:"Password Change Failed Requests"`
|
||||
PasswordChangeSucceeded float64 `perflib:"Password Change Successful Requests"`
|
||||
TokenRequests float64 `perflib:"Token Requests"`
|
||||
WindowsIntegratedAuthentications float64 `perflib:"Windows Integrated Authentications"`
|
||||
OAuthAuthZRequests float64 `perflib:"OAuth AuthZ Requests"`
|
||||
OAuthClientAuthentications float64 `perflib:"OAuth Client Authentications"`
|
||||
OAuthClientAuthenticationFailures float64 `perflib:"OAuth Client Authentications Failures"`
|
||||
OAuthClientCredentialRequestFailures float64 `perflib:"OAuth Client Credentials Request Failures"`
|
||||
OAuthClientCredentialRequests float64 `perflib:"OAuth Client Credentials Requests"`
|
||||
OAuthClientPrivKeyJWTAuthnFailures float64 `perflib:"OAuth Client Private Key Jwt Authentication Failures"`
|
||||
OAuthClientPrivKeyJWTAuthentications float64 `perflib:"OAuth Client Private Key Jwt Authentications"`
|
||||
OAuthClientBasicAuthnFailures float64 `perflib:"OAuth Client Secret Basic Authentication Failures"`
|
||||
OAuthClientBasicAuthentications float64 `perflib:"OAuth Client Secret Basic Authentication Requests"`
|
||||
OAuthClientSecretPostAuthnFailures float64 `perflib:"OAuth Client Secret Post Authentication Failures"`
|
||||
OAuthClientSecretPostAuthentications float64 `perflib:"OAuth Client Secret Post Authentications"`
|
||||
OAuthClientWindowsAuthnFailures float64 `perflib:"OAuth Client Windows Integrated Authentication Failures"`
|
||||
OAuthClientWindowsAuthentications float64 `perflib:"OAuth Client Windows Integrated Authentications"`
|
||||
OAuthLogonCertRequestFailures float64 `perflib:"OAuth Logon Certificate Request Failures"`
|
||||
OAuthLogonCertTokenRequests float64 `perflib:"OAuth Logon Certificate Token Requests"`
|
||||
OAuthPasswordGrantRequestFailures float64 `perflib:"OAuth Password Grant Request Failures"`
|
||||
OAuthPasswordGrantRequests float64 `perflib:"OAuth Password Grant Requests"`
|
||||
OAuthTokenRequests float64 `perflib:"OAuth Token Requests"`
|
||||
SAMLPTokenRequests float64 `perflib:"SAML-P Token Requests"`
|
||||
SSOAuthenticationFailures float64 `perflib:"SSO Authentication Failures"`
|
||||
SSOAuthentications float64 `perflib:"SSO Authentications"`
|
||||
WSFedTokenRequests float64 `perflib:"WS-Fed Token Requests"`
|
||||
WSTrustTokenRequests float64 `perflib:"WS-Trust Token Requests"`
|
||||
UsernamePasswordAuthnFailures float64 `perflib:"U/P Authentication Failures"`
|
||||
UsernamePasswordAuthentications float64 `perflib:"U/P Authentications"`
|
||||
ExternalAuthentications float64 `perflib:"External Authentications"`
|
||||
ExternalAuthNFailures float64 `perflib:"External Authentication Failures"`
|
||||
ArtifactDBFailures float64 `perflib:"Artifact Database Connection Failures"`
|
||||
AvgArtifactDBQueryTime float64 `perflib:"Average Artifact Database Query Time"`
|
||||
ConfigDBFailures float64 `perflib:"Configuration Database Connection Failures"`
|
||||
AvgConfigDBQueryTime float64 `perflib:"Average Config Database Query Time"`
|
||||
FederationMetadataRequests float64 `perflib:"Federation Metadata Requests"`
|
||||
}
|
||||
|
||||
func (c *adfsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var adfsData []perflibADFS
|
||||
err := unmarshalObject(ctx.perfObjects["AD FS"], &adfsData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.adLoginConnectionFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].AdLoginConnectionFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.certificateAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].CertificateAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.deviceAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].DeviceAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.extranetAccountLockouts,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ExtranetAccountLockouts,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.federatedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].FederatedAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passportAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PassportAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passiveRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PassiveRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passwordChangeFailed,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PasswordChangeFailed,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passwordChangeSucceeded,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PasswordChangeSucceeded,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.tokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].TokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.windowsIntegratedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].WindowsIntegratedAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthAuthZRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthAuthZRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientAuthenticationsFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientAuthenticationFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientCredentialsRequestFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientCredentialRequestFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientCredentialsRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientCredentialRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientPrivateKeyJwtAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientPrivKeyJWTAuthnFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientPrivateKeyJwtAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientPrivKeyJWTAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretBasicAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientBasicAuthnFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretBasicAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientBasicAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretPostAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientSecretPostAuthnFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientSecretPostAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientSecretPostAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientWindowsIntegratedAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientWindowsAuthnFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthClientWindowsIntegratedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthClientWindowsAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthLogonCertificateRequestFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthLogonCertRequestFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthLogonCertificateTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthLogonCertTokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthPasswordGrantRequestFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthPasswordGrantRequestFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthPasswordGrantRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthPasswordGrantRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.oAuthTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].OAuthTokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.samlPTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].SAMLPTokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ssoAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].SSOAuthenticationFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ssoAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].SSOAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.wsfedTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].WSFedTokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.wstrustTokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].WSTrustTokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.upAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].UsernamePasswordAuthnFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.upAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].UsernamePasswordAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.externalAuthenticationFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ExternalAuthNFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.externalAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ExternalAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.artifactDBFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ArtifactDBFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.avgArtifactDBQueryTime,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].AvgArtifactDBQueryTime*math.Pow(10, -8),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.configDBFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ConfigDBFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.avgConfigDBQueryTime,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].AvgConfigDBQueryTime*math.Pow(10, -8),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.federationMetadataRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].FederationMetadataRequests,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
9
collector/adfs_test.go
Normal file
9
collector/adfs_test.go
Normal file
@@ -0,0 +1,9 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkADFSCollector(b *testing.B) {
|
||||
benchmarkCollector(b, "adfs", newADFSCollector)
|
||||
}
|
||||
453
collector/cache.go
Normal file
453
collector/cache.go
Normal file
@@ -0,0 +1,453 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("cache", newCacheCollector, "Cache")
|
||||
}
|
||||
|
||||
// A CacheCollector is a Prometheus collector for Perflib Cache metrics
|
||||
type CacheCollector struct {
|
||||
AsyncCopyReadsTotal *prometheus.Desc
|
||||
AsyncDataMapsTotal *prometheus.Desc
|
||||
AsyncFastReadsTotal *prometheus.Desc
|
||||
AsyncMDLReadsTotal *prometheus.Desc
|
||||
AsyncPinReadsTotal *prometheus.Desc
|
||||
CopyReadHitsTotal *prometheus.Desc
|
||||
CopyReadsTotal *prometheus.Desc
|
||||
DataFlushesTotal *prometheus.Desc
|
||||
DataFlushPagesTotal *prometheus.Desc
|
||||
DataMapHitsPercent *prometheus.Desc
|
||||
DataMapPinsTotal *prometheus.Desc
|
||||
DataMapsTotal *prometheus.Desc
|
||||
DirtyPages *prometheus.Desc
|
||||
DirtyPageThreshold *prometheus.Desc
|
||||
FastReadNotPossiblesTotal *prometheus.Desc
|
||||
FastReadResourceMissesTotal *prometheus.Desc
|
||||
FastReadsTotal *prometheus.Desc
|
||||
LazyWriteFlushesTotal *prometheus.Desc
|
||||
LazyWritePagesTotal *prometheus.Desc
|
||||
MDLReadHitsTotal *prometheus.Desc
|
||||
MDLReadsTotal *prometheus.Desc
|
||||
PinReadHitsTotal *prometheus.Desc
|
||||
PinReadsTotal *prometheus.Desc
|
||||
ReadAheadsTotal *prometheus.Desc
|
||||
SyncCopyReadsTotal *prometheus.Desc
|
||||
SyncDataMapsTotal *prometheus.Desc
|
||||
SyncFastReadsTotal *prometheus.Desc
|
||||
SyncMDLReadsTotal *prometheus.Desc
|
||||
SyncPinReadsTotal *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewCacheCollector ...
|
||||
func newCacheCollector() (Collector, error) {
|
||||
const subsystem = "cache"
|
||||
return &CacheCollector{
|
||||
AsyncCopyReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "async_copy_reads_total"),
|
||||
"(AsyncCopyReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
AsyncDataMapsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "async_data_maps_total"),
|
||||
"(AsyncDataMapsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
AsyncFastReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "async_fast_reads_total"),
|
||||
"(AsyncFastReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
AsyncMDLReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "async_mdl_reads_total"),
|
||||
"(AsyncMDLReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
AsyncPinReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "async_pin_reads_total"),
|
||||
"(AsyncPinReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CopyReadHitsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "copy_read_hits_total"),
|
||||
"(CopyReadHitsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CopyReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "copy_reads_total"),
|
||||
"(CopyReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DataFlushesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "data_flushes_total"),
|
||||
"(DataFlushesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DataFlushPagesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "data_flush_pages_total"),
|
||||
"(DataFlushPagesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DataMapHitsPercent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "data_map_hits_percent"),
|
||||
"(DataMapHitsPercent)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DataMapPinsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "data_map_pins_total"),
|
||||
"(DataMapPinsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DataMapsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "data_maps_total"),
|
||||
"(DataMapsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DirtyPages: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dirty_pages"),
|
||||
"(DirtyPages)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DirtyPageThreshold: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dirty_page_threshold"),
|
||||
"(DirtyPageThreshold)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FastReadNotPossiblesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "fast_read_not_possibles_total"),
|
||||
"(FastReadNotPossiblesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FastReadResourceMissesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "fast_read_resource_misses_total"),
|
||||
"(FastReadResourceMissesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FastReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "fast_reads_total"),
|
||||
"(FastReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
LazyWriteFlushesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "lazy_write_flushes_total"),
|
||||
"(LazyWriteFlushesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
LazyWritePagesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "lazy_write_pages_total"),
|
||||
"(LazyWritePagesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MDLReadHitsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mdl_read_hits_total"),
|
||||
"(MDLReadHitsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MDLReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mdl_reads_total"),
|
||||
"(MDLReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PinReadHitsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pin_read_hits_total"),
|
||||
"(PinReadHitsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PinReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pin_reads_total"),
|
||||
"(PinReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ReadAheadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "read_aheads_total"),
|
||||
"(ReadAheadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SyncCopyReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "sync_copy_reads_total"),
|
||||
"(SyncCopyReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SyncDataMapsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "sync_data_maps_total"),
|
||||
"(SyncDataMapsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SyncFastReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "sync_fast_reads_total"),
|
||||
"(SyncFastReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SyncMDLReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "sync_mdl_reads_total"),
|
||||
"(SyncMDLReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SyncPinReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "sync_pin_reads_total"),
|
||||
"(SyncPinReadsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect implements the Collector interface
|
||||
func (c *CacheCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting cache metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib "Cache":
|
||||
// - https://docs.microsoft.com/en-us/previous-versions/aa394267(v=vs.85)
|
||||
type perflibCache struct {
|
||||
AsyncCopyReadsTotal float64 `perflib:"Async Copy Reads/sec"`
|
||||
AsyncDataMapsTotal float64 `perflib:"Async Data Maps/sec"`
|
||||
AsyncFastReadsTotal float64 `perflib:"Async Fast Reads/sec"`
|
||||
AsyncMDLReadsTotal float64 `perflib:"Async MDL Reads/sec"`
|
||||
AsyncPinReadsTotal float64 `perflib:"Async Pin Reads/sec"`
|
||||
CopyReadHitsTotal float64 `perflib:"Copy Read Hits %"`
|
||||
CopyReadsTotal float64 `perflib:"Copy Reads/sec"`
|
||||
DataFlushesTotal float64 `perflib:"Data Flushes/sec"`
|
||||
DataFlushPagesTotal float64 `perflib:"Data Flush Pages/sec"`
|
||||
DataMapHitsPercent float64 `perflib:"Data Map Hits %"`
|
||||
DataMapPinsTotal float64 `perflib:"Data Map Pins/sec"`
|
||||
DataMapsTotal float64 `perflib:"Data Maps/sec"`
|
||||
DirtyPages float64 `perflib:"Dirty Pages"`
|
||||
DirtyPageThreshold float64 `perflib:"Dirty Page Threshold"`
|
||||
FastReadNotPossiblesTotal float64 `perflib:"Fast Read Not Possibles/sec"`
|
||||
FastReadResourceMissesTotal float64 `perflib:"Fast Read Resource Misses/sec"`
|
||||
FastReadsTotal float64 `perflib:"Fast Reads/sec"`
|
||||
LazyWriteFlushesTotal float64 `perflib:"Lazy Write Flushes/sec"`
|
||||
LazyWritePagesTotal float64 `perflib:"Lazy Write Pages/sec"`
|
||||
MDLReadHitsTotal float64 `perflib:"MDL Read Hits %"`
|
||||
MDLReadsTotal float64 `perflib:"MDL Reads/sec"`
|
||||
PinReadHitsTotal float64 `perflib:"Pin Read Hits %"`
|
||||
PinReadsTotal float64 `perflib:"Pin Reads/sec"`
|
||||
ReadAheadsTotal float64 `perflib:"Read Aheads/sec"`
|
||||
SyncCopyReadsTotal float64 `perflib:"Sync Copy Reads/sec"`
|
||||
SyncDataMapsTotal float64 `perflib:"Sync Data Maps/sec"`
|
||||
SyncFastReadsTotal float64 `perflib:"Sync Fast Reads/sec"`
|
||||
SyncMDLReadsTotal float64 `perflib:"Sync MDL Reads/sec"`
|
||||
SyncPinReadsTotal float64 `perflib:"Sync Pin Reads/sec"`
|
||||
}
|
||||
|
||||
func (c *CacheCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []perflibCache // Single-instance class, array is required but will have single entry.
|
||||
if err := unmarshalObject(ctx.perfObjects["Cache"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AsyncCopyReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].AsyncCopyReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AsyncDataMapsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].AsyncDataMapsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AsyncFastReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].AsyncFastReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AsyncMDLReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].AsyncMDLReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AsyncPinReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].AsyncPinReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CopyReadHitsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CopyReadHitsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CopyReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].CopyReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DataFlushesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].DataFlushesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DataFlushPagesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].DataFlushPagesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DataMapHitsPercent,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].DataMapHitsPercent,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DataMapPinsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].DataMapPinsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DataMapsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].DataMapsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DirtyPages,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].DirtyPages,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DirtyPageThreshold,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].DirtyPageThreshold,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FastReadNotPossiblesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].FastReadNotPossiblesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FastReadResourceMissesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].FastReadResourceMissesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FastReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].FastReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LazyWriteFlushesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].LazyWriteFlushesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LazyWritePagesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].LazyWritePagesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MDLReadHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].MDLReadHitsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MDLReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].MDLReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PinReadHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PinReadHitsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PinReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PinReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadAheadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].ReadAheadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SyncCopyReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].SyncCopyReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SyncDataMapsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].SyncDataMapsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SyncFastReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].SyncFastReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SyncMDLReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].SyncMDLReadsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SyncPinReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].SyncPinReadsTotal,
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
154
collector/collector.go
Normal file
154
collector/collector.go
Normal file
@@ -0,0 +1,154 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/leoluk/perflib_exporter/perflib"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/sys/windows/registry"
|
||||
)
|
||||
|
||||
// ...
|
||||
const (
|
||||
// TODO: Make package-local
|
||||
Namespace = "windows"
|
||||
|
||||
// Conversion factors
|
||||
ticksToSecondsScaleFactor = 1 / 1e7
|
||||
windowsEpoch = 116444736000000000
|
||||
)
|
||||
|
||||
// getWindowsVersion reads the version number of the OS from the Registry
|
||||
// See https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version
|
||||
func getWindowsVersion() float64 {
|
||||
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
|
||||
if err != nil {
|
||||
log.Warn("Couldn't open registry", err)
|
||||
return 0
|
||||
}
|
||||
defer func() {
|
||||
err = k.Close()
|
||||
if err != nil {
|
||||
log.Warnf("Failed to close registry key: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
currentv, _, err := k.GetStringValue("CurrentVersion")
|
||||
if err != nil {
|
||||
log.Warn("Couldn't open registry to determine current Windows version:", err)
|
||||
return 0
|
||||
}
|
||||
|
||||
currentv_flt, err := strconv.ParseFloat(currentv, 64)
|
||||
|
||||
log.Debugf("Detected Windows version %f\n", currentv_flt)
|
||||
|
||||
return currentv_flt
|
||||
}
|
||||
|
||||
type collectorBuilder func() (Collector, error)
|
||||
|
||||
var (
|
||||
builders = make(map[string]collectorBuilder)
|
||||
perfCounterDependencies = make(map[string]string)
|
||||
)
|
||||
|
||||
func registerCollector(name string, builder collectorBuilder, perfCounterNames ...string) {
|
||||
builders[name] = builder
|
||||
addPerfCounterDependencies(name, perfCounterNames)
|
||||
}
|
||||
|
||||
func addPerfCounterDependencies(name string, perfCounterNames []string) {
|
||||
perfIndicies := make([]string, 0, len(perfCounterNames))
|
||||
for _, cn := range perfCounterNames {
|
||||
perfIndicies = append(perfIndicies, MapCounterToIndex(cn))
|
||||
}
|
||||
perfCounterDependencies[name] = strings.Join(perfIndicies, " ")
|
||||
}
|
||||
|
||||
func Available() []string {
|
||||
cs := make([]string, 0, len(builders))
|
||||
for c := range builders {
|
||||
cs = append(cs, c)
|
||||
}
|
||||
return cs
|
||||
}
|
||||
func Build(collector string) (Collector, error) {
|
||||
builder, exists := builders[collector]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("Unknown collector %q", collector)
|
||||
}
|
||||
return builder()
|
||||
}
|
||||
func getPerfQuery(collectors []string) string {
|
||||
parts := make([]string, 0, len(collectors))
|
||||
for _, c := range collectors {
|
||||
if p := perfCounterDependencies[c]; p != "" {
|
||||
parts = append(parts, p)
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, " ")
|
||||
}
|
||||
|
||||
// Collector is the interface a collector has to implement.
|
||||
type Collector interface {
|
||||
// Get new metrics and expose them via prometheus registry.
|
||||
Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (err error)
|
||||
}
|
||||
|
||||
type ScrapeContext struct {
|
||||
perfObjects map[string]*perflib.PerfObject
|
||||
}
|
||||
|
||||
// PrepareScrapeContext creates a ScrapeContext to be used during a single scrape
|
||||
func PrepareScrapeContext(collectors []string) (*ScrapeContext, error) {
|
||||
q := getPerfQuery(collectors) // TODO: Memoize
|
||||
objs, err := getPerflibSnapshot(q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ScrapeContext{objs}, nil
|
||||
}
|
||||
func boolToFloat(b bool) float64 {
|
||||
if b {
|
||||
return 1.0
|
||||
}
|
||||
return 0.0
|
||||
}
|
||||
|
||||
func find(slice []string, val string) bool {
|
||||
for _, item := range slice {
|
||||
if item == val {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Used by more complex collectors where user input specifies enabled child collectors.
|
||||
// Splits provided child collectors and deduplicate.
|
||||
func expandEnabledChildCollectors(enabled string) []string {
|
||||
separated := strings.Split(enabled, ",")
|
||||
unique := map[string]bool{}
|
||||
for _, s := range separated {
|
||||
if s != "" {
|
||||
unique[s] = true
|
||||
}
|
||||
}
|
||||
result := make([]string, 0, len(unique))
|
||||
for s := range unique {
|
||||
result = append(result, s)
|
||||
}
|
||||
// Ensure result is ordered, to prevent test failure
|
||||
sort.Strings(result)
|
||||
return result
|
||||
}
|
||||
|
||||
func milliSecToSec(t float64) float64 {
|
||||
return t / 1000
|
||||
}
|
||||
60
collector/collector_test.go
Normal file
60
collector/collector_test.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func TestExpandChildCollectors(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
input string
|
||||
expectedOutput []string
|
||||
}{
|
||||
{
|
||||
name: "simple",
|
||||
input: "testing1,testing2,testing3",
|
||||
expectedOutput: []string{"testing1", "testing2", "testing3"},
|
||||
},
|
||||
{
|
||||
name: "duplicate",
|
||||
input: "testing1,testing2,testing2,testing3",
|
||||
expectedOutput: []string{"testing1", "testing2", "testing3"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
output := expandEnabledChildCollectors(c.input)
|
||||
if !reflect.DeepEqual(output, c.expectedOutput) {
|
||||
t.Errorf("Output mismatch, expected %+v, got %+v", c.expectedOutput, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkCollector(b *testing.B, name string, collectFunc func() (Collector, error)) {
|
||||
// Create perflib scrape context. Some perflib collectors required a correct context,
|
||||
// or will fail during benchmark.
|
||||
scrapeContext, err := PrepareScrapeContext([]string{name})
|
||||
if err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
c, err := collectFunc()
|
||||
if err != nil {
|
||||
b.Error(err)
|
||||
}
|
||||
|
||||
metrics := make(chan prometheus.Metric)
|
||||
go func() {
|
||||
for {
|
||||
<-metrics
|
||||
}
|
||||
}()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
c.Collect(scrapeContext, metrics) //nolint:errcheck
|
||||
}
|
||||
}
|
||||
345
collector/container.go
Normal file
345
collector/container.go
Normal file
@@ -0,0 +1,345 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("container", NewContainerMetricsCollector)
|
||||
}
|
||||
|
||||
// A ContainerMetricsCollector is a Prometheus collector for containers metrics
|
||||
type ContainerMetricsCollector struct {
|
||||
// Presence
|
||||
ContainerAvailable *prometheus.Desc
|
||||
|
||||
// Number of containers
|
||||
ContainersCount *prometheus.Desc
|
||||
// memory
|
||||
UsageCommitBytes *prometheus.Desc
|
||||
UsageCommitPeakBytes *prometheus.Desc
|
||||
UsagePrivateWorkingSetBytes *prometheus.Desc
|
||||
|
||||
// CPU
|
||||
RuntimeTotal *prometheus.Desc
|
||||
RuntimeUser *prometheus.Desc
|
||||
RuntimeKernel *prometheus.Desc
|
||||
|
||||
// Network
|
||||
BytesReceived *prometheus.Desc
|
||||
BytesSent *prometheus.Desc
|
||||
PacketsReceived *prometheus.Desc
|
||||
PacketsSent *prometheus.Desc
|
||||
DroppedPacketsIncoming *prometheus.Desc
|
||||
DroppedPacketsOutgoing *prometheus.Desc
|
||||
|
||||
// Storage
|
||||
ReadCountNormalized *prometheus.Desc
|
||||
ReadSizeBytes *prometheus.Desc
|
||||
WriteCountNormalized *prometheus.Desc
|
||||
WriteSizeBytes *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewContainerMetricsCollector constructs a new ContainerMetricsCollector
|
||||
func NewContainerMetricsCollector() (Collector, error) {
|
||||
const subsystem = "container"
|
||||
return &ContainerMetricsCollector{
|
||||
ContainerAvailable: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "available"),
|
||||
"Available",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
ContainersCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "count"),
|
||||
"Number of containers",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
UsageCommitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_bytes"),
|
||||
"Memory Usage Commit Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsageCommitPeakBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_peak_bytes"),
|
||||
"Memory Usage Commit Peak Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsagePrivateWorkingSetBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_private_working_set_bytes"),
|
||||
"Memory Usage Private Working Set Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_total"),
|
||||
"Total Run time in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeUser: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_usermode"),
|
||||
"Run Time in User mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeKernel: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_kernelmode"),
|
||||
"Run time in Kernel mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
BytesReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_bytes_total"),
|
||||
"Bytes Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
BytesSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_bytes_total"),
|
||||
"Bytes Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_total"),
|
||||
"Packets Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_total"),
|
||||
"Packets Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsIncoming: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_dropped_total"),
|
||||
"Dropped Incoming Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsOutgoing: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_dropped_total"),
|
||||
"Dropped Outgoing Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
ReadCountNormalized: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "storage_read_count_normalized_total"),
|
||||
"Read Count Normalized",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
ReadSizeBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "storage_read_size_bytes_total"),
|
||||
"Read Size Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
WriteCountNormalized: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "storage_write_count_normalized_total"),
|
||||
"Write Count Normalized",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
WriteSizeBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "storage_write_size_bytes_total"),
|
||||
"Write Size Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ContainerMetricsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting ContainerMetricsCollector metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// containerClose closes the container resource
|
||||
func containerClose(c hcsshim.Container) {
|
||||
err := c.Close()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
|
||||
// Types Container is passed to get the containers compute systems only
|
||||
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
|
||||
if err != nil {
|
||||
log.Error("Err in Getting containers:", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
count := len(containers)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainersCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(count),
|
||||
)
|
||||
if count == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
for _, containerDetails := range containers {
|
||||
container, err := hcsshim.OpenContainer(containerDetails.ID)
|
||||
if container != nil {
|
||||
defer containerClose(container)
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("err in opening container: ", containerDetails.ID, err)
|
||||
continue
|
||||
}
|
||||
|
||||
cstats, err := container.Statistics()
|
||||
if err != nil {
|
||||
log.Error("err in fetching container Statistics: ", containerDetails.ID, err)
|
||||
continue
|
||||
}
|
||||
containerIdWithPrefix := getContainerIdWithPrefix(containerDetails)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainerAvailable,
|
||||
prometheus.CounterValue,
|
||||
1,
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitBytes),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitPeakBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitPeakBytes),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsagePrivateWorkingSetBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsagePrivateWorkingSetBytes),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.TotalRuntime100ns)*ticksToSecondsScaleFactor,
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeUser,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeUser100ns)*ticksToSecondsScaleFactor,
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeKernel,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeKernel100ns)*ticksToSecondsScaleFactor,
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
|
||||
if len(cstats.Network) == 0 {
|
||||
log.Info("No Network Stats for container: ", containerDetails.ID)
|
||||
continue
|
||||
}
|
||||
|
||||
networkStats := cstats.Network
|
||||
|
||||
for _, networkInterface := range networkStats {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesReceived),
|
||||
containerIdWithPrefix, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesSent),
|
||||
containerIdWithPrefix, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsReceived),
|
||||
containerIdWithPrefix, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsSent),
|
||||
containerIdWithPrefix, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsIncoming,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsIncoming),
|
||||
containerIdWithPrefix, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsOutgoing,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsOutgoing),
|
||||
containerIdWithPrefix, networkInterface.EndpointId,
|
||||
)
|
||||
break
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadCountNormalized,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Storage.ReadCountNormalized),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadSizeBytes,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Storage.ReadSizeBytes),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteCountNormalized,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Storage.WriteCountNormalized),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteSizeBytes,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Storage.WriteSizeBytes),
|
||||
containerIdWithPrefix,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func getContainerIdWithPrefix(containerDetails hcsshim.ContainerProperties) string {
|
||||
switch containerDetails.Owner {
|
||||
case "containerd-shim-runhcs-v1.exe":
|
||||
return "containerd://" + containerDetails.ID
|
||||
default:
|
||||
// default to docker or if owner is not set
|
||||
return "docker://" + containerDetails.ID
|
||||
}
|
||||
}
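The owner-to-prefix mapping above is easy to pin down with a table-driven test. The sketch below is not part of this change set; it only illustrates the expected behaviour, assuming it lives in the collector package next to the benchmark added in collector/container_test.go below, and it would need the testing and github.com/Microsoft/hcsshim imports.

// TestGetContainerIdWithPrefix is an illustrative sketch, not part of this diff:
// it checks which runtime prefix is chosen for each container owner.
func TestGetContainerIdWithPrefix(t *testing.T) {
	cases := []struct {
		owner, id, want string
	}{
		{"containerd-shim-runhcs-v1.exe", "abc123", "containerd://abc123"},
		{"dockerd.exe", "abc123", "docker://abc123"},
		{"", "abc123", "docker://abc123"}, // owner not set also falls back to docker
	}
	for _, tc := range cases {
		got := getContainerIdWithPrefix(hcsshim.ContainerProperties{ID: tc.id, Owner: tc.owner})
		if got != tc.want {
			t.Errorf("owner %q: got %q, want %q", tc.owner, got, tc.want)
		}
	}
}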
collector/container_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkContainerCollector(b *testing.B) {
	benchmarkCollector(b, "container", NewContainerMetricsCollector)
}
collector/cpu.go (447 changed lines)
@@ -1,30 +1,89 @@
// returns data points from Win32_PerfRawData_PerfOS_Processor
|
||||
// https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx - Win32_PerfRawData_PerfOS_Processor class
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["cpu"] = NewCPUCollector
|
||||
var deps string
|
||||
// See below for 6.05 magic value
|
||||
if getWindowsVersion() > 6.05 {
|
||||
deps = "Processor Information"
|
||||
} else {
|
||||
deps = "Processor"
|
||||
}
|
||||
registerCollector("cpu", newCPUCollector, deps)
|
||||
}
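The comparison against 6.05 here (and again in newCPUCollector further down) simply splits Windows NT 6.0 (Server 2008 / Vista), which only exposes the "Processor" perflib object, from NT 6.1 and later (Server 2008 R2 / Windows 7), which also exposes the richer "Processor Information" object. Pulled out into a standalone helper purely for illustration (choosePerflibSource is not part of this change):

// choosePerflibSource is illustrative only; the exporter performs the same
// comparison inline against getWindowsVersion().
func choosePerflibSource(version float64) string {
	if version > 6.05 {
		return "Processor Information" // Windows 2008 R2 / Windows 7 and newer
	}
	return "Processor" // Windows 2008 / Vista and earlier
}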
|
||||
|
||||
// A CPUCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfOS_Processor metrics
|
||||
type CPUCollector struct {
|
||||
type cpuCollectorBasic struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
}
|
||||
type cpuCollectorFull struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
ClockInterruptsTotal *prometheus.Desc
|
||||
IdleBreakEventsTotal *prometheus.Desc
|
||||
ParkingStatus *prometheus.Desc
|
||||
ProcessorFrequencyMHz *prometheus.Desc
|
||||
ProcessorMaxFrequencyMHz *prometheus.Desc
|
||||
ProcessorPerformance *prometheus.Desc
|
||||
ProcessorMPerf *prometheus.Desc
|
||||
ProcessorRTC *prometheus.Desc
|
||||
ProcessorUtility *prometheus.Desc
|
||||
ProcessorPrivUtility *prometheus.Desc
|
||||
}
|
||||
|
||||
func NewCPUCollector() (Collector, error) {
|
||||
// newCPUCollector constructs a new cpuCollector, appropriate for the running OS
|
||||
func newCPUCollector() (Collector, error) {
|
||||
const subsystem = "cpu"
|
||||
return &CPUCollector{
|
||||
|
||||
version := getWindowsVersion()
|
||||
// For Windows 2008 (version 6.0) or earlier we only have the "Processor"
|
||||
// class. As of Windows 2008 R2 (version 6.1) the more detailed
|
||||
// "Processor Information" set is available (although some of the counters
|
||||
// are added in later versions, so we aren't guaranteed to get all of
|
||||
// them).
|
||||
// Value 6.05 was selected to split between Windows versions.
|
||||
if version < 6.05 {
|
||||
return &cpuCollectorBasic{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
[]string{"core", "state"},
|
||||
nil,
|
||||
),
|
||||
TimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "time_total"),
|
||||
"Time that processor spent in different modes (dpc, idle, interrupt, privileged, user)",
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
DPCsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dpcs_total"),
|
||||
"Total number of received and serviced deferred procedure calls (DPCs)",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &cpuCollectorFull{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
@@ -33,11 +92,10 @@ func NewCPUCollector() (Collector, error) {
|
||||
),
|
||||
TimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "time_total"),
|
||||
"Time that processor spent in different modes (idle, user, system, ...)",
|
||||
"Time that processor spent in different modes (dpc, idle, interrupt, privileged, user)",
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
@@ -50,162 +108,323 @@ func NewCPUCollector() (Collector, error) {
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ClockInterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "clock_interrupts_total"),
|
||||
"Total number of received and serviced clock tick interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
IdleBreakEventsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "idle_break_events_total"),
|
||||
"Total number of time processor was woken from idle",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ParkingStatus: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "parking_status"),
|
||||
"Parking Status represents whether a processor is parked or not",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorFrequencyMHz: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "core_frequency_mhz"),
|
||||
"Core frequency in megahertz",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorPerformance: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processor_performance_total"),
|
||||
"Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorMPerf: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processor_mperf_total"),
|
||||
"Processor MPerf is the number of TSC ticks incremented while executing instructions",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorRTC: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processor_rtc_total"),
|
||||
"Processor RTC represents the number of RTC ticks made since the system booted. It should consistently be 64e6, and can be used to properly derive Processor Utility Rate",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorUtility: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processor_utility_total"),
|
||||
"Processor Utility represents is the amount of time the core spends executing instructions",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorPrivUtility: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processor_privileged_utility_total"),
|
||||
"Processor Privilieged Utility represents is the amount of time the core has spent executing instructions inside the kernel",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *CPUCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Println("[ERROR] failed collecting os metrics:", desc, err)
|
||||
type perflibProcessor struct {
|
||||
Name string
|
||||
C1Transitions float64 `perflib:"C1 Transitions/sec"`
|
||||
C2Transitions float64 `perflib:"C2 Transitions/sec"`
|
||||
C3Transitions float64 `perflib:"C3 Transitions/sec"`
|
||||
DPCRate float64 `perflib:"DPC Rate"`
|
||||
DPCsQueued float64 `perflib:"DPCs Queued/sec"`
|
||||
Interrupts float64 `perflib:"Interrupts/sec"`
|
||||
PercentC1Time float64 `perflib:"% C1 Time"`
PercentC2Time float64 `perflib:"% C2 Time"`
PercentC3Time float64 `perflib:"% C3 Time"`
|
||||
PercentDPCTime float64 `perflib:"% DPC Time"`
|
||||
PercentIdleTime float64 `perflib:"% Idle Time"`
|
||||
PercentInterruptTime float64 `perflib:"% Interrupt Time"`
|
||||
PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
|
||||
PercentProcessorTime float64 `perflib:"% Processor Time"`
|
||||
PercentUserTime float64 `perflib:"% User Time"`
|
||||
}
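perflibProcessor follows the repository's perflib convention: each exported field carries a perflib struct tag naming the counter it should be filled from, and unmarshalObject copies the pre-scraped object out of ctx.perfObjects into a slice of such structs. A minimal, purely illustrative sketch of the same pattern with a cut-down struct (miniProcessor and collectMini are not part of this change):

// miniProcessor shows the tag-driven mapping with a single counter.
type miniProcessor struct {
	Name           string
	PercentDPCTime float64 `perflib:"% DPC Time"`
}

// collectMini unmarshals the "Processor" perflib object into miniProcessor rows.
func collectMini(ctx *ScrapeContext) ([]miniProcessor, error) {
	rows := make([]miniProcessor, 0)
	if err := unmarshalObject(ctx.perfObjects["Processor"], &rows); err != nil {
		return nil, err
	}
	return rows, nil
}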
|
||||
|
||||
func (c *cpuCollectorBasic) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessor, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_PerfOS_Processor struct {
|
||||
Name string
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
DPCRate uint32
|
||||
DPCsQueuedPersec uint32
|
||||
InterruptsPersec uint32
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentUserTime uint64
|
||||
}
|
||||
|
||||
/* NOTE: This is an alternative class, but it is not as widely available. Decide which to use
|
||||
type Win32_PerfRawData_Counters_ProcessorInformation struct {
|
||||
Name string
|
||||
AverageIdleTime uint64
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
ClockInterruptsPersec uint64
|
||||
DPCRate uint64
|
||||
DPCsQueuedPersec uint64
|
||||
IdleBreakEventsPersec uint64
|
||||
InterruptsPersec uint64
|
||||
ParkingStatus uint64
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentofMaximumFrequency uint64
|
||||
PercentPerformanceLimit uint64
|
||||
PercentPriorityTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentPrivilegedUtility uint64
|
||||
PercentProcessorPerformance uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentProcessorUtility uint64
|
||||
PercentUserTime uint64
|
||||
PerformanceLimitFlags uint64
|
||||
ProcessorFrequency uint64
|
||||
ProcessorStateFlags uint64
|
||||
}*/
|
||||
|
||||
func (c *CPUCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfOS_Processor
|
||||
q := wmi.CreateQuery(&dst, "")
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, data := range dst {
|
||||
if strings.Contains(data.Name, "_Total") {
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
|
||||
core := data.Name
|
||||
|
||||
// These are only available from Win32_PerfRawData_Counters_ProcessorInformation, which is only available from Win2008R2+
|
||||
/*ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorFrequency,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.ProcessorFrequency),
|
||||
socket, core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MaximumFrequency,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentofMaximumFrequency)/100*float64(data.ProcessorFrequency),
|
||||
socket, core,
|
||||
)*/
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC1Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC1Time,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC2Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC2Time,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentC3Time)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC3Time,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentIdleTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentIdleTime,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentInterruptTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentInterruptTime,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentDPCTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentDPCTime,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentPrivilegedTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentPrivilegedTime,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(data.PercentUserTime)*ticksToSecondsScaleFactor,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentUserTime,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.InterruptsPersec),
|
||||
cpu.Interrupts,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(data.DPCsQueuedPersec),
|
||||
cpu.DPCsQueued,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
type perflibProcessorInformation struct {
|
||||
Name string
|
||||
C1TimeSeconds float64 `perflib:"% C1 Time"`
|
||||
C2TimeSeconds float64 `perflib:"% C2 Time"`
|
||||
C3TimeSeconds float64 `perflib:"% C3 Time"`
|
||||
C1TransitionsTotal float64 `perflib:"C1 Transitions/sec"`
|
||||
C2TransitionsTotal float64 `perflib:"C2 Transitions/sec"`
|
||||
C3TransitionsTotal float64 `perflib:"C3 Transitions/sec"`
|
||||
ClockInterruptsTotal float64 `perflib:"Clock Interrupts/sec"`
|
||||
DPCsQueuedTotal float64 `perflib:"DPCs Queued/sec"`
|
||||
DPCTimeSeconds float64 `perflib:"% DPC Time"`
|
||||
IdleBreakEventsTotal float64 `perflib:"Idle Break Events/sec"`
|
||||
IdleTimeSeconds float64 `perflib:"% Idle Time"`
|
||||
InterruptsTotal float64 `perflib:"Interrupts/sec"`
|
||||
InterruptTimeSeconds float64 `perflib:"% Interrupt Time"`
|
||||
ParkingStatus float64 `perflib:"Parking Status"`
|
||||
PerformanceLimitPercent float64 `perflib:"% Performance Limit"`
|
||||
PriorityTimeSeconds float64 `perflib:"% Priority Time"`
|
||||
PrivilegedTimeSeconds float64 `perflib:"% Privileged Time"`
|
||||
PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
|
||||
ProcessorFrequencyMHz float64 `perflib:"Processor Frequency"`
|
||||
ProcessorPerformance float64 `perflib:"% Processor Performance"`
|
||||
ProcessorMPerf float64 `perflib:"% Processor Performance,secondvalue"`
|
||||
ProcessorTimeSeconds float64 `perflib:"% Processor Time"`
|
||||
ProcessorUtilityRate float64 `perflib:"% Processor Utility"`
|
||||
ProcessorRTC float64 `perflib:"% Processor Utility,secondvalue"`
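// The ",secondvalue" option on the ProcessorMPerf and ProcessorRTC tags above selects
// the counter's secondary raw value; these presumably act as the denominators when
// performance and utility ratios are derived from the exported counters.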
|
||||
UserTimeSeconds float64 `perflib:"% User Time"`
|
||||
}
|
||||
|
||||
func (c *cpuCollectorFull) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessorInformation, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor Information"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C1TimeSeconds,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C2TimeSeconds,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C3TimeSeconds,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleTimeSeconds,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptTimeSeconds,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCTimeSeconds,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PrivilegedTimeSeconds,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.UserTimeSeconds,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCsQueuedTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ClockInterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.ClockInterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IdleBreakEventsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleBreakEventsTotal,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ParkingStatus,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ParkingStatus,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorFrequencyMHz,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorFrequencyMHz,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorPerformance,
|
||||
prometheus.CounterValue,
|
||||
cpu.ProcessorPerformance,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorMPerf,
|
||||
prometheus.CounterValue,
|
||||
cpu.ProcessorMPerf,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorRTC,
|
||||
prometheus.CounterValue,
|
||||
cpu.ProcessorRTC,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorUtility,
|
||||
prometheus.CounterValue,
|
||||
cpu.ProcessorUtilityRate,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorPrivUtility,
|
||||
prometheus.CounterValue,
|
||||
cpu.PrivilegedUtilitySeconds,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
collector/cpu_info.go (new file, 97 lines)
@@ -0,0 +1,97 @@
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("cpu_info", newCpuInfoCollector)
|
||||
}
|
||||
|
||||
// If you are adding additional labels to the metric, make sure that they get added in here as well. See below for explanation.
|
||||
const (
|
||||
win32ProcessorQuery = "SELECT Architecture, DeviceId, Description, Family, L2CacheSize, L3CacheSize, Name FROM Win32_Processor"
|
||||
)
|
||||
|
||||
// A CpuInfoCollector is a Prometheus collector for a few WMI metrics in Win32_Processor
|
||||
type CpuInfoCollector struct {
|
||||
CpuInfo *prometheus.Desc
|
||||
}
|
||||
|
||||
func newCpuInfoCollector() (Collector, error) {
|
||||
return &CpuInfoCollector{
|
||||
CpuInfo: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, "", "cpu_info"),
|
||||
"Labeled CPU information as provided provided by Win32_Processor",
|
||||
[]string{
|
||||
"architecture",
|
||||
"device_id",
|
||||
"description",
|
||||
"family",
|
||||
"l2_cache_size",
|
||||
"l3_cache_size",
|
||||
"name"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type win32_Processor struct {
|
||||
Architecture uint32
|
||||
DeviceID string
|
||||
Description string
|
||||
Family uint16
|
||||
L2CacheSize uint32
|
||||
L3CacheSize uint32
|
||||
Name string
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *CpuInfoCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting cpu_info metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *CpuInfoCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []win32_Processor
|
||||
// We use a static query here because the provided methods in wmi.go all issue a SELECT *;
|
||||
// This results in the time consuming LoadPercentage field being read which seems to measure each CPU
|
||||
// serially over a 1 second interval, so the scrape time is at least 1s * num_sockets
|
||||
if err := wmi.Query(win32ProcessorQuery, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
// Some CPUs end up exposing trailing spaces for certain strings, so clean them up
|
||||
for _, processor := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CpuInfo,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
strconv.Itoa(int(processor.Architecture)),
|
||||
strings.TrimRight(processor.DeviceID, " "),
|
||||
strings.TrimRight(processor.Description, " "),
|
||||
strconv.Itoa(int(processor.Family)),
|
||||
strconv.Itoa(int(processor.L2CacheSize)),
|
||||
strconv.Itoa(int(processor.L3CacheSize)),
|
||||
strings.TrimRight(processor.Name, " "),
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
collector/cpu_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkCPUCollector(b *testing.B) {
	benchmarkCollector(b, "cpu", newCPUCollector)
}
@@ -1,23 +1,24 @@
|
||||
// returns data points from Win32_ComputerSystem
|
||||
// https://msdn.microsoft.com/en-us/library/aa394102 - Win32_ComputerSystem class
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"log"
|
||||
"github.com/prometheus-community/windows_exporter/headers/sysinfoapi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["cs"] = NewCSCollector
|
||||
registerCollector("cs", NewCSCollector)
|
||||
}
|
||||
|
||||
// A CSCollector is a Prometheus collector for WMI metrics
|
||||
type CSCollector struct {
|
||||
PhysicalMemoryBytes *prometheus.Desc
|
||||
LogicalProcessors *prometheus.Desc
|
||||
Hostname *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewCSCollector ...
|
||||
@@ -37,40 +38,70 @@ func NewCSCollector() (Collector, error) {
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
Hostname: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "hostname"),
|
||||
"Labeled system hostname information as provided by ComputerSystem.DNSHostName and ComputerSystem.Domain",
|
||||
[]string{
|
||||
"hostname",
|
||||
"domain",
|
||||
"fqdn"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *CSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *CSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Println("[ERROR] failed collecting cs metrics:", desc, err)
|
||||
log.Error("failed collecting cs metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_ComputerSystem struct {
|
||||
NumberOfLogicalProcessors uint32
|
||||
TotalPhysicalMemory uint64
|
||||
}
|
||||
|
||||
func (c *CSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_ComputerSystem
|
||||
if err := wmi.Query(wmi.CreateQuery(&dst, ""), &dst); err != nil {
|
||||
// Get systeminfo for number of processors
|
||||
systemInfo := sysinfoapi.GetSystemInfo()
|
||||
|
||||
// Get memory status for physical memory
|
||||
mem, err := sysinfoapi.GlobalMemoryStatusEx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogicalProcessors,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].NumberOfLogicalProcessors),
|
||||
float64(systemInfo.NumberOfProcessors),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PhysicalMemoryBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].TotalPhysicalMemory),
|
||||
float64(mem.TotalPhys),
|
||||
)
|
||||
|
||||
hostname, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSHostname)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
domain, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSDomain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fqdn, err := sysinfoapi.GetComputerName(sysinfoapi.ComputerNameDNSFullyQualified)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Hostname,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
hostname,
|
||||
domain,
|
||||
fqdn,
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
|
||||
collector/cs_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkCsCollector(b *testing.B) {
	benchmarkCollector(b, "cs", NewCSCollector)
}
collector/dfsr.go (new file, 810 lines)
@@ -0,0 +1,810 @@
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
var dfsrEnabledCollectors = kingpin.Flag("collectors.dfsr.sources-enabled", "Comma-separated list of DFSR Perflib sources to use.").Default("connection,folder,volume").String()
|
||||
|
||||
func init() {
|
||||
// Perflib sources are dynamic, depending on the enabled child collectors
|
||||
var perflibDependencies []string
|
||||
for _, source := range expandEnabledChildCollectors(*dfsrEnabledCollectors) {
|
||||
perflibDependencies = append(perflibDependencies, dfsrGetPerfObjectName(source))
|
||||
}
|
||||
|
||||
registerCollector("dfsr", NewDFSRCollector, perflibDependencies...)
|
||||
}
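The dependency list built above relies on expandEnabledChildCollectors, which is defined elsewhere in the package; all that is assumed here is that it turns the comma-separated flag value into individual source names. A sketch of that assumed behaviour, using the standard strings package (the real helper may differ):

// Assumed behaviour, for illustration only: split on commas and trim whitespace.
func expandEnabledChildCollectorsSketch(enabled string) []string {
	var expanded []string
	for _, part := range strings.Split(enabled, ",") {
		if trimmed := strings.TrimSpace(part); trimmed != "" {
			expanded = append(expanded, trimmed)
		}
	}
	return expanded
}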
|
||||
|
||||
// DFSRCollector contains the metric and state data of the DFSR collectors.
|
||||
type DFSRCollector struct {
|
||||
// Connection source
|
||||
ConnectionBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
|
||||
ConnectionBytesReceivedTotal *prometheus.Desc
|
||||
ConnectionCompressedSizeOfFilesReceivedTotal *prometheus.Desc
|
||||
ConnectionFilesReceivedTotal *prometheus.Desc
|
||||
ConnectionRDCBytesReceivedTotal *prometheus.Desc
|
||||
ConnectionRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc
|
||||
ConnectionRDCSizeOfFilesReceivedTotal *prometheus.Desc
|
||||
ConnectionRDCNumberofFilesReceivedTotal *prometheus.Desc
|
||||
ConnectionSizeOfFilesReceivedTotal *prometheus.Desc
|
||||
|
||||
// Folder source
|
||||
FolderBandwidthSavingsUsingDFSReplicationTotal *prometheus.Desc
|
||||
FolderCompressedSizeOfFilesReceivedTotal *prometheus.Desc
|
||||
FolderConflictBytesCleanedupTotal *prometheus.Desc
|
||||
FolderConflictBytesGeneratedTotal *prometheus.Desc
|
||||
FolderConflictFilesCleanedUpTotal *prometheus.Desc
|
||||
FolderConflictFilesGeneratedTotal *prometheus.Desc
|
||||
FolderConflictFolderCleanupsCompletedTotal *prometheus.Desc
|
||||
FolderConflictSpaceInUse *prometheus.Desc
|
||||
FolderDeletedSpaceInUse *prometheus.Desc
|
||||
FolderDeletedBytesCleanedUpTotal *prometheus.Desc
|
||||
FolderDeletedBytesGeneratedTotal *prometheus.Desc
|
||||
FolderDeletedFilesCleanedUpTotal *prometheus.Desc
|
||||
FolderDeletedFilesGeneratedTotal *prometheus.Desc
|
||||
FolderFileInstallsRetriedTotal *prometheus.Desc
|
||||
FolderFileInstallsSucceededTotal *prometheus.Desc
|
||||
FolderFilesReceivedTotal *prometheus.Desc
|
||||
FolderRDCBytesReceivedTotal *prometheus.Desc
|
||||
FolderRDCCompressedSizeOfFilesReceivedTotal *prometheus.Desc
|
||||
FolderRDCNumberofFilesReceivedTotal *prometheus.Desc
|
||||
FolderRDCSizeOfFilesReceivedTotal *prometheus.Desc
|
||||
FolderSizeOfFilesReceivedTotal *prometheus.Desc
|
||||
FolderStagingSpaceInUse *prometheus.Desc
|
||||
FolderStagingBytesCleanedUpTotal *prometheus.Desc
|
||||
FolderStagingBytesGeneratedTotal *prometheus.Desc
|
||||
FolderStagingFilesCleanedUpTotal *prometheus.Desc
|
||||
FolderStagingFilesGeneratedTotal *prometheus.Desc
|
||||
FolderUpdatesDroppedTotal *prometheus.Desc
|
||||
|
||||
// Volume source
|
||||
VolumeDatabaseLookupsTotal *prometheus.Desc
|
||||
VolumeDatabaseCommitsTotal *prometheus.Desc
|
||||
VolumeUSNJournalUnreadPercentage *prometheus.Desc
|
||||
VolumeUSNJournalRecordsAcceptedTotal *prometheus.Desc
|
||||
VolumeUSNJournalRecordsReadTotal *prometheus.Desc
|
||||
|
||||
// Map of child collector functions used during collection
|
||||
dfsrChildCollectors []dfsrCollectorFunc
|
||||
}
|
||||
|
||||
type dfsrCollectorFunc func(ctx *ScrapeContext, ch chan<- prometheus.Metric) error
|
||||
|
||||
// Map Perflib sources to DFSR collector names
|
||||
// E.G. volume -> DFS Replication Service Volumes
|
||||
func dfsrGetPerfObjectName(collector string) string {
|
||||
prefix := "DFS "
|
||||
suffix := ""
|
||||
switch collector {
|
||||
case "connection":
|
||||
suffix = "Replication Connections"
|
||||
case "folder":
|
||||
suffix = "Replicated Folders"
|
||||
case "volume":
|
||||
suffix = "Replication Service Volumes"
|
||||
}
|
||||
return prefix + suffix
|
||||
}
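A quick table-driven check of the mapping above, written as an illustrative sketch in the style of the package's *_test.go files (it is not part of this change):

func TestDfsrGetPerfObjectName(t *testing.T) {
	want := map[string]string{
		"connection": "DFS Replication Connections",
		"folder":     "DFS Replicated Folders",
		"volume":     "DFS Replication Service Volumes",
	}
	for source, expected := range want {
		if got := dfsrGetPerfObjectName(source); got != expected {
			t.Errorf("%s: got %q, want %q", source, got, expected)
		}
	}
}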
|
||||
|
||||
// NewDFSRCollector constructs the DFSRCollector registered above in init()
|
||||
func NewDFSRCollector() (Collector, error) {
|
||||
log.Info("dfsr collector is in an experimental state! Metrics for this collector have not been tested.")
|
||||
const subsystem = "dfsr"
|
||||
|
||||
enabled := expandEnabledChildCollectors(*dfsrEnabledCollectors)
|
||||
perfCounters := make([]string, 0, len(enabled))
|
||||
for _, c := range enabled {
|
||||
perfCounters = append(perfCounters, dfsrGetPerfObjectName(c))
|
||||
}
|
||||
addPerfCounterDependencies(subsystem, perfCounters)
|
||||
|
||||
dfsrCollector := DFSRCollector{
|
||||
// Connection
|
||||
ConnectionBandwidthSavingsUsingDFSReplicationTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connection_bandwidth_savings_using_dfs_replication_bytes_total"),
|
||||
"Total bytes of bandwidth saved using DFS Replication for this connection",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ConnectionBytesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connection_bytes_received_total"),
|
||||
"Total bytes received for connection",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ConnectionCompressedSizeOfFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connection_compressed_size_of_files_received_bytes_total"),
|
||||
"Total compressed size of files received on the connection, in bytes",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ConnectionFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connection_received_files_total"),
|
||||
"Total number of files received for connection",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ConnectionRDCBytesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connection_rdc_received_bytes_total"),
|
||||
"Total bytes received on the connection while replicating files using Remote Differential Compression",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ConnectionRDCCompressedSizeOfFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connection_rdc_compressed_size_of_received_files_bytes_total"),
|
||||
"Total uncompressed size of files received with Remote Differential Compression for connection",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ConnectionRDCNumberofFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connection_rdc_received_files_total"),
|
||||
"Total number of files received using remote differential compression",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ConnectionRDCSizeOfFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connection_rdc_size_of_received_files_bytes_total"),
|
||||
"Total size of received Remote Differential Compression files, in bytes.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ConnectionSizeOfFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connection_files_received_bytes_total"),
|
||||
"Total size of files received, in bytes",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
// Folder
|
||||
FolderBandwidthSavingsUsingDFSReplicationTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_bandwidth_savings_using_dfs_replication_bytes_total"),
|
||||
"Total bytes of bandwidth saved using DFS Replication for this folder",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderCompressedSizeOfFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_compressed_size_of_received_files_bytes_total"),
|
||||
"Total compressed size of files received on the folder, in bytes",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderConflictBytesCleanedupTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_cleaned_up_bytes_total"),
|
||||
"Total size of conflict loser files and folders deleted from the Conflict and Deleted folder, in bytes",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderConflictBytesGeneratedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_generated_bytes_total"),
|
||||
"Total size of conflict loser files and folders moved to the Conflict and Deleted folder, in bytes",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderConflictFilesCleanedUpTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_cleaned_up_files_total"),
|
||||
"Number of conflict loser files deleted from the Conflict and Deleted folder",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderConflictFilesGeneratedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_generated_files_total"),
|
||||
"Number of files and folders moved to the Conflict and Deleted folder",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderConflictFolderCleanupsCompletedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_folder_cleanups_total"),
|
||||
"Number of deletions of conflict loser files and folders in the Conflict and Deleted",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderConflictSpaceInUse: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_conflict_space_in_use_bytes"),
|
||||
"Total size of the conflict loser files and folders currently in the Conflict and Deleted folder",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderDeletedSpaceInUse: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_deleted_space_in_use_bytes"),
|
||||
"Total size (in bytes) of the deleted files and folders currently in the Conflict and Deleted folder",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderDeletedBytesCleanedUpTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_deleted_cleaned_up_bytes_total"),
|
||||
"Total size (in bytes) of replicating deleted files and folders that were cleaned up from the Conflict and Deleted folder",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderDeletedBytesGeneratedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_deleted_generated_bytes_total"),
|
||||
"Total size (in bytes) of replicated deleted files and folders that were moved to the Conflict and Deleted folder after they were deleted from a replicated folder on a sending member",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderDeletedFilesCleanedUpTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_deleted_cleaned_up_files_total"),
|
||||
"Number of files and folders that were cleaned up from the Conflict and Deleted folder",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderDeletedFilesGeneratedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_deleted_generated_files_total"),
|
||||
"Number of deleted files and folders that were moved to the Conflict and Deleted folder",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderFileInstallsRetriedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_file_installs_retried_total"),
|
||||
"Total number of file installs that are being retried due to sharing violations or other errors encountered when installing the files",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderFileInstallsSucceededTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_file_installs_succeeded_total"),
|
||||
"Total number of files that were successfully received from sending members and installed locally on this server",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_received_files_total"),
|
||||
"Total number of files received",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderRDCBytesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_rdc_received_bytes_total"),
|
||||
"Total number of bytes received in replicating files using Remote Differential Compression",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderRDCCompressedSizeOfFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_rdc_compressed_size_of_received_files_bytes_total"),
|
||||
"Total compressed size (in bytes) of the files received with Remote Differential Compression",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderRDCNumberofFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_rdc_received_files_total"),
|
||||
"Total number of files received with Remote Differential Compression",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderRDCSizeOfFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_rdc_files_received_bytes_total"),
|
||||
"Total uncompressed size (in bytes) of the files received with Remote Differential Compression",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderSizeOfFilesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_files_received_bytes_total"),
|
||||
"Total uncompressed size (in bytes) of the files received",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderStagingSpaceInUse: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_staging_space_in_use_bytes"),
|
||||
"Total size of files and folders currently in the staging folder.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderStagingBytesCleanedUpTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_staging_cleaned_up_bytes_total"),
|
||||
"Total size (in bytes) of the files and folders that have been cleaned up from the staging folder",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderStagingBytesGeneratedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_staging_generated_bytes_total"),
|
||||
"Total size (in bytes) of replicated files and folders in the staging folder created by the DFS Replication service since last restart",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderStagingFilesCleanedUpTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_staging_cleaned_up_files_total"),
|
||||
"Total number of files and folders that have been cleaned up from the staging folder",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderStagingFilesGeneratedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_staging_generated_files_total"),
|
||||
"Total number of times replicated files and folders have been staged by the DFS Replication service",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
FolderUpdatesDroppedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "folder_dropped_updates_total"),
|
||||
"Total number of redundant file replication update records that have been ignored by the DFS Replication service because they did not change the replicated file or folder",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
// Volume
|
||||
VolumeDatabaseCommitsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "volume_database_commits_total"),
|
||||
"Total number of DFSR Volume database commits",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
VolumeDatabaseLookupsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "volume_database_lookups_total"),
|
||||
"Total number of DFSR Volume database lookups",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
VolumeUSNJournalUnreadPercentage: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "volume_usn_journal_unread_percentage"),
|
||||
"Percentage of DFSR Volume USN journal records that are unread",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
VolumeUSNJournalRecordsAcceptedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "volume_usn_journal_accepted_records_total"),
|
||||
"Total number of USN journal records accepted",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
VolumeUSNJournalRecordsReadTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "volume_usn_journal_read_records_total"),
|
||||
"Total number of DFSR Volume USN journal records read",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
}
|
||||
|
||||
dfsrCollector.dfsrChildCollectors = dfsrCollector.getDFSRChildCollectors(enabled)
|
||||
|
||||
return &dfsrCollector, nil
|
||||
}
|
||||
|
||||
// Maps enabled child collector names to their relevant collection functions,
// for use in DFSRCollector.Collect()
|
||||
func (c *DFSRCollector) getDFSRChildCollectors(enabledCollectors []string) []dfsrCollectorFunc {
|
||||
var dfsrCollectors []dfsrCollectorFunc
|
||||
for _, collector := range enabledCollectors {
|
||||
switch collector {
|
||||
case "connection":
|
||||
dfsrCollectors = append(dfsrCollectors, c.collectConnection)
|
||||
case "folder":
|
||||
dfsrCollectors = append(dfsrCollectors, c.collectFolder)
|
||||
case "volume":
|
||||
dfsrCollectors = append(dfsrCollectors, c.collectVolume)
|
||||
}
|
||||
}
|
||||
|
||||
return dfsrCollectors
|
||||
}
|
||||
|
||||
// Collect implements the Collector interface.
|
||||
// Sends metric values for each metric to the provided prometheus Metric channel.
|
||||
func (c *DFSRCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
for _, fn := range c.dfsrChildCollectors {
|
||||
err := fn(ctx, ch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: "DFS Replication Service Connections"
|
||||
type PerflibDFSRConnection struct {
|
||||
Name string
|
||||
|
||||
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
|
||||
BytesReceivedTotal float64 `perflib:"Total Bytes Received"`
|
||||
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
|
||||
FilesReceivedTotal float64 `perflib:"Total Files Received"`
|
||||
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
|
||||
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
|
||||
RDCNumberofFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
|
||||
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
|
||||
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
|
||||
}
|
||||
|
||||
func (c *DFSRCollector) collectConnection(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var dst []PerflibDFSRConnection
|
||||
if err := unmarshalObject(ctx.perfObjects["DFS Replication Connections"], &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, connection := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionBandwidthSavingsUsingDFSReplicationTotal,
|
||||
prometheus.CounterValue,
|
||||
connection.BandwidthSavingsUsingDFSReplicationTotal,
|
||||
connection.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionBytesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
connection.BytesReceivedTotal,
|
||||
connection.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionCompressedSizeOfFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
connection.CompressedSizeOfFilesReceivedTotal,
|
||||
connection.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
connection.FilesReceivedTotal,
|
||||
connection.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionRDCBytesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
connection.RDCBytesReceivedTotal,
|
||||
connection.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionRDCCompressedSizeOfFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
connection.RDCCompressedSizeOfFilesReceivedTotal,
|
||||
connection.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionRDCSizeOfFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
connection.RDCSizeOfFilesReceivedTotal,
|
||||
connection.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionRDCNumberofFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
connection.RDCNumberofFilesReceivedTotal,
|
||||
connection.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionSizeOfFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
connection.SizeOfFilesReceivedTotal,
|
||||
connection.Name,
|
||||
)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: "DFS Replicated Folder"
|
||||
type PerflibDFSRFolder struct {
|
||||
Name string
|
||||
|
||||
BandwidthSavingsUsingDFSReplicationTotal float64 `perflib:"Bandwidth Savings Using DFS Replication"`
|
||||
CompressedSizeOfFilesReceivedTotal float64 `perflib:"Compressed Size of Files Received"`
|
||||
ConflictBytesCleanedupTotal float64 `perflib:"Conflict Bytes Cleaned Up"`
|
||||
ConflictBytesGeneratedTotal float64 `perflib:"Conflict Bytes Generated"`
|
||||
ConflictFilesCleanedUpTotal float64 `perflib:"Conflict Files Cleaned Up"`
|
||||
ConflictFilesGeneratedTotal float64 `perflib:"Conflict Files Generated"`
|
||||
ConflictFolderCleanupsCompletedTotal float64 `perflib:"Conflict Folder Cleanups Completed"`
|
||||
ConflictSpaceInUse float64 `perflib:"Conflict Space In Use"`
|
||||
DeletedSpaceInUse float64 `perflib:"Deleted Space In Use"`
|
||||
DeletedBytesCleanedUpTotal float64 `perflib:"Deleted Bytes Cleaned Up"`
|
||||
DeletedBytesGeneratedTotal float64 `perflib:"Deleted Bytes Generated"`
|
||||
DeletedFilesCleanedUpTotal float64 `perflib:"Deleted Files Cleaned Up"`
|
||||
DeletedFilesGeneratedTotal float64 `perflib:"Deleted Files Generated"`
|
||||
FileInstallsRetriedTotal float64 `perflib:"File Installs Retried"`
|
||||
FileInstallsSucceededTotal float64 `perflib:"File Installs Succeeded"`
|
||||
FilesReceivedTotal float64 `perflib:"Total Files Received"`
|
||||
RDCBytesReceivedTotal float64 `perflib:"RDC Bytes Received"`
|
||||
RDCCompressedSizeOfFilesReceivedTotal float64 `perflib:"RDC Compressed Size of Files Received"`
|
||||
RDCNumberofFilesReceivedTotal float64 `perflib:"RDC Number of Files Received"`
|
||||
RDCSizeOfFilesReceivedTotal float64 `perflib:"RDC Size of Files Received"`
|
||||
SizeOfFilesReceivedTotal float64 `perflib:"Size of Files Received"`
|
||||
StagingSpaceInUse float64 `perflib:"Staging Space In Use"`
|
||||
StagingBytesCleanedUpTotal float64 `perflib:"Staging Bytes Cleaned Up"`
|
||||
StagingBytesGeneratedTotal float64 `perflib:"Staging Bytes Generated"`
|
||||
StagingFilesCleanedUpTotal float64 `perflib:"Staging Files Cleaned Up"`
|
||||
StagingFilesGeneratedTotal float64 `perflib:"Staging Files Generated"`
|
||||
UpdatesDroppedTotal float64 `perflib:"Updates Dropped"`
|
||||
}
|
||||
|
||||
func (c *DFSRCollector) collectFolder(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var dst []PerflibDFSRFolder
|
||||
if err := unmarshalObject(ctx.perfObjects["DFS Replicated Folders"], &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, folder := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderBandwidthSavingsUsingDFSReplicationTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.BandwidthSavingsUsingDFSReplicationTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderCompressedSizeOfFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.CompressedSizeOfFilesReceivedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderConflictBytesCleanedupTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.ConflictBytesCleanedupTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderConflictBytesGeneratedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.ConflictBytesGeneratedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderConflictFilesCleanedUpTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.ConflictFilesCleanedUpTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderConflictFilesGeneratedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.ConflictFilesGeneratedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderConflictFolderCleanupsCompletedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.ConflictFolderCleanupsCompletedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderConflictSpaceInUse,
|
||||
prometheus.GaugeValue,
|
||||
folder.ConflictSpaceInUse,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderDeletedSpaceInUse,
|
||||
prometheus.GaugeValue,
|
||||
folder.DeletedSpaceInUse,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderDeletedBytesCleanedUpTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.DeletedBytesCleanedUpTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderDeletedBytesGeneratedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.DeletedBytesGeneratedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderDeletedFilesCleanedUpTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.DeletedFilesCleanedUpTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderDeletedFilesGeneratedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.DeletedFilesGeneratedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderFileInstallsRetriedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.FileInstallsRetriedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderFileInstallsSucceededTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.FileInstallsSucceededTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.FilesReceivedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderRDCBytesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.RDCBytesReceivedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderRDCCompressedSizeOfFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.RDCCompressedSizeOfFilesReceivedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderRDCNumberofFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.RDCNumberofFilesReceivedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderRDCSizeOfFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.RDCSizeOfFilesReceivedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderSizeOfFilesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.SizeOfFilesReceivedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderStagingSpaceInUse,
|
||||
prometheus.GaugeValue,
|
||||
folder.StagingSpaceInUse,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderStagingBytesCleanedUpTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.StagingBytesCleanedUpTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderStagingBytesGeneratedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.StagingBytesGeneratedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderStagingFilesCleanedUpTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.StagingFilesCleanedUpTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderStagingFilesGeneratedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.StagingFilesGeneratedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FolderUpdatesDroppedTotal,
|
||||
prometheus.CounterValue,
|
||||
folder.UpdatesDroppedTotal,
|
||||
folder.Name,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: "DFS Replication Service Volumes"
|
||||
type PerflibDFSRVolume struct {
|
||||
Name string
|
||||
|
||||
DatabaseCommitsTotal float64 `perflib:"Database Commits"`
|
||||
DatabaseLookupsTotal float64 `perflib:"Database Lookups"`
|
||||
USNJournalRecordsReadTotal float64 `perflib:"USN Journal Records Read"`
|
||||
USNJournalRecordsAcceptedTotal float64 `perflib:"USN Journal Records Accepted"`
|
||||
USNJournalUnreadPercentage float64 `perflib:"USN Journal Records Unread Percentage"`
|
||||
}
|
||||
|
||||
func (c *DFSRCollector) collectVolume(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var dst []PerflibDFSRVolume
|
||||
if err := unmarshalObject(ctx.perfObjects["DFS Replication Service Volumes"], &dst); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, volume := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.VolumeDatabaseLookupsTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.DatabaseLookupsTotal,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.VolumeDatabaseCommitsTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.DatabaseCommitsTotal,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.VolumeUSNJournalRecordsAcceptedTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.USNJournalRecordsAcceptedTotal,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.VolumeUSNJournalRecordsReadTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.USNJournalRecordsReadTotal,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.VolumeUSNJournalUnreadPercentage,
|
||||
prometheus.GaugeValue,
|
||||
volume.USNJournalUnreadPercentage,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
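The collectFolder and collectVolume methods above follow the pattern used throughout these new collectors: declare a struct whose perflib tags name the raw Windows performance counters, unmarshal the scraped perflib object into a slice of that struct, and emit one const metric per instance. A minimal sketch of that pattern, reusing the unmarshalObject helper and ScrapeContext shown in this diff; the object name, metric name and struct here are illustrative only, not code from the repository:

// Illustrative sketch, not part of the diff above; assumes the collector package
// context (Namespace, ScrapeContext, unmarshalObject) shown elsewhere in this diff.
type perflibExample struct {
	Name string

	RequestsTotal float64 `perflib:"Requests Total"` // hypothetical counter name
}

var exampleRequestsDesc = prometheus.NewDesc(
	prometheus.BuildFQName(Namespace, "example", "requests_total"), // hypothetical metric
	"Total requests reported by the hypothetical Example Object",
	[]string{"name"},
	nil,
)

func collectExample(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	var dst []perflibExample
	if err := unmarshalObject(ctx.perfObjects["Example Object"], &dst); err != nil {
		return err
	}
	for _, instance := range dst {
		// One const metric per perflib instance, labelled with the instance name.
		ch <- prometheus.MustNewConstMetric(exampleRequestsDesc, prometheus.CounterValue, instance.RequestsTotal, instance.Name)
	}
	return nil
}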
collector/dfsr_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkDFSRCollector(b *testing.B) {
	benchmarkCollector(b, "dfsr", NewDFSRCollector)
}
|
||||
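Each new collector gains a matching benchmark file like the one above. The benchmarkCollector helper itself is not part of this diff; a plausible shape for it, assuming it simply builds the collector and scrapes it repeatedly, is sketched below. Everything in this sketch, including how a ScrapeContext would be obtained, is an assumption, not the repository's actual helper:

// Hypothetical sketch of the benchmarkCollector helper referenced above; the real
// helper lives elsewhere in the repository and may differ. A real implementation
// would need to construct a ScrapeContext carrying the perflib objects the
// collector declared at registration time; nil is used here purely as a placeholder.
func benchmarkCollectorSketch(b *testing.B, name string, collectorFunc func() (Collector, error)) {
	c, err := collectorFunc()
	if err != nil {
		b.Fatalf("failed to build %s collector: %v", name, err)
	}
	metrics := make(chan prometheus.Metric)
	go func() {
		for range metrics {
			// Drain metrics so Collect never blocks on the channel.
		}
	}()
	for i := 0; i < b.N; i++ {
		if err := c.Collect(nil, metrics); err != nil { // nil ScrapeContext: placeholder assumption
			b.Error(err)
		}
	}
	close(metrics)
}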
collector/dhcp.go (new file, 388 lines)
@@ -0,0 +1,388 @@
//go:build windows
// +build windows

package collector

import (
	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	registerCollector("dhcp", NewDhcpCollector, "DHCP Server")
}

// A DhcpCollector is a Prometheus collector for perflib DHCP metrics
type DhcpCollector struct {
	PacketsReceivedTotal                             *prometheus.Desc
	DuplicatesDroppedTotal                           *prometheus.Desc
	PacketsExpiredTotal                              *prometheus.Desc
	ActiveQueueLength                                *prometheus.Desc
	ConflictCheckQueueLength                         *prometheus.Desc
	DiscoversTotal                                   *prometheus.Desc
	OffersTotal                                      *prometheus.Desc
	RequestsTotal                                    *prometheus.Desc
	InformsTotal                                     *prometheus.Desc
	AcksTotal                                        *prometheus.Desc
	NacksTotal                                       *prometheus.Desc
	DeclinesTotal                                    *prometheus.Desc
	ReleasesTotal                                    *prometheus.Desc
	OfferQueueLength                                 *prometheus.Desc
	DeniedDueToMatch                                 *prometheus.Desc
	DeniedDueToNonMatch                              *prometheus.Desc
	FailoverBndupdSentTotal                          *prometheus.Desc
	FailoverBndupdReceivedTotal                      *prometheus.Desc
	FailoverBndackSentTotal                          *prometheus.Desc
	FailoverBndackReceivedTotal                      *prometheus.Desc
	FailoverBndupdPendingOutboundQueue               *prometheus.Desc
	FailoverTransitionsCommunicationinterruptedState *prometheus.Desc
	FailoverTransitionsPartnerdownState              *prometheus.Desc
	FailoverTransitionsRecoverState                  *prometheus.Desc
	FailoverBndupdDropped                            *prometheus.Desc
}
|
||||
|
||||
func NewDhcpCollector() (Collector, error) {
|
||||
const subsystem = "dhcp"
|
||||
|
||||
return &DhcpCollector{
|
||||
PacketsReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_received_total"),
|
||||
"Total number of packets received by the DHCP server (PacketsReceivedTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DuplicatesDroppedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "duplicates_dropped_total"),
|
||||
"Total number of duplicate packets received by the DHCP server (DuplicatesDroppedTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PacketsExpiredTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_expired_total"),
|
||||
"Total number of packets expired in the DHCP server message queue (PacketsExpiredTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ActiveQueueLength: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "active_queue_length"),
|
||||
"Number of packets in the processing queue of the DHCP server (ActiveQueueLength)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ConflictCheckQueueLength: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "conflict_check_queue_length"),
|
||||
"Number of packets in the DHCP server queue waiting on conflict detection (ping). (ConflictCheckQueueLength)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DiscoversTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "discovers_total"),
|
||||
"Total DHCP Discovers received by the DHCP server (DiscoversTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
OffersTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "offers_total"),
|
||||
"Total DHCP Offers sent by the DHCP server (OffersTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
RequestsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "requests_total"),
|
||||
"Total DHCP Requests received by the DHCP server (RequestsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
InformsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "informs_total"),
|
||||
"Total DHCP Informs received by the DHCP server (InformsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
AcksTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "acks_total"),
|
||||
"Total DHCP Acks sent by the DHCP server (AcksTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
NacksTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "nacks_total"),
|
||||
"Total DHCP Nacks sent by the DHCP server (NacksTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DeclinesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "declines_total"),
|
||||
"Total DHCP Declines received by the DHCP server (DeclinesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ReleasesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "releases_total"),
|
||||
"Total DHCP Releases received by the DHCP server (ReleasesTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
OfferQueueLength: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "offer_queue_length"),
|
||||
"Number of packets in the offer queue of the DHCP server (OfferQueueLength)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DeniedDueToMatch: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "denied_due_to_match_total"),
|
||||
"Total number of DHCP requests denied, based on matches from the Deny list (DeniedDueToMatch)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DeniedDueToNonMatch: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "denied_due_to_nonmatch_total"),
|
||||
"Total number of DHCP requests denied, based on non-matches from the Allow list (DeniedDueToNonMatch)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FailoverBndupdSentTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_sent_total"),
|
||||
"Number of DHCP failover Binding Update messages sent (FailoverBndupdSentTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FailoverBndupdReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_received_total"),
|
||||
"Number of DHCP failover Binding Update messages received (FailoverBndupdReceivedTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FailoverBndackSentTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failover_bndack_sent_total"),
|
||||
"Number of DHCP failover Binding Ack messages sent (FailoverBndackSentTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FailoverBndackReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failover_bndack_received_total"),
|
||||
"Number of DHCP failover Binding Ack messages received (FailoverBndackReceivedTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FailoverBndupdPendingOutboundQueue: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_pending_in_outbound_queue"),
|
||||
"Number of pending outbound DHCP failover Binding Update messages (FailoverBndupdPendingOutboundQueue)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FailoverTransitionsCommunicationinterruptedState: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failover_transitions_communicationinterrupted_state_total"),
|
||||
"Total number of transitions into COMMUNICATION INTERRUPTED state (FailoverTransitionsCommunicationinterruptedState)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FailoverTransitionsPartnerdownState: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failover_transitions_partnerdown_state_total"),
|
||||
"Total number of transitions into PARTNER DOWN state (FailoverTransitionsPartnerdownState)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FailoverTransitionsRecoverState: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failover_transitions_recover_total"),
|
||||
"Total number of transitions into RECOVER state (FailoverTransitionsRecoverState)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FailoverBndupdDropped: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failover_bndupd_dropped_total"),
|
||||
"Total number of DHCP faileover Binding Updates dropped (FailoverBndupdDropped)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// dhcpPerf represents perflib metrics from the DHCP Server class.
// While the name of a number of perflib metrics would indicate a rate is being returned (e.g. Packets Received/sec),
// perflib instead returns a counter, hence the "Total" suffix in some of the variable names.
type dhcpPerf struct {
	PacketsReceivedTotal                             float64 `perflib:"Packets Received/sec"`
	DuplicatesDroppedTotal                           float64 `perflib:"Duplicates Dropped/sec"`
	PacketsExpiredTotal                              float64 `perflib:"Packets Expired/sec"`
	ActiveQueueLength                                float64 `perflib:"Active Queue Length"`
	ConflictCheckQueueLength                         float64 `perflib:"Conflict Check Queue Length"`
	DiscoversTotal                                   float64 `perflib:"Discovers/sec"`
	OffersTotal                                      float64 `perflib:"Offers/sec"`
	RequestsTotal                                    float64 `perflib:"Requests/sec"`
	InformsTotal                                     float64 `perflib:"Informs/sec"`
	AcksTotal                                        float64 `perflib:"Acks/sec"`
	NacksTotal                                       float64 `perflib:"Nacks/sec"`
	DeclinesTotal                                    float64 `perflib:"Declines/sec"`
	ReleasesTotal                                    float64 `perflib:"Releases/sec"`
	DeniedDueToMatch                                 float64 `perflib:"Denied due to match."`
	DeniedDueToNonMatch                              float64 `perflib:"Denied due to nonmatch."`
	OfferQueueLength                                 float64 `perflib:"Offer Queue Length"`
	FailoverBndupdSentTotal                          float64 `perflib:"Failover: BndUpd sent/sec."`
	FailoverBndupdReceivedTotal                      float64 `perflib:"Failover: BndUpd received/sec."`
	FailoverBndackSentTotal                          float64 `perflib:"Failover: BndAck sent/sec."`
	FailoverBndackReceivedTotal                      float64 `perflib:"Failover: BndAck received/sec."`
	FailoverBndupdPendingOutboundQueue               float64 `perflib:"Failover: BndUpd pending in outbound queue."`
	FailoverTransitionsCommunicationinterruptedState float64 `perflib:"Failover: Transitions to COMMUNICATION-INTERRUPTED state."`
	FailoverTransitionsPartnerdownState              float64 `perflib:"Failover: Transitions to PARTNER-DOWN state."`
	FailoverTransitionsRecoverState                  float64 `perflib:"Failover: Transitions to RECOVER state."`
	FailoverBndupdDropped                            float64 `perflib:"Failover: BndUpd Dropped."`
}
|
||||
|
||||
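The tags above bind each struct field to the raw Windows counter name, while the exported metric name carries a _total suffix so that per-second rates are derived at query time rather than taken from perflib. A minimal illustration of that mapping; the windows_ prefix assumes the exporter's Namespace constant is "windows", and the PromQL in the comment is only an example query:

// Illustrative sketch, not part of dhcp.go: a one-field subset of dhcpPerf.
// The perflib tag names the raw counter "Packets Received/sec"; the descriptor in
// NewDhcpCollector exposes it as <Namespace>_dhcp_packets_received_total, and the
// rate would be computed in PromQL, e.g. rate(windows_dhcp_packets_received_total[5m]).
type dhcpPerfSubset struct {
	PacketsReceivedTotal float64 `perflib:"Packets Received/sec"`
}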
func (c *DhcpCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var perflib []dhcpPerf
|
||||
if err := unmarshalObject(ctx.perfObjects["DHCP Server"], &perflib); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].PacketsReceivedTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DuplicatesDroppedTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].DuplicatesDroppedTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsExpiredTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].PacketsExpiredTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ActiveQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
perflib[0].ActiveQueueLength,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConflictCheckQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
perflib[0].ConflictCheckQueueLength,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DiscoversTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].DiscoversTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.OffersTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].OffersTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RequestsTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].RequestsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InformsTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].InformsTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AcksTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].AcksTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NacksTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].NacksTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DeclinesTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].DeclinesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReleasesTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].ReleasesTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.OfferQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
perflib[0].OfferQueueLength,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DeniedDueToMatch,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].DeniedDueToMatch,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DeniedDueToNonMatch,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].DeniedDueToNonMatch,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailoverBndupdSentTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].FailoverBndupdSentTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailoverBndupdReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].FailoverBndupdReceivedTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailoverBndackSentTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].FailoverBndackSentTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailoverBndackReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].FailoverBndackReceivedTotal,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailoverBndupdPendingOutboundQueue,
|
||||
prometheus.GaugeValue,
|
||||
perflib[0].FailoverBndupdPendingOutboundQueue,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailoverTransitionsCommunicationinterruptedState,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].FailoverTransitionsCommunicationinterruptedState,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailoverTransitionsPartnerdownState,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].FailoverTransitionsPartnerdownState,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailoverTransitionsRecoverState,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].FailoverTransitionsRecoverState,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailoverBndupdDropped,
|
||||
prometheus.CounterValue,
|
||||
perflib[0].FailoverBndupdDropped,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
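The Collect method above indexes perflib[0] without first checking whether the unmarshalled slice is empty. The dns.go hunks later in this diff add exactly that kind of guard for a WMI query; a comparable defensive variant for the perflib path could look like the sketch below. The function name, the error wording and the errors import are assumptions for illustration, not part of the diff:

// Hypothetical defensive variant of the start of (*DhcpCollector).Collect; the
// code in the diff above indexes perflib[0] directly.
func (c *DhcpCollector) collectChecked(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	var perflib []dhcpPerf
	if err := unmarshalObject(ctx.perfObjects["DHCP Server"], &perflib); err != nil {
		return err
	}
	if len(perflib) == 0 {
		// Would require importing "errors"; avoids a panic when the object is missing.
		return errors.New("perflib query for DHCP Server returned an empty result set")
	}
	ch <- prometheus.MustNewConstMetric(c.PacketsReceivedTotal, prometheus.CounterValue, perflib[0].PacketsReceivedTotal)
	// ... remaining metrics as in the diff above ...
	return nil
}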
collector/dhcp_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkDHCPCollector(b *testing.B) {
	benchmarkCollector(b, "dhcp", NewDhcpCollector)
}
|
||||
@@ -1,17 +1,18 @@
// returns data points from Win32_PerfRawData_DNS_DNS
// https://msdn.microsoft.com/en-us/library/ms803992.aspx?f=255&MSPPError=-2147217396
// https://technet.microsoft.com/en-us/library/cc977686.aspx
//go:build windows
// +build windows

package collector

import (
	"log"
	"errors"

	"github.com/StackExchange/wmi"
	"github.com/prometheus-community/windows_exporter/log"
	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	Factories["dns"] = NewDNSCollector
	registerCollector("dns", NewDNSCollector)
}

// A DNSCollector is a Prometheus collector for WMI Win32_PerfRawData_DNS_DNS metrics
@@ -81,8 +82,8 @@ func NewDNSCollector() (Collector, error) {
			nil,
		),
		MemoryUsedBytes: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "memory_used_bytes_total"),
			"Total memory used by DNS server",
			prometheus.BuildFQName(Namespace, subsystem, "memory_used_bytes"),
			"Current memory used by DNS server",
			[]string{"area"},
			nil,
		),
@@ -136,7 +137,7 @@ func NewDNSCollector() (Collector, error) {
		),
		Responses: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "responses_total"),
			"Number of reponses sent by DNS server",
			"Number of responses sent by DNS server",
			[]string{"protocol"},
			nil,
		),
@@ -181,14 +182,17 @@ func NewDNSCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *DNSCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *DNSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Println("[ERROR] failed collecting dns metrics:", desc, err)
		log.Error("failed collecting dns metrics:", desc, err)
		return err
	}
	return nil
}

// Win32_PerfRawData_DNS_DNS docs:
// - https://msdn.microsoft.com/en-us/library/ms803992.aspx?f=255&MSPPError=-2147217396
// - https://technet.microsoft.com/en-us/library/cc977686.aspx
type Win32_PerfRawData_DNS_DNS struct {
	AXFRRequestReceived uint32
	AXFRRequestSent     uint32
@@ -234,10 +238,13 @@ type Win32_PerfRawData_DNS_DNS struct {

func (c *DNSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []Win32_PerfRawData_DNS_DNS
	q := wmi.CreateQuery(&dst, "")
	q := queryAll(&dst)
	if err := wmi.Query(q, &dst); err != nil {
		return nil, err
	}
	if len(dst) == 0 {
		return nil, errors.New("WMI query returned empty result set")
	}

	ch <- prometheus.MustNewConstMetric(
		c.ZoneTransferRequestsReceived,
|
||||
|
||||
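The dns.go hunks above replace the hand-rolled wmi.CreateQuery call with the exporter's queryAll helper and add a guard against an empty WMI result set before any metrics are emitted. Condensed into one place, the new shape of the WMI-backed collect function is roughly the following sketch; the comment about what queryAll builds is an assumption, and the trailing return is a placeholder:

// Condensed sketch of the WMI pattern introduced in the hunks above; not code
// from the repository.
func collectWMISketch(c *DNSCollector, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []Win32_PerfRawData_DNS_DNS
	q := queryAll(&dst) // presumably builds a SELECT * WQL query from the struct type (assumption)
	if err := wmi.Query(q, &dst); err != nil {
		return nil, err
	}
	if len(dst) == 0 {
		// Surfacing this as an error avoids exporting zero values when WMI returns nothing.
		return nil, errors.New("WMI query returned empty result set")
	}
	// Emit metrics from dst[0] as in the diff above.
	return nil, nil
}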
collector/dns_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkDNSCollector(b *testing.B) {
	benchmarkCollector(b, "dns", NewDNSCollector)
}
|
||||
collector/exchange.go (new file, 634 lines)
@@ -0,0 +1,634 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("exchange", newExchangeCollector,
|
||||
"MSExchange ADAccess Processes",
|
||||
"MSExchangeTransport Queues",
|
||||
"MSExchange HttpProxy",
|
||||
"MSExchange ActiveSync",
|
||||
"MSExchange Availability Service",
|
||||
"MSExchange OWA",
|
||||
"MSExchangeAutodiscover",
|
||||
"MSExchange WorkloadManagement Workloads",
|
||||
"MSExchange RpcClientAccess",
|
||||
)
|
||||
}
|
||||
|
||||
type exchangeCollector struct {
|
||||
LDAPReadTime *prometheus.Desc
|
||||
LDAPSearchTime *prometheus.Desc
|
||||
LDAPWriteTime *prometheus.Desc
|
||||
LDAPTimeoutErrorsPerSec *prometheus.Desc
|
||||
LongRunningLDAPOperationsPerMin *prometheus.Desc
|
||||
ExternalActiveRemoteDeliveryQueueLength *prometheus.Desc
|
||||
InternalActiveRemoteDeliveryQueueLength *prometheus.Desc
|
||||
ActiveMailboxDeliveryQueueLength *prometheus.Desc
|
||||
RetryMailboxDeliveryQueueLength *prometheus.Desc
|
||||
UnreachableQueueLength *prometheus.Desc
|
||||
ExternalLargestDeliveryQueueLength *prometheus.Desc
|
||||
InternalLargestDeliveryQueueLength *prometheus.Desc
|
||||
PoisonQueueLength *prometheus.Desc
|
||||
MailboxServerLocatorAverageLatency *prometheus.Desc
|
||||
AverageAuthenticationLatency *prometheus.Desc
|
||||
AverageCASProcessingLatency *prometheus.Desc
|
||||
MailboxServerProxyFailureRate *prometheus.Desc
|
||||
OutstandingProxyRequests *prometheus.Desc
|
||||
ProxyRequestsPerSec *prometheus.Desc
|
||||
ActiveSyncRequestsPerSec *prometheus.Desc
|
||||
PingCommandsPending *prometheus.Desc
|
||||
SyncCommandsPerSec *prometheus.Desc
|
||||
AvailabilityRequestsSec *prometheus.Desc
|
||||
CurrentUniqueUsers *prometheus.Desc
|
||||
OWARequestsPerSec *prometheus.Desc
|
||||
AutodiscoverRequestsPerSec *prometheus.Desc
|
||||
ActiveTasks *prometheus.Desc
|
||||
CompletedTasks *prometheus.Desc
|
||||
QueuedTasks *prometheus.Desc
|
||||
YieldedTasks *prometheus.Desc
|
||||
IsActive *prometheus.Desc
|
||||
RPCAveragedLatency *prometheus.Desc
|
||||
RPCRequests *prometheus.Desc
|
||||
ActiveUserCount *prometheus.Desc
|
||||
ConnectionCount *prometheus.Desc
|
||||
RPCOperationsPerSec *prometheus.Desc
|
||||
UserCount *prometheus.Desc
|
||||
|
||||
enabledCollectors []string
|
||||
}
|
||||
|
||||
var (
|
||||
// All available collector functions
|
||||
exchangeAllCollectorNames = []string{
|
||||
"ADAccessProcesses",
|
||||
"TransportQueues",
|
||||
"HttpProxy",
|
||||
"ActiveSync",
|
||||
"AvailabilityService",
|
||||
"OutlookWebAccess",
|
||||
"Autodiscover",
|
||||
"WorkloadManagement",
|
||||
"RpcClientAccess",
|
||||
}
|
||||
|
||||
argExchangeListAllCollectors = kingpin.Flag(
|
||||
"collectors.exchange.list",
|
||||
"List the collectors along with their perflib object name/ids",
|
||||
).Bool()
|
||||
|
||||
argExchangeCollectorsEnabled = kingpin.Flag(
|
||||
"collectors.exchange.enabled",
|
||||
"Comma-separated list of collectors to use. Defaults to all, if not specified.",
|
||||
).Default("").String()
|
||||
)
|
||||
|
||||
// newExchangeCollector returns a new Collector
|
||||
func newExchangeCollector() (Collector, error) {
|
||||
|
||||
// desc creates a new prometheus description
|
||||
desc := func(metricName string, description string, labels ...string) *prometheus.Desc {
|
||||
return prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, "exchange", metricName),
|
||||
description,
|
||||
labels,
|
||||
nil,
|
||||
)
|
||||
}
|
||||
|
||||
c := exchangeCollector{
|
||||
RPCAveragedLatency: desc("rpc_avg_latency_sec", "The latency (sec), averaged for the past 1024 packets"),
|
||||
RPCRequests: desc("rpc_requests", "Number of client requests currently being processed by the RPC Client Access service"),
|
||||
ActiveUserCount: desc("rpc_active_user_count", "Number of unique users that have shown some kind of activity in the last 2 minutes"),
|
||||
ConnectionCount: desc("rpc_connection_count", "Total number of client connections maintained"),
|
||||
RPCOperationsPerSec: desc("rpc_operations_total", "The rate at which RPC operations occur"),
|
||||
UserCount: desc("rpc_user_count", "Number of users"),
|
||||
LDAPReadTime: desc("ldap_read_time_sec", "Time (sec) to send an LDAP read request and receive a response", "name"),
|
||||
LDAPSearchTime: desc("ldap_search_time_sec", "Time (sec) to send an LDAP search request and receive a response", "name"),
|
||||
LDAPWriteTime: desc("ldap_write_time_sec", "Time (sec) to send an LDAP Add/Modify/Delete request and receive a response", "name"),
|
||||
LDAPTimeoutErrorsPerSec: desc("ldap_timeout_errors_total", "Total number of LDAP timeout errors", "name"),
|
||||
LongRunningLDAPOperationsPerMin: desc("ldap_long_running_ops_per_sec", "Long Running LDAP operations per second", "name"),
|
||||
ExternalActiveRemoteDeliveryQueueLength: desc("transport_queues_external_active_remote_delivery", "External Active Remote Delivery Queue length", "name"),
|
||||
InternalActiveRemoteDeliveryQueueLength: desc("transport_queues_internal_active_remote_delivery", "Internal Active Remote Delivery Queue length", "name"),
|
||||
ActiveMailboxDeliveryQueueLength: desc("transport_queues_active_mailbox_delivery", "Active Mailbox Delivery Queue length", "name"),
|
||||
RetryMailboxDeliveryQueueLength: desc("transport_queues_retry_mailbox_delivery", "Retry Mailbox Delivery Queue length", "name"),
|
||||
UnreachableQueueLength: desc("transport_queues_unreachable", "Unreachable Queue length", "name"),
|
||||
ExternalLargestDeliveryQueueLength: desc("transport_queues_external_largest_delivery", "External Largest Delivery Queue length", "name"),
|
||||
InternalLargestDeliveryQueueLength: desc("transport_queues_internal_largest_delivery", "Internal Largest Delivery Queue length", "name"),
|
||||
PoisonQueueLength: desc("transport_queues_poison", "Poison Queue length", "name"),
|
||||
MailboxServerLocatorAverageLatency: desc("http_proxy_mailbox_server_locator_avg_latency_sec", "Average latency (sec) of MailboxServerLocator web service calls", "name"),
|
||||
AverageAuthenticationLatency: desc("http_proxy_avg_auth_latency", "Average time spent authenticating CAS requests over the last 200 samples", "name"),
|
||||
OutstandingProxyRequests: desc("http_proxy_outstanding_proxy_requests", "Number of concurrent outstanding proxy requests", "name"),
|
||||
ProxyRequestsPerSec: desc("http_proxy_requests_total", "Number of proxy requests processed each second", "name"),
|
||||
AvailabilityRequestsSec: desc("avail_service_requests_per_sec", "Number of requests serviced per second"),
|
||||
CurrentUniqueUsers: desc("owa_current_unique_users", "Number of unique users currently logged on to Outlook Web App"),
|
||||
OWARequestsPerSec: desc("owa_requests_total", "Number of requests handled by Outlook Web App per second"),
|
||||
AutodiscoverRequestsPerSec: desc("autodiscover_requests_total", "Number of autodiscover service requests processed each second"),
|
||||
ActiveTasks: desc("workload_active_tasks", "Number of active tasks currently running in the background for workload management", "name"),
|
||||
CompletedTasks: desc("workload_completed_tasks", "Number of workload management tasks that have been completed", "name"),
|
||||
QueuedTasks: desc("workload_queued_tasks", "Number of workload management tasks that are currently queued up waiting to be processed", "name"),
|
||||
YieldedTasks: desc("workload_yielded_tasks", "The total number of tasks that have been yielded by a workload", "name"),
|
||||
IsActive: desc("workload_is_active", "Active indicates whether the workload is in an active (1) or paused (0) state", "name"),
|
||||
ActiveSyncRequestsPerSec: desc("activesync_requests_total", "Num HTTP requests received from the client via ASP.NET per sec. Shows Current user load"),
|
||||
AverageCASProcessingLatency: desc("http_proxy_avg_cas_proccessing_latency_sec", "Average latency (sec) of CAS processing time over the last 200 reqs", "name"),
|
||||
MailboxServerProxyFailureRate: desc("http_proxy_mailbox_proxy_failure_rate", "% of failures between this CAS and MBX servers over the last 200 samples", "name"),
|
||||
PingCommandsPending: desc("activesync_ping_cmds_pending", "Number of ping commands currently pending in the queue"),
|
||||
SyncCommandsPerSec: desc("activesync_sync_cmds_total", "Number of sync commands processed per second. Clients use this command to synchronize items within a folder"),
|
||||
|
||||
enabledCollectors: make([]string, 0, len(exchangeAllCollectorNames)),
|
||||
}
|
||||
|
||||
collectorDesc := map[string]string{
|
||||
"ADAccessProcesses": "[19108] MSExchange ADAccess Processes",
|
||||
"TransportQueues": "[20524] MSExchangeTransport Queues",
|
||||
"HttpProxy": "[36934] MSExchange HttpProxy",
|
||||
"ActiveSync": "[25138] MSExchange ActiveSync",
|
||||
"AvailabilityService": "[24914] MSExchange Availability Service",
|
||||
"OutlookWebAccess": "[24618] MSExchange OWA",
|
||||
"Autodiscover": "[29240] MSExchange Autodiscover",
|
||||
"WorkloadManagement": "[19430] MSExchange WorkloadManagement Workloads",
|
||||
"RpcClientAccess": "[29336] MSExchange RpcClientAccess",
|
||||
}
|
||||
|
||||
if *argExchangeListAllCollectors {
|
||||
fmt.Printf("%-32s %-32s\n", "Collector Name", "[PerfID] Perflib Object")
|
||||
for _, cname := range exchangeAllCollectorNames {
|
||||
fmt.Printf("%-32s %-32s\n", cname, collectorDesc[cname])
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if *argExchangeCollectorsEnabled == "" {
|
||||
for _, collectorName := range exchangeAllCollectorNames {
|
||||
c.enabledCollectors = append(c.enabledCollectors, collectorName)
|
||||
}
|
||||
} else {
|
||||
for _, collectorName := range strings.Split(*argExchangeCollectorsEnabled, ",") {
|
||||
if find(exchangeAllCollectorNames, collectorName) {
|
||||
c.enabledCollectors = append(c.enabledCollectors, collectorName)
|
||||
} else {
|
||||
return nil, fmt.Errorf("Unknown exchange collector: %s", collectorName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &c, nil
|
||||
}
|
||||
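newExchangeCollector above wires two kingpin flags: --collectors.exchange.list prints the sub-collector names with their perflib object IDs and exits, and --collectors.exchange.enabled narrows collection to a comma-separated subset (empty means all). An invocation restricted to queue and proxy metrics might therefore look like the line below; the binary name is illustrative, and any other flags the exporter needs are omitted:

    windows_exporter.exe --collectors.exchange.enabled="TransportQueues,HttpProxy"

An unknown name in that list makes newExchangeCollector return an error, so a typo fails at startup rather than silently collecting nothing.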
|
||||
// Collect collects exchange metrics and sends them to prometheus
|
||||
func (c *exchangeCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
|
||||
collectorFuncs := map[string]func(ctx *ScrapeContext, ch chan<- prometheus.Metric) error{
|
||||
"ADAccessProcesses": c.collectADAccessProcesses,
|
||||
"TransportQueues": c.collectTransportQueues,
|
||||
"HttpProxy": c.collectHTTPProxy,
|
||||
"ActiveSync": c.collectActiveSync,
|
||||
"AvailabilityService": c.collectAvailabilityService,
|
||||
"OutlookWebAccess": c.collectOWA,
|
||||
"Autodiscover": c.collectAutoDiscover,
|
||||
"WorkloadManagement": c.collectWorkloadManagementWorkloads,
|
||||
"RpcClientAccess": c.collectRPC,
|
||||
}
|
||||
|
||||
for _, collectorName := range c.enabledCollectors {
|
||||
if err := collectorFuncs[collectorName](ctx, ch); err != nil {
|
||||
log.Errorf("Error in %s: %s", collectorName, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [19108] MSExchange ADAccess Processes
|
||||
type perflibADAccessProcesses struct {
|
||||
Name string
|
||||
|
||||
LDAPReadTime float64 `perflib:"LDAP Read Time"`
|
||||
LDAPSearchTime float64 `perflib:"LDAP Search Time"`
|
||||
LDAPWriteTime float64 `perflib:"LDAP Write Time"`
|
||||
LDAPTimeoutErrorsPerSec float64 `perflib:"LDAP Timeout Errors/sec"`
|
||||
LongRunningLDAPOperationsPerMin float64 `perflib:"Long Running LDAP Operations/min"`
|
||||
}
|
||||
|
||||
func (c *exchangeCollector) collectADAccessProcesses(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var data []perflibADAccessProcesses
|
||||
if err := unmarshalObject(ctx.perfObjects["MSExchange ADAccess Processes"], &data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
labelUseCount := make(map[string]int)
|
||||
for _, proc := range data {
|
||||
labelName := c.toLabelName(proc.Name)
|
||||
if strings.HasSuffix(labelName, "_total") {
|
||||
continue
|
||||
}
|
||||
|
||||
// since we're not including the PID suffix from the instance names in the label names,
|
||||
// we get an occasional duplicate. This seems to affect about 4 instances only on this object.
|
||||
labelUseCount[labelName]++
|
||||
if labelUseCount[labelName] > 1 {
|
||||
labelName = fmt.Sprintf("%s_%d", labelName, labelUseCount[labelName])
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LDAPReadTime,
|
||||
prometheus.CounterValue,
|
||||
c.msToSec(proc.LDAPReadTime),
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LDAPSearchTime,
|
||||
prometheus.CounterValue,
|
||||
c.msToSec(proc.LDAPSearchTime),
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LDAPWriteTime,
|
||||
prometheus.CounterValue,
|
||||
c.msToSec(proc.LDAPWriteTime),
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LDAPTimeoutErrorsPerSec,
|
||||
prometheus.CounterValue,
|
||||
proc.LDAPTimeoutErrorsPerSec,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LongRunningLDAPOperationsPerMin,
|
||||
prometheus.CounterValue,
|
||||
proc.LongRunningLDAPOperationsPerMin*60,
|
||||
labelName,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
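Because collectADAccessProcesses drops the PID suffix from perflib instance names, the comment above notes that a handful of instances collapse to the same label; the labelUseCount map then disambiguates repeats by appending a counter. A standalone illustration of that logic, with an invented instance name:

// Hypothetical walk-through of the dedup logic shown above (instance name invented):
labelUseCount := map[string]int{}
for _, name := range []string{"edgetransport", "edgetransport", "edgetransport"} {
	labelUseCount[name]++
	label := name
	if labelUseCount[name] > 1 {
		label = fmt.Sprintf("%s_%d", name, labelUseCount[name])
	}
	fmt.Println(label) // edgetransport, edgetransport_2, edgetransport_3
}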
|
||||
// Perflib: [24914] MSExchange Availability Service
|
||||
type perflibAvailabilityService struct {
|
||||
RequestsSec float64 `perflib:"Availability Requests (sec)"`
|
||||
}
|
||||
|
||||
func (c *exchangeCollector) collectAvailabilityService(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var data []perflibAvailabilityService
|
||||
if err := unmarshalObject(ctx.perfObjects["MSExchange Availability Service"], &data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, availservice := range data {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AvailabilityRequestsSec,
|
||||
prometheus.CounterValue,
|
||||
availservice.RequestsSec,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [36934] MSExchange HttpProxy
|
||||
type perflibHTTPProxy struct {
|
||||
Name string
|
||||
|
||||
MailboxServerLocatorAverageLatency float64 `perflib:"MailboxServerLocator Average Latency (Moving Average)"`
|
||||
AverageAuthenticationLatency float64 `perflib:"Average Authentication Latency"`
|
||||
AverageCASProcessingLatency float64 `perflib:"Average ClientAccess Server Processing Latency"`
|
||||
MailboxServerProxyFailureRate float64 `perflib:"Mailbox Server Proxy Failure Rate"`
|
||||
OutstandingProxyRequests float64 `perflib:"Outstanding Proxy Requests"`
|
||||
ProxyRequestsPerSec float64 `perflib:"Proxy Requests/Sec"`
|
||||
}
|
||||
|
||||
func (c *exchangeCollector) collectHTTPProxy(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var data []perflibHTTPProxy
|
||||
if err := unmarshalObject(ctx.perfObjects["MSExchange HttpProxy"], &data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, instance := range data {
|
||||
labelName := c.toLabelName(instance.Name)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MailboxServerLocatorAverageLatency,
|
||||
prometheus.GaugeValue,
|
||||
c.msToSec(instance.MailboxServerLocatorAverageLatency),
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AverageAuthenticationLatency,
|
||||
prometheus.GaugeValue,
|
||||
instance.AverageAuthenticationLatency,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AverageCASProcessingLatency,
|
||||
prometheus.GaugeValue,
|
||||
c.msToSec(instance.AverageCASProcessingLatency),
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MailboxServerProxyFailureRate,
|
||||
prometheus.GaugeValue,
|
||||
instance.MailboxServerProxyFailureRate,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.OutstandingProxyRequests,
|
||||
prometheus.GaugeValue,
|
||||
instance.OutstandingProxyRequests,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProxyRequestsPerSec,
|
||||
prometheus.CounterValue,
|
||||
instance.ProxyRequestsPerSec,
|
||||
labelName,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [24618] MSExchange OWA
|
||||
type perflibOWA struct {
|
||||
CurrentUniqueUsers float64 `perflib:"Current Unique Users"`
|
||||
RequestsPerSec float64 `perflib:"Requests/sec"`
|
||||
}
|
||||
|
||||
func (c *exchangeCollector) collectOWA(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var data []perflibOWA
|
||||
if err := unmarshalObject(ctx.perfObjects["MSExchange OWA"], &data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, owa := range data {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentUniqueUsers,
|
||||
prometheus.GaugeValue,
|
||||
owa.CurrentUniqueUsers,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.OWARequestsPerSec,
|
||||
prometheus.CounterValue,
|
||||
owa.RequestsPerSec,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [25138] MSExchange ActiveSync
|
||||
type perflibActiveSync struct {
|
||||
RequestsPerSec float64 `perflib:"Requests/sec"`
|
||||
PingCommandsPending float64 `perflib:"Ping Commands Pending"`
|
||||
SyncCommandsPerSec float64 `perflib:"Sync Commands/sec"`
|
||||
}
|
||||
|
||||
func (c *exchangeCollector) collectActiveSync(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var data []perflibActiveSync
|
||||
if err := unmarshalObject(ctx.perfObjects["MSExchange ActiveSync"], &data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, instance := range data {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ActiveSyncRequestsPerSec,
|
||||
prometheus.CounterValue,
|
||||
instance.RequestsPerSec,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PingCommandsPending,
|
||||
prometheus.GaugeValue,
|
||||
instance.PingCommandsPending,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SyncCommandsPerSec,
|
||||
prometheus.CounterValue,
|
||||
instance.SyncCommandsPerSec,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [29366] MSExchange RpcClientAccess
|
||||
type perflibRPCClientAccess struct {
|
||||
RPCAveragedLatency float64 `perflib:"RPC Averaged Latency"`
|
||||
RPCRequests float64 `perflib:"RPC Requests"`
|
||||
ActiveUserCount float64 `perflib:"Active User Count"`
|
||||
ConnectionCount float64 `perflib:"Connection Count"`
|
||||
RPCOperationsPerSec float64 `perflib:"RPC Operations/sec"`
|
||||
UserCount float64 `perflib:"User Count"`
|
||||
}
|
||||
|
||||
func (c *exchangeCollector) collectRPC(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var data []perflibRPCClientAccess
|
||||
if err := unmarshalObject(ctx.perfObjects["MSExchange RpcClientAccess"], &data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, rpc := range data {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RPCAveragedLatency,
|
||||
prometheus.GaugeValue,
|
||||
c.msToSec(rpc.RPCAveragedLatency),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RPCRequests,
|
||||
prometheus.GaugeValue,
|
||||
rpc.RPCRequests,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ActiveUserCount,
|
||||
prometheus.GaugeValue,
|
||||
rpc.ActiveUserCount,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionCount,
|
||||
prometheus.GaugeValue,
|
||||
rpc.ConnectionCount,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RPCOperationsPerSec,
|
||||
prometheus.CounterValue,
|
||||
rpc.RPCOperationsPerSec,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UserCount,
|
||||
prometheus.GaugeValue,
|
||||
rpc.UserCount,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [20524] MSExchangeTransport Queues
|
||||
type perflibTransportQueues struct {
|
||||
Name string
|
||||
|
||||
ExternalActiveRemoteDeliveryQueueLength float64 `perflib:"External Active Remote Delivery Queue Length"`
|
||||
InternalActiveRemoteDeliveryQueueLength float64 `perflib:"Internal Active Remote Delivery Queue Length"`
|
||||
ActiveMailboxDeliveryQueueLength float64 `perflib:"Active Mailbox Delivery Queue Length"`
|
||||
RetryMailboxDeliveryQueueLength float64 `perflib:"Retry Mailbox Delivery Queue Length"`
|
||||
UnreachableQueueLength float64 `perflib:"Unreachable Queue Length"`
|
||||
ExternalLargestDeliveryQueueLength float64 `perflib:"External Largest Delivery Queue Length"`
|
||||
InternalLargestDeliveryQueueLength float64 `perflib:"Internal Largest Delivery Queue Length"`
|
||||
PoisonQueueLength float64 `perflib:"Poison Queue Length"`
|
||||
}
|
||||
|
||||
func (c *exchangeCollector) collectTransportQueues(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var data []perflibTransportQueues
|
||||
if err := unmarshalObject(ctx.perfObjects["MSExchangeTransport Queues"], &data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, queue := range data {
|
||||
labelName := c.toLabelName(queue.Name)
|
||||
if strings.HasSuffix(labelName, "_total") {
|
||||
continue
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ExternalActiveRemoteDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.ExternalActiveRemoteDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InternalActiveRemoteDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.InternalActiveRemoteDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ActiveMailboxDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.ActiveMailboxDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RetryMailboxDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.RetryMailboxDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UnreachableQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.UnreachableQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ExternalLargestDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.ExternalLargestDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InternalLargestDeliveryQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.InternalLargestDeliveryQueueLength,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoisonQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
queue.PoisonQueueLength,
|
||||
labelName,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: [19430] MSExchange WorkloadManagement Workloads
|
||||
type perflibWorkloadManagementWorkloads struct {
|
||||
Name string
|
||||
|
||||
ActiveTasks float64 `perflib:"ActiveTasks"`
|
||||
CompletedTasks float64 `perflib:"CompletedTasks"`
|
||||
QueuedTasks float64 `perflib:"QueuedTasks"`
|
||||
YieldedTasks float64 `perflib:"YieldedTasks"`
|
||||
IsActive float64 `perflib:"Active"`
|
||||
}
|
||||
|
||||
func (c *exchangeCollector) collectWorkloadManagementWorkloads(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var data []perflibWorkloadManagementWorkloads
|
||||
if err := unmarshalObject(ctx.perfObjects["MSExchange WorkloadManagement Workloads"], &data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, instance := range data {
|
||||
labelName := c.toLabelName(instance.Name)
|
||||
if strings.HasSuffix(labelName, "_total") {
|
||||
continue
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ActiveTasks,
|
||||
prometheus.GaugeValue,
|
||||
instance.ActiveTasks,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CompletedTasks,
|
||||
prometheus.CounterValue,
|
||||
instance.CompletedTasks,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.QueuedTasks,
|
||||
prometheus.CounterValue,
|
||||
instance.QueuedTasks,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.YieldedTasks,
|
||||
prometheus.CounterValue,
|
||||
instance.YieldedTasks,
|
||||
labelName,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IsActive,
|
||||
prometheus.GaugeValue,
|
||||
instance.IsActive,
|
||||
labelName,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// [29240] MSExchangeAutodiscover
|
||||
type perflibAutodiscover struct {
|
||||
RequestsPerSec float64 `perflib:"Requests/sec"`
|
||||
}
|
||||
|
||||
func (c *exchangeCollector) collectAutoDiscover(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var data []perflibAutodiscover
|
||||
if err := unmarshalObject(ctx.perfObjects["MSExchangeAutodiscover"], &data); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, autodisc := range data {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AutodiscoverRequestsPerSec,
|
||||
prometheus.CounterValue,
|
||||
autodisc.RequestsPerSec,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// toLabelName converts strings to lowercase and replaces all whitespace and dots with underscores
func (c *exchangeCollector) toLabelName(name string) string {
	s := strings.ReplaceAll(strings.Join(strings.Fields(strings.ToLower(name)), "_"), ".", "_")
	s = strings.ReplaceAll(s, "__", "_")
	return s
}

// msToSec converts from ms to seconds
func (c *exchangeCollector) msToSec(t float64) float64 {
	return t / 1000
}
|
||||
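toLabelName above is what turns perflib instance names into Prometheus label values across the exchange sub-collectors. A quick trace with a made-up instance name (the input string is hypothetical, not taken from a real server):

// Hypothetical example of the normalisation performed by toLabelName above:
//
//	name := "MSExchange OWA. Frontend"              // invented instance name
//	label := (&exchangeCollector{}).toLabelName(name)
//	// label == "msexchange_owa_frontend"
//
// Lowercasing, whitespace-to-underscore and dot-to-underscore come from the first
// ReplaceAll/Fields line; the second ReplaceAll collapses the resulting "__".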
collector/exchange_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkExchangeCollector(b *testing.B) {
	benchmarkCollector(b, "exchange", newExchangeCollector)
}
|
||||
collector/fsrmquota.go (new file, 187 lines)
@@ -0,0 +1,187 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("fsrmquota", newFSRMQuotaCollector)
|
||||
}
|
||||
|
||||
type FSRMQuotaCollector struct {
|
||||
QuotasCount *prometheus.Desc
|
||||
Path *prometheus.Desc
|
||||
PeakUsage *prometheus.Desc
|
||||
Size *prometheus.Desc
|
||||
Usage *prometheus.Desc
|
||||
|
||||
Description *prometheus.Desc
|
||||
Disabled *prometheus.Desc
|
||||
MatchesTemplate *prometheus.Desc
|
||||
SoftLimit *prometheus.Desc
|
||||
Template *prometheus.Desc
|
||||
}
|
||||
|
||||
func newFSRMQuotaCollector() (Collector, error) {
|
||||
const subsystem = "fsrmquota"
|
||||
return &FSRMQuotaCollector{
|
||||
QuotasCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "count"),
|
||||
"Number of Quotas",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PeakUsage: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "peak_usage_bytes"),
|
||||
"The highest amount of disk space usage charged to this quota. (PeakUsage)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
),
|
||||
Size: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "size_bytes"),
|
||||
"The size of the quota. (Size)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
),
|
||||
Usage: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "usage_bytes"),
|
||||
"The current amount of disk space usage charged to this quota. (Usage)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
),
|
||||
Description: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "description"),
|
||||
"Description of the quota (Description)",
|
||||
[]string{"path", "template", "description"},
|
||||
nil,
|
||||
),
|
||||
Disabled: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "disabled"),
|
||||
"If 1, the quota is disabled. The default value is 0. (Disabled)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
),
|
||||
SoftLimit: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "softlimit"),
|
||||
"If 1, the quota is a soft limit. If 0, the quota is a hard limit. The default value is 0. Optional (SoftLimit)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
),
|
||||
Template: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "template"),
|
||||
"Quota template name. (Template)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
),
|
||||
MatchesTemplate: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "matchestemplate"),
|
||||
"If 1, the property values of this quota match those values of the template from which it was derived. (MatchesTemplate)",
|
||||
[]string{"path", "template"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *FSRMQuotaCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting fsrmquota metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MSFT_FSRMQuota docs:
|
||||
// https://docs.microsoft.com/en-us/previous-versions/windows/desktop/fsrm/msft-fsrmquota
|
||||
type MSFT_FSRMQuota struct {
|
||||
Name string
|
||||
|
||||
Path string
|
||||
PeakUsage uint64
|
||||
Size uint64
|
||||
Usage uint64
|
||||
Description string
|
||||
Template string
|
||||
//Threshold string
|
||||
Disabled bool
|
||||
MatchesTemplate bool
|
||||
SoftLimit bool
|
||||
}
|
||||
|
||||
func (c *FSRMQuotaCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []MSFT_FSRMQuota
|
||||
q := queryAll(&dst)
|
||||
|
||||
var count int
|
||||
|
||||
if err := wmi.QueryNamespace(q, &dst, "root/microsoft/windows/fsrm"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, quota := range dst {
|
||||
|
||||
count++
|
||||
path := quota.Path
|
||||
template := quota.Template
|
||||
Description := quota.Description
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PeakUsage,
|
||||
prometheus.GaugeValue,
|
||||
float64(quota.PeakUsage),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Size,
|
||||
prometheus.GaugeValue,
|
||||
float64(quota.Size),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Usage,
|
||||
prometheus.GaugeValue,
|
||||
float64(quota.Usage),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Description,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
path, template, Description,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Disabled,
|
||||
prometheus.GaugeValue,
|
||||
boolToFloat(quota.Disabled),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MatchesTemplate,
|
||||
prometheus.GaugeValue,
|
||||
boolToFloat(quota.MatchesTemplate),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SoftLimit,
|
||||
prometheus.GaugeValue,
|
||||
boolToFloat(quota.SoftLimit),
|
||||
path,
|
||||
template,
|
||||
)
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.QuotasCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(count),
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
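The fsrmquota collect loop above relies on a boolToFloat helper that is not part of this diff. If it does not already exist elsewhere in the package, the obvious shape for it is the conversion below; treat this as an assumption about the helper, not the repository's definition:

// Assumed shape of the boolToFloat helper used by the fsrmquota collector above:
// true maps to 1 and false to 0, matching the "If 1, ..." metric descriptions.
func boolToFloat(b bool) float64 {
	if b {
		return 1.0
	}
	return 0.0
}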
collector/fsrmquota_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkFsrmQuotaCollector(b *testing.B) {
	benchmarkCollector(b, "fsrmquota", newFSRMQuotaCollector)
}
|
||||
collector/hyperv.go (new file, 1698 lines; file diff suppressed because it is too large)
collector/hyperv_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkHypervCollector(b *testing.B) {
	benchmarkCollector(b, "hyperv", NewHyperVCollector)
}
|
||||
collector/iis.go (1766 lines; file diff suppressed because it is too large)
collector/iis_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkIISCollector(b *testing.B) {
	benchmarkCollector(b, "iis", NewIISCollector)
}
|
||||
@@ -1,41 +1,50 @@
|
||||
// returns data points from Win32_PerfRawData_PerfDisk_LogicalDisk
|
||||
// https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
|
||||
// https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["logical_disk"] = NewLogicalDiskCollector
|
||||
registerCollector("logical_disk", NewLogicalDiskCollector, "LogicalDisk")
|
||||
}
|
||||
|
||||
var (
|
||||
volumeWhitelist = flag.String("collector.logical_disk.volume-whitelist", ".+", "Regexp of volumes to whitelist. Volume name must both match whitelist and not match blacklist to be included.")
|
||||
volumeBlacklist = flag.String("collector.logical_disk.volume-blacklist", "", "Regexp of volumes to blacklist. Volume name must both match whitelist and not match blacklist to be included.")
|
||||
volumeWhitelist = kingpin.Flag(
|
||||
"collector.logical_disk.volume-whitelist",
|
||||
"Regexp of volumes to whitelist. Volume name must both match whitelist and not match blacklist to be included.",
|
||||
).Default(".+").String()
|
||||
volumeBlacklist = kingpin.Flag(
|
||||
"collector.logical_disk.volume-blacklist",
|
||||
"Regexp of volumes to blacklist. Volume name must both match whitelist and not match blacklist to be included.",
|
||||
).Default("").String()
|
||||
)
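Only the flag definitions and the compiled patterns are visible in this hunk; the place where they are applied sits in unchanged code further down. A minimal sketch of the whitelist/blacklist semantics implied by the flag help text above (the _Total skip and the exact call site are assumptions):

// volumeIncluded (sketch): a volume is collected only if it matches the
// whitelist and does not match the blacklist; the _Total aggregate is skipped.
func volumeIncluded(name string, whitelist, blacklist *regexp.Regexp) bool {
	if name == "_Total" {
		return false
	}
	return whitelist.MatchString(name) && !blacklist.MatchString(name)
}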
|
||||
|
||||
// A LogicalDiskCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfDisk_LogicalDisk metrics
|
||||
// A LogicalDiskCollector is a Prometheus collector for perflib logicalDisk metrics
|
||||
type LogicalDiskCollector struct {
|
||||
RequestsQueued *prometheus.Desc
|
||||
ReadBytesTotal *prometheus.Desc
|
||||
ReadsTotal *prometheus.Desc
|
||||
WriteBytesTotal *prometheus.Desc
|
||||
WritesTotal *prometheus.Desc
|
||||
ReadTime *prometheus.Desc
|
||||
WriteTime *prometheus.Desc
|
||||
TotalSpace *prometheus.Desc
|
||||
FreeSpace *prometheus.Desc
|
||||
IdleTime *prometheus.Desc
|
||||
SplitIOs *prometheus.Desc
|
||||
RequestsQueued *prometheus.Desc
|
||||
AvgReadQueue *prometheus.Desc
|
||||
AvgWriteQueue *prometheus.Desc
|
||||
ReadBytesTotal *prometheus.Desc
|
||||
ReadsTotal *prometheus.Desc
|
||||
WriteBytesTotal *prometheus.Desc
|
||||
WritesTotal *prometheus.Desc
|
||||
ReadTime *prometheus.Desc
|
||||
WriteTime *prometheus.Desc
|
||||
TotalSpace *prometheus.Desc
|
||||
FreeSpace *prometheus.Desc
|
||||
IdleTime *prometheus.Desc
|
||||
SplitIOs *prometheus.Desc
|
||||
ReadLatency *prometheus.Desc
|
||||
WriteLatency *prometheus.Desc
|
||||
ReadWriteLatency *prometheus.Desc
|
||||
|
||||
volumeWhitelistPattern *regexp.Regexp
|
||||
volumeBlacklistPattern *regexp.Regexp
|
||||
@@ -53,6 +62,20 @@ func NewLogicalDiskCollector() (Collector, error) {
|
||||
nil,
|
||||
),
|
||||
|
||||
AvgReadQueue: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "avg_read_requests_queued"),
|
||||
"Average number of read requests that were queued for the selected disk during the sample interval (LogicalDisk.AvgDiskReadQueueLength)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
AvgWriteQueue: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "avg_write_requests_queued"),
|
||||
"Average number of write requests that were queued for the selected disk during the sample interval (LogicalDisk.AvgDiskWriteQueueLength)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ReadBytesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "read_bytes_total"),
|
||||
"The number of bytes transferred from the disk during read operations (LogicalDisk.DiskReadBytesPerSec)",
|
||||
@@ -97,14 +120,14 @@ func NewLogicalDiskCollector() (Collector, error) {
|
||||
|
||||
FreeSpace: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "free_bytes"),
|
||||
"Free space in bytes (LogicalDisk.PercentFreeSpace)",
|
||||
"Free space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
TotalSpace: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "size_bytes"),
|
||||
"Total space in bytes (LogicalDisk.PercentFreeSpace_Base)",
|
||||
"Total space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace_Base)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
@@ -123,6 +146,27 @@ func NewLogicalDiskCollector() (Collector, error) {
|
||||
nil,
|
||||
),
|
||||
|
||||
ReadLatency: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "read_latency_seconds_total"),
|
||||
"Shows the average time, in seconds, of a read operation from the disk (LogicalDisk.AvgDiskSecPerRead)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
WriteLatency: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "write_latency_seconds_total"),
|
||||
"Shows the average time, in seconds, of a write operation to the disk (LogicalDisk.AvgDiskSecPerWrite)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ReadWriteLatency: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "read_write_latency_seconds_total"),
|
||||
"Shows the time, in seconds, of the average disk transfer (LogicalDisk.AvgDiskSecPerTransfer)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
volumeWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeWhitelist)),
|
||||
volumeBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeBlacklist)),
|
||||
}, nil
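Wrapping the user-supplied expression in ^(?:...)$ forces a whole-name match rather than a substring match. A self-contained illustration with hypothetical volume names:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// With --collector.logical_disk.volume-whitelist="C:|D:" the collector
	// compiles ^(?:C:|D:)$, so only complete volume names are accepted.
	pattern := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", "C:|D:"))
	fmt.Println(pattern.MatchString("C:"))       // true
	fmt.Println(pattern.MatchString("C:\\data")) // false: partial matches are rejected
}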
|
||||
@@ -130,33 +174,40 @@ func NewLogicalDiskCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *LogicalDiskCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Println("[ERROR] failed collecting logical_disk metrics:", desc, err)
|
||||
func (c *LogicalDiskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting logical_disk metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_PerfDisk_LogicalDisk struct {
|
||||
Name string
|
||||
CurrentDiskQueueLength uint32
|
||||
DiskReadBytesPerSec uint64
|
||||
DiskReadsPerSec uint32
|
||||
DiskWriteBytesPerSec uint64
|
||||
DiskWritesPerSec uint32
|
||||
PercentDiskReadTime uint64
|
||||
PercentDiskWriteTime uint64
|
||||
PercentFreeSpace uint32
|
||||
PercentFreeSpace_Base uint32
|
||||
PercentIdleTime uint64
|
||||
SplitIOPerSec uint32
|
||||
// Win32_PerfRawData_PerfDisk_LogicalDisk docs:
|
||||
// - https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
|
||||
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
|
||||
type logicalDisk struct {
|
||||
Name string
|
||||
CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
|
||||
AvgDiskReadQueueLength float64 `perflib:"Avg. Disk Read Queue Length"`
|
||||
AvgDiskWriteQueueLength float64 `perflib:"Avg. Disk Write Queue Length"`
|
||||
DiskReadBytesPerSec float64 `perflib:"Disk Read Bytes/sec"`
|
||||
DiskReadsPerSec float64 `perflib:"Disk Reads/sec"`
|
||||
DiskWriteBytesPerSec float64 `perflib:"Disk Write Bytes/sec"`
|
||||
DiskWritesPerSec float64 `perflib:"Disk Writes/sec"`
|
||||
PercentDiskReadTime float64 `perflib:"% Disk Read Time"`
|
||||
PercentDiskWriteTime float64 `perflib:"% Disk Write Time"`
|
||||
PercentFreeSpace float64 `perflib:"% Free Space_Base"`
|
||||
PercentFreeSpace_Base float64 `perflib:"Free Megabytes"`
|
||||
PercentIdleTime float64 `perflib:"% Idle Time"`
|
||||
SplitIOPerSec float64 `perflib:"Split IO/Sec"`
|
||||
AvgDiskSecPerRead float64 `perflib:"Avg. Disk sec/Read"`
|
||||
AvgDiskSecPerWrite float64 `perflib:"Avg. Disk sec/Write"`
|
||||
AvgDiskSecPerTransfer float64 `perflib:"Avg. Disk sec/Transfer"`
|
||||
}
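Several of the new latency and queue metrics below multiply raw perflib values by ticksToSecondsScaleFactor, which is declared elsewhere in the package. Presumably it converts 100 ns performance-counter ticks into seconds, along the lines of:

// ticksToSecondsScaleFactor (assumed definition): perflib timer values are in
// 100 ns ticks, so dividing by 1e7 yields seconds.
const ticksToSecondsScaleFactor = 1 / 1e7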
|
||||
|
||||
func (c *LogicalDiskCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfDisk_LogicalDisk
|
||||
q := wmi.CreateQuery(&dst, "")
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
func (c *LogicalDiskCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []logicalDisk
|
||||
if err := unmarshalObject(ctx.perfObjects["LogicalDisk"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -170,77 +221,112 @@ func (c *LogicalDiskCollector) collect(ch chan<- prometheus.Metric) (*prometheus
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RequestsQueued,
|
||||
prometheus.GaugeValue,
|
||||
float64(volume.CurrentDiskQueueLength),
|
||||
volume.CurrentDiskQueueLength,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AvgReadQueue,
|
||||
prometheus.GaugeValue,
|
||||
volume.AvgDiskReadQueueLength*ticksToSecondsScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AvgWriteQueue,
|
||||
prometheus.GaugeValue,
|
||||
volume.AvgDiskWriteQueueLength*ticksToSecondsScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.DiskReadBytesPerSec),
|
||||
volume.DiskReadBytesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.DiskReadsPerSec),
|
||||
volume.DiskReadsPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.DiskWriteBytesPerSec),
|
||||
volume.DiskWriteBytesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WritesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.DiskWritesPerSec),
|
||||
volume.DiskWritesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadTime,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.PercentDiskReadTime)*ticksToSecondsScaleFactor,
|
||||
volume.PercentDiskReadTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteTime,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.PercentDiskWriteTime)*ticksToSecondsScaleFactor,
|
||||
volume.PercentDiskWriteTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FreeSpace,
|
||||
prometheus.GaugeValue,
|
||||
float64(volume.PercentFreeSpace)*1024*1024,
|
||||
volume.PercentFreeSpace_Base*1024*1024,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalSpace,
|
||||
prometheus.GaugeValue,
|
||||
float64(volume.PercentFreeSpace_Base)*1024*1024,
|
||||
volume.PercentFreeSpace*1024*1024,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IdleTime,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.PercentIdleTime)*ticksToSecondsScaleFactor,
|
||||
volume.PercentIdleTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SplitIOs,
|
||||
prometheus.CounterValue,
|
||||
float64(volume.SplitIOPerSec),
|
||||
volume.SplitIOPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerRead*ticksToSecondsScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerWrite*ticksToSecondsScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadWriteLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerTransfer*ticksToSecondsScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
}
|
||||
|
||||
13 collector/logical_disk_test.go (new file)
@@ -0,0 +1,13 @@
package collector

import (
	"testing"
)

func BenchmarkLogicalDiskCollector(b *testing.B) {
	// Whitelist is not set in testing context (kingpin flags not parsed), causing the collector to skip all disks.
	localVolumeWhitelist := ".+"
	volumeWhitelist = &localVolumeWhitelist

	benchmarkCollector(b, "logical_disk", NewLogicalDiskCollector)
}
200 collector/logon.go (new file)
@@ -0,0 +1,200 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("logon", NewLogonCollector)
|
||||
}
|
||||
|
||||
// A LogonCollector is a Prometheus collector for WMI metrics
|
||||
type LogonCollector struct {
|
||||
LogonType *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewLogonCollector builds a new LogonCollector.
|
||||
func NewLogonCollector() (Collector, error) {
|
||||
const subsystem = "logon"
|
||||
|
||||
return &LogonCollector{
|
||||
LogonType: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "logon_type"),
|
||||
"Number of active logon sessions (LogonSession.LogonType)",
|
||||
[]string{"status"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *LogonCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting user metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_LogonSession docs:
|
||||
// - https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-logonsession
|
||||
type Win32_LogonSession struct {
|
||||
LogonType uint32
|
||||
}
|
||||
|
||||
func (c *LogonCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_LogonSession
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
// Init counters
|
||||
system := 0
|
||||
interactive := 0
|
||||
network := 0
|
||||
batch := 0
|
||||
service := 0
|
||||
proxy := 0
|
||||
unlock := 0
|
||||
networkcleartext := 0
|
||||
newcredentials := 0
|
||||
remoteinteractive := 0
|
||||
cachedinteractive := 0
|
||||
cachedremoteinteractive := 0
|
||||
cachedunlock := 0
|
||||
|
||||
for _, entry := range dst {
|
||||
switch entry.LogonType {
|
||||
case 0:
|
||||
system++
|
||||
case 2:
|
||||
interactive++
|
||||
case 3:
|
||||
network++
|
||||
case 4:
|
||||
batch++
|
||||
case 5:
|
||||
service++
|
||||
case 6:
|
||||
proxy++
|
||||
case 7:
|
||||
unlock++
|
||||
case 8:
|
||||
networkcleartext++
|
||||
case 9:
|
||||
newcredentials++
|
||||
case 10:
|
||||
remoteinteractive++
|
||||
case 11:
|
||||
cachedinteractive++
|
||||
case 12:
|
||||
cachedremoteinteractive++
|
||||
case 13:
|
||||
cachedunlock++
|
||||
}
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(system),
|
||||
"system",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(interactive),
|
||||
"interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(network),
|
||||
"network",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(batch),
|
||||
"batch",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(service),
|
||||
"service",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(proxy),
|
||||
"proxy",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(unlock),
|
||||
"unlock",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(networkcleartext),
|
||||
"network_clear_text",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(newcredentials),
|
||||
"new_credentials",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(remoteinteractive),
|
||||
"remote_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedinteractive),
|
||||
"cached_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedremoteinteractive),
|
||||
"cached_remote_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedunlock),
|
||||
"cached_unlock",
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
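The switch statement in logon.go above tallies each Win32_LogonSession.LogonType into its own counter variable. For reference, the same mapping can be written as a lookup table; this is only an illustrative alternative formulation, not what the file does:

// Sketch: the LogonType-to-label mapping from the switch above, as a map.
var logonTypeNames = map[uint32]string{
	0: "system", 2: "interactive", 3: "network", 4: "batch",
	5: "service", 6: "proxy", 7: "unlock", 8: "network_clear_text",
	9: "new_credentials", 10: "remote_interactive", 11: "cached_interactive",
	12: "cached_remote_interactive", 13: "cached_unlock",
}

func countLogonTypes(sessions []Win32_LogonSession) map[string]int {
	counts := make(map[string]int)
	for _, s := range sessions {
		if name, ok := logonTypeNames[s.LogonType]; ok {
			counts[name]++
		}
	}
	return counts
}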
10 collector/logon_test.go (new file)
@@ -0,0 +1,10 @@
package collector

import (
	"testing"
)

func BenchmarkLogonCollector(b *testing.B) {
	// No context name required as collector source is WMI
	benchmarkCollector(b, "", NewLogonCollector)
}
512 collector/memory.go (new file)
@@ -0,0 +1,512 @@
|
||||
// returns data points from Win32_PerfRawData_PerfOS_Memory
|
||||
// <add link to documentation here> - Win32_PerfRawData_PerfOS_Memory class
|
||||
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("memory", NewMemoryCollector, "Memory")
|
||||
}
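registerCollector("memory", NewMemoryCollector, "Memory") ties the collector name to its constructor and to the perflib object it wants pre-fetched into the ScrapeContext. The helper itself lives in shared collector code that is not part of this hunk; a rough sketch of the bookkeeping it presumably performs (names and map layout are assumptions):

// Sketch of the assumed registration bookkeeping: constructors by collector
// name, plus the perflib object names each collector needs in ScrapeContext.
var (
	builders        = make(map[string]func() (Collector, error))
	perfCounterDeps = make(map[string][]string)
)

func registerCollector(name string, builder func() (Collector, error), perfCounterNames ...string) {
	builders[name] = builder
	perfCounterDeps[name] = perfCounterNames
}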
|
||||
|
||||
// A MemoryCollector is a Prometheus collector for perflib Memory metrics
|
||||
type MemoryCollector struct {
|
||||
AvailableBytes *prometheus.Desc
|
||||
CacheBytes *prometheus.Desc
|
||||
CacheBytesPeak *prometheus.Desc
|
||||
CacheFaultsTotal *prometheus.Desc
|
||||
CommitLimit *prometheus.Desc
|
||||
CommittedBytes *prometheus.Desc
|
||||
DemandZeroFaultsTotal *prometheus.Desc
|
||||
FreeAndZeroPageListBytes *prometheus.Desc
|
||||
FreeSystemPageTableEntries *prometheus.Desc
|
||||
ModifiedPageListBytes *prometheus.Desc
|
||||
PageFaultsTotal *prometheus.Desc
|
||||
SwapPageReadsTotal *prometheus.Desc
|
||||
SwapPagesReadTotal *prometheus.Desc
|
||||
SwapPagesWrittenTotal *prometheus.Desc
|
||||
SwapPageOperationsTotal *prometheus.Desc
|
||||
SwapPageWritesTotal *prometheus.Desc
|
||||
PoolNonpagedAllocsTotal *prometheus.Desc
|
||||
PoolNonpagedBytes *prometheus.Desc
|
||||
PoolPagedAllocsTotal *prometheus.Desc
|
||||
PoolPagedBytes *prometheus.Desc
|
||||
PoolPagedResidentBytes *prometheus.Desc
|
||||
StandbyCacheCoreBytes *prometheus.Desc
|
||||
StandbyCacheNormalPriorityBytes *prometheus.Desc
|
||||
StandbyCacheReserveBytes *prometheus.Desc
|
||||
SystemCacheResidentBytes *prometheus.Desc
|
||||
SystemCodeResidentBytes *prometheus.Desc
|
||||
SystemCodeTotalBytes *prometheus.Desc
|
||||
SystemDriverResidentBytes *prometheus.Desc
|
||||
SystemDriverTotalBytes *prometheus.Desc
|
||||
TransitionFaultsTotal *prometheus.Desc
|
||||
TransitionPagesRepurposedTotal *prometheus.Desc
|
||||
WriteCopiesTotal *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewMemoryCollector builds a new MemoryCollector.
|
||||
func NewMemoryCollector() (Collector, error) {
|
||||
const subsystem = "memory"
|
||||
|
||||
return &MemoryCollector{
|
||||
AvailableBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "available_bytes"),
|
||||
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
|
||||
" the standby (cached), free and zero page lists (AvailableBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CacheBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cache_bytes"),
|
||||
"(CacheBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CacheBytesPeak: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cache_bytes_peak"),
|
||||
"(CacheBytesPeak)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CacheFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cache_faults_total"),
|
||||
"Number of faults which occur when a page sought in the file system cache is not found there and must be retrieved from elsewhere in memory (soft fault) "+
|
||||
"or from disk (hard fault) (Cache Faults/sec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CommitLimit: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "commit_limit"),
|
||||
"(CommitLimit)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CommittedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "committed_bytes"),
|
||||
"(CommittedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DemandZeroFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "demand_zero_faults_total"),
|
||||
"The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security"+
|
||||
" feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space (Demand Zero Faults/sec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FreeAndZeroPageListBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "free_and_zero_page_list_bytes"),
|
||||
"The amount of physical memory, in bytes, that is assigned to the free and zero page lists. This memory does not contain cached data. It is immediately"+
|
||||
" available for allocation to a process or for system use (FreeAndZeroPageListBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FreeSystemPageTableEntries: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "free_system_page_table_entries"),
|
||||
"(FreeSystemPageTableEntries)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ModifiedPageListBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "modified_page_list_bytes"),
|
||||
"The amount of physical memory, in bytes, that is assigned to the modified page list. This memory contains cached data and code that is not actively in "+
|
||||
"use by processes, the system and the system cache (ModifiedPageListBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PageFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "page_faults_total"),
|
||||
"Overall rate at which faulted pages are handled by the processor (Page Faults/sec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPageReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_page_reads_total"),
|
||||
"Number of disk page reads (a single read operation reading several pages is still only counted once) (PageReadsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPagesReadTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_pages_read_total"),
|
||||
"Number of pages read across all page reads (ie counting all pages read even if they are read in a single operation) (PagesInputPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPagesWrittenTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_pages_written_total"),
|
||||
"Number of pages written across all page writes (ie counting all pages written even if they are written in a single operation) (PagesOutputPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPageOperationsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_page_operations_total"),
|
||||
"Total number of swap page read and writes (PagesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPageWritesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_page_writes_total"),
|
||||
"Number of disk page writes (a single write operation writing several pages is still only counted once) (PageWritesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolNonpagedAllocsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_nonpaged_allocs_total"),
|
||||
"The number of calls to allocate space in the nonpaged pool. The nonpaged pool is an area of system memory area for objects that cannot be written"+
|
||||
" to disk, and must remain in physical memory as long as they are allocated (PoolNonpagedAllocs)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolNonpagedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_nonpaged_bytes"),
|
||||
"Number of bytes in the non-paged pool, an area of the system virtual memory that is used for objects that cannot be written to disk, but must "+
|
||||
"remain in physical memory as long as they are allocated (PoolNonpagedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolPagedAllocsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_allocs_total"),
|
||||
"Number of calls to allocate space in the paged pool, regardless of the amount of space allocated in each call (PoolPagedAllocs)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolPagedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_bytes"),
|
||||
"(PoolPagedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolPagedResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_resident_bytes"),
|
||||
"The size, in bytes, of the portion of the paged pool that is currently resident and active in physical memory. The paged pool is an area of the "+
|
||||
"system virtual memory that is used for objects that can be written to disk when they are not being used (PoolPagedResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
StandbyCacheCoreBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_core_bytes"),
|
||||
"The amount of physical memory, in bytes, that is assigned to the core standby cache page lists. This memory contains cached data and code that is "+
|
||||
"not actively in use by processes, the system and the system cache (StandbyCacheCoreBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
StandbyCacheNormalPriorityBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_normal_priority_bytes"),
|
||||
"The amount of physical memory, in bytes, that is assigned to the normal priority standby cache page lists. This memory contains cached data and "+
|
||||
"code that is not actively in use by processes, the system and the system cache (StandbyCacheNormalPriorityBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
StandbyCacheReserveBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_reserve_bytes"),
|
||||
"The amount of physical memory, in bytes, that is assigned to the reserve standby cache page lists. This memory contains cached data and code "+
|
||||
"that is not actively in use by processes, the system and the system cache (StandbyCacheReserveBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemCacheResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_cache_resident_bytes"),
|
||||
"The size, in bytes, of the portion of the system file cache which is currently resident and active in physical memory (SystemCacheResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemCodeResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_code_resident_bytes"),
|
||||
"The size, in bytes, of the pageable operating system code that is currently resident and active in physical memory (SystemCodeResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemCodeTotalBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_code_total_bytes"),
|
||||
"The size, in bytes, of the pageable operating system code currently mapped into the system virtual address space (SystemCodeTotalBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemDriverResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_driver_resident_bytes"),
|
||||
"The size, in bytes, of the pageable physical memory being used by device drivers. It is the working set (physical memory area) of the drivers (SystemDriverResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemDriverTotalBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_driver_total_bytes"),
|
||||
"The size, in bytes, of the pageable virtual memory currently being used by device drivers. Pageable memory can be written to disk when it is not being used (SystemDriverTotalBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
TransitionFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transition_faults_total"),
|
||||
"Number of faults rate at which page faults are resolved by recovering pages that were being used by another process sharing the page, or were on the "+
|
||||
"modified page list or the standby list, or were being written to disk at the time of the page fault (TransitionFaultsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
TransitionPagesRepurposedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transition_pages_repurposed_total"),
|
||||
"Transition Pages RePurposed is the rate at which the number of transition cache pages were reused for a different purpose (TransitionPagesRePurposedPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
WriteCopiesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "write_copies_total"),
|
||||
"The number of page faults caused by attempting to write that were satisfied by copying the page from elsewhere in physical memory (WriteCopiesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting memory metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type memory struct {
|
||||
AvailableBytes float64 `perflib:"Available Bytes"`
|
||||
AvailableKBytes float64 `perflib:"Available KBytes"`
|
||||
AvailableMBytes float64 `perflib:"Available MBytes"`
|
||||
CacheBytes float64 `perflib:"Cache Bytes"`
|
||||
CacheBytesPeak float64 `perflib:"Cache Bytes Peak"`
|
||||
CacheFaultsPersec float64 `perflib:"Cache Faults/sec"`
|
||||
CommitLimit float64 `perflib:"Commit Limit"`
|
||||
CommittedBytes float64 `perflib:"Committed Bytes"`
|
||||
DemandZeroFaultsPersec float64 `perflib:"Demand Zero Faults/sec"`
|
||||
FreeAndZeroPageListBytes float64 `perflib:"Free & Zero Page List Bytes"`
|
||||
FreeSystemPageTableEntries float64 `perflib:"Free System Page Table Entries"`
|
||||
ModifiedPageListBytes float64 `perflib:"Modified Page List Bytes"`
|
||||
PageFaultsPersec float64 `perflib:"Page Faults/sec"`
|
||||
PageReadsPersec float64 `perflib:"Page Reads/sec"`
|
||||
PagesInputPersec float64 `perflib:"Pages Input/sec"`
|
||||
PagesOutputPersec float64 `perflib:"Pages Output/sec"`
|
||||
PagesPersec float64 `perflib:"Pages/sec"`
|
||||
PageWritesPersec float64 `perflib:"Page Writes/sec"`
|
||||
PoolNonpagedAllocs float64 `perflib:"Pool Nonpaged Allocs"`
|
||||
PoolNonpagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
|
||||
PoolPagedAllocs float64 `perflib:"Pool Paged Allocs"`
|
||||
PoolPagedBytes float64 `perflib:"Pool Paged Bytes"`
|
||||
PoolPagedResidentBytes float64 `perflib:"Pool Paged Resident Bytes"`
|
||||
StandbyCacheCoreBytes float64 `perflib:"Standby Cache Core Bytes"`
|
||||
StandbyCacheNormalPriorityBytes float64 `perflib:"Standby Cache Normal Priority Bytes"`
|
||||
StandbyCacheReserveBytes float64 `perflib:"Standby Cache Reserve Bytes"`
|
||||
SystemCacheResidentBytes float64 `perflib:"System Cache Resident Bytes"`
|
||||
SystemCodeResidentBytes float64 `perflib:"System Code Resident Bytes"`
|
||||
SystemCodeTotalBytes float64 `perflib:"System Code Total Bytes"`
|
||||
SystemDriverResidentBytes float64 `perflib:"System Driver Resident Bytes"`
|
||||
SystemDriverTotalBytes float64 `perflib:"System Driver Total Bytes"`
|
||||
TransitionFaultsPersec float64 `perflib:"Transition Faults/sec"`
|
||||
TransitionPagesRePurposedPersec float64 `perflib:"Transition Pages RePurposed/sec"`
|
||||
WriteCopiesPersec float64 `perflib:"Write Copies/sec"`
|
||||
}
|
||||
|
||||
func (c *MemoryCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []memory
|
||||
if err := unmarshalObject(ctx.perfObjects["Memory"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AvailableBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].AvailableBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CacheBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheBytesPeak,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CacheBytesPeak,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheFaultsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].CacheFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CommitLimit,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CommitLimit,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CommittedBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CommittedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DemandZeroFaultsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].DemandZeroFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FreeAndZeroPageListBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].FreeAndZeroPageListBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FreeSystemPageTableEntries,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].FreeSystemPageTableEntries,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ModifiedPageListBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].ModifiedPageListBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PageFaultsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PageFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PageReadsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPagesReadTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PagesInputPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPagesWrittenTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PagesOutputPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageOperationsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PagesPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageWritesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PageWritesPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolNonpagedAllocsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolNonpagedAllocs,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolNonpagedBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolNonpagedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedAllocsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].PoolPagedAllocs,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolPagedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolPagedResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheCoreBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].StandbyCacheCoreBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheNormalPriorityBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].StandbyCacheNormalPriorityBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheReserveBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].StandbyCacheReserveBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCacheResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemCacheResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCodeResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemCodeResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCodeTotalBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemCodeTotalBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemDriverResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemDriverResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemDriverTotalBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemDriverTotalBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransitionFaultsTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].TransitionFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransitionPagesRepurposedTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].TransitionPagesRePurposedPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteCopiesTotal,
|
||||
prometheus.CounterValue,
|
||||
dst[0].WriteCopiesPersec,
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
9 collector/memory_test.go (new file)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkMemoryCollector(b *testing.B) {
	benchmarkCollector(b, "memory", NewMemoryCollector)
}
1199 collector/mscluster_cluster.go (new file; diff suppressed because it is too large)

117 collector/mscluster_network.go (new file)
@@ -0,0 +1,117 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("mscluster_network", newMSCluster_NetworkCollector)
|
||||
}
|
||||
|
||||
// A MSCluster_NetworkCollector is a Prometheus collector for WMI MSCluster_Network metrics
|
||||
type MSCluster_NetworkCollector struct {
|
||||
Characteristics *prometheus.Desc
|
||||
Flags *prometheus.Desc
|
||||
Metric *prometheus.Desc
|
||||
Role *prometheus.Desc
|
||||
State *prometheus.Desc
|
||||
}
|
||||
|
||||
func newMSCluster_NetworkCollector() (Collector, error) {
|
||||
const subsystem = "mscluster_network"
|
||||
return &MSCluster_NetworkCollector{
|
||||
Characteristics: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "characteristics"),
|
||||
"Provides the characteristics of the network.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
Flags: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "flags"),
|
||||
"Provides access to the flags set for the node. ",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
Metric: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "metric"),
|
||||
"The metric of a cluster network (networks with lower values are used first). If this value is set, then the AutoMetric property is set to false.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
Role: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "role"),
|
||||
"Provides access to the network's Role property. The Role property describes the role of the network in the cluster. 0: None; 1: Cluster; 2: Client; 3: Both ",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
State: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "state"),
|
||||
"Provides the current state of the network. 1-1: Unknown; 0: Unavailable; 1: Down; 2: Partitioned; 3: Up",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MSCluster_Network docs:
|
||||
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-network
|
||||
//
|
||||
type MSCluster_Network struct {
|
||||
Name string
|
||||
|
||||
Characteristics uint
|
||||
Flags uint
|
||||
Metric uint
|
||||
Role uint
|
||||
State uint
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MSCluster_NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var dst []MSCluster_Network
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, v := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Characteristics,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Characteristics),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Flags,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Flags),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Metric,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Metric),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Role,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Role),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.State,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.State),
|
||||
v.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
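The MSCluster collectors share one WMI pattern: queryAll derives a WQL statement from the destination slice, and wmi.QueryNamespace runs it against the root/MSCluster namespace. queryAll is defined elsewhere in the package; a minimal sketch of the assumed behaviour (the reflect-based derivation is an illustration, not the actual implementation):

// queryAll (sketch): build "SELECT * FROM <TypeName>" from a pointer to a
// slice of WMI structs, e.g. *[]MSCluster_Network -> "SELECT * FROM MSCluster_Network".
// Requires the "reflect" package.
func queryAll(dst interface{}) string {
	elem := reflect.TypeOf(dst).Elem().Elem() // *[]T -> T
	return "SELECT * FROM " + elem.Name()
}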
|
||||
253 collector/mscluster_node.go (new file)
@@ -0,0 +1,253 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("mscluster_node", newMSCluster_NodeCollector)
|
||||
}
|
||||
|
||||
// A MSCluster_NodeCollector is a Prometheus collector for WMI MSCluster_Node metrics
|
||||
type MSCluster_NodeCollector struct {
|
||||
BuildNumber *prometheus.Desc
|
||||
Characteristics *prometheus.Desc
|
||||
DetectedCloudPlatform *prometheus.Desc
|
||||
DynamicWeight *prometheus.Desc
|
||||
Flags *prometheus.Desc
|
||||
MajorVersion *prometheus.Desc
|
||||
MinorVersion *prometheus.Desc
|
||||
NeedsPreventQuorum *prometheus.Desc
|
||||
NodeDrainStatus *prometheus.Desc
|
||||
NodeHighestVersion *prometheus.Desc
|
||||
NodeLowestVersion *prometheus.Desc
|
||||
NodeWeight *prometheus.Desc
|
||||
State *prometheus.Desc
|
||||
StatusInformation *prometheus.Desc
|
||||
}
|
||||
|
||||
func newMSCluster_NodeCollector() (Collector, error) {
|
||||
const subsystem = "mscluster_node"
|
||||
return &MSCluster_NodeCollector{
|
||||
BuildNumber: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "build_number"),
|
||||
"Provides access to the node's BuildNumber property.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
Characteristics: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "characteristics"),
|
||||
"Provides access to the characteristics set for the node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
DetectedCloudPlatform: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "detected_cloud_platform"),
|
||||
"(DetectedCloudPlatform)",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
DynamicWeight: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dynamic_weight"),
|
||||
"The dynamic vote weight of the node adjusted by dynamic quorum feature.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
Flags: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "flags"),
|
||||
"Provides access to the flags set for the node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
MajorVersion: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "major_version"),
|
||||
"Provides access to the node's MajorVersion property, which specifies the major portion of the Windows version installed.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
MinorVersion: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "minor_version"),
|
||||
"Provides access to the node's MinorVersion property, which specifies the minor portion of the Windows version installed.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
NeedsPreventQuorum: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "needs_prevent_quorum"),
|
||||
"Whether the cluster service on that node should be started with prevent quorum flag.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
NodeDrainStatus: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "node_drain_status"),
|
||||
"The current node drain status of a node. 0: Not Initiated; 1: In Progress; 2: Completed; 3: Failed",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
NodeHighestVersion: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "node_highest_version"),
|
||||
"Provides access to the node's NodeHighestVersion property, which specifies the highest possible version of the cluster service with which the node can join or communicate.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
NodeLowestVersion: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "node_lowest_version"),
|
||||
"Provides access to the node's NodeLowestVersion property, which specifies the lowest possible version of the cluster service with which the node can join or communicate.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
NodeWeight: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "node_weight"),
|
||||
"The vote weight of the node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
State: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "state"),
|
||||
"Returns the current state of a node. -1: Unknown; 0: Up; 1: Down; 2: Paused; 3: Joining",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
StatusInformation: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "status_information"),
|
||||
"The isolation or quarantine status of the node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MSCluster_Node docs:
|
||||
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-node
|
||||
//
|
||||
type MSCluster_Node struct {
|
||||
Name string
|
||||
|
||||
BuildNumber uint
|
||||
Characteristics uint
|
||||
DetectedCloudPlatform uint
|
||||
DynamicWeight uint
|
||||
Flags uint
|
||||
MajorVersion uint
|
||||
MinorVersion uint
|
||||
NeedsPreventQuorum uint
|
||||
NodeDrainStatus uint
|
||||
NodeHighestVersion uint
|
||||
NodeLowestVersion uint
|
||||
NodeWeight uint
|
||||
State uint
|
||||
StatusInformation uint
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MSCluster_NodeCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var dst []MSCluster_Node
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, v := range dst {
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BuildNumber,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.BuildNumber),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Characteristics,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Characteristics),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DetectedCloudPlatform,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.DetectedCloudPlatform),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DynamicWeight,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.DynamicWeight),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Flags,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Flags),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MajorVersion,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.MajorVersion),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MinorVersion,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.MinorVersion),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NeedsPreventQuorum,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.NeedsPreventQuorum),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NodeDrainStatus,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.NodeDrainStatus),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NodeHighestVersion,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.NodeHighestVersion),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NodeLowestVersion,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.NodeLowestVersion),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NodeWeight,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.NodeWeight),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.State,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.State),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StatusInformation,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.StatusInformation),
|
||||
v.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
285 collector/mscluster_resource.go (new file)
@@ -0,0 +1,285 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("mscluster_resource", newMSCluster_ResourceCollector)
|
||||
}
|
||||
|
||||
// A MSCluster_ResourceCollector is a Prometheus collector for WMI MSCluster_Resource metrics
|
||||
type MSCluster_ResourceCollector struct {
|
||||
Characteristics *prometheus.Desc
|
||||
DeadlockTimeout *prometheus.Desc
|
||||
EmbeddedFailureAction *prometheus.Desc
|
||||
Flags *prometheus.Desc
|
||||
IsAlivePollInterval *prometheus.Desc
|
||||
LooksAlivePollInterval *prometheus.Desc
|
||||
MonitorProcessId *prometheus.Desc
|
||||
PendingTimeout *prometheus.Desc
|
||||
ResourceClass *prometheus.Desc
|
||||
RestartAction *prometheus.Desc
|
||||
RestartDelay *prometheus.Desc
|
||||
RestartPeriod *prometheus.Desc
|
||||
RestartThreshold *prometheus.Desc
|
||||
RetryPeriodOnFailure *prometheus.Desc
|
||||
State *prometheus.Desc
|
||||
Subclass *prometheus.Desc
|
||||
}
|
||||
|
||||
func newMSCluster_ResourceCollector() (Collector, error) {
|
||||
const subsystem = "mscluster_resource"
|
||||
return &MSCluster_ResourceCollector{
|
||||
Characteristics: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "characteristics"),
|
||||
"Provides the characteristics of the object.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
DeadlockTimeout: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "deadlock_timeout"),
|
||||
"Indicates the length of time to wait, in milliseconds, before declaring a deadlock in any call into a resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
EmbeddedFailureAction: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "embedded_failure_action"),
|
||||
"The time, in milliseconds, that a resource should remain in a failed state before the Cluster service attempts to restart it.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
Flags: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "flags"),
|
||||
"Provides access to the flags set for the object.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
IsAlivePollInterval: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "is_alive_poll_interval"),
|
||||
"Provides access to the resource's IsAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it is operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the IsAlivePollInterval property for the resource type associated with the resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
LooksAlivePollInterval: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "looks_alive_poll_interval"),
|
||||
"Provides access to the resource's LooksAlivePollInterval property, which is the recommended interval in milliseconds at which the Cluster Service should poll the resource to determine whether it appears operational. If the property is set to 0xFFFFFFFF, the Cluster Service uses the LooksAlivePollInterval property for the resource type associated with the resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
MonitorProcessId: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "monitor_process_id"),
|
||||
"Provides the process ID of the resource host service that is currently hosting the resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
PendingTimeout: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pending_timeout"),
|
||||
"Provides access to the resource's PendingTimeout property. If a resource cannot be brought online or taken offline in the number of milliseconds specified by the PendingTimeout property, the resource is forcibly terminated.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
ResourceClass: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "resource_class"),
|
||||
"Gets or sets the resource class of a resource. 0: Unknown; 1: Storage; 2: Network; 32768: Unknown ",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
RestartAction: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "restart_action"),
|
||||
"Provides access to the resource's RestartAction property, which is the action to be taken by the Cluster Service if the resource fails.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
RestartDelay: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "restart_delay"),
|
||||
"Indicates the time delay before a failed resource is restarted.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
RestartPeriod: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "restart_period"),
|
||||
"Provides access to the resource's RestartPeriod property, which is interval of time, in milliseconds, during which a specified number of restart attempts can be made on a nonresponsive resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
RestartThreshold: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "restart_threshold"),
|
||||
"Provides access to the resource's RestartThreshold property which is the maximum number of restart attempts that can be made on a resource within an interval defined by the RestartPeriod property before the Cluster Service initiates the action specified by the RestartAction property.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
RetryPeriodOnFailure: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "retry_period_on_failure"),
|
||||
"Provides access to the resource's RetryPeriodOnFailure property, which is the interval of time (in milliseconds) that a resource should remain in a failed state before the Cluster service attempts to restart it.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
State: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "state"),
|
||||
"The current state of the resource. -1: Unknown; 0: Inherited; 1: Initializing; 2: Online; 3: Offline; 4: Failed; 128: Pending; 129: Online Pending; 130: Offline Pending ",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
Subclass: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "subclass"),
|
||||
"Provides the list of references to nodes that can be the owner of this resource.",
|
||||
[]string{"type", "owner_group", "name"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MSCluster_Resource docs:
|
||||
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resource
|
||||
//
|
||||
type MSCluster_Resource struct {
|
||||
Name string
|
||||
Type string
|
||||
OwnerGroup string
|
||||
|
||||
Characteristics uint
|
||||
DeadlockTimeout uint
|
||||
EmbeddedFailureAction uint
|
||||
Flags uint
|
||||
IsAlivePollInterval uint
|
||||
LooksAlivePollInterval uint
|
||||
MonitorProcessId uint
|
||||
PendingTimeout uint
|
||||
ResourceClass uint
|
||||
RestartAction uint
|
||||
RestartDelay uint
|
||||
RestartPeriod uint
|
||||
RestartThreshold uint
|
||||
RetryPeriodOnFailure uint
|
||||
State uint
|
||||
Subclass uint
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MSCluster_ResourceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var dst []MSCluster_Resource
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, v := range dst {
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Characteristics,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Characteristics),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DeadlockTimeout,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.DeadlockTimeout),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.EmbeddedFailureAction,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.EmbeddedFailureAction),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Flags,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Flags),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IsAlivePollInterval,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.IsAlivePollInterval),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LooksAlivePollInterval,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.LooksAlivePollInterval),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MonitorProcessId,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.MonitorProcessId),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PendingTimeout,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.PendingTimeout),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ResourceClass,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.ResourceClass),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RestartAction,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.RestartAction),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RestartDelay,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.RestartDelay),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RestartPeriod,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.RestartPeriod),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RestartThreshold,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.RestartThreshold),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RetryPeriodOnFailure,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.RetryPeriodOnFailure),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.State,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.State),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Subclass,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Subclass),
|
||||
v.Type, v.OwnerGroup, v.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
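The State descriptor above documents the raw numeric codes that the resource state metric exports. Consumers can decode those codes with a small lookup table; the sketch below is illustrative only, derived from that help text rather than from this changeset.

// Illustrative only: decoding the resource State values documented above.
var clusterResourceStateNames = map[int]string{
	-1:  "Unknown",
	0:   "Inherited",
	1:   "Initializing",
	2:   "Online",
	3:   "Offline",
	4:   "Failed",
	128: "Pending",
	129: "Online Pending",
	130: "Offline Pending",
}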
|
||||
241
collector/mscluster_resourcegroup.go
Normal file
@@ -0,0 +1,241 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("mscluster_resourcegroup", newMSCluster_ResourceGroupCollector)
|
||||
}
|
||||
|
||||
// A MSCluster_ResourceGroupCollector is a Prometheus collector for WMI MSCluster_ResourceGroup metrics
|
||||
type MSCluster_ResourceGroupCollector struct {
|
||||
AutoFailbackType *prometheus.Desc
|
||||
Characteristics *prometheus.Desc
|
||||
ColdStartSetting *prometheus.Desc
|
||||
DefaultOwner *prometheus.Desc
|
||||
FailbackWindowEnd *prometheus.Desc
|
||||
FailbackWindowStart *prometheus.Desc
|
||||
FailoverPeriod *prometheus.Desc
|
||||
FailoverThreshold *prometheus.Desc
|
||||
FaultDomain *prometheus.Desc
|
||||
Flags *prometheus.Desc
|
||||
GroupType *prometheus.Desc
|
||||
PlacementOptions *prometheus.Desc
|
||||
Priority *prometheus.Desc
|
||||
ResiliencyPeriod *prometheus.Desc
|
||||
State *prometheus.Desc
|
||||
}
|
||||
|
||||
func newMSCluster_ResourceGroupCollector() (Collector, error) {
|
||||
const subsystem = "mscluster_resourcegroup"
|
||||
return &MSCluster_ResourceGroupCollector{
|
||||
AutoFailbackType: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "auto_failback_type"),
|
||||
"Provides access to the group's AutoFailbackType property.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
Characteristics: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "characteristics"),
|
||||
"Provides the characteristics of the group.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
ColdStartSetting: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cold_start_setting"),
|
||||
"Indicates whether a group can start after a cluster cold start.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
DefaultOwner: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "default_owner"),
|
||||
"Number of the last node the resource group was activated on or explicitly moved to.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
FailbackWindowEnd: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failback_window_end"),
|
||||
"The FailbackWindowEnd property provides the latest time that the group can be moved back to the node identified as its preferred node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
FailbackWindowStart: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failback_window_start"),
|
||||
"The FailbackWindowStart property provides the earliest time (that is, local time as kept by the cluster) that the group can be moved back to the node identified as its preferred node.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
FailoverPeriod: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failover_period"),
|
||||
"The FailoverPeriod property specifies a number of hours during which a maximum number of failover attempts, specified by the FailoverThreshold property, can occur.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
FailoverThreshold: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "failover_threshold"),
|
||||
"The FailoverThreshold property specifies the maximum number of failover attempts.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
Flags: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "flags"),
|
||||
"Provides access to the flags set for the group. ",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
GroupType: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "group_type"),
|
||||
"The Type of the resource group.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
Priority: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "priority"),
|
||||
"Priority value of the resource group",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
ResiliencyPeriod: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "resiliency_period"),
|
||||
"The resiliency period for this group, in seconds.",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
State: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "state"),
|
||||
"The current state of the resource group. -1: Unknown; 0: Online; 1: Offline; 2: Failed; 3: Partial Online; 4: Pending",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MSCluster_ResourceGroup docs:
|
||||
// - https://docs.microsoft.com/en-us/previous-versions/windows/desktop/cluswmi/mscluster-resourcegroup
|
||||
//
|
||||
type MSCluster_ResourceGroup struct {
|
||||
Name string
|
||||
|
||||
AutoFailbackType uint
|
||||
Characteristics uint
|
||||
ColdStartSetting uint
|
||||
DefaultOwner uint
|
||||
FailbackWindowEnd int
|
||||
FailbackWindowStart int
|
||||
FailoverPeriod uint
|
||||
FailoverThreshold uint
|
||||
Flags uint
|
||||
GroupType uint
|
||||
Priority uint
|
||||
ResiliencyPeriod uint
|
||||
State uint
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MSCluster_ResourceGroupCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var dst []MSCluster_ResourceGroup
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.QueryNamespace(q, &dst, "root/MSCluster"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, v := range dst {
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AutoFailbackType,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.AutoFailbackType),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Characteristics,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Characteristics),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ColdStartSetting,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.ColdStartSetting),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DefaultOwner,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.DefaultOwner),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailbackWindowEnd,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.FailbackWindowEnd),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailbackWindowStart,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.FailbackWindowStart),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailoverPeriod,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.FailoverPeriod),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FailoverThreshold,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.FailoverThreshold),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Flags,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Flags),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.GroupType,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.GroupType),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Priority,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.Priority),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ResiliencyPeriod,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.ResiliencyPeriod),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.State,
|
||||
prometheus.GaugeValue,
|
||||
float64(v.State),
|
||||
v.Name,
|
||||
)
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
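Both MSCluster collectors query WMI through the package's queryAll helper and wmi.QueryNamespace against root/MSCluster. queryAll itself is defined elsewhere in the package; a minimal sketch of one plausible implementation, assuming it simply delegates to wmi.CreateQuery, looks like this (illustrative, not the actual helper):

// Plausible sketch only; the real queryAll in this package may differ.
// wmi.CreateQuery derives "SELECT <fields> FROM <struct type name>" from dst,
// which is why the structs above mirror the WMI class layout and names.
func queryAll(dst interface{}) string {
	return wmi.CreateQuery(dst, "")
}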
|
||||
126
collector/msmq.go
Normal file
@@ -0,0 +1,126 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("msmq", NewMSMQCollector)
|
||||
}
|
||||
|
||||
var (
|
||||
msmqWhereClause = kingpin.Flag("collector.msmq.msmq-where", "WQL 'where' clause to use in WMI metrics query. Limits the response to the msmqs you specify and reduces the size of the response.").String()
|
||||
)
|
||||
|
||||
// A Win32_PerfRawData_MSMQ_MSMQQueueCollector is a Prometheus collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics
|
||||
type Win32_PerfRawData_MSMQ_MSMQQueueCollector struct {
|
||||
BytesinJournalQueue *prometheus.Desc
|
||||
BytesinQueue *prometheus.Desc
|
||||
MessagesinJournalQueue *prometheus.Desc
|
||||
MessagesinQueue *prometheus.Desc
|
||||
|
||||
queryWhereClause string
|
||||
}
|
||||
|
||||
// NewMSMQCollector ...
|
||||
func NewMSMQCollector() (Collector, error) {
|
||||
const subsystem = "msmq"
|
||||
|
||||
if *msmqWhereClause == "" {
|
||||
log.Warn("No where-clause specified for msmq collector. This will generate a very large number of metrics!")
|
||||
}
|
||||
|
||||
return &Win32_PerfRawData_MSMQ_MSMQQueueCollector{
|
||||
BytesinJournalQueue: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bytes_in_journal_queue"),
|
||||
"Size of queue journal in bytes",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
BytesinQueue: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bytes_in_queue"),
|
||||
"Size of queue in bytes",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
MessagesinJournalQueue: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_in_journal_queue"),
|
||||
"Count messages in queue journal",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
MessagesinQueue: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_in_queue"),
|
||||
"Count messages in queue",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
queryWhereClause: *msmqWhereClause,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting msmq metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_MSMQ_MSMQQueue struct {
|
||||
Name string
|
||||
|
||||
BytesinJournalQueue uint64
|
||||
BytesinQueue uint64
|
||||
MessagesinJournalQueue uint64
|
||||
MessagesinQueue uint64
|
||||
}
|
||||
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_MSMQ_MSMQQueue
|
||||
q := queryAllWhere(&dst, c.queryWhereClause)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, msmq := range dst {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesinJournalQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.BytesinJournalQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesinQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.BytesinQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MessagesinJournalQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.MessagesinJournalQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MessagesinQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.MessagesinQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
}
|
||||
return nil, nil
|
||||
}
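queryAllWhere, used above to honour the --collector.msmq.msmq-where flag, is also defined elsewhere in the package. A hedged sketch of such a helper, assuming it appends the optional WQL where clause to the generated query (both the helper body and the example clause are illustrative):

// Illustrative sketch; the actual queryAllWhere helper may differ.
// Example flag usage: --collector.msmq.msmq-where="Name LIKE '%orders%'"
func queryAllWhere(dst interface{}, where string) string {
	q := wmi.CreateQuery(dst, "")
	if where != "" {
		q += " WHERE " + where
	}
	return q
}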
|
||||
10
collector/msmq_test.go
Normal file
@@ -0,0 +1,10 @@
package collector

import (
	"testing"
)

func BenchmarkMsmqCollector(b *testing.B) {
	// No context name required as collector source is WMI
	benchmarkCollector(b, "", NewMSMQCollector)
}
4106
collector/mssql.go
Normal file
File diff suppressed because it is too large
9
collector/mssql_test.go
Normal file
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkMSSQLCollector(b *testing.B) {
	benchmarkCollector(b, "mssql", NewMSSQLCollector)
}
114
collector/net.go
@@ -1,32 +1,34 @@
|
||||
// returns data points from Win32_PerfRawData_Tcpip_NetworkInterface
|
||||
|
||||
// https://technet.microsoft.com/en-us/security/aa394340(v=vs.80) (Win32_PerfRawData_Tcpip_NetworkInterface class)
|
||||
// https://msdn.microsoft.com/en-us/library/aa394216 (Win32_NetworkAdapter class)
|
||||
// https://msdn.microsoft.com/en-us/library/aa394353 (Win32_PnPEntity class)
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["net"] = NewNetworkCollector
|
||||
registerCollector("net", NewNetworkCollector, "Network Interface")
|
||||
}
|
||||
|
||||
var (
|
||||
nicWhitelist = flag.String("collector.net.nic-whitelist", ".+", "Regexp of NIC:s to whitelist. NIC name must both match whitelist and not match blacklist to be included.")
|
||||
nicBlacklist = flag.String("collector.net.nic-blacklist", "", "Regexp of NIC:s to blacklist. NIC name must both match whitelist and not match blacklist to be included.")
|
||||
nicWhitelist = kingpin.Flag(
|
||||
"collector.net.nic-whitelist",
|
||||
"Regexp of NIC:s to whitelist. NIC name must both match whitelist and not match blacklist to be included.",
|
||||
).Default(".+").String()
|
||||
nicBlacklist = kingpin.Flag(
|
||||
"collector.net.nic-blacklist",
|
||||
"Regexp of NIC:s to blacklist. NIC name must both match whitelist and not match blacklist to be included.",
|
||||
).Default("").String()
|
||||
nicNameToUnderscore = regexp.MustCompile("[^a-zA-Z0-9]")
|
||||
)
|
||||
|
||||
// A NetworkCollector is a Prometheus collector for WMI Win32_PerfRawData_Tcpip_NetworkInterface metrics
|
||||
// A NetworkCollector is a Prometheus collector for Perflib Network Interface metrics
|
||||
type NetworkCollector struct {
|
||||
BytesReceivedTotal *prometheus.Desc
|
||||
BytesSentTotal *prometheus.Desc
|
||||
@@ -39,6 +41,7 @@ type NetworkCollector struct {
|
||||
PacketsReceivedTotal *prometheus.Desc
|
||||
PacketsReceivedUnknown *prometheus.Desc
|
||||
PacketsSentTotal *prometheus.Desc
|
||||
CurrentBandwidth *prometheus.Desc
|
||||
|
||||
nicWhitelistPattern *regexp.Regexp
|
||||
nicBlacklistPattern *regexp.Regexp
|
||||
@@ -68,25 +71,25 @@ func NewNetworkCollector() (Collector, error) {
|
||||
nil,
|
||||
),
|
||||
PacketsOutboundDiscarded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_outbound_discarded"),
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_outbound_discarded_total"),
|
||||
"(Network.PacketsOutboundDiscarded)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
PacketsOutboundErrors: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_outbound_errors"),
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_outbound_errors_total"),
|
||||
"(Network.PacketsOutboundErrors)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceivedDiscarded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_received_discarded"),
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_received_discarded_total"),
|
||||
"(Network.PacketsReceivedDiscarded)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceivedErrors: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_received_errors"),
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_received_errors_total"),
|
||||
"(Network.PacketsReceivedErrors)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
@@ -98,7 +101,7 @@ func NewNetworkCollector() (Collector, error) {
|
||||
nil,
|
||||
),
|
||||
PacketsReceivedUnknown: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_received_unknown"),
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_received_unknown_total"),
|
||||
"(Network.PacketsReceivedUnknown)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
@@ -115,6 +118,12 @@ func NewNetworkCollector() (Collector, error) {
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
CurrentBandwidth: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "current_bandwidth_bytes"),
|
||||
"(Network.CurrentBandwidth)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
|
||||
nicWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *nicWhitelist)),
|
||||
nicBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *nicBlacklist)),
|
||||
@@ -123,40 +132,42 @@ func NewNetworkCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NetworkCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Println("[ERROR] failed collecting net metrics:", desc, err)
|
||||
func (c *NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting net metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mangleNetworkName mangles Network Adapter name (non-alphanumeric to _)
|
||||
// that is used in Win32_PerfRawData_Tcpip_NetworkInterface.
|
||||
// that is used in networkInterface.
|
||||
func mangleNetworkName(name string) string {
|
||||
return nicNameToUnderscore.ReplaceAllString(name, "_")
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_Tcpip_NetworkInterface struct {
|
||||
BytesReceivedPerSec uint32
|
||||
BytesSentPerSec uint32
|
||||
BytesTotalPerSec uint64
|
||||
// Win32_PerfRawData_Tcpip_NetworkInterface docs:
|
||||
// - https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)
|
||||
type networkInterface struct {
|
||||
BytesReceivedPerSec float64 `perflib:"Bytes Received/sec"`
|
||||
BytesSentPerSec float64 `perflib:"Bytes Sent/sec"`
|
||||
BytesTotalPerSec float64 `perflib:"Bytes Total/sec"`
|
||||
Name string
|
||||
PacketsOutboundDiscarded uint32
|
||||
PacketsOutboundErrors uint32
|
||||
PacketsPerSec uint32
|
||||
PacketsReceivedDiscarded uint32
|
||||
PacketsReceivedErrors uint32
|
||||
PacketsReceivedPerSec uint32
|
||||
PacketsReceivedUnknown uint32
|
||||
PacketsSentPerSec uint32
|
||||
PacketsOutboundDiscarded float64 `perflib:"Packets Outbound Discarded"`
|
||||
PacketsOutboundErrors float64 `perflib:"Packets Outbound Errors"`
|
||||
PacketsPerSec float64 `perflib:"Packets/sec"`
|
||||
PacketsReceivedDiscarded float64 `perflib:"Packets Received Discarded"`
|
||||
PacketsReceivedErrors float64 `perflib:"Packets Received Errors"`
|
||||
PacketsReceivedPerSec float64 `perflib:"Packets Received/sec"`
|
||||
PacketsReceivedUnknown float64 `perflib:"Packets Received Unknown"`
|
||||
PacketsSentPerSec float64 `perflib:"Packets Sent/sec"`
|
||||
CurrentBandwidth float64 `perflib:"Current Bandwidth"`
|
||||
}
|
||||
|
||||
func (c *NetworkCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_Tcpip_NetworkInterface
|
||||
func (c *NetworkCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []networkInterface
|
||||
|
||||
q := wmi.CreateQuery(&dst, "")
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
if err := unmarshalObject(ctx.perfObjects["Network Interface"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -175,70 +186,75 @@ func (c *NetworkCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Des
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesReceivedPerSec),
|
||||
nic.BytesReceivedPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesSentTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesSentPerSec),
|
||||
nic.BytesSentPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesTotalPerSec),
|
||||
nic.BytesTotalPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsOutboundDiscarded,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsOutboundDiscarded),
|
||||
nic.PacketsOutboundDiscarded,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsOutboundErrors,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsOutboundErrors),
|
||||
nic.PacketsOutboundErrors,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsPerSec),
|
||||
nic.PacketsPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedDiscarded,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedDiscarded),
|
||||
nic.PacketsReceivedDiscarded,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedErrors,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedErrors),
|
||||
nic.PacketsReceivedErrors,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedPerSec),
|
||||
nic.PacketsReceivedPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedUnknown,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedUnknown),
|
||||
nic.PacketsReceivedUnknown,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsSentTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsSentPerSec),
|
||||
nic.PacketsSentPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentBandwidth,
|
||||
prometheus.GaugeValue,
|
||||
nic.CurrentBandwidth/8,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
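Per the flag help above, a NIC is only collected when its mangled name matches the whitelist and does not match the blacklist. A small illustrative example of that check with the default flag values (the interface name is made up):

// Illustrative only: NIC name normalisation and filtering.
func exampleNicFilter() bool {
	name := mangleNetworkName("Intel(R) 82574L") // -> "Intel_R__82574L"
	whitelist := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", ".+")) // default --collector.net.nic-whitelist
	blacklist := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", ""))   // default --collector.net.nic-blacklist
	return whitelist.MatchString(name) && !blacklist.MatchString(name) // true: the NIC is kept
}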
|
||||
|
||||
collector/net_test.go
@@ -1,6 +1,11 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNetworkToInstanceName(t *testing.T) {
|
||||
data := map[string]string{
|
||||
@@ -13,3 +18,10 @@ func TestNetworkToInstanceName(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkNetCollector(b *testing.B) {
|
||||
// Whitelist is not set in testing context (kingpin flags not parsed), causing the collector to skip all interfaces.
|
||||
localNicWhitelist := ".+"
|
||||
nicWhitelist = &localNicWhitelist
|
||||
benchmarkCollector(b, "net", NewNetworkCollector)
|
||||
}
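benchmarkCollector, called by the benchmarks in this changeset, lives elsewhere in the package. One plausible shape for such a helper, shown purely as a sketch (the name, signature and prometheus import are assumed, not taken from the repository):

// Sketch only: build the collector once, then collect b.N times while a
// goroutine drains the metric channel so sends never block.
// WMI-backed collectors ignore the ScrapeContext; perflib-backed ones need it populated.
func benchmarkCollectorSketch(b *testing.B, build func() (Collector, error)) {
	c, err := build()
	if err != nil {
		b.Fatal(err)
	}
	for i := 0; i < b.N; i++ {
		ch := make(chan prometheus.Metric)
		done := make(chan struct{})
		go func() {
			for range ch {
			}
			close(done)
		}()
		if err := c.Collect(&ScrapeContext{}, ch); err != nil {
			b.Fatal(err)
		}
		close(ch)
		<-done
	}
}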
|
||||
|
||||
118
collector/netframework_clrexceptions.go
Normal file
@@ -0,0 +1,118 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("netframework_clrexceptions", NewNETFramework_NETCLRExceptionsCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRExceptionsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics
|
||||
type NETFramework_NETCLRExceptionsCollector struct {
|
||||
NumberofExcepsThrown *prometheus.Desc
|
||||
NumberofFilters *prometheus.Desc
|
||||
NumberofFinallys *prometheus.Desc
|
||||
ThrowToCatchDepth *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRExceptionsCollector ...
|
||||
func NewNETFramework_NETCLRExceptionsCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrexceptions"
|
||||
return &NETFramework_NETCLRExceptionsCollector{
|
||||
NumberofExcepsThrown: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "exceptions_thrown_total"),
|
||||
"Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofFilters: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "exceptions_filters_total"),
|
||||
"Displays the total number of .NET exception filters executed. An exception filter evaluates regardless of whether an exception is handled.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofFinallys: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "exceptions_finallys_total"),
|
||||
"Displays the total number of finally blocks executed. Only the finally blocks executed for an exception are counted; finally blocks on normal code paths are not counted by this counter.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
ThrowToCatchDepth: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "throw_to_catch_depth_total"),
|
||||
"Displays the total number of stack frames traversed, from the frame that threw the exception to the frame that handled the exception.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRExceptions struct {
|
||||
Name string
|
||||
|
||||
NumberofExcepsThrown uint32
|
||||
NumberofExcepsThrownPersec uint32
|
||||
NumberofFiltersPersec uint32
|
||||
NumberofFinallysPersec uint32
|
||||
ThrowToCatchDepthPersec uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofExcepsThrown,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofExcepsThrown),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofFilters,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofFiltersPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofFinallys,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofFinallysPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ThrowToCatchDepth,
|
||||
prometheus.CounterValue,
|
||||
float64(process.ThrowToCatchDepthPersec),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
10
collector/netframework_clrexceptions_test.go
Normal file
@@ -0,0 +1,10 @@
package collector

import (
	"testing"
)

func BenchmarkNetFrameworkNETCLRExceptionsCollector(b *testing.B) {
	// No context name required as collector source is WMI
	benchmarkCollector(b, "", NewNETFramework_NETCLRExceptionsCollector)
}
104
collector/netframework_clrinterop.go
Normal file
@@ -0,0 +1,104 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("netframework_clrinterop", NewNETFramework_NETCLRInteropCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRInteropCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRInterop metrics
|
||||
type NETFramework_NETCLRInteropCollector struct {
|
||||
NumberofCCWs *prometheus.Desc
|
||||
Numberofmarshalling *prometheus.Desc
|
||||
NumberofStubs *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRInteropCollector ...
|
||||
func NewNETFramework_NETCLRInteropCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrinterop"
|
||||
return &NETFramework_NETCLRInteropCollector{
|
||||
NumberofCCWs: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "com_callable_wrappers_total"),
|
||||
"Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
Numberofmarshalling: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interop_marshalling_total"),
|
||||
"Displays the total number of times arguments and return values have been marshaled from managed to unmanaged code, and vice versa, since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofStubs: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interop_stubs_created_total"),
|
||||
"Displays the current number of stubs created by the common language runtime. Stubs are responsible for marshaling arguments and return values from managed to unmanaged code, and vice versa, during a COM interop call or a platform invoke call.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRInteropCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrinterop metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRInterop struct {
|
||||
Name string
|
||||
|
||||
NumberofCCWs uint32
|
||||
Numberofmarshalling uint32
|
||||
NumberofStubs uint32
|
||||
NumberofTLBexportsPersec uint32
|
||||
NumberofTLBimportsPersec uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRInteropCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRInterop
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofCCWs,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofCCWs),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Numberofmarshalling,
|
||||
prometheus.CounterValue,
|
||||
float64(process.Numberofmarshalling),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofStubs,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofStubs),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
10
collector/netframework_clrinterop_test.go
Normal file
@@ -0,0 +1,10 @@
package collector

import (
	"testing"
)

func BenchmarkNETFrameworkNETCLRInteropCollector(b *testing.B) {
	// No context name required as collector source is WMI
	benchmarkCollector(b, "", NewNETFramework_NETCLRInteropCollector)
}
120
collector/netframework_clrjit.go
Normal file
@@ -0,0 +1,120 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("netframework_clrjit", NewNETFramework_NETCLRJitCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRJitCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRJit metrics
|
||||
type NETFramework_NETCLRJitCollector struct {
|
||||
NumberofMethodsJitted *prometheus.Desc
|
||||
TimeinJit *prometheus.Desc
|
||||
StandardJitFailures *prometheus.Desc
|
||||
TotalNumberofILBytesJitted *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRJitCollector ...
|
||||
func NewNETFramework_NETCLRJitCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrjit"
|
||||
return &NETFramework_NETCLRJitCollector{
|
||||
NumberofMethodsJitted: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "jit_methods_total"),
|
||||
"Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TimeinJit: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "jit_time_percent"),
|
||||
"Displays the percentage of time spent in JIT compilation. This counter is updated at the end of every JIT compilation phase. A JIT compilation phase occurs when a method and its dependencies are compiled.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
StandardJitFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "jit_standard_failures_total"),
|
||||
"Displays the peak number of methods the JIT compiler has failed to compile since the application started. This failure can occur if the MSIL cannot be verified or if there is an internal error in the JIT compiler.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalNumberofILBytesJitted: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "jit_il_bytes_total"),
|
||||
"Displays the total number of Microsoft intermediate language (MSIL) bytes compiled by the just-in-time (JIT) compiler since the application started",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRJitCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrjit metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRJit struct {
|
||||
Name string
|
||||
|
||||
Frequency_PerfTime uint32
|
||||
ILBytesJittedPersec uint32
|
||||
NumberofILBytesJitted uint32
|
||||
NumberofMethodsJitted uint32
|
||||
PercentTimeinJit uint32
|
||||
StandardJitFailures uint32
|
||||
TotalNumberofILBytesJitted uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRJitCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRJit
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofMethodsJitted,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofMethodsJitted),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeinJit,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PercentTimeinJit)/float64(process.Frequency_PerfTime),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandardJitFailures,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.StandardJitFailures),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalNumberofILBytesJitted,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalNumberofILBytesJitted),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
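The jit_time_percent gauge above is derived by dividing the raw PercentTimeinJit field by Frequency_PerfTime, as in the loop above. A worked example with made-up raw values:

// Illustrative values only; both fields come from the
// Win32_PerfRawData_NETFramework_NETCLRJit row read above.
func exampleJitTimeRatio() float64 {
	percentTimeinJit := uint32(2500)
	frequencyPerfTime := uint32(10000)
	return float64(percentTimeinJit) / float64(frequencyPerfTime) // 0.25 exported for this process
}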
|
||||
10
collector/netframework_clrjit_test.go
Normal file
@@ -0,0 +1,10 @@
package collector

import (
	"testing"
)

func BenchmarkNETFrameworkNETCLRJitCollector(b *testing.B) {
	// No context name required as collector source is WMI
	benchmarkCollector(b, "", NewNETFramework_NETCLRJitCollector)
}
199
collector/netframework_clrloading.go
Normal file
@@ -0,0 +1,199 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("netframework_clrloading", NewNETFramework_NETCLRLoadingCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRLoadingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLoading metrics
|
||||
type NETFramework_NETCLRLoadingCollector struct {
|
||||
BytesinLoaderHeap *prometheus.Desc
|
||||
Currentappdomains *prometheus.Desc
|
||||
CurrentAssemblies *prometheus.Desc
|
||||
CurrentClassesLoaded *prometheus.Desc
|
||||
TotalAppdomains *prometheus.Desc
|
||||
Totalappdomainsunloaded *prometheus.Desc
|
||||
TotalAssemblies *prometheus.Desc
|
||||
TotalClassesLoaded *prometheus.Desc
|
||||
TotalNumberofLoadFailures *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRLoadingCollector ...
|
||||
func NewNETFramework_NETCLRLoadingCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrloading"
|
||||
return &NETFramework_NETCLRLoadingCollector{
|
||||
BytesinLoaderHeap: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "loader_heap_size_bytes"),
|
||||
"Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
Currentappdomains: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "appdomains_loaded_current"),
|
||||
"Displays the current number of application domains loaded in this application.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
CurrentAssemblies: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "assemblies_loaded_current"),
|
||||
"Displays the current number of assemblies loaded across all application domains in the currently running application. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
CurrentClassesLoaded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "classes_loaded_current"),
|
||||
"Displays the current number of classes loaded in all assemblies.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalAppdomains: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "appdomains_loaded_total"),
|
||||
"Displays the peak number of application domains loaded since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
Totalappdomainsunloaded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "appdomains_unloaded_total"),
|
||||
"Displays the total number of application domains unloaded since the application started. If an application domain is loaded and unloaded multiple times, this counter increments each time the application domain is unloaded.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalAssemblies: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "assemblies_loaded_total"),
|
||||
"Displays the total number of assemblies loaded since the application started. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalClassesLoaded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "classes_loaded_total"),
|
||||
"Displays the cumulative number of classes loaded in all assemblies since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalNumberofLoadFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "class_load_failures_total"),
|
||||
"Displays the peak number of classes that have failed to load since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRLoadingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrloading metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRLoading struct {
|
||||
Name string
|
||||
|
||||
AssemblySearchLength uint32
|
||||
BytesinLoaderHeap uint64
|
||||
Currentappdomains uint32
|
||||
CurrentAssemblies uint32
|
||||
CurrentClassesLoaded uint32
|
||||
PercentTimeLoading uint64
|
||||
Rateofappdomains uint32
|
||||
Rateofappdomainsunloaded uint32
|
||||
RateofAssemblies uint32
|
||||
RateofClassesLoaded uint32
|
||||
RateofLoadFailures uint32
|
||||
TotalAppdomains uint32
|
||||
Totalappdomainsunloaded uint32
|
||||
TotalAssemblies uint32
|
||||
TotalClassesLoaded uint32
|
||||
TotalNumberofLoadFailures uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRLoadingCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRLoading
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesinLoaderHeap,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.BytesinLoaderHeap),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Currentappdomains,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Currentappdomains),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentAssemblies,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.CurrentAssemblies),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentClassesLoaded,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.CurrentClassesLoaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalAppdomains,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalAppdomains),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Totalappdomainsunloaded,
|
||||
prometheus.CounterValue,
|
||||
float64(process.Totalappdomainsunloaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalAssemblies,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalAssemblies),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalClassesLoaded,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalClassesLoaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalNumberofLoadFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalNumberofLoadFailures),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
10
collector/netframework_clrloading_test.go
Normal file
@@ -0,0 +1,10 @@
package collector

import (
	"testing"
)

func BenchmarkNETFrameworkNETCLRLoadingCollector(b *testing.B) {
	// No context name required as collector source is WMI
	benchmarkCollector(b, "", NewNETFramework_NETCLRLoadingCollector)
}
165
collector/netframework_clrlocksandthreads.go
Normal file
@@ -0,0 +1,165 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("netframework_clrlocksandthreads", NewNETFramework_NETCLRLocksAndThreadsCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRLocksAndThreadsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads metrics
|
||||
type NETFramework_NETCLRLocksAndThreadsCollector struct {
|
||||
CurrentQueueLength *prometheus.Desc
|
||||
NumberofcurrentlogicalThreads *prometheus.Desc
|
||||
NumberofcurrentphysicalThreads *prometheus.Desc
|
||||
Numberofcurrentrecognizedthreads *prometheus.Desc
|
||||
Numberoftotalrecognizedthreads *prometheus.Desc
|
||||
QueueLengthPeak *prometheus.Desc
|
||||
TotalNumberofContentions *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRLocksAndThreadsCollector ...
|
||||
func NewNETFramework_NETCLRLocksAndThreadsCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrlocksandthreads"
|
||||
return &NETFramework_NETCLRLocksAndThreadsCollector{
|
||||
CurrentQueueLength: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "current_queue_length"),
|
||||
"Displays the total number of threads that are currently waiting to acquire a managed lock in the application.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofcurrentlogicalThreads: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "current_logical_threads"),
|
||||
"Displays the number of current managed thread objects in the application. This counter maintains the count of both running and stopped threads. ",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofcurrentphysicalThreads: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "physical_threads_current"),
|
||||
"Displays the number of native operating system threads created and owned by the common language runtime to act as underlying threads for managed thread objects. This counter's value does not include the threads used by the runtime in its internal operations; it is a subset of the threads in the operating system process.",
|
||||
[]string{"process"},
|
			nil,
		),
		Numberofcurrentrecognizedthreads: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "recognized_threads_current"),
			"Displays the number of threads that are currently recognized by the runtime. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
			[]string{"process"},
			nil,
		),
		Numberoftotalrecognizedthreads: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "recognized_threads_total"),
			"Displays the total number of threads that have been recognized by the runtime since the application started. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
			[]string{"process"},
			nil,
		),
		QueueLengthPeak: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "queue_length_total"),
			"Displays the total number of threads that waited to acquire a managed lock since the application started.",
			[]string{"process"},
			nil,
		),
		TotalNumberofContentions: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "contentions_total"),
			"Displays the total number of times that threads in the runtime have attempted to acquire a managed lock unsuccessfully.",
			[]string{"process"},
			nil,
		),
	}, nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Error("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics:", desc, err)
		return err
	}
	return nil
}

type Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads struct {
	Name string

	ContentionRatePersec             uint32
	CurrentQueueLength               uint32
	NumberofcurrentlogicalThreads    uint32
	NumberofcurrentphysicalThreads   uint32
	Numberofcurrentrecognizedthreads uint32
	Numberoftotalrecognizedthreads   uint32
	QueueLengthPeak                  uint32
	QueueLengthPersec                uint32
	RateOfRecognizedThreadsPersec    uint32
	TotalNumberofContentions         uint32
}

func (c *NETFramework_NETCLRLocksAndThreadsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads
	q := queryAll(&dst)
	if err := wmi.Query(q, &dst); err != nil {
		return nil, err
	}

	for _, process := range dst {

		if process.Name == "_Global_" {
			continue
		}

		ch <- prometheus.MustNewConstMetric(
			c.CurrentQueueLength,
			prometheus.GaugeValue,
			float64(process.CurrentQueueLength),
			process.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.NumberofcurrentlogicalThreads,
			prometheus.GaugeValue,
			float64(process.NumberofcurrentlogicalThreads),
			process.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.NumberofcurrentphysicalThreads,
			prometheus.GaugeValue,
			float64(process.NumberofcurrentphysicalThreads),
			process.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.Numberofcurrentrecognizedthreads,
			prometheus.GaugeValue,
			float64(process.Numberofcurrentrecognizedthreads),
			process.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.Numberoftotalrecognizedthreads,
			prometheus.CounterValue,
			float64(process.Numberoftotalrecognizedthreads),
			process.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.QueueLengthPeak,
			prometheus.CounterValue,
			float64(process.QueueLengthPeak),
			process.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.TotalNumberofContentions,
			prometheus.CounterValue,
			float64(process.TotalNumberofContentions),
			process.Name,
		)
	}

	return nil, nil
}
10  collector/netframework_clrlocksandthreads_test.go  Normal file
@@ -0,0 +1,10 @@
package collector

import (
	"testing"
)

func BenchmarkNETFrameworkNETCLRLocksAndThreadsCollector(b *testing.B) {
	// No context name required as collector source is WMI
	benchmarkCollector(b, "", NewNETFramework_NETCLRLocksAndThreadsCollector)
}
308  collector/netframework_clrmemory.go  Normal file
@@ -0,0 +1,308 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("netframework_clrmemory", NewNETFramework_NETCLRMemoryCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRMemoryCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRMemory metrics
|
||||
type NETFramework_NETCLRMemoryCollector struct {
|
||||
AllocatedBytes *prometheus.Desc
|
||||
FinalizationSurvivors *prometheus.Desc
|
||||
HeapSize *prometheus.Desc
|
||||
PromotedBytes *prometheus.Desc
|
||||
NumberGCHandles *prometheus.Desc
|
||||
NumberCollections *prometheus.Desc
|
||||
NumberInducedGC *prometheus.Desc
|
||||
NumberofPinnedObjects *prometheus.Desc
|
||||
NumberofSinkBlocksinuse *prometheus.Desc
|
||||
NumberTotalCommittedBytes *prometheus.Desc
|
||||
NumberTotalreservedBytes *prometheus.Desc
|
||||
TimeinGC *prometheus.Desc
|
||||
PromotedFinalizationMemoryfromGen0 *prometheus.Desc
|
||||
PromotedMemoryfromGen0 *prometheus.Desc
|
||||
PromotedMemoryfromGen1 *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRMemoryCollector ...
|
||||
func NewNETFramework_NETCLRMemoryCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrmemory"
|
||||
return &NETFramework_NETCLRMemoryCollector{
|
||||
AllocatedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "allocated_bytes_total"),
|
||||
"Displays the total number of bytes allocated on the garbage collection heap.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
FinalizationSurvivors: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "finalization_survivors"),
|
||||
"Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
HeapSize: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "heap_size_bytes"),
|
||||
"Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated.",
|
||||
[]string{"process", "area"},
|
||||
nil,
|
||||
),
|
||||
PromotedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "promoted_bytes"),
|
||||
"Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection.",
|
||||
[]string{"process", "area"},
|
||||
nil,
|
||||
),
|
||||
NumberGCHandles: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "number_gc_handles"),
|
||||
"Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberCollections: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "collections_total"),
|
||||
"Displays the number of times the generation objects are garbage collected since the application started.",
|
||||
[]string{"process", "area"},
|
||||
nil,
|
||||
),
|
||||
NumberInducedGC: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "induced_gc_total"),
|
||||
"Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofPinnedObjects: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "number_pinned_objects"),
|
||||
"Displays the number of pinned objects encountered in the last garbage collection.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofSinkBlocksinuse: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "number_sink_blocksinuse"),
|
||||
"Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberTotalCommittedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "committed_bytes"),
|
||||
"Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberTotalreservedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "reserved_bytes"),
|
||||
"Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TimeinGC: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "gc_time_percent"),
|
||||
"Displays the percentage of time that was spent performing a garbage collection in the last sample.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRMemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrmemory metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRMemory struct {
|
||||
Name string
|
||||
|
||||
AllocatedBytesPersec uint64
|
||||
FinalizationSurvivors uint64
|
||||
Frequency_PerfTime uint64
|
||||
Gen0heapsize uint64
|
||||
Gen0PromotedBytesPerSec uint64
|
||||
Gen1heapsize uint64
|
||||
Gen1PromotedBytesPerSec uint64
|
||||
Gen2heapsize uint64
|
||||
LargeObjectHeapsize uint64
|
||||
NumberBytesinallHeaps uint64
|
||||
NumberGCHandles uint64
|
||||
NumberGen0Collections uint64
|
||||
NumberGen1Collections uint64
|
||||
NumberGen2Collections uint64
|
||||
NumberInducedGC uint64
|
||||
NumberofPinnedObjects uint64
|
||||
NumberofSinkBlocksinuse uint64
|
||||
NumberTotalcommittedBytes uint64
|
||||
NumberTotalreservedBytes uint64
|
||||
// PercentTimeinGC has countertype=PERF_RAW_FRACTION.
|
||||
// Formula: (100 * CounterValue) / BaseValue
|
||||
// By docs https://docs.microsoft.com/en-us/previous-versions/windows/internet-explorer/ie-developer/scripting-articles/ms974615(v=msdn.10)#perf_raw_fraction
|
||||
PercentTimeinGC uint32
|
||||
// BaseValue is just a "magic" number used to make the calculation come out right.
|
||||
PercentTimeinGC_base uint32
|
||||
ProcessID uint64
|
||||
PromotedFinalizationMemoryfromGen0 uint64
|
||||
PromotedMemoryfromGen0 uint64
|
||||
PromotedMemoryfromGen1 uint64
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRMemoryCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRMemory
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AllocatedBytes,
|
||||
prometheus.CounterValue,
|
||||
float64(process.AllocatedBytesPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FinalizationSurvivors,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.FinalizationSurvivors),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.HeapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen0heapsize),
|
||||
process.Name,
|
||||
"Gen0",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PromotedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen0PromotedBytesPerSec),
|
||||
process.Name,
|
||||
"Gen0",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.HeapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen1heapsize),
|
||||
process.Name,
|
||||
"Gen1",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PromotedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen1PromotedBytesPerSec),
|
||||
process.Name,
|
||||
"Gen1",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.HeapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen2heapsize),
|
||||
process.Name,
|
||||
"Gen2",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.HeapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.LargeObjectHeapsize),
|
||||
process.Name,
|
||||
"LOH",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberGCHandles,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberGCHandles),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberCollections,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberGen0Collections),
|
||||
process.Name,
|
||||
"Gen0",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberCollections,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberGen1Collections),
|
||||
process.Name,
|
||||
"Gen1",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberCollections,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberGen2Collections),
|
||||
process.Name,
|
||||
"Gen2",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberInducedGC,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberInducedGC),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofPinnedObjects,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberofPinnedObjects),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofSinkBlocksinuse,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberofSinkBlocksinuse),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberTotalCommittedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberTotalcommittedBytes),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberTotalreservedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberTotalreservedBytes),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeinGC,
|
||||
prometheus.GaugeValue,
|
||||
float64(100*process.PercentTimeinGC)/float64(process.PercentTimeinGC_base),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
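For reference, a minimal sketch of how the gc_time_percent value above is derived from the raw PERF_RAW_FRACTION counter pair. The sample numbers are invented; only the formula (100 * CounterValue) / BaseValue mirrors the collector code above, and the printed metric name assumes the exporter's usual namespace.

```go
package main

import "fmt"

func main() {
	// Hypothetical raw values as read from Win32_PerfRawData_NETFramework_NETCLRMemory.
	var percentTimeinGC uint32 = 2000000      // CounterValue
	var percentTimeinGCBase uint32 = 50000000 // BaseValue (the "magic" denominator)

	// Same calculation the collector performs for the TimeinGC metric:
	// (100 * CounterValue) / BaseValue.
	gcTimePercent := float64(100*percentTimeinGC) / float64(percentTimeinGCBase)

	fmt.Printf("netframework_clrmemory_gc_time_percent = %.1f\n", gcTimePercent) // 4.0
}
```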
10  collector/netframework_clrmemory_test.go  Normal file
@@ -0,0 +1,10 @@
package collector

import (
	"testing"
)

func BenchmarkNETFrameworkNETCLRMemoryCollector(b *testing.B) {
	// No context name required as collector source is WMI
	benchmarkCollector(b, "", NewNETFramework_NETCLRMemoryCollector)
}
148  collector/netframework_clrremoting.go  Normal file
@@ -0,0 +1,148 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("netframework_clrremoting", NewNETFramework_NETCLRRemotingCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRRemotingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRRemoting metrics
|
||||
type NETFramework_NETCLRRemotingCollector struct {
|
||||
Channels *prometheus.Desc
|
||||
ContextBoundClassesLoaded *prometheus.Desc
|
||||
ContextBoundObjects *prometheus.Desc
|
||||
ContextProxies *prometheus.Desc
|
||||
Contexts *prometheus.Desc
|
||||
TotalRemoteCalls *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRRemotingCollector ...
|
||||
func NewNETFramework_NETCLRRemotingCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrremoting"
|
||||
return &NETFramework_NETCLRRemotingCollector{
|
||||
Channels: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "channels_total"),
|
||||
"Displays the total number of remoting channels registered across all application domains since application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
ContextBoundClassesLoaded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "context_bound_classes_loaded"),
|
||||
"Displays the current number of context-bound classes that are loaded.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
ContextBoundObjects: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "context_bound_objects_total"),
|
||||
"Displays the total number of context-bound objects allocated.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
ContextProxies: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "context_proxies_total"),
|
||||
"Displays the total number of remoting proxy objects in this process since it started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
Contexts: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "contexts"),
|
||||
"Displays the current number of remoting contexts in the application.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalRemoteCalls: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "remote_calls_total"),
|
||||
"Displays the total number of remote procedure calls invoked since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRRemotingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrremoting metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRRemoting struct {
|
||||
Name string
|
||||
|
||||
Channels uint32
|
||||
ContextBoundClassesLoaded uint32
|
||||
ContextBoundObjectsAllocPersec uint32
|
||||
ContextProxies uint32
|
||||
Contexts uint32
|
||||
RemoteCallsPersec uint32
|
||||
TotalRemoteCalls uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRRemotingCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRRemoting
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Channels,
|
||||
prometheus.CounterValue,
|
||||
float64(process.Channels),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContextBoundClassesLoaded,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.ContextBoundClassesLoaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContextBoundObjects,
|
||||
prometheus.CounterValue,
|
||||
float64(process.ContextBoundObjectsAllocPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContextProxies,
|
||||
prometheus.CounterValue,
|
||||
float64(process.ContextProxies),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Contexts,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Contexts),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalRemoteCalls,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalRemoteCalls),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
10  collector/netframework_clrremoting_test.go  Normal file
@@ -0,0 +1,10 @@
package collector

import (
	"testing"
)

func BenchmarkNETFrameworkNETCLRRemotingCollector(b *testing.B) {
	// No context name required as collector source is WMI
	benchmarkCollector(b, "", NewNETFramework_NETCLRRemotingCollector)
}
119  collector/netframework_clrsecurity.go  Normal file
@@ -0,0 +1,119 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("netframework_clrsecurity", NewNETFramework_NETCLRSecurityCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRSecurityCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRSecurity metrics
|
||||
type NETFramework_NETCLRSecurityCollector struct {
|
||||
NumberLinkTimeChecks *prometheus.Desc
|
||||
TimeinRTchecks *prometheus.Desc
|
||||
StackWalkDepth *prometheus.Desc
|
||||
TotalRuntimeChecks *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRSecurityCollector ...
|
||||
func NewNETFramework_NETCLRSecurityCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrsecurity"
|
||||
return &NETFramework_NETCLRSecurityCollector{
|
||||
NumberLinkTimeChecks: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "link_time_checks_total"),
|
||||
"Displays the total number of link-time code access security checks since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TimeinRTchecks: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "rt_checks_time_percent"),
|
||||
"Displays the percentage of time spent performing runtime code access security checks in the last sample.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
StackWalkDepth: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "stack_walk_depth"),
|
||||
"Displays the depth of the stack during that last runtime code access security check.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalRuntimeChecks: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "runtime_checks_total"),
|
||||
"Displays the total number of runtime code access security checks performed since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRSecurityCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRSecurity struct {
|
||||
Name string
|
||||
|
||||
Frequency_PerfTime uint32
|
||||
NumberLinkTimeChecks uint32
|
||||
PercentTimeinRTchecks uint32
|
||||
PercentTimeSigAuthenticating uint64
|
||||
StackWalkDepth uint32
|
||||
TotalRuntimeChecks uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRSecurityCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRSecurity
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberLinkTimeChecks,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberLinkTimeChecks),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeinRTchecks,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PercentTimeinRTchecks)/float64(process.Frequency_PerfTime),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StackWalkDepth,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.StackWalkDepth),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalRuntimeChecks,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalRuntimeChecks),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
10  collector/netframework_clrsecurity_test.go  Normal file
@@ -0,0 +1,10 @@
package collector

import (
	"testing"
)

func BenchmarkNETFrameworkNETCLRSecurityCollector(b *testing.B) {
	// No context name required as collector source is WMI
	benchmarkCollector(b, "", NewNETFramework_NETCLRSecurityCollector)
}
193  collector/os.go
@@ -1,21 +1,29 @@
|
||||
// returns data points from Win32_OperatingSystem
|
||||
// https://msdn.microsoft.com/en-us/library/aa394239 - Win32_OperatingSystem class
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"log"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/headers/netapi32"
|
||||
"github.com/prometheus-community/windows_exporter/headers/psapi"
|
||||
"github.com/prometheus-community/windows_exporter/headers/sysinfoapi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/sys/windows/registry"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["os"] = NewOSCollector
|
||||
registerCollector("os", NewOSCollector, "Paging File")
|
||||
}
|
||||
|
||||
// A OSCollector is a Prometheus collector for WMI metrics
|
||||
type OSCollector struct {
|
||||
OSInformation *prometheus.Desc
|
||||
PhysicalMemoryFreeBytes *prometheus.Desc
|
||||
PagingFreeBytes *prometheus.Desc
|
||||
VirtualMemoryFreeBytes *prometheus.Desc
|
||||
@@ -26,6 +34,14 @@ type OSCollector struct {
|
||||
PagingLimitBytes *prometheus.Desc
|
||||
VirtualMemoryBytes *prometheus.Desc
|
||||
VisibleMemoryBytes *prometheus.Desc
|
||||
Time *prometheus.Desc
|
||||
Timezone *prometheus.Desc
|
||||
}
|
||||
|
||||
type pagingFileCounter struct {
|
||||
Name string
|
||||
Usage float64 `perflib:"% Usage"`
|
||||
UsagePeak float64 `perflib:"% Usage Peak"`
|
||||
}
|
||||
|
||||
// NewOSCollector ...
|
||||
@@ -33,6 +49,12 @@ func NewOSCollector() (Collector, error) {
|
||||
const subsystem = "os"
|
||||
|
||||
return &OSCollector{
|
||||
OSInformation: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "info"),
|
||||
"OperatingSystem.Caption, OperatingSystem.Version",
|
||||
[]string{"product", "version", "major_version", "minor_version", "build_number"},
|
||||
nil,
|
||||
),
|
||||
PagingLimitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "paging_limit_bytes"),
|
||||
"OperatingSystem.SizeStoredInPagingFiles",
|
||||
@@ -51,6 +73,18 @@ func NewOSCollector() (Collector, error) {
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
Time: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "time"),
|
||||
"OperatingSystem.LocalDateTime",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
Timezone: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "timezone"),
|
||||
"OperatingSystem.LocalDateTime",
|
||||
[]string{"timezone"},
|
||||
nil,
|
||||
),
|
||||
Processes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processes"),
|
||||
"OperatingSystem.NumberOfProcesses",
|
||||
@@ -64,7 +98,7 @@ func NewOSCollector() (Collector, error) {
|
||||
nil,
|
||||
),
|
||||
ProcessMemoryLimitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "process_memory_limix_bytes"),
|
||||
prometheus.BuildFQName(Namespace, subsystem, "process_memory_limit_bytes"),
|
||||
"OperatingSystem.MaxProcessMemorySize",
|
||||
nil,
|
||||
nil,
|
||||
@@ -98,18 +132,22 @@ func NewOSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *OSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Println("[ERROR] failed collecting os metrics:", desc, err)
|
||||
func (c *OSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting os metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_OperatingSystem docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394239 - Win32_OperatingSystem class
|
||||
type Win32_OperatingSystem struct {
|
||||
Caption string
|
||||
FreePhysicalMemory uint64
|
||||
FreeSpaceInPagingFiles uint64
|
||||
FreeVirtualMemory uint64
|
||||
LocalDateTime time.Time
|
||||
MaxNumberOfProcesses uint32
|
||||
MaxProcessMemorySize uint64
|
||||
NumberOfProcesses uint32
|
||||
@@ -117,72 +155,171 @@ type Win32_OperatingSystem struct {
|
||||
SizeStoredInPagingFiles uint64
|
||||
TotalVirtualMemorySize uint64
|
||||
TotalVisibleMemorySize uint64
|
||||
Version string
|
||||
}
|
||||
|
||||
func (c *OSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_OperatingSystem
|
||||
if err := wmi.Query(wmi.CreateQuery(&dst, ""), &dst); err != nil {
|
||||
func (c *OSCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
nwgi, err := netapi32.GetWorkstationInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
gmse, err := sysinfoapi.GlobalMemoryStatusEx()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
currentTime := time.Now()
|
||||
timezoneName, _ := currentTime.Zone()
|
||||
|
||||
// Get total allocation of paging files across all disks.
|
||||
memManKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\CurrentControlSet\Control\Session Manager\Memory Management`, registry.QUERY_VALUE)
|
||||
defer memManKey.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pagingFiles, _, pagingErr := memManKey.GetStringsValue("ExistingPageFiles")
|
||||
// Get build number and product name from registry
|
||||
ntKey, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
|
||||
defer ntKey.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pn, _, err := ntKey.GetStringValue("ProductName")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bn, _, err := ntKey.GetStringValue("CurrentBuildNumber")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var fsipf float64
|
||||
for _, pagingFile := range pagingFiles {
|
||||
fileString := strings.ReplaceAll(pagingFile, `\??\`, "")
|
||||
file, err := os.Stat(fileString)
|
||||
// For unknown reasons, Windows doesn't always create a page file. Continue collection rather than aborting.
|
||||
if err != nil {
|
||||
log.Debugf("Failed to read page file (reason: %s): %s\n", err, fileString)
|
||||
} else {
|
||||
fsipf += float64(file.Size())
|
||||
}
|
||||
}
|
||||
|
||||
gpi, err := psapi.GetPerformanceInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pfc = make([]pagingFileCounter, 0)
|
||||
if err := unmarshalObject(ctx.perfObjects["Paging File"], &pfc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get current page file usage.
|
||||
var pfbRaw float64
|
||||
for _, pageFile := range pfc {
|
||||
if strings.Contains(strings.ToLower(pageFile.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
pfbRaw += pageFile.Usage
|
||||
}
|
||||
|
||||
// Subtract from total page file allocation on disk.
|
||||
pfb := fsipf - (pfbRaw * float64(gpi.PageSize))
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.OSInformation,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
fmt.Sprintf("Microsoft %s", pn), // Caption
|
||||
fmt.Sprintf("%d.%d.%s", nwgi.VersionMajor, nwgi.VersionMinor, bn), // Version
|
||||
fmt.Sprintf("%d", nwgi.VersionMajor), // Major Version
|
||||
fmt.Sprintf("%d", nwgi.VersionMinor), // Minor Version
|
||||
bn, // Build number
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PhysicalMemoryFreeBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].FreePhysicalMemory*1024), // KiB -> bytes
|
||||
float64(gmse.AvailPhys),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PagingFreeBytes,
|
||||
c.Time,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].FreeSpaceInPagingFiles*1024), // KiB -> bytes
|
||||
float64(currentTime.Unix()),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Timezone,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
timezoneName,
|
||||
)
|
||||
|
||||
if pagingErr == nil {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PagingFreeBytes,
|
||||
prometheus.GaugeValue,
|
||||
pfb,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PagingLimitBytes,
|
||||
prometheus.GaugeValue,
|
||||
fsipf,
|
||||
)
|
||||
} else {
|
||||
log.Debugln("Could not find HKLM:\\SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Memory Management key. windows_os_paging_free_bytes and windows_os_paging_limit_bytes will be omitted.")
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.VirtualMemoryFreeBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].FreeVirtualMemory*1024), // KiB -> bytes
|
||||
float64(gmse.AvailPageFile),
|
||||
)
|
||||
|
||||
// Windows has no defined limit, and is based off available resources. This currently isn't calculated by WMI and is set to default value.
|
||||
// https://techcommunity.microsoft.com/t5/windows-blog-archive/pushing-the-limits-of-windows-processes-and-threads/ba-p/723824
|
||||
// https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-operatingsystem
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessesLimit,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].MaxNumberOfProcesses),
|
||||
float64(4294967295),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessMemoryLimitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].MaxProcessMemorySize*1024), // KiB -> bytes
|
||||
float64(gmse.TotalVirtual),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Processes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].NumberOfProcesses),
|
||||
float64(gpi.ProcessCount),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Users,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].NumberOfUsers),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PagingLimitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SizeStoredInPagingFiles*1024), // KiB -> bytes
|
||||
float64(nwgi.LoggedOnUsers),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.VirtualMemoryBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].TotalVirtualMemorySize*1024), // KiB -> bytes
|
||||
float64(gmse.TotalPageFile),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.VisibleMemoryBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].TotalVisibleMemorySize*1024), // KiB -> bytes
|
||||
float64(gmse.TotalPhys),
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
|
||||
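To make the paging arithmetic above concrete, here is a small sketch with invented numbers: the total page file allocation found on disk (fsipf), minus the pages in use times the page size, gives the free bytes reported as windows_os_paging_free_bytes. Every value below is hypothetical.

```go
package main

import "fmt"

func main() {
	// Invented example values, not measured data.
	fsipf := float64(4294967296) // one 4 GiB pagefile.sys found on disk
	usedPages := float64(131072) // sum of the raw "% Usage" counters across page files
	pageSize := float64(4096)    // gpi.PageSize

	// Same calculation as the collector: free = allocated on disk - pages in use * page size.
	pfb := fsipf - usedPages*pageSize

	fmt.Printf("paging_free_bytes = %.0f\n", pfb) // 3758096384, i.e. 3.5 GiB free
}
```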
9  collector/os_test.go  Normal file
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkOSCollector(b *testing.B) {
	benchmarkCollector(b, "os", NewOSCollector)
}
126  collector/perflib.go  Normal file
@@ -0,0 +1,126 @@
package collector

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"

	perflibCollector "github.com/leoluk/perflib_exporter/collector"
	"github.com/leoluk/perflib_exporter/perflib"
	"github.com/prometheus-community/windows_exporter/log"
)

var nametable = perflib.QueryNameTable("Counter 009") // Reads the names in English TODO: validate that the English names are always present

func MapCounterToIndex(name string) string {
	return strconv.Itoa(int(nametable.LookupIndex(name)))
}

func getPerflibSnapshot(objNames string) (map[string]*perflib.PerfObject, error) {
	objects, err := perflib.QueryPerformanceData(objNames)
	if err != nil {
		return nil, err
	}

	indexed := make(map[string]*perflib.PerfObject)
	for _, obj := range objects {
		indexed[obj.Name] = obj
	}
	return indexed, nil
}

func unmarshalObject(obj *perflib.PerfObject, vs interface{}) error {
	if obj == nil {
		return fmt.Errorf("counter not found")
	}
	rv := reflect.ValueOf(vs)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return fmt.Errorf("%v is nil or not a pointer to slice", reflect.TypeOf(vs))
	}
	ev := rv.Elem()
	if ev.Kind() != reflect.Slice {
		return fmt.Errorf("%v is not slice", reflect.TypeOf(vs))
	}

	// Ensure sufficient length
	if ev.Cap() < len(obj.Instances) {
		nvs := reflect.MakeSlice(ev.Type(), len(obj.Instances), len(obj.Instances))
		ev.Set(nvs)
	}

	for idx, instance := range obj.Instances {
		target := ev.Index(idx)
		rt := target.Type()

		counters := make(map[string]*perflib.PerfCounter, len(instance.Counters))
		for _, ctr := range instance.Counters {
			if ctr.Def.IsBaseValue && !ctr.Def.IsNanosecondCounter {
				counters[ctr.Def.Name+"_Base"] = ctr
			} else {
				counters[ctr.Def.Name] = ctr
			}
		}

		for i := 0; i < target.NumField(); i++ {
			f := rt.Field(i)
			tag := f.Tag.Get("perflib")
			if tag == "" {
				continue
			}
			secondValue := false

			st := strings.Split(tag, ",")
			tag = st[0]

			for _, t := range st {
				if t == "secondvalue" {
					secondValue = true
				}
			}

			ctr, found := counters[tag]
			if !found {
				log.Debugf("missing counter %q, have %v", tag, counterMapKeys(counters))
				continue
			}
			if !target.Field(i).CanSet() {
				return fmt.Errorf("tagged field %v cannot be written to", f.Name)
			}
			if fieldType := target.Field(i).Type(); fieldType != reflect.TypeOf((*float64)(nil)).Elem() {
				return fmt.Errorf("tagged field %v has wrong type %v, must be float64", f.Name, fieldType)
			}

			if secondValue {
				if !ctr.Def.HasSecondValue {
					return fmt.Errorf("tagged field %v expected a SecondValue, which was not present", f.Name)
				}
				target.Field(i).SetFloat(float64(ctr.SecondValue))
				continue
			}

			switch ctr.Def.CounterType {
			case perflibCollector.PERF_ELAPSED_TIME:
				target.Field(i).SetFloat(float64(ctr.Value-windowsEpoch) / float64(obj.Frequency))
			case perflibCollector.PERF_100NSEC_TIMER, perflibCollector.PERF_PRECISION_100NS_TIMER:
				target.Field(i).SetFloat(float64(ctr.Value) * ticksToSecondsScaleFactor)
			default:
				target.Field(i).SetFloat(float64(ctr.Value))
			}
		}

		if instance.Name != "" && target.FieldByName("Name").CanSet() {
			target.FieldByName("Name").SetString(instance.Name)
		}
	}

	return nil
}

func counterMapKeys(m map[string]*perflib.PerfCounter) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	return keys
}
128  collector/perflib_test.go  Normal file
@@ -0,0 +1,128 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
perflibCollector "github.com/leoluk/perflib_exporter/collector"
|
||||
"github.com/leoluk/perflib_exporter/perflib"
|
||||
)
|
||||
|
||||
type simple struct {
|
||||
ValA float64 `perflib:"Something"`
|
||||
ValB float64 `perflib:"Something Else"`
|
||||
ValC float64 `perflib:"Something Else,secondvalue"`
|
||||
}
|
||||
|
||||
func TestUnmarshalPerflib(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
obj *perflib.PerfObject
|
||||
|
||||
expectedOutput []simple
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "nil check",
|
||||
obj: nil,
|
||||
expectedOutput: []simple{},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "Simple",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 123,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 123}},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Multiple properties",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 123,
|
||||
},
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something Else",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
HasSecondValue: true,
|
||||
},
|
||||
Value: 256,
|
||||
SecondValue: 222,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 123, ValB: 256, ValC: 222}},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Multiple instances",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 321,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 231,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 321}, {ValA: 231}},
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
output := make([]simple, 0)
|
||||
err := unmarshalObject(c.obj, &output)
|
||||
if err != nil && !c.expectError {
|
||||
t.Errorf("Did not expect error, got %q", err)
|
||||
}
|
||||
if err == nil && c.expectError {
|
||||
t.Errorf("Expected an error, but got ok")
|
||||
}
|
||||
|
||||
if err == nil && !reflect.DeepEqual(output, c.expectedOutput) {
|
||||
t.Errorf("Output mismatch, expected %+v, got %+v", c.expectedOutput, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
427  collector/process.go  Normal file
@@ -0,0 +1,427 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("process", newProcessCollector, "Process")
|
||||
}
|
||||
|
||||
var (
|
||||
processWhitelist = kingpin.Flag(
|
||||
"collector.process.whitelist",
|
||||
"Regexp of processes to include. Process name must both match whitelist and not match blacklist to be included.",
|
||||
).Default(".*").String()
|
||||
processBlacklist = kingpin.Flag(
|
||||
"collector.process.blacklist",
|
||||
"Regexp of processes to exclude. Process name must both match whitelist and not match blacklist to be included.",
|
||||
).Default("").String()
|
||||
)
|
||||
|
||||
type processCollector struct {
|
||||
StartTime *prometheus.Desc
|
||||
CPUTimeTotal *prometheus.Desc
|
||||
HandleCount *prometheus.Desc
|
||||
IOBytesTotal *prometheus.Desc
|
||||
IOOperationsTotal *prometheus.Desc
|
||||
PageFaultsTotal *prometheus.Desc
|
||||
PageFileBytes *prometheus.Desc
|
||||
PoolBytes *prometheus.Desc
|
||||
PriorityBase *prometheus.Desc
|
||||
PrivateBytes *prometheus.Desc
|
||||
ThreadCount *prometheus.Desc
|
||||
VirtualBytes *prometheus.Desc
|
||||
WorkingSetPrivate *prometheus.Desc
|
||||
WorkingSetPeak *prometheus.Desc
|
||||
WorkingSet *prometheus.Desc
|
||||
|
||||
processWhitelistPattern *regexp.Regexp
|
||||
processBlacklistPattern *regexp.Regexp
|
||||
}
|
||||
|
||||
// NewProcessCollector ...
|
||||
func newProcessCollector() (Collector, error) {
|
||||
const subsystem = "process"
|
||||
|
||||
if *processWhitelist == ".*" && *processBlacklist == "" {
|
||||
log.Warn("No filters specified for process collector. This will generate a very large number of metrics!")
|
||||
}
|
||||
|
||||
return &processCollector{
|
||||
StartTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "start_time"),
|
||||
"Time of process start.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
CPUTimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_time_total"),
|
||||
"Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user).",
|
||||
[]string{"process", "process_id", "creating_process_id", "mode"},
|
||||
nil,
|
||||
),
|
||||
HandleCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "handles"),
|
||||
"Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
IOBytesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "io_bytes_total"),
|
||||
"Bytes issued to I/O operations in different modes (read, write, other).",
|
||||
[]string{"process", "process_id", "creating_process_id", "mode"},
|
||||
nil,
|
||||
),
|
||||
IOOperationsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "io_operations_total"),
|
||||
"I/O operations issued in different modes (read, write, other).",
|
||||
[]string{"process", "process_id", "creating_process_id", "mode"},
|
||||
nil,
|
||||
),
|
||||
PageFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "page_faults_total"),
|
||||
"Page faults by the threads executing in this process.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
PageFileBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "page_file_bytes"),
|
||||
"Current number of bytes this process has used in the paging file(s).",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
PoolBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_bytes"),
|
||||
"Pool Bytes is the last observed number of bytes in the paged or nonpaged pool.",
|
||||
[]string{"process", "process_id", "creating_process_id", "pool"},
|
||||
nil,
|
||||
),
|
||||
PriorityBase: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "priority_base"),
|
||||
"Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
PrivateBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "private_bytes"),
|
||||
"Current number of bytes this process has allocated that cannot be shared with other processes.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
ThreadCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "threads"),
|
||||
"Number of threads currently active in this process.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
VirtualBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "virtual_bytes"),
|
||||
"Current size, in bytes, of the virtual address space that the process is using.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
WorkingSetPrivate: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "working_set_private_bytes"),
|
||||
"Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
WorkingSetPeak: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "working_set_peak_bytes"),
|
||||
"Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
WorkingSet: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "working_set_bytes"),
|
||||
"Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
processWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *processWhitelist)),
|
||||
processBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *processBlacklist)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type perflibProcess struct {
|
||||
Name string
|
||||
PercentProcessorTime float64 `perflib:"% Processor Time"`
|
||||
PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
|
||||
PercentUserTime float64 `perflib:"% User Time"`
|
||||
CreatingProcessID float64 `perflib:"Creating Process ID"`
|
||||
ElapsedTime float64 `perflib:"Elapsed Time"`
|
||||
HandleCount float64 `perflib:"Handle Count"`
|
||||
IDProcess float64 `perflib:"ID Process"`
|
||||
IODataBytesPerSec float64 `perflib:"IO Data Bytes/sec"`
|
||||
IODataOperationsPerSec float64 `perflib:"IO Data Operations/sec"`
|
||||
IOOtherBytesPerSec float64 `perflib:"IO Other Bytes/sec"`
|
||||
IOOtherOperationsPerSec float64 `perflib:"IO Other Operations/sec"`
|
||||
IOReadBytesPerSec float64 `perflib:"IO Read Bytes/sec"`
|
||||
IOReadOperationsPerSec float64 `perflib:"IO Read Operations/sec"`
|
||||
IOWriteBytesPerSec float64 `perflib:"IO Write Bytes/sec"`
|
||||
IOWriteOperationsPerSec float64 `perflib:"IO Write Operations/sec"`
|
||||
PageFaultsPerSec float64 `perflib:"Page Faults/sec"`
|
||||
PageFileBytesPeak float64 `perflib:"Page File Bytes Peak"`
|
||||
PageFileBytes float64 `perflib:"Page File Bytes"`
|
||||
PoolNonpagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
|
||||
PoolPagedBytes float64 `perflib:"Pool Paged Bytes"`
|
||||
PriorityBase float64 `perflib:"Priority Base"`
|
||||
PrivateBytes float64 `perflib:"Private Bytes"`
|
||||
ThreadCount float64 `perflib:"Thread Count"`
|
||||
VirtualBytesPeak float64 `perflib:"Virtual Bytes Peak"`
|
||||
VirtualBytes float64 `perflib:"Virtual Bytes"`
|
||||
WorkingSetPrivate float64 `perflib:"Working Set - Private"`
|
||||
WorkingSetPeak float64 `perflib:"Working Set Peak"`
|
||||
WorkingSet float64 `perflib:"Working Set"`
|
||||
}
|
||||
|
||||
type WorkerProcess struct {
|
||||
AppPoolName string
|
||||
ProcessId uint64
|
||||
}
|
||||
|
||||
func (c *processCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcess, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Process"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var dst_wp []WorkerProcess
|
||||
q_wp := queryAll(&dst_wp)
|
||||
if err := wmi.QueryNamespace(q_wp, &dst_wp, "root\\WebAdministration"); err != nil {
|
||||
log.Debugf("Could not query WebAdministration namespace for IIS worker processes: %v. Skipping", err)
|
||||
}
|
||||
|
||||
for _, process := range data {
|
||||
if process.Name == "_Total" ||
|
||||
c.processBlacklistPattern.MatchString(process.Name) ||
|
||||
!c.processWhitelistPattern.MatchString(process.Name) {
|
||||
continue
|
||||
}
|
||||
// Duplicate processes are suffixed # and an index number. Remove those.
|
||||
processName := strings.Split(process.Name, "#")[0]
|
||||
pid := strconv.FormatUint(uint64(process.IDProcess), 10)
|
||||
cpid := strconv.FormatUint(uint64(process.CreatingProcessID), 10)
|
||||
|
||||
for _, wp := range dst_wp {
|
||||
if wp.ProcessId == uint64(process.IDProcess) {
|
||||
processName = strings.Join([]string{processName, wp.AppPoolName}, "_")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StartTime,
|
||||
prometheus.GaugeValue,
|
||||
process.ElapsedTime,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.HandleCount,
|
||||
prometheus.GaugeValue,
|
||||
process.HandleCount,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CPUTimeTotal,
|
||||
prometheus.CounterValue,
|
||||
process.PercentPrivilegedTime,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"privileged",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CPUTimeTotal,
|
||||
prometheus.CounterValue,
|
||||
process.PercentUserTime,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOOtherBytesPerSec,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"other",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOOperationsTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOOtherOperationsPerSec,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"other",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOReadBytesPerSec,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"read",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOOperationsTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOReadOperationsPerSec,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"read",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOWriteBytesPerSec,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"write",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOOperationsTotal,
|
||||
prometheus.CounterValue,
|
||||
process.IOWriteOperationsPerSec,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"write",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PageFaultsTotal,
|
||||
prometheus.CounterValue,
|
||||
process.PageFaultsPerSec,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PageFileBytes,
|
||||
prometheus.GaugeValue,
|
||||
process.PageFileBytes,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolBytes,
|
||||
prometheus.GaugeValue,
|
||||
process.PoolNonpagedBytes,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"nonpaged",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolBytes,
|
||||
prometheus.GaugeValue,
|
||||
process.PoolPagedBytes,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"paged",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PriorityBase,
|
||||
prometheus.GaugeValue,
|
||||
process.PriorityBase,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PrivateBytes,
|
||||
prometheus.GaugeValue,
|
||||
process.PrivateBytes,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ThreadCount,
|
||||
prometheus.GaugeValue,
|
||||
process.ThreadCount,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.VirtualBytes,
|
||||
prometheus.GaugeValue,
|
||||
process.VirtualBytes,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WorkingSetPrivate,
|
||||
prometheus.GaugeValue,
|
||||
process.WorkingSetPrivate,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WorkingSetPeak,
|
||||
prometheus.GaugeValue,
|
||||
process.WorkingSetPeak,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WorkingSet,
|
||||
prometheus.GaugeValue,
|
||||
process.WorkingSet,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
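A short sketch of how the whitelist/blacklist flags behave: the collector wraps the user-supplied expression in ^(?:...)$, so it must match the whole process name rather than a substring. The flag value and process names below are placeholders.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical value for --collector.process.whitelist.
	expr := "chrome|firefox"

	// The collector anchors the expression, so only full-name matches are included.
	whitelist := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", expr))

	fmt.Println(whitelist.MatchString("chrome"))       // true
	fmt.Println(whitelist.MatchString("chromedriver")) // false: substring match only
}
```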
14  collector/process_test.go  Normal file
@@ -0,0 +1,14 @@
package collector

import (
	"testing"
)

func BenchmarkProcessCollector(b *testing.B) {
	// Whitelist is not set in testing context (kingpin flags not parsed), causing the collector to skip all processes.
	localProcessWhitelist := ".+"
	processWhitelist = &localProcessWhitelist

	// No context name required as collector source is WMI
	benchmarkCollector(b, "", newProcessCollector)
}
348  collector/remote_fx.go  Normal file
@@ -0,0 +1,348 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("remote_fx", NewRemoteFx, "RemoteFX Network", "RemoteFX Graphics")
|
||||
}
|
||||
|
||||
// A RemoteFxNetworkCollector is a Prometheus collector for
|
||||
// WMI Win32_PerfRawData_Counters_RemoteFXNetwork & Win32_PerfRawData_Counters_RemoteFXGraphics metrics
|
||||
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxnetwork/
|
||||
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_remotefxgraphics/
|
||||
|
||||
type RemoteFxCollector struct {
|
||||
// net
|
||||
BaseTCPRTT *prometheus.Desc
|
||||
BaseUDPRTT *prometheus.Desc
|
||||
CurrentTCPBandwidth *prometheus.Desc
|
||||
CurrentTCPRTT *prometheus.Desc
|
||||
CurrentUDPBandwidth *prometheus.Desc
|
||||
CurrentUDPRTT *prometheus.Desc
|
||||
TotalReceivedBytes *prometheus.Desc
|
||||
TotalSentBytes *prometheus.Desc
|
||||
UDPPacketsReceivedPersec *prometheus.Desc
|
||||
UDPPacketsSentPersec *prometheus.Desc
|
||||
|
||||
//gfx
|
||||
AverageEncodingTime *prometheus.Desc
|
||||
FrameQuality *prometheus.Desc
|
||||
FramesSkippedPerSecondInsufficientResources *prometheus.Desc
|
||||
GraphicsCompressionratio *prometheus.Desc
|
||||
InputFramesPerSecond *prometheus.Desc
|
||||
OutputFramesPerSecond *prometheus.Desc
|
||||
SourceFramesPerSecond *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewRemoteFx ...
|
||||
func NewRemoteFx() (Collector, error) {
|
||||
const subsystem = "remote_fx"
|
||||
return &RemoteFxCollector{
|
||||
// net
|
||||
BaseTCPRTT: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "net_base_tcp_rtt_seconds"),
|
||||
"Base TCP round-trip time (RTT) detected in seconds",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
BaseUDPRTT: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "net_base_udp_rtt_seconds"),
|
||||
"Base UDP round-trip time (RTT) detected in seconds.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
CurrentTCPBandwidth: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "net_current_tcp_bandwidth"),
|
||||
"TCP Bandwidth detected in bytes per second.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
CurrentTCPRTT: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "net_current_tcp_rtt_seconds"),
|
||||
"Average TCP round-trip time (RTT) detected in seconds.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
CurrentUDPBandwidth: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "net_current_udp_bandwidth"),
|
||||
"UDP Bandwidth detected in bytes per second.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
CurrentUDPRTT: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "net_current_udp_rtt_seconds"),
|
||||
"Average UDP round-trip time (RTT) detected in seconds.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
TotalReceivedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "net_received_bytes_total"),
|
||||
"(TotalReceivedBytes)",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
TotalSentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "net_sent_bytes_total"),
|
||||
"(TotalSentBytes)",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
UDPPacketsReceivedPersec: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "net_udp_packets_received_total"),
|
||||
"Rate in packets per second at which packets are received over UDP.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
UDPPacketsSentPersec: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "net_udp_packets_sent_total"),
|
||||
"Rate in packets per second at which packets are sent over UDP.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
|
||||
//gfx
|
||||
AverageEncodingTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "gfx_average_encoding_time_seconds"),
|
||||
"Average frame encoding time in seconds",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
FrameQuality: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "gfx_frame_quality"),
|
||||
"Quality of the output frame expressed as a percentage of the quality of the source frame.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
FramesSkippedPerSecondInsufficientResources: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "gfx_frames_skipped_insufficient_resource_total"),
|
||||
"Number of frames skipped per second due to insufficient client resources.",
|
||||
[]string{"session_name", "resource"},
|
||||
nil,
|
||||
),
|
||||
GraphicsCompressionratio: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "gfx_graphics_compression_ratio"),
|
||||
"Ratio of the number of bytes encoded to the number of bytes input.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
InputFramesPerSecond: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "gfx_input_frames_total"),
|
||||
"Number of sources frames provided as input to RemoteFX graphics per second.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
OutputFramesPerSecond: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "gfx_output_frames_total"),
|
||||
"Number of frames sent to the client per second.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
SourceFramesPerSecond: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "gfx_source_frames_total"),
|
||||
"Number of frames composed by the source (DWM) per second.",
|
||||
[]string{"session_name"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *RemoteFxCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectRemoteFXNetworkCount(ctx, ch); err != nil {
|
||||
log.Error("failed collecting terminal services session count metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
if desc, err := c.collectRemoteFXGraphicsCounters(ctx, ch); err != nil {
|
||||
log.Error("failed collecting terminal services session count metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type perflibRemoteFxNetwork struct {
|
||||
Name string
|
||||
BaseTCPRTT float64 `perflib:"Base TCP RTT"`
|
||||
BaseUDPRTT float64 `perflib:"Base UDP RTT"`
|
||||
CurrentTCPBandwidth float64 `perflib:"Current TCP Bandwidth"`
|
||||
CurrentTCPRTT float64 `perflib:"Current TCP RTT"`
|
||||
CurrentUDPBandwidth float64 `perflib:"Current UDP Bandwidth"`
|
||||
CurrentUDPRTT float64 `perflib:"Current UDP RTT"`
|
||||
TotalReceivedBytes float64 `perflib:"Total Received Bytes"`
|
||||
TotalSentBytes float64 `perflib:"Total Sent Bytes"`
|
||||
UDPPacketsReceivedPersec float64 `perflib:"UDP Packets Received/sec"`
|
||||
UDPPacketsSentPersec float64 `perflib:"UDP Packets Sent/sec"`
|
||||
}
|
||||
|
||||
func (c *RemoteFxCollector) collectRemoteFXNetworkCount(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
dst := make([]perflibRemoteFxNetwork, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["RemoteFX Network"], &dst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, d := range dst {
|
||||
// only collect metrics for remote named sessions
|
||||
n := strings.ToLower(d.Name)
|
||||
if n == "" || n == "services" || n == "console" {
|
||||
continue
|
||||
}
|
||||
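// The RTT counters are reported by perflib in milliseconds; milliSecToSec converts them to seconds to match the *_seconds metric names.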
ch <- prometheus.MustNewConstMetric(
|
||||
c.BaseTCPRTT,
|
||||
prometheus.GaugeValue,
|
||||
milliSecToSec(d.BaseTCPRTT),
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BaseUDPRTT,
|
||||
prometheus.GaugeValue,
|
||||
milliSecToSec(d.BaseUDPRTT),
|
||||
d.Name,
|
||||
)
|
||||
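// Multiplying by 1000 and dividing by 8 converts the bandwidth counter to bytes per second (assuming the counter reports kilobits per second, which this conversion implies).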
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentTCPBandwidth,
|
||||
prometheus.GaugeValue,
|
||||
(d.CurrentTCPBandwidth*1000)/8,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentTCPRTT,
|
||||
prometheus.GaugeValue,
|
||||
milliSecToSec(d.CurrentTCPRTT),
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentUDPBandwidth,
|
||||
prometheus.GaugeValue,
|
||||
(d.CurrentUDPBandwidth*1000)/8,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentUDPRTT,
|
||||
prometheus.GaugeValue,
|
||||
milliSecToSec(d.CurrentUDPRTT),
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalReceivedBytes,
|
||||
prometheus.CounterValue,
|
||||
d.TotalReceivedBytes,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalSentBytes,
|
||||
prometheus.CounterValue,
|
||||
d.TotalSentBytes,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UDPPacketsReceivedPersec,
|
||||
prometheus.CounterValue,
|
||||
d.UDPPacketsReceivedPersec,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UDPPacketsSentPersec,
|
||||
prometheus.CounterValue,
|
||||
d.UDPPacketsSentPersec,
|
||||
d.Name,
|
||||
)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type perflibRemoteFxGraphics struct {
|
||||
Name string
|
||||
AverageEncodingTime float64 `perflib:"Average Encoding Time"`
|
||||
FrameQuality float64 `perflib:"Frame Quality"`
|
||||
FramesSkippedPerSecondInsufficientClientResources float64 `perflib:"Frames Skipped/Second - Insufficient Client Resources"`
|
||||
FramesSkippedPerSecondInsufficientNetworkResources float64 `perflib:"Frames Skipped/Second - Insufficient Network Resources"`
|
||||
FramesSkippedPerSecondInsufficientServerResources float64 `perflib:"Frames Skipped/Second - Insufficient Server Resources"`
|
||||
GraphicsCompressionratio float64 `perflib:"Graphics Compression ratio"`
|
||||
InputFramesPerSecond float64 `perflib:"Input Frames/Second"`
|
||||
OutputFramesPerSecond float64 `perflib:"Output Frames/Second"`
|
||||
SourceFramesPerSecond float64 `perflib:"Source Frames/Second"`
|
||||
}
|
||||
|
||||
func (c *RemoteFxCollector) collectRemoteFXGraphicsCounters(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
dst := make([]perflibRemoteFxGraphics, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["RemoteFX Graphics"], &dst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, d := range dst {
|
||||
// only collect metrics for remote named sessions
|
||||
n := strings.ToLower(d.Name)
|
||||
if n == "" || n == "services" || n == "console" {
|
||||
continue
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AverageEncodingTime,
|
||||
prometheus.GaugeValue,
|
||||
milliSecToSec(d.AverageEncodingTime),
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FrameQuality,
|
||||
prometheus.GaugeValue,
|
||||
d.FrameQuality,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FramesSkippedPerSecondInsufficientResources,
|
||||
prometheus.CounterValue,
|
||||
d.FramesSkippedPerSecondInsufficientClientResources,
|
||||
d.Name,
|
||||
"client",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FramesSkippedPerSecondInsufficientResources,
|
||||
prometheus.CounterValue,
|
||||
d.FramesSkippedPerSecondInsufficientNetworkResources,
|
||||
d.Name,
|
||||
"network",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FramesSkippedPerSecondInsufficientResources,
|
||||
prometheus.CounterValue,
|
||||
d.FramesSkippedPerSecondInsufficientServerResources,
|
||||
d.Name,
|
||||
"server",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.GraphicsCompressionratio,
|
||||
prometheus.GaugeValue,
|
||||
d.GraphicsCompressionratio,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InputFramesPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.InputFramesPerSecond,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.OutputFramesPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.OutputFramesPerSecond,
|
||||
d.Name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SourceFramesPerSecond,
|
||||
prometheus.CounterValue,
|
||||
d.SourceFramesPerSecond,
|
||||
d.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
collector/remote_fx_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkRemoteFXCollector(b *testing.B) {
|
||||
benchmarkCollector(b, "remote_fx", NewRemoteFx)
|
||||
}
|
||||
collector/scheduled_task.go (new file, 346 lines)
@@ -0,0 +1,346 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
ole "github.com/go-ole/go-ole"
|
||||
"github.com/go-ole/go-ole/oleutil"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
taskWhitelist = kingpin.Flag(
|
||||
"collector.scheduled_task.whitelist",
|
||||
"Regexp of tasks to whitelist. Task path must both match whitelist and not match blacklist to be included.",
|
||||
).Default(".+").String()
|
||||
taskBlacklist = kingpin.Flag(
|
||||
"collector.scheduled_task.blacklist",
|
||||
"Regexp of tasks to blacklist. Task path must both match whitelist and not match blacklist to be included.",
|
||||
).String()
|
||||
)
|
||||
|
||||
type ScheduledTaskCollector struct {
|
||||
LastResult *prometheus.Desc
|
||||
MissedRuns *prometheus.Desc
|
||||
State *prometheus.Desc
|
||||
|
||||
taskWhitelistPattern *regexp.Regexp
|
||||
taskBlacklistPattern *regexp.Regexp
|
||||
}
|
||||
|
||||
// TaskState ...
|
||||
// https://docs.microsoft.com/en-us/windows/desktop/api/taskschd/ne-taskschd-task_state
|
||||
type TaskState uint
|
||||
|
||||
type TaskResult uint
|
||||
|
||||
const (
|
||||
TASK_STATE_UNKNOWN TaskState = iota
|
||||
TASK_STATE_DISABLED
|
||||
TASK_STATE_QUEUED
|
||||
TASK_STATE_READY
|
||||
TASK_STATE_RUNNING
|
||||
TASK_RESULT_SUCCESS TaskResult = 0x0
|
||||
)
|
||||
|
||||
// ScheduledTask ...
|
||||
type ScheduledTask struct {
|
||||
Name string
|
||||
Path string
|
||||
Enabled bool
|
||||
State TaskState
|
||||
MissedRunsCount float64
|
||||
LastTaskResult TaskResult
|
||||
}
|
||||
|
||||
type ScheduledTasks []ScheduledTask
|
||||
|
||||
func init() {
|
||||
registerCollector("scheduled_task", NewScheduledTask)
|
||||
}
|
||||
|
||||
// NewScheduledTask ...
|
||||
func NewScheduledTask() (Collector, error) {
|
||||
const subsystem = "scheduled_task"
|
||||
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
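// Initialize COM for this thread; if COM was already initialized, CoInitializeEx returns S_FALSE, which is treated as success below.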
err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
|
||||
if err != nil {
|
||||
code := err.(*ole.OleError).Code()
|
||||
if code != ole.S_OK && code != S_FALSE {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
defer ole.CoUninitialize()
|
||||
|
||||
return &ScheduledTaskCollector{
|
||||
LastResult: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "last_result"),
|
||||
"The result that was returned the last time the registered task was run",
|
||||
[]string{"task"},
|
||||
nil,
|
||||
),
|
||||
|
||||
MissedRuns: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "missed_runs"),
|
||||
"The number of times the registered task missed a scheduled run",
|
||||
[]string{"task"},
|
||||
nil,
|
||||
),
|
||||
|
||||
State: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "state"),
|
||||
"The current state of a scheduled task",
|
||||
[]string{"task", "state"},
|
||||
nil,
|
||||
),
|
||||
|
||||
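// Anchor the user-supplied patterns so a task path must match the whole whitelist/blacklist expression, not just a substring.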
taskWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *taskWhitelist)),
|
||||
taskBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *taskBlacklist)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *ScheduledTaskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting user metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var TASK_STATES = []string{"disabled", "queued", "ready", "running", "unknown"}
|
||||
|
||||
func (c *ScheduledTaskCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
scheduledTasks, err := getScheduledTasks()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, task := range scheduledTasks {
|
||||
if c.taskBlacklistPattern.MatchString(task.Path) ||
|
||||
!c.taskWhitelistPattern.MatchString(task.Path) {
|
||||
continue
|
||||
}
|
||||
|
||||
lastResult := 0.0
|
||||
if task.LastTaskResult == TASK_RESULT_SUCCESS {
|
||||
lastResult = 1.0
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LastResult,
|
||||
prometheus.GaugeValue,
|
||||
lastResult,
|
||||
task.Path,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MissedRuns,
|
||||
prometheus.GaugeValue,
|
||||
task.MissedRunsCount,
|
||||
task.Path,
|
||||
)
|
||||
|
||||
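// Emit one gauge per known state; the gauge matching the task's current state is set to 1 and all others to 0.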
for _, state := range TASK_STATES {
|
||||
var stateValue float64
|
||||
|
||||
if strings.ToLower(task.State.String()) == state {
|
||||
stateValue = 1.0
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.State,
|
||||
prometheus.GaugeValue,
|
||||
stateValue,
|
||||
task.Path,
|
||||
state,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
const SCHEDULED_TASK_PROGRAM_ID = "Schedule.Service.1"
|
||||
|
||||
// S_FALSE is returned by CoInitialize if it was already called on this thread.
|
||||
const S_FALSE = 0x00000001
|
||||
|
||||
func getScheduledTasks() (scheduledTasks ScheduledTasks, err error) {
|
||||
schedClassID, err := ole.ClassIDFrom(SCHEDULED_TASK_PROGRAM_ID)
|
||||
if err != nil {
|
||||
return scheduledTasks, err
|
||||
}
|
||||
|
||||
taskSchedulerObj, err := ole.CreateInstance(schedClassID, nil)
|
||||
if err != nil || taskSchedulerObj == nil {
|
||||
return scheduledTasks, err
|
||||
}
|
||||
defer taskSchedulerObj.Release()
|
||||
|
||||
taskServiceObj := taskSchedulerObj.MustQueryInterface(ole.IID_IDispatch)
|
||||
_, err = oleutil.CallMethod(taskServiceObj, "Connect")
|
||||
if err != nil {
|
||||
return scheduledTasks, err
|
||||
}
|
||||
defer taskServiceObj.Release()
|
||||
|
||||
res, err := oleutil.CallMethod(taskServiceObj, "GetFolder", `\`)
|
||||
if err != nil {
|
||||
return scheduledTasks, err
|
||||
}
|
||||
|
||||
rootFolderObj := res.ToIDispatch()
|
||||
defer rootFolderObj.Release()
|
||||
|
||||
err = fetchTasksRecursively(rootFolderObj, &scheduledTasks)
|
||||
|
||||
return scheduledTasks, err
|
||||
}
|
||||
|
||||
func fetchTasksInFolder(folder *ole.IDispatch, scheduledTasks *ScheduledTasks) error {
|
||||
res, err := oleutil.CallMethod(folder, "GetTasks", 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tasks := res.ToIDispatch()
|
||||
defer tasks.Release()
|
||||
|
||||
err = oleutil.ForEach(tasks, func(v *ole.VARIANT) error {
|
||||
task := v.ToIDispatch()
|
||||
defer task.Release()
|
||||
|
||||
parsedTask, err := parseTask(task)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*scheduledTasks = append(*scheduledTasks, parsedTask)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func fetchTasksRecursively(folder *ole.IDispatch, scheduledTasks *ScheduledTasks) error {
|
||||
if err := fetchTasksInFolder(folder, scheduledTasks); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res, err := oleutil.CallMethod(folder, "GetFolders", 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
subFolders := res.ToIDispatch()
|
||||
defer subFolders.Release()
|
||||
|
||||
err = oleutil.ForEach(subFolders, func(v *ole.VARIANT) error {
|
||||
subFolder := v.ToIDispatch()
|
||||
defer subFolder.Release()
|
||||
return fetchTasksRecursively(subFolder, scheduledTasks)
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func parseTask(task *ole.IDispatch) (scheduledTask ScheduledTask, err error) {
|
||||
taskNameVar, err := oleutil.GetProperty(task, "Name")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
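// OLE VARIANTs must be released with Clear; the deferred calls below also surface any Clear error through the named return value.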
defer func() {
|
||||
if tempErr := taskNameVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
taskPathVar, err := oleutil.GetProperty(task, "Path")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
defer func() {
|
||||
if tempErr := taskPathVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
taskEnabledVar, err := oleutil.GetProperty(task, "Enabled")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
defer func() {
|
||||
if tempErr := taskEnabledVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
taskStateVar, err := oleutil.GetProperty(task, "State")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
defer func() {
|
||||
if tempErr := taskStateVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
taskNumberOfMissedRunsVar, err := oleutil.GetProperty(task, "NumberOfMissedRuns")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
defer func() {
|
||||
if tempErr := taskNumberOfMissedRunsVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
taskLastTaskResultVar, err := oleutil.GetProperty(task, "LastTaskResult")
|
||||
if err != nil {
|
||||
return scheduledTask, err
|
||||
}
|
||||
defer func() {
|
||||
if tempErr := taskLastTaskResultVar.Clear(); tempErr != nil {
|
||||
err = tempErr
|
||||
}
|
||||
}()
|
||||
|
||||
scheduledTask.Name = taskNameVar.ToString()
|
||||
scheduledTask.Path = strings.ReplaceAll(taskPathVar.ToString(), "\\", "/")
|
||||
scheduledTask.Enabled = taskEnabledVar.Value().(bool)
|
||||
scheduledTask.State = TaskState(taskStateVar.Val)
|
||||
scheduledTask.MissedRunsCount = float64(taskNumberOfMissedRunsVar.Val)
|
||||
scheduledTask.LastTaskResult = TaskResult(taskLastTaskResultVar.Val)
|
||||
|
||||
return scheduledTask, err
|
||||
}
|
||||
|
||||
func (t TaskState) String() string {
|
||||
switch t {
|
||||
case TASK_STATE_UNKNOWN:
|
||||
return "Unknown"
|
||||
case TASK_STATE_DISABLED:
|
||||
return "Disabled"
|
||||
case TASK_STATE_QUEUED:
|
||||
return "Queued"
|
||||
case TASK_STATE_READY:
|
||||
return "Ready"
|
||||
case TASK_STATE_RUNNING:
|
||||
return "Running"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
collector/scheduled_task_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkScheduledTaskCollector(b *testing.B) {
|
||||
benchmarkCollector(b, "scheduled_task", NewScheduledTask)
|
||||
}
|
||||
collector/service.go
@@ -1,29 +1,64 @@
|
||||
// returns data points from Win32_Service
|
||||
// https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx - Win32_Service class
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"log"
|
||||
"fmt"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/sys/windows"
|
||||
"golang.org/x/sys/windows/svc/mgr"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["service"] = NewserviceCollector
|
||||
registerCollector("service", NewserviceCollector)
|
||||
}
|
||||
|
||||
var (
|
||||
serviceWhereClause = kingpin.Flag(
|
||||
"collector.service.services-where",
|
||||
"WQL 'where' clause to use in WMI metrics query. Limits the response to the services you specify and reduces the size of the response.",
|
||||
).Default("").String()
|
||||
useAPI = kingpin.Flag(
|
||||
"collector.service.use-api",
|
||||
"Use API calls to collect service data instead of WMI. Flag 'collector.service.services-where' won't be effective.",
|
||||
).Default("false").Bool()
|
||||
)
|
||||
|
||||
// A serviceCollector is a Prometheus collector for WMI Win32_Service metrics
|
||||
type serviceCollector struct {
|
||||
State *prometheus.Desc
|
||||
StartMode *prometheus.Desc
|
||||
Information *prometheus.Desc
|
||||
State *prometheus.Desc
|
||||
StartMode *prometheus.Desc
|
||||
Status *prometheus.Desc
|
||||
|
||||
queryWhereClause string
|
||||
}
|
||||
|
||||
// NewserviceCollector ...
|
||||
func NewserviceCollector() (Collector, error) {
|
||||
const subsystem = "service"
|
||||
|
||||
if *serviceWhereClause == "" {
|
||||
log.Warn("No where-clause specified for service collector. This will generate a very large number of metrics!")
|
||||
}
|
||||
if *useAPI {
|
||||
log.Warn("API collection is enabled.")
|
||||
}
|
||||
|
||||
return &serviceCollector{
|
||||
Information: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "info"),
|
||||
"A metric with a constant '1' value labeled with service information",
|
||||
[]string{"name", "display_name", "process_id", "run_as"},
|
||||
nil,
|
||||
),
|
||||
State: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "state"),
|
||||
"The state of the service (State)",
|
||||
@@ -36,48 +71,249 @@ func NewserviceCollector() (Collector, error) {
|
||||
[]string{"name", "start_mode"},
|
||||
nil,
|
||||
),
|
||||
Status: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "status"),
|
||||
"The status of the service (Status)",
|
||||
[]string{"name", "status"},
|
||||
nil,
|
||||
),
|
||||
queryWhereClause: *serviceWhereClause,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *serviceCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Println("[ERROR] failed collecting service metrics:", desc, err)
|
||||
return err
|
||||
func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if *useAPI {
|
||||
if err := c.collectAPI(ch); err != nil {
|
||||
log.Error("failed collecting API service metrics:", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := c.collectWMI(ch); err != nil {
|
||||
log.Error("failed collecting WMI service metrics:", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_Service docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx
|
||||
type Win32_Service struct {
|
||||
Name string
|
||||
State string
|
||||
StartMode string
|
||||
DisplayName string
|
||||
Name string
|
||||
ProcessId uint32
|
||||
State string
|
||||
Status string
|
||||
StartMode string
|
||||
StartName *string
|
||||
}
|
||||
|
||||
func (c *serviceCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var (
|
||||
allStates = []string{
|
||||
"stopped",
|
||||
"start pending",
|
||||
"stop pending",
|
||||
"running",
|
||||
"continue pending",
|
||||
"pause pending",
|
||||
"paused",
|
||||
"unknown",
|
||||
}
|
||||
apiStateValues = map[uint]string{
|
||||
windows.SERVICE_CONTINUE_PENDING: "continue pending",
|
||||
windows.SERVICE_PAUSE_PENDING: "pause pending",
|
||||
windows.SERVICE_PAUSED: "paused",
|
||||
windows.SERVICE_RUNNING: "running",
|
||||
windows.SERVICE_START_PENDING: "start pending",
|
||||
windows.SERVICE_STOP_PENDING: "stop pending",
|
||||
windows.SERVICE_STOPPED: "stopped",
|
||||
}
|
||||
allStartModes = []string{
|
||||
"boot",
|
||||
"system",
|
||||
"auto",
|
||||
"manual",
|
||||
"disabled",
|
||||
}
|
||||
apiStartModeValues = map[uint32]string{
|
||||
windows.SERVICE_AUTO_START: "auto",
|
||||
windows.SERVICE_BOOT_START: "boot",
|
||||
windows.SERVICE_DEMAND_START: "manual",
|
||||
windows.SERVICE_DISABLED: "disabled",
|
||||
windows.SERVICE_SYSTEM_START: "system",
|
||||
}
|
||||
allStatuses = []string{
|
||||
"ok",
|
||||
"error",
|
||||
"degraded",
|
||||
"unknown",
|
||||
"pred fail",
|
||||
"starting",
|
||||
"stopping",
|
||||
"service",
|
||||
"stressed",
|
||||
"nonrecover",
|
||||
"no contact",
|
||||
"lost comm",
|
||||
}
|
||||
)
|
||||
|
||||
func (c *serviceCollector) collectWMI(ch chan<- prometheus.Metric) error {
|
||||
var dst []Win32_Service
|
||||
q := wmi.CreateQuery(&dst, "")
|
||||
q := queryAllWhere(&dst, c.queryWhereClause)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
for _, service := range dst {
|
||||
pid := fmt.Sprintf("%d", uint64(service.ProcessId))
|
||||
|
||||
runAs := ""
|
||||
if service.StartName != nil {
|
||||
runAs = *service.StartName
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.State,
|
||||
c.Information,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
strings.ToLower(service.Name),
|
||||
strings.ToLower(service.State),
|
||||
service.DisplayName,
|
||||
pid,
|
||||
runAs,
|
||||
)
|
||||
|
||||
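// Emit one gauge per possible service state; only the gauge for the service's current state is 1.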
for _, state := range allStates {
|
||||
isCurrentState := 0.0
|
||||
if state == strings.ToLower(service.State) {
|
||||
isCurrentState = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.State,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentState,
|
||||
strings.ToLower(service.Name),
|
||||
state,
|
||||
)
|
||||
}
|
||||
|
||||
for _, startMode := range allStartModes {
|
||||
isCurrentStartMode := 0.0
|
||||
if startMode == strings.ToLower(service.StartMode) {
|
||||
isCurrentStartMode = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StartMode,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentStartMode,
|
||||
strings.ToLower(service.Name),
|
||||
startMode,
|
||||
)
|
||||
}
|
||||
|
||||
for _, status := range allStatuses {
|
||||
isCurrentStatus := 0.0
|
||||
if status == strings.ToLower(service.Status) {
|
||||
isCurrentStatus = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Status,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentStatus,
|
||||
strings.ToLower(service.Name),
|
||||
status,
|
||||
)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *serviceCollector) collectAPI(ch chan<- prometheus.Metric) error {
|
||||
svcmgrConnection, err := mgr.Connect()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer svcmgrConnection.Disconnect() //nolint:errcheck
|
||||
|
||||
// List All Services from the Services Manager.
|
||||
serviceList, err := svcmgrConnection.ListServices()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Iterate through the Services List.
|
||||
for _, service := range serviceList {
|
||||
// Get UTF16 service name.
|
||||
serviceName, err := syscall.UTF16PtrFromString(service)
|
||||
if err != nil {
|
||||
log.Warnf("Service %s get name error: %#v", service, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Open connection for service handler.
|
||||
serviceHandle, err := windows.OpenService(svcmgrConnection.Handle, serviceName, windows.GENERIC_READ)
|
||||
if err != nil {
|
||||
log.Warnf("Open service %s error: %#v", service, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create handle for each service.
|
||||
serviceManager := &mgr.Service{Name: service, Handle: serviceHandle}
|
||||
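// Note: this defer runs when collectAPI returns, so service handles remain open until the whole scrape of the service list completes.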
defer serviceManager.Close()
|
||||
|
||||
// Get Service Configuration.
|
||||
serviceConfig, err := serviceManager.Config()
|
||||
if err != nil {
|
||||
log.Warnf("Get ervice %s config error: %#v", service, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Get Service Current Status.
|
||||
serviceStatus, err := serviceManager.Query()
|
||||
if err != nil {
|
||||
log.Warnf("Get service %s status error: %#v", service, err)
|
||||
continue
|
||||
}
|
||||
|
||||
pid := fmt.Sprintf("%d", uint64(serviceStatus.ProcessId))
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StartMode,
|
||||
c.Information,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
strings.ToLower(service.Name),
|
||||
strings.ToLower(service.StartMode),
|
||||
strings.ToLower(service),
|
||||
serviceConfig.DisplayName,
|
||||
pid,
|
||||
serviceConfig.ServiceStartName,
|
||||
)
|
||||
|
||||
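// apiStateValues is a map, so iteration order is not fixed; each known state is still emitted exactly once per scrape, with the current state set to 1.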
for _, state := range apiStateValues {
|
||||
isCurrentState := 0.0
|
||||
if state == apiStateValues[uint(serviceStatus.State)] {
|
||||
isCurrentState = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.State,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentState,
|
||||
strings.ToLower(service),
|
||||
state,
|
||||
)
|
||||
}
|
||||
|
||||
for _, startMode := range apiStartModeValues {
|
||||
isCurrentStartMode := 0.0
|
||||
if startMode == apiStartModeValues[serviceConfig.StartType] {
|
||||
isCurrentStartMode = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StartMode,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentStartMode,
|
||||
strings.ToLower(service),
|
||||
startMode,
|
||||
)
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
collector/service_test.go (new file, 9 lines)
@@ -0,0 +1,9 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkServiceCollector(b *testing.B) {
|
||||
benchmarkCollector(b, "service", NewserviceCollector)
|
||||
}
|
||||
collector/smtp.go (new file, 694 lines)
@@ -0,0 +1,694 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/prometheus-community/windows_exporter/log"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("smtp", NewSMTPCollector, "SMTP Server")
|
||||
}
|
||||
|
||||
var (
|
||||
serverWhitelist = kingpin.Flag("collector.smtp.server-whitelist", "Regexp of virtual servers to whitelist. Server name must both match whitelist and not match blacklist to be included.").Default(".+").String()
|
||||
serverBlacklist = kingpin.Flag("collector.smtp.server-blacklist", "Regexp of virtual servers to blacklist. Server name must both match whitelist and not match blacklist to be included.").String()
|
||||
)
|
||||
|
||||
type SMTPCollector struct {
|
||||
BadmailedMessagesBadPickupFileTotal *prometheus.Desc
|
||||
BadmailedMessagesGeneralFailureTotal *prometheus.Desc
|
||||
BadmailedMessagesHopCountExceededTotal *prometheus.Desc
|
||||
BadmailedMessagesNDROfDSNTotal *prometheus.Desc
|
||||
BadmailedMessagesNoRecipientsTotal *prometheus.Desc
|
||||
BadmailedMessagesTriggeredViaEventTotal *prometheus.Desc
|
||||
BytesSentTotal *prometheus.Desc
|
||||
BytesReceivedTotal *prometheus.Desc
|
||||
CategorizerQueueLength *prometheus.Desc
|
||||
ConnectionErrorsTotal *prometheus.Desc
|
||||
CurrentMessagesInLocalDelivery *prometheus.Desc
|
||||
DirectoryDropsTotal *prometheus.Desc
|
||||
DNSQueriesTotal *prometheus.Desc
|
||||
DSNFailuresTotal *prometheus.Desc
|
||||
ETRNMessagesTotal *prometheus.Desc
|
||||
InboundConnectionsCurrent *prometheus.Desc
|
||||
InboundConnectionsTotal *prometheus.Desc
|
||||
LocalQueueLength *prometheus.Desc
|
||||
LocalRetryQueueLength *prometheus.Desc
|
||||
MailFilesOpen *prometheus.Desc
|
||||
MessageBytesReceivedTotal *prometheus.Desc
|
||||
MessageBytesSentTotal *prometheus.Desc
|
||||
MessageDeliveryRetriesTotal *prometheus.Desc
|
||||
MessageSendRetriesTotal *prometheus.Desc
|
||||
MessagesCurrentlyUndeliverable *prometheus.Desc
|
||||
MessagesDeliveredTotal *prometheus.Desc
|
||||
MessagesPendingRouting *prometheus.Desc
|
||||
MessagesReceivedTotal *prometheus.Desc
|
||||
MessagesRefusedForAddressObjectsTotal *prometheus.Desc
|
||||
MessagesRefusedForMailObjectsTotal *prometheus.Desc
|
||||
MessagesRefusedForSizeTotal *prometheus.Desc
|
||||
MessagesSentTotal *prometheus.Desc
|
||||
MessagesSubmittedTotal *prometheus.Desc
|
||||
NDRsGeneratedTotal *prometheus.Desc
|
||||
OutboundConnectionsCurrent *prometheus.Desc
|
||||
OutboundConnectionsRefusedTotal *prometheus.Desc
|
||||
OutboundConnectionsTotal *prometheus.Desc
|
||||
QueueFilesOpen *prometheus.Desc
|
||||
PickupDirectoryMessagesRetrievedTotal *prometheus.Desc
|
||||
RemoteQueueLength *prometheus.Desc
|
||||
RemoteRetryQueueLength *prometheus.Desc
|
||||
RoutingTableLookupsTotal *prometheus.Desc
|
||||
|
||||
serverWhitelistPattern *regexp.Regexp
|
||||
serverBlacklistPattern *regexp.Regexp
|
||||
}
|
||||
|
||||
func NewSMTPCollector() (Collector, error) {
|
||||
log.Info("smtp collector is in an experimental state! Metrics for this collector have not been tested.")
|
||||
const subsystem = "smtp"
|
||||
|
||||
return &SMTPCollector{
|
||||
BadmailedMessagesBadPickupFileTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_bad_pickup_file_total"),
|
||||
"Total number of malformed pickup messages sent to badmail",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
BadmailedMessagesGeneralFailureTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_general_failure_total"),
|
||||
"Total number of messages sent to badmail for reasons not associated with a specific counter",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
BadmailedMessagesHopCountExceededTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_hop_count_exceeded_total"),
|
||||
"Total number of messages sent to badmail because they had exceeded the maximum hop count",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
BadmailedMessagesNDROfDSNTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_ndr_of_dns_total"),
|
||||
"Total number of Delivery Status Notifications sent to badmail because they could not be delivered",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
BadmailedMessagesNoRecipientsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_no_recipients_total"),
|
||||
"Total number of messages sent to badmail because they had no recipients",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
BadmailedMessagesTriggeredViaEventTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "badmailed_messages_triggered_via_event_total"),
|
||||
"Total number of messages sent to badmail at the request of a server event sink",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
BytesSentTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bytes_sent_total"),
|
||||
"Total number of bytes sent",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
BytesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bytes_received_total"),
|
||||
"Total number of bytes received",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
CategorizerQueueLength: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "categorizer_queue_length"),
|
||||
"Number of messages in the categorizer queue",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
ConnectionErrorsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connection_errors_total"),
|
||||
"Total number of connection errors",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
CurrentMessagesInLocalDelivery: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "current_messages_in_local_delivery"),
|
||||
"Number of messages that are currently being processed by a server event sink for local delivery",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
DirectoryDropsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "directory_drops_total"),
|
||||
"Total number of messages placed in a drop directory",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
DSNFailuresTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dsn_failures_total"),
|
||||
"Total number of failed DSN generation attempts",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
DNSQueriesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dns_queries_total"),
|
||||
"Total number of DNS lookups",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
ETRNMessagesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "etrn_messages_total"),
|
||||
"Total number of ETRN messages received by the server",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
InboundConnectionsCurrent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "inbound_connections_current"),
|
||||
"Total number of connections currently inbound",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
InboundConnectionsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "inbound_connections_total"),
|
||||
"Total number of inbound connections received",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
LocalQueueLength: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "local_queue_length"),
|
||||
"Number of messages in the local queue",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
LocalRetryQueueLength: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "local_retry_queue_length"),
|
||||
"Number of messages in the local retry queue",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MailFilesOpen: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mail_files_open"),
|
||||
"Number of handles to open mail files",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessageBytesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "message_bytes_received_total"),
|
||||
"Total number of bytes received in messages",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessageBytesSentTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "message_bytes_sent_total"),
|
||||
"Total number of bytes sent in messages",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessageDeliveryRetriesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "message_delivery_retries_total"),
|
||||
"Total number of local deliveries that were retried",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessageSendRetriesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "message_send_retries_total"),
|
||||
"Total number of outbound message sends that were retried",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessagesCurrentlyUndeliverable: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_currently_undeliverable"),
|
||||
"Number of messages that have been reported as currently undeliverable by routing",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessagesDeliveredTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_delivered_total"),
|
||||
"Total number of messages delivered to local mailboxes",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessagesPendingRouting: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_pending_routing"),
|
||||
"Number of messages that have been categorized but not routed",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessagesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_received_total"),
|
||||
"Total number of inbound messages accepted",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessagesRefusedForAddressObjectsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_refused_for_address_objects_total"),
|
||||
"Total number of messages refused due to no address objects",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessagesRefusedForMailObjectsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_refused_for_mail_objects_total"),
|
||||
"Total number of messages refused due to no mail objects",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessagesRefusedForSizeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_refused_for_size_total"),
|
||||
"Total number of messages rejected because they were too big",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessagesSentTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_sent_total"),
|
||||
"Total number of outbound messages sent",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
MessagesSubmittedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_submitted_total"),
|
||||
"Total number of messages submitted to queuing for delivery",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
NDRsGeneratedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "ndrs_generated_total"),
|
||||
"Total number of non-delivery reports that have been generated",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
OutboundConnectionsCurrent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "outbound_connections_current"),
|
||||
"Number of connections currently outbound",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
OutboundConnectionsRefusedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "outbound_connections_refused_total"),
|
||||
"Total number of connection attempts refused by remote sites",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
OutboundConnectionsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "outbound_connections_total"),
|
||||
"Total number of outbound connections attempted",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
PickupDirectoryMessagesRetrievedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pickup_directory_messages_retrieved_total"),
|
||||
"Total number of messages retrieved from the mail pick-up directory",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
QueueFilesOpen: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "queue_files_open"),
|
||||
"Number of handles to open queue files",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
RemoteQueueLength: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "remote_queue_length"),
|
||||
"Number of messages in the remote queue",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
RemoteRetryQueueLength: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "remote_retry_queue_length"),
|
||||
"Number of messages in the retry queue for remote delivery",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
RoutingTableLookupsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "routing_table_lookups_total"),
|
||||
"Total number of routing table lookups",
|
||||
[]string{"site"},
|
||||
nil,
|
||||
),
|
||||
|
||||
serverWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *serverWhitelist)),
|
||||
serverBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *serverBlacklist)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *SMTPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting smtp metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Perflib: "SMTP Server"
|
||||
type PerflibSMTPServer struct {
|
||||
Name string
|
||||
|
||||
BadmailedMessagesBadPickupFileTotal float64 `perflib:"Badmailed Messages (Bad Pickup File)"`
|
||||
BadmailedMessagesGeneralFailureTotal float64 `perflib:"Badmailed Messages (General Failure)"`
|
||||
BadmailedMessagesHopCountExceededTotal float64 `perflib:"Badmailed Messages (Hop Count Exceeded)"`
|
||||
BadmailedMessagesNDROfDSNTotal float64 `perflib:"Badmailed Messages (NDR of DSN)"`
|
||||
BadmailedMessagesNoRecipientsTotal float64 `perflib:"Badmailed Messages (No Recipients)"`
|
||||
BadmailedMessagesTriggeredViaEventTotal float64 `perflib:"Badmailed Messages (Triggered via Event)"`
|
||||
BytesSentTotal float64 `perflib:"Bytes Sent Total"`
|
||||
BytesReceivedTotal float64 `perflib:"Bytes Received Total"`
|
||||
CategorizerQueueLength float64 `perflib:"Categorizer Queue Length"`
|
||||
ConnectionErrorsTotal float64 `perflib:"Total Connection Errors"`
|
||||
CurrentMessagesInLocalDelivery float64 `perflib:"Current Messages in Local Delivery"`
|
||||
DirectoryDropsTotal float64 `perflib:"Directory Drops Total"`
|
||||
DNSQueriesTotal float64 `perflib:"DNS Queries Total"`
|
||||
DSNFailuresTotal float64 `perflib:"Total DSN Failures"`
|
||||
ETRNMessagesTotal float64 `perflib:"ETRN Messages Total"`
|
||||
InboundConnectionsCurrent float64 `perflib:"Inbound Connections Current"`
|
||||
InboundConnectionsTotal float64 `perflib:"Inbound Connections Total"`
|
||||
LocalQueueLength float64 `perflib:"Local Queue Length"`
|
||||
LocalRetryQueueLength float64 `perflib:"Local Retry Queue Length"`
|
||||
MailFilesOpen float64 `perflib:"Number of MailFiles Open"`
|
||||
MessageBytesReceivedTotal float64 `perflib:"Message Bytes Received Total"`
|
||||
MessageBytesSentTotal float64 `perflib:"Message Bytes Sent Total"`
|
||||
MessageDeliveryRetriesTotal float64 `perflib:"Message Delivery Retries"`
|
||||
MessageSendRetriesTotal float64 `perflib:"Message Send Retries"`
|
||||
MessagesCurrentlyUndeliverable float64 `perflib:"Messages Currently Undeliverable"`
|
||||
MessagesDeliveredTotal float64 `perflib:"Messages Delivered Total"`
|
||||
MessagesPendingRouting float64 `perflib:"Messages Pending Routing"`
|
||||
MessagesReceivedTotal float64 `perflib:"Messages Received Total"`
|
||||
MessagesRefusedForAddressObjectsTotal float64 `perflib:"Messages Refused for Address Objects"`
|
||||
MessagesRefusedForMailObjectsTotal float64 `perflib:"Messages Refused for Mail Objects"`
|
||||
MessagesRefusedForSizeTotal float64 `perflib:"Messages Refused for Size"`
|
||||
MessagesSentTotal float64 `perflib:"Messages Sent Total"`
|
||||
MessagesSubmittedTotal float64 `perflib:"Total messages submitted"`
|
||||
NDRsGeneratedTotal float64 `perflib:"NDRs Generated"`
|
||||
OutboundConnectionsCurrent float64 `perflib:"Outbound Connections Current"`
|
||||
OutboundConnectionsRefusedTotal float64 `perflib:"Outbound Connections Refused"`
|
||||
OutboundConnectionsTotal float64 `perflib:"Outbound Connections Total"`
|
||||
QueueFilesOpen float64 `perflib:"Number of QueueFiles Open"`
|
||||
PickupDirectoryMessagesRetrievedTotal float64 `perflib:"Pickup Directory Messages Retrieved Total"`
|
||||
RemoteQueueLength float64 `perflib:"Remote Queue Length"`
|
||||
RemoteRetryQueueLength float64 `perflib:"Remote Retry Queue Length"`
|
||||
RoutingTableLookupsTotal float64 `perflib:"Routing Table Lookups Total"`
|
||||
}
|
||||
|
||||
func (c *SMTPCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []PerflibSMTPServer
|
||||
if err := unmarshalObject(ctx.perfObjects["SMTP Server"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, server := range dst {
|
||||
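// Skip the aggregated _Total instance and any virtual server excluded by the whitelist/blacklist flags.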
if server.Name == "_Total" ||
|
||||
c.serverBlacklistPattern.MatchString(server.Name) ||
|
||||
!c.serverWhitelistPattern.MatchString(server.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BadmailedMessagesBadPickupFileTotal,
|
||||
prometheus.CounterValue,
|
||||
server.BadmailedMessagesBadPickupFileTotal,
|
||||
server.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
c.BadmailedMessagesGeneralFailureTotal,
prometheus.CounterValue,
server.BadmailedMessagesGeneralFailureTotal,
server.Name,
)

ch <- prometheus.MustNewConstMetric(
|
||||
c.BadmailedMessagesHopCountExceededTotal,
|
||||
prometheus.CounterValue,
|
||||
server.BadmailedMessagesHopCountExceededTotal,
|
||||
server.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BadmailedMessagesNDROfDSNTotal,
|
||||
prometheus.CounterValue,
|
||||
server.BadmailedMessagesNDROfDSNTotal,
|
||||
server.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BadmailedMessagesNoRecipientsTotal,
|
||||
prometheus.CounterValue,
|
||||
server.BadmailedMessagesNoRecipientsTotal,
|
||||
server.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
			c.BadmailedMessagesTriggeredViaEventTotal,
			prometheus.CounterValue,
			server.BadmailedMessagesTriggeredViaEventTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.BytesSentTotal,
			prometheus.CounterValue,
			server.BytesSentTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.BytesReceivedTotal,
			prometheus.CounterValue,
			server.BytesReceivedTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.CategorizerQueueLength,
			prometheus.GaugeValue,
			server.CategorizerQueueLength,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.ConnectionErrorsTotal,
			prometheus.CounterValue,
			server.ConnectionErrorsTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.CurrentMessagesInLocalDelivery,
			prometheus.GaugeValue,
			server.CurrentMessagesInLocalDelivery,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.DirectoryDropsTotal,
			prometheus.CounterValue,
			server.DirectoryDropsTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.DSNFailuresTotal,
			prometheus.CounterValue,
			server.DSNFailuresTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.DNSQueriesTotal,
			prometheus.CounterValue,
			server.DNSQueriesTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.ETRNMessagesTotal,
			prometheus.CounterValue,
			server.ETRNMessagesTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.InboundConnectionsTotal,
			prometheus.CounterValue,
			server.InboundConnectionsTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.InboundConnectionsCurrent,
			prometheus.GaugeValue,
			server.InboundConnectionsCurrent,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.LocalQueueLength,
			prometheus.GaugeValue,
			server.LocalQueueLength,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.LocalRetryQueueLength,
			prometheus.GaugeValue,
			server.LocalRetryQueueLength,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MailFilesOpen,
			prometheus.GaugeValue,
			server.MailFilesOpen,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessageBytesReceivedTotal,
			prometheus.CounterValue,
			server.MessageBytesReceivedTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessageBytesSentTotal,
			prometheus.CounterValue,
			server.MessageBytesSentTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessageDeliveryRetriesTotal,
			prometheus.CounterValue,
			server.MessageDeliveryRetriesTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessageSendRetriesTotal,
			prometheus.CounterValue,
			server.MessageSendRetriesTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessagesCurrentlyUndeliverable,
			prometheus.GaugeValue,
			server.MessagesCurrentlyUndeliverable,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessagesDeliveredTotal,
			prometheus.CounterValue,
			server.MessagesDeliveredTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessagesPendingRouting,
			prometheus.GaugeValue,
			server.MessagesPendingRouting,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessagesReceivedTotal,
			prometheus.CounterValue,
			server.MessagesReceivedTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessagesRefusedForAddressObjectsTotal,
			prometheus.CounterValue,
			server.MessagesRefusedForAddressObjectsTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessagesRefusedForMailObjectsTotal,
			prometheus.CounterValue,
			server.MessagesRefusedForMailObjectsTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessagesRefusedForSizeTotal,
			prometheus.CounterValue,
			server.MessagesRefusedForSizeTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessagesSentTotal,
			prometheus.CounterValue,
			server.MessagesSentTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.MessagesSubmittedTotal,
			prometheus.CounterValue,
			server.MessagesSubmittedTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.NDRsGeneratedTotal,
			prometheus.CounterValue,
			server.NDRsGeneratedTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.OutboundConnectionsCurrent,
			prometheus.GaugeValue,
			server.OutboundConnectionsCurrent,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.OutboundConnectionsRefusedTotal,
			prometheus.CounterValue,
			server.OutboundConnectionsRefusedTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.OutboundConnectionsTotal,
			prometheus.CounterValue,
			server.OutboundConnectionsTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.QueueFilesOpen,
			prometheus.GaugeValue,
			server.QueueFilesOpen,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.PickupDirectoryMessagesRetrievedTotal,
			prometheus.CounterValue,
			server.PickupDirectoryMessagesRetrievedTotal,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.RemoteQueueLength,
			prometheus.GaugeValue,
			server.RemoteQueueLength,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.RemoteRetryQueueLength,
			prometheus.GaugeValue,
			server.RemoteRetryQueueLength,
			server.Name,
		)

		ch <- prometheus.MustNewConstMetric(
			c.RoutingTableLookupsTotal,
			prometheus.CounterValue,
			server.RoutingTableLookupsTotal,
			server.Name,
		)

	}
	return nil, nil
}
collector/smtp_test.go (new Normal file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkSmtpCollector(b *testing.B) {
	benchmarkCollector(b, "smtp", NewSMTPCollector)
}
@@ -1,17 +1,15 @@
// returns data points from Win32_PerfRawData_PerfOS_System class
// https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp
//go:build windows
// +build windows

package collector

import (
	"log"

	"github.com/StackExchange/wmi"
	"github.com/prometheus-community/windows_exporter/log"
	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	Factories["system"] = NewSystemCollector
	registerCollector("system", NewSystemCollector, "System")
}

// A SystemCollector is a Prometheus collector for WMI metrics
@@ -31,37 +29,37 @@ func NewSystemCollector() (Collector, error) {
	return &SystemCollector{
		ContextSwitchesTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "context_switches_total"),
			"PerfOS_System.ContextSwitchesPersec",
			"Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)",
			nil,
			nil,
		),
		ExceptionDispatchesTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "exception_dispatches_total"),
			"PerfOS_System.ExceptionDispatchesPersec",
			"Total number of exceptions dispatched (WMI source: PerfOS_System.ExceptionDispatchesPersec)",
			nil,
			nil,
		),
		ProcessorQueueLength: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "processor_queue_length"),
			"PerfOS_System.ProcessorQueueLength",
			"Length of processor queue (WMI source: PerfOS_System.ProcessorQueueLength)",
			nil,
			nil,
		),
		SystemCallsTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "system_calls_total"),
			"PerfOS_System.SystemCallsPersec",
			"Total number of system calls (WMI source: PerfOS_System.SystemCallsPersec)",
			nil,
			nil,
		),
		SystemUpTime: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "system_up_time"),
			"SystemUpTime/Frequency_Object",
			"System boot time (WMI source: PerfOS_System.SystemUpTime)",
			nil,
			nil,
		),
		Threads: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "threads"),
			"PerfOS_System.Threads",
			"Current number of threads (WMI source: PerfOS_System.Threads)",
			nil,
			nil,
		),
@@ -70,61 +68,60 @@ func NewSystemCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *SystemCollector) Collect(ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Println("[ERROR] failed collecting os metrics:", desc, err)
func (c *SystemCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ctx, ch); err != nil {
		log.Error("failed collecting system metrics:", desc, err)
		return err
	}
	return nil
}

type Win32_PerfRawData_PerfOS_System struct {
	ContextSwitchesPersec     uint32
	ExceptionDispatchesPersec uint32
	Frequency_Object          uint64
	ProcessorQueueLength      uint32
	SystemCallsPersec         uint32
	SystemUpTime              uint64
	Threads                   uint32
	Timestamp_Object          uint64
// Win32_PerfRawData_PerfOS_System docs:
// - https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp
type system struct {
	ContextSwitchesPersec     float64 `perflib:"Context Switches/sec"`
	ExceptionDispatchesPersec float64 `perflib:"Exception Dispatches/sec"`
	ProcessorQueueLength      float64 `perflib:"Processor Queue Length"`
	SystemCallsPersec         float64 `perflib:"System Calls/sec"`
	SystemUpTime              float64 `perflib:"System Up Time"`
	Threads                   float64 `perflib:"Threads"`
}

func (c *SystemCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []Win32_PerfRawData_PerfOS_System
	if err := wmi.Query(wmi.CreateQuery(&dst, ""), &dst); err != nil {
func (c *SystemCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []system
	if err := unmarshalObject(ctx.perfObjects["System"], &dst); err != nil {
		return nil, err
	}

	ch <- prometheus.MustNewConstMetric(
		c.ContextSwitchesTotal,
		prometheus.GaugeValue,
		float64(dst[0].ContextSwitchesPersec),
		prometheus.CounterValue,
		dst[0].ContextSwitchesPersec,
	)
	ch <- prometheus.MustNewConstMetric(
		c.ExceptionDispatchesTotal,
		prometheus.GaugeValue,
		float64(dst[0].ExceptionDispatchesPersec),
		prometheus.CounterValue,
		dst[0].ExceptionDispatchesPersec,
	)
	ch <- prometheus.MustNewConstMetric(
		c.ProcessorQueueLength,
		prometheus.GaugeValue,
		float64(dst[0].ProcessorQueueLength),
		dst[0].ProcessorQueueLength,
	)
	ch <- prometheus.MustNewConstMetric(
		c.SystemCallsTotal,
		prometheus.GaugeValue,
		float64(dst[0].SystemCallsPersec),
		prometheus.CounterValue,
		dst[0].SystemCallsPersec,
	)
	ch <- prometheus.MustNewConstMetric(
		c.SystemUpTime,
		prometheus.GaugeValue,
		// convert from Windows timestamp (1 jan 1601) to unix timestamp (1 jan 1970)
		float64(dst[0].SystemUpTime-116444736000000000)/float64(dst[0].Frequency_Object),
		dst[0].SystemUpTime,
	)
	ch <- prometheus.MustNewConstMetric(
		c.Threads,
		prometheus.GaugeValue,
		float64(dst[0].Threads),
		dst[0].Threads,
	)
	return nil, nil
}
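The system.go hunk above interleaves the removed WMI-based lines with the added perflib-based ones, which makes the new shape hard to follow. Below is a minimal sketch of the resulting collect path, not the literal file: ScrapeContext, unmarshalObject and the perflib struct tags are taken from the diff, while the trimmed-down struct and the single metric are illustrative only.

// Sketch of the perflib-based collection pattern introduced above (illustrative, not the actual file).
type systemSketch struct {
	// The tag names the raw counter exactly as perflib exposes it.
	ContextSwitchesPersec float64 `perflib:"Context Switches/sec"`
}

func (c *SystemCollector) collectSketch(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	var dst []systemSketch
	// The scrape context already holds the scraped "System" object; no WMI query is issued here.
	if err := unmarshalObject(ctx.perfObjects["System"], &dst); err != nil {
		return err
	}
	// The raw "/sec" field is exported as a counter, matching the Gauge-to-Counter change in the diff.
	ch <- prometheus.MustNewConstMetric(
		c.ContextSwitchesTotal,
		prometheus.CounterValue,
		dst[0].ContextSwitchesPersec,
	)
	return nil
}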
collector/system_test.go (new Normal file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkSystemCollector(b *testing.B) {
	benchmarkCollector(b, "system", NewSystemCollector)
}
collector/tcp.go (new Normal file, 192 lines)
@@ -0,0 +1,192 @@
//go:build windows
// +build windows

package collector

import (
	"github.com/prometheus-community/windows_exporter/log"
	"github.com/prometheus/client_golang/prometheus"
)

func init() {
	registerCollector("tcp", NewTCPCollector, "TCPv4", "TCPv6")
}

// A TCPCollector is a Prometheus collector for WMI Win32_PerfRawData_Tcpip_TCPv{4,6} metrics
type TCPCollector struct {
	ConnectionFailures         *prometheus.Desc
	ConnectionsActive          *prometheus.Desc
	ConnectionsEstablished     *prometheus.Desc
	ConnectionsPassive         *prometheus.Desc
	ConnectionsReset           *prometheus.Desc
	SegmentsTotal              *prometheus.Desc
	SegmentsReceivedTotal      *prometheus.Desc
	SegmentsRetransmittedTotal *prometheus.Desc
	SegmentsSentTotal          *prometheus.Desc
}

// NewTCPCollector ...
func NewTCPCollector() (Collector, error) {
	const subsystem = "tcp"

	return &TCPCollector{
		ConnectionFailures: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "connection_failures_total"),
			"(TCP.ConnectionFailures)",
			[]string{"af"},
			nil,
		),
		ConnectionsActive: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "connections_active_total"),
			"(TCP.ConnectionsActive)",
			[]string{"af"},
			nil,
		),
		ConnectionsEstablished: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "connections_established"),
			"(TCP.ConnectionsEstablished)",
			[]string{"af"},
			nil,
		),
		ConnectionsPassive: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "connections_passive_total"),
			"(TCP.ConnectionsPassive)",
			[]string{"af"},
			nil,
		),
		ConnectionsReset: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "connections_reset_total"),
			"(TCP.ConnectionsReset)",
			[]string{"af"},
			nil,
		),
		SegmentsTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "segments_total"),
			"(TCP.SegmentsTotal)",
			[]string{"af"},
			nil,
		),
		SegmentsReceivedTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "segments_received_total"),
			"(TCP.SegmentsReceivedTotal)",
			[]string{"af"},
			nil,
		),
		SegmentsRetransmittedTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "segments_retransmitted_total"),
			"(TCP.SegmentsRetransmittedTotal)",
			[]string{"af"},
			nil,
		),
		SegmentsSentTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "segments_sent_total"),
			"(TCP.SegmentsSentTotal)",
			[]string{"af"},
			nil,
		),
	}, nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *TCPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ctx, ch); err != nil {
		log.Error("failed collecting tcp metrics:", desc, err)
		return err
	}
	return nil
}

// Win32_PerfRawData_Tcpip_TCPv4 docs
// - https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx
// The TCPv6 performance object uses the same fields.
type tcp struct {
	ConnectionFailures          float64 `perflib:"Connection Failures"`
	ConnectionsActive           float64 `perflib:"Connections Active"`
	ConnectionsEstablished      float64 `perflib:"Connections Established"`
	ConnectionsPassive          float64 `perflib:"Connections Passive"`
	ConnectionsReset            float64 `perflib:"Connections Reset"`
	SegmentsPersec              float64 `perflib:"Segments/sec"`
	SegmentsReceivedPersec      float64 `perflib:"Segments Received/sec"`
	SegmentsRetransmittedPersec float64 `perflib:"Segments Retransmitted/sec"`
	SegmentsSentPersec          float64 `perflib:"Segments Sent/sec"`
}

func writeTCPCounters(metrics tcp, labels []string, c *TCPCollector, ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(
		c.ConnectionFailures,
		prometheus.CounterValue,
		metrics.ConnectionFailures,
		labels...,
	)
	ch <- prometheus.MustNewConstMetric(
		c.ConnectionsActive,
		prometheus.CounterValue,
		metrics.ConnectionsActive,
		labels...,
	)
	ch <- prometheus.MustNewConstMetric(
		c.ConnectionsEstablished,
		prometheus.GaugeValue,
		metrics.ConnectionsEstablished,
		labels...,
	)
	ch <- prometheus.MustNewConstMetric(
		c.ConnectionsPassive,
		prometheus.CounterValue,
		metrics.ConnectionsPassive,
		labels...,
	)
	ch <- prometheus.MustNewConstMetric(
		c.ConnectionsReset,
		prometheus.CounterValue,
		metrics.ConnectionsReset,
		labels...,
	)
	ch <- prometheus.MustNewConstMetric(
		c.SegmentsTotal,
		prometheus.CounterValue,
		metrics.SegmentsPersec,
		labels...,
	)
	ch <- prometheus.MustNewConstMetric(
		c.SegmentsReceivedTotal,
		prometheus.CounterValue,
		metrics.SegmentsReceivedPersec,
		labels...,
	)
	ch <- prometheus.MustNewConstMetric(
		c.SegmentsRetransmittedTotal,
		prometheus.CounterValue,
		metrics.SegmentsRetransmittedPersec,
		labels...,
	)
	ch <- prometheus.MustNewConstMetric(
		c.SegmentsSentTotal,
		prometheus.CounterValue,
		metrics.SegmentsSentPersec,
		labels...,
	)
}

func (c *TCPCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []tcp

	// TCPv4 counters
	if err := unmarshalObject(ctx.perfObjects["TCPv4"], &dst); err != nil {
		return nil, err
	}
	if len(dst) != 0 {
		writeTCPCounters(dst[0], []string{"ipv4"}, c, ch)
	}

	// TCPv6 counters
	if err := unmarshalObject(ctx.perfObjects["TCPv6"], &dst); err != nil {
		return nil, err
	}
	if len(dst) != 0 {
		writeTCPCounters(dst[0], []string{"ipv6"}, c, ch)
	}

	return nil, nil
}
collector/tcp_test.go (new Normal file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkTCPCollector(b *testing.B) {
	benchmarkCollector(b, "tcp", NewTCPCollector)
}
collector/terminal_services.go (new Normal file, 404 lines)
@@ -0,0 +1,404 @@
//go:build windows
// +build windows

package collector

import (
	"errors"
	"strings"

	"github.com/StackExchange/wmi"
	"github.com/prometheus-community/windows_exporter/log"
	"github.com/prometheus/client_golang/prometheus"
)

const ConnectionBrokerFeatureID uint32 = 133

func init() {
	registerCollector("terminal_services", NewTerminalServicesCollector, "Terminal Services", "Terminal Services Session", "Remote Desktop Connection Broker Counterset")
}

var (
	connectionBrokerEnabled = isConnectionBrokerServer()
)

type Win32_ServerFeature struct {
	ID uint32
}

func isConnectionBrokerServer() bool {
	var dst []Win32_ServerFeature
	q := queryAll(&dst)
	if err := wmi.Query(q, &dst); err != nil {
		return false
	}
	for _, d := range dst {
		if d.ID == ConnectionBrokerFeatureID {
			return true
		}
	}
	log.Debug("host is not a connection broker, skipping Connection Broker performance metrics.")
	return false
}

// A TerminalServicesCollector is a Prometheus collector for WMI
// Win32_PerfRawData_LocalSessionManager_TerminalServices & Win32_PerfRawData_TermService_TerminalServicesSession metrics
// https://docs.microsoft.com/en-us/previous-versions/aa394344(v%3Dvs.85)
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_localsessionmanager_terminalservices/
type TerminalServicesCollector struct {
	LocalSessionCount           *prometheus.Desc
	ConnectionBrokerPerformance *prometheus.Desc
	HandleCount                 *prometheus.Desc
	PageFaultsPersec            *prometheus.Desc
	PageFileBytes               *prometheus.Desc
	PageFileBytesPeak           *prometheus.Desc
	PercentPrivilegedTime       *prometheus.Desc
	PercentProcessorTime        *prometheus.Desc
	PercentUserTime             *prometheus.Desc
	PoolNonpagedBytes           *prometheus.Desc
	PoolPagedBytes              *prometheus.Desc
	PrivateBytes                *prometheus.Desc
	ThreadCount                 *prometheus.Desc
	VirtualBytes                *prometheus.Desc
	VirtualBytesPeak            *prometheus.Desc
	WorkingSet                  *prometheus.Desc
	WorkingSetPeak              *prometheus.Desc
}

// NewTerminalServicesCollector ...
func NewTerminalServicesCollector() (Collector, error) {
	const subsystem = "terminal_services"
	return &TerminalServicesCollector{
		LocalSessionCount: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "local_session_count"),
			"Number of Terminal Services sessions",
			[]string{"session"},
			nil,
		),
		ConnectionBrokerPerformance: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "connection_broker_performance_total"),
			"The total number of connections handled by the Connection Brokers since the service started.",
			[]string{"connection"},
			nil,
		),
		HandleCount: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "handles"),
			"Total number of handles currently opened by this process. This number is the sum of the handles currently opened by each thread in this process.",
			[]string{"session_name"},
			nil,
		),
		PageFaultsPersec: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "page_fault_total"),
			"Rate at which page faults occur in the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. The page may not be retrieved from disk if it is on the standby list and therefore already in main memory. The page also may not be retrieved if it is in use by another process which shares the page.",
			[]string{"session_name"},
			nil,
		),
		PageFileBytes: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "page_file_bytes"),
			"Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.",
			[]string{"session_name"},
			nil,
		),
		PageFileBytesPeak: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "page_file_bytes_peak"),
			"Maximum number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.",
			[]string{"session_name"},
			nil,
		),
		PercentPrivilegedTime: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "privileged_time_seconds_total"),
			"Total elapsed time that the threads of the process have spent executing code in privileged mode.",
			[]string{"session_name"},
			nil,
		),
		PercentProcessorTime: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "processor_time_seconds_total"),
			"Total elapsed time that all of the threads of this process used the processor to execute instructions.",
			[]string{"session_name"},
			nil,
		),
		PercentUserTime: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "user_time_seconds_total"),
			"Total elapsed time that this process's threads have spent executing code in user mode. Applications, environment subsystems, and integral subsystems execute in user mode.",
			[]string{"session_name"},
			nil,
		),
		PoolNonpagedBytes: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "pool_non_paged_bytes"),
			"Number of bytes in the non-paged pool, an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. This property displays the last observed value only; it is not an average.",
			[]string{"session_name"},
			nil,
		),
		PoolPagedBytes: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "pool_paged_bytes"),
			"Number of bytes in the paged pool, an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. This property displays the last observed value only; it is not an average.",
			[]string{"session_name"},
			nil,
		),
		PrivateBytes: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "private_bytes"),
			"Current number of bytes this process has allocated that cannot be shared with other processes.",
			[]string{"session_name"},
			nil,
		),
		ThreadCount: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "threads"),
			"Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread.",
			[]string{"session_name"},
			nil,
		),
		VirtualBytes: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "virtual_bytes"),
			"Current size, in bytes, of the virtual address space the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries.",
			[]string{"session_name"},
			nil,
		),
		VirtualBytesPeak: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "virtual_bytes_peak"),
			"Maximum number of bytes of virtual address space the process has used at any one time. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process might limit its ability to load libraries.",
			[]string{"session_name"},
			nil,
		),
		WorkingSet: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "working_set_bytes"),
			"Current number of bytes in the working set of this process. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory.",
			[]string{"session_name"},
			nil,
		),
		WorkingSetPeak: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "working_set_bytes_peak"),
			"Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory.",
			[]string{"session_name"},
			nil,
		),
	}, nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *TerminalServicesCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collectTSSessionCount(ctx, ch); err != nil {
		log.Error("failed collecting terminal services session count metrics:", desc, err)
		return err
	}
	if desc, err := c.collectTSSessionCounters(ctx, ch); err != nil {
		log.Error("failed collecting terminal services session counter metrics:", desc, err)
		return err
	}

	// only collect CollectionBrokerPerformance if host is a Connection Broker
	if connectionBrokerEnabled {
		if desc, err := c.collectCollectionBrokerPerformanceCounter(ctx, ch); err != nil {
			log.Error("failed collecting Connection Broker performance metrics:", desc, err)
			return err
		}
	}
	return nil
}

type perflibTerminalServices struct {
	ActiveSessions   float64 `perflib:"Active Sessions"`
	InactiveSessions float64 `perflib:"Inactive Sessions"`
	TotalSessions    float64 `perflib:"Total Sessions"`
}

func (c *TerminalServicesCollector) collectTSSessionCount(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	dst := make([]perflibTerminalServices, 0)
	err := unmarshalObject(ctx.perfObjects["Terminal Services"], &dst)
	if err != nil {
		return nil, err
	}
	if len(dst) == 0 {
		return nil, errors.New("WMI query returned empty result set")
	}

	ch <- prometheus.MustNewConstMetric(
		c.LocalSessionCount,
		prometheus.GaugeValue,
		dst[0].ActiveSessions,
		"active",
	)

	ch <- prometheus.MustNewConstMetric(
		c.LocalSessionCount,
		prometheus.GaugeValue,
		dst[0].InactiveSessions,
		"inactive",
	)

	ch <- prometheus.MustNewConstMetric(
		c.LocalSessionCount,
		prometheus.GaugeValue,
		dst[0].TotalSessions,
		"total",
	)

	return nil, nil
}

type perflibTerminalServicesSession struct {
	Name                  string
	HandleCount           float64 `perflib:"Handle Count"`
	PageFaultsPersec      float64 `perflib:"Page Faults/sec"`
	PageFileBytes         float64 `perflib:"Page File Bytes"`
	PageFileBytesPeak     float64 `perflib:"Page File Bytes Peak"`
	PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
	PercentProcessorTime  float64 `perflib:"% Processor Time"`
	PercentUserTime       float64 `perflib:"% User Time"`
	PoolNonpagedBytes     float64 `perflib:"Pool Nonpaged Bytes"`
	PoolPagedBytes        float64 `perflib:"Pool Paged Bytes"`
	PrivateBytes          float64 `perflib:"Private Bytes"`
	ThreadCount           float64 `perflib:"Thread Count"`
	VirtualBytes          float64 `perflib:"Virtual Bytes"`
	VirtualBytesPeak      float64 `perflib:"Virtual Bytes Peak"`
	WorkingSet            float64 `perflib:"Working Set"`
	WorkingSetPeak        float64 `perflib:"Working Set Peak"`
}

func (c *TerminalServicesCollector) collectTSSessionCounters(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	dst := make([]perflibTerminalServicesSession, 0)
	err := unmarshalObject(ctx.perfObjects["Terminal Services Session"], &dst)
	if err != nil {
		return nil, err
	}

	for _, d := range dst {
		// only collect metrics for remote named sessions
		n := strings.ToLower(d.Name)
		if n == "" || n == "services" || n == "console" {
			continue
		}
		ch <- prometheus.MustNewConstMetric(
			c.HandleCount,
			prometheus.GaugeValue,
			d.HandleCount,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.PageFaultsPersec,
			prometheus.CounterValue,
			d.PageFaultsPersec,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.PageFileBytes,
			prometheus.GaugeValue,
			d.PageFileBytes,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.PageFileBytesPeak,
			prometheus.GaugeValue,
			d.PageFileBytesPeak,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.PercentPrivilegedTime,
			prometheus.CounterValue,
			d.PercentPrivilegedTime,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.PercentProcessorTime,
			prometheus.CounterValue,
			d.PercentProcessorTime,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.PercentUserTime,
			prometheus.CounterValue,
			d.PercentUserTime,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.PoolNonpagedBytes,
			prometheus.GaugeValue,
			d.PoolNonpagedBytes,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.PoolPagedBytes,
			prometheus.GaugeValue,
			d.PoolPagedBytes,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.PrivateBytes,
			prometheus.GaugeValue,
			d.PrivateBytes,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.ThreadCount,
			prometheus.GaugeValue,
			d.ThreadCount,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.VirtualBytes,
			prometheus.GaugeValue,
			d.VirtualBytes,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.VirtualBytesPeak,
			prometheus.GaugeValue,
			d.VirtualBytesPeak,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.WorkingSet,
			prometheus.GaugeValue,
			d.WorkingSet,
			d.Name,
		)
		ch <- prometheus.MustNewConstMetric(
			c.WorkingSetPeak,
			prometheus.GaugeValue,
			d.WorkingSetPeak,
			d.Name,
		)
	}
	return nil, nil
}

type perflibRemoteDesktopConnectionBrokerCounterset struct {
	SuccessfulConnections float64 `perflib:"Successful Connections"`
	PendingConnections    float64 `perflib:"Pending Connections"`
	FailedConnections     float64 `perflib:"Failed Connections"`
}

func (c *TerminalServicesCollector) collectCollectionBrokerPerformanceCounter(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {

	dst := make([]perflibRemoteDesktopConnectionBrokerCounterset, 0)
	err := unmarshalObject(ctx.perfObjects["Remote Desktop Connection Broker Counterset"], &dst)
	if err != nil {
		return nil, err
	}
	if len(dst) == 0 {
		return nil, errors.New("WMI query returned empty result set")
	}

	ch <- prometheus.MustNewConstMetric(
		c.ConnectionBrokerPerformance,
		prometheus.CounterValue,
		dst[0].SuccessfulConnections,
		"Successful",
	)

	ch <- prometheus.MustNewConstMetric(
		c.ConnectionBrokerPerformance,
		prometheus.CounterValue,
		dst[0].PendingConnections,
		"Pending",
	)

	ch <- prometheus.MustNewConstMetric(
		c.ConnectionBrokerPerformance,
		prometheus.CounterValue,
		dst[0].FailedConnections,
		"Failed",
	)

	return nil, nil
}
collector/terminal_services_test.go (new Normal file, 9 lines)
@@ -0,0 +1,9 @@
package collector

import (
	"testing"
)

func BenchmarkTerminalServicesCollector(b *testing.B) {
	benchmarkCollector(b, "terminal_services", NewTerminalServicesCollector)
}
collector/textfile.go (new Normal file, 357 lines)
@@ -0,0 +1,357 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !notextfile
// +build !notextfile

package collector

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"strings"
	"time"

	"github.com/dimchansky/utfbom"
	"github.com/prometheus-community/windows_exporter/log"
	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	kingpin "gopkg.in/alecthomas/kingpin.v2"
)

var (
	textFileDirectory = kingpin.Flag(
		"collector.textfile.directory",
		"Directory to read text files with metrics from.",
	).Default(getDefaultPath()).String()

	mtimeDesc = prometheus.NewDesc(
		prometheus.BuildFQName(Namespace, "textfile", "mtime_seconds"),
		"Unixtime mtime of textfiles successfully read.",
		[]string{"file"},
		nil,
	)
)

type textFileCollector struct {
	path string
	// Only set for testing to get predictable output.
	mtime *float64
}

func init() {
	registerCollector("textfile", NewTextFileCollector)
}

// NewTextFileCollector returns a new Collector exposing metrics read from files
// in the given textfile directory.
func NewTextFileCollector() (Collector, error) {
	return &textFileCollector{
		path: *textFileDirectory,
	}, nil
}

// Given a slice of metric families, determine if any two entries are duplicates.
// Duplicates will be detected where the metric name, labels and label values are identical.
func duplicateMetricEntry(metricFamilies []*dto.MetricFamily) bool {
	uniqueMetrics := make(map[string]map[string]string)
	for _, metricFamily := range metricFamilies {
		metric_name := *metricFamily.Name
		for _, metric := range metricFamily.Metric {
			metric_labels := metric.GetLabel()
			labels := make(map[string]string)
			for _, label := range metric_labels {
				labels[label.GetName()] = label.GetValue()
			}
			// Check if key is present before appending
			_, mapContainsKey := uniqueMetrics[metric_name]

			// Duplicate metric found with identical labels & label values
			if mapContainsKey == true && reflect.DeepEqual(uniqueMetrics[metric_name], labels) {
				return true
			}
			uniqueMetrics[metric_name] = labels
		}
	}
	return false
}

func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric) {
	var valType prometheus.ValueType
	var val float64

	allLabelNames := map[string]struct{}{}
	for _, metric := range metricFamily.Metric {
		labels := metric.GetLabel()
		for _, label := range labels {
			if _, ok := allLabelNames[label.GetName()]; !ok {
				allLabelNames[label.GetName()] = struct{}{}
			}
		}
	}

	for _, metric := range metricFamily.Metric {
		if metric.TimestampMs != nil {
			log.Warnf("Ignoring unsupported custom timestamp on textfile collector metric %v", metric)
		}

		labels := metric.GetLabel()
		var names []string
		var values []string
		for _, label := range labels {
			names = append(names, label.GetName())
			values = append(values, label.GetValue())
		}

		for k := range allLabelNames {
			present := false
			for _, name := range names {
				if k == name {
					present = true
					break
				}
			}
			if present == false {
				names = append(names, k)
				values = append(values, "")
			}
		}

		metricType := metricFamily.GetType()
		switch metricType {
		case dto.MetricType_COUNTER:
			valType = prometheus.CounterValue
			val = metric.Counter.GetValue()

		case dto.MetricType_GAUGE:
			valType = prometheus.GaugeValue
			val = metric.Gauge.GetValue()

		case dto.MetricType_UNTYPED:
			valType = prometheus.UntypedValue
			val = metric.Untyped.GetValue()

		case dto.MetricType_SUMMARY:
			quantiles := map[float64]float64{}
			for _, q := range metric.Summary.Quantile {
				quantiles[q.GetQuantile()] = q.GetValue()
			}
			ch <- prometheus.MustNewConstSummary(
				prometheus.NewDesc(
					*metricFamily.Name,
					metricFamily.GetHelp(),
					names, nil,
				),
				metric.Summary.GetSampleCount(),
				metric.Summary.GetSampleSum(),
				quantiles, values...,
			)
		case dto.MetricType_HISTOGRAM:
			buckets := map[float64]uint64{}
			for _, b := range metric.Histogram.Bucket {
				buckets[b.GetUpperBound()] = b.GetCumulativeCount()
			}
			ch <- prometheus.MustNewConstHistogram(
				prometheus.NewDesc(
					*metricFamily.Name,
					metricFamily.GetHelp(),
					names, nil,
				),
				metric.Histogram.GetSampleCount(),
				metric.Histogram.GetSampleSum(),
				buckets, values...,
			)
		default:
			log.Errorf("unknown metric type for file")
			continue
		}
		if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED {
			ch <- prometheus.MustNewConstMetric(
				prometheus.NewDesc(
					*metricFamily.Name,
					metricFamily.GetHelp(),
					names, nil,
				),
				valType, val, values...,
			)
		}
	}
}

func (c *textFileCollector) exportMTimes(mtimes map[string]time.Time, ch chan<- prometheus.Metric) {
	// Export the mtimes of the successful files.
	if len(mtimes) > 0 {
		// Sorting is needed for predictable output comparison in tests.
		filenames := make([]string, 0, len(mtimes))
		for filename := range mtimes {
			filenames = append(filenames, filename)
		}
		sort.Strings(filenames)

		for _, filename := range filenames {
			mtime := float64(mtimes[filename].UnixNano() / 1e9)
			if c.mtime != nil {
				mtime = *c.mtime
			}
			ch <- prometheus.MustNewConstMetric(mtimeDesc, prometheus.GaugeValue, mtime, filename)
		}
	}
}

type carriageReturnFilteringReader struct {
	r io.Reader
}

// Read returns data from the underlying io.Reader, but with \r filtered out
func (cr carriageReturnFilteringReader) Read(p []byte) (int, error) {
	buf := make([]byte, len(p))
	n, err := cr.r.Read(buf)

	if err != nil && err != io.EOF {
		return n, err
	}

	pi := 0
	for i := 0; i < n; i++ {
		if buf[i] != '\r' {
			p[pi] = buf[i]
			pi++
		}
	}

	return pi, err
}

// Collect implements the Collector interface.
func (c *textFileCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	error := 0.0
	mtimes := map[string]time.Time{}

	// Iterate over files and accumulate their metrics.
	files, err := ioutil.ReadDir(c.path)
	if err != nil && c.path != "" {
		log.Errorf("Error reading textfile collector directory %q: %s", c.path, err)
		error = 1.0
	}

	// Create empty metricFamily slice here and append parsedFamilies to it inside the loop.
	// Once loop is complete, raise error if any duplicates are present.
	// This will ensure that duplicate metrics are correctly detected between multiple .prom files.
	var metricFamilies = []*dto.MetricFamily{}
fileLoop:
	for _, f := range files {
		if !strings.HasSuffix(f.Name(), ".prom") {
			continue
		}
		path := filepath.Join(c.path, f.Name())
		log.Debugf("Processing file %q", path)
		file, err := os.Open(path)
		if err != nil {
			log.Errorf("Error opening %q: %v", path, err)
			error = 1.0
			continue
		}
		var parser expfmt.TextParser
		r, encoding := utfbom.Skip(carriageReturnFilteringReader{r: file})
		if err = checkBOM(encoding); err != nil {
			log.Errorf("Invalid file encoding detected in %s: %s - file must be UTF8", path, err.Error())
			error = 1.0
			continue
		}
		parsedFamilies, err := parser.TextToMetricFamilies(r)
		closeErr := file.Close()
		if closeErr != nil {
			log.Warnf("Error closing file: %v", err)
		}
		if err != nil {
			log.Errorf("Error parsing %q: %v", path, err)
			error = 1.0
			continue
		}

		// Use temporary array to check for duplicates
		var families_array []*dto.MetricFamily

		for _, mf := range parsedFamilies {
			families_array = append(families_array, mf)
			for _, m := range mf.Metric {
				if m.TimestampMs != nil {
					log.Errorf("Textfile %q contains unsupported client-side timestamps, skipping entire file", path)
					error = 1.0
					continue fileLoop
				}
			}
			if mf.Help == nil {
				help := fmt.Sprintf("Metric read from %s", path)
				mf.Help = &help
			}
		}

		// If duplicate metrics are detected in a *single* file, skip processing of file metrics
		if duplicateMetricEntry(families_array) {
			log.Errorf("Duplicate metrics detected in file %s. Skipping file processing.", f.Name())
			error = 1.0
			continue
		}

		// Only set this once it has been parsed and validated, so that
		// a failure does not appear fresh.
		mtimes[f.Name()] = f.ModTime()

		for _, metricFamily := range parsedFamilies {
			metricFamilies = append(metricFamilies, metricFamily)
		}
	}

	// If duplicates are detected across *multiple* files, return error.
	if duplicateMetricEntry(metricFamilies) {
		log.Errorf("Duplicate metrics detected across multiple files")
		error = 1.0
	} else {
		for _, mf := range metricFamilies {
			convertMetricFamily(mf, ch)
		}
	}

	c.exportMTimes(mtimes, ch)

	// Export if there were errors.
	ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "textfile", "scrape_error"),
			"1 if there was an error opening or reading a file, 0 otherwise",
			nil, nil,
		),
		prometheus.GaugeValue, error,
	)
	return nil
}

func checkBOM(encoding utfbom.Encoding) error {
	if encoding == utfbom.Unknown || encoding == utfbom.UTF8 {
		return nil
	}

	return fmt.Errorf(encoding.String())
}

func getDefaultPath() string {
	execPath, _ := os.Executable()
	return filepath.Join(filepath.Dir(execPath), "textfile_inputs")
}
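As a quick illustration of the duplicate detection above, the following test-style sketch builds a metric family that contains the same series twice and expects duplicateMetricEntry to flag it. This is a hypothetical example, not part of the commit; only duplicateMetricEntry and the dto types are taken from the file above.

package collector

import (
	"testing"

	dto "github.com/prometheus/client_model/go"
)

func TestDuplicateMetricEntrySketch(t *testing.T) {
	name := "foo_total"
	labelName, labelValue := "job", "a"

	// Two metrics with identical name, label names and label values.
	metric := &dto.Metric{
		Label: []*dto.LabelPair{{Name: &labelName, Value: &labelValue}},
	}
	families := []*dto.MetricFamily{
		{Name: &name, Metric: []*dto.Metric{metric, metric}},
	}

	if !duplicateMetricEntry(families) {
		t.Error("expected the duplicated series to be detected")
	}
}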
Some files were not shown because too many files have changed in this diff.