Mirror of https://github.com/prometheus-community/windows_exporter.git (synced 2026-02-09 06:26:39 +00:00)

Compare commits: v0.0.1 ... v0.7.999-p (210 commits)
| SHA1 |
|---|
| daa6f3d111 |
| 85fdfb44b8 |
| 33879449a2 |
| 462a136673 |
| d5e39892cf |
| ec0d863c29 |
| afc3655a41 |
| e25e96a62e |
| 23d92cfcae |
| 1258703f23 |
| 8841091f9c |
| 517cd3b04b |
| 9daa8c8775 |
| e04d3f414d |
| 4c69ed1610 |
| a171401f57 |
| e24e0dc9f5 |
| 0eab86c731 |
| 13c68634ce |
| 73ad1ba960 |
| 0121fd6471 |
| 93904954f4 |
| f2462b26c8 |
| 7e05621b26 |
| 76ddad34b8 |
| 2053dea3ac |
| 35b81dcdd0 |
| 39b0000514 |
| 76ec763c42 |
| 7ccc47cc51 |
| ad29ac0792 |
| d58ce114d9 |
| 5f9dfcc378 |
| f4e5bc3d29 |
| f4362c5987 |
| f691b48304 |
| d12d31a17f |
| 48d23cfb12 |
| 17039b8206 |
| 2993552e19 |
| 5d4cafc0a1 |
| a70c57ffd1 |
| b2cb04834a |
| f27fdbbbf5 |
| 7dda8eba03 |
| 080f80eb26 |
| 2766f0e3af |
| 939f4832ee |
| 9e1d4bbaed |
| 8ef341a51c |
| 1fde8bae5b |
| 48220d825e |
| 9ed68ae86c |
| 700bbb37c5 |
| cb9da1ae22 |
| 7de316af9f |
| 263ab8c444 |
| 57449c4768 |
| fe7e5cb4d8 |
| c156f2bcbe |
| 832771b4a2 |
| 16fecfbc67 |
| bad1e7f7b0 |
| c868c00e89 |
| 5035e97369 |
| 144715e3d2 |
| a20cf1274a |
| 626a25cd00 |
| 96dd456bb1 |
| af1b8bf4d0 |
| d83615a818 |
| fe4c61a70e |
| 143705bbf6 |
| e8ffb736d0 |
| 21e0f926a3 |
| 8ea862a3da |
| bb67658853 |
| aecd90dcf1 |
| cd365c6a3b |
| 667d06116d |
| f3072bb4f3 |
| 17072bf257 |
| d3d8537201 |
| 2951a9ef80 |
| 3141fc3ed3 |
| a0333ee256 |
| c9fc76de4c |
| 3752a547d5 |
| 0ab6c191be |
| 467e83722a |
| 7fe8ca8554 |
| 4b3d1d60d9 |
| 1358123482 |
| ec79488478 |
| c3b227a4f2 |
| c241513d56 |
| 5a538d7682 |
| a0ec1e2da6 |
| 353de09798 |
| de4838454a |
| 86d2b8bdc3 |
| 041ff0351d |
| 076af99418 |
| 76bb06b32f |
| be5ac7b440 |
| 63e51a554b |
| cf792394f3 |
| 5db7c0a936 |
| 2461407277 |
| 33c5e99e0f |
| 5ecdfe9498 |
| caa46799f8 |
| aee1e4b1fd |
| 69c83b6a39 |
| cede267565 |
| 191debeed6 |
| df0db7a54f |
| bda7dd18cf |
| 617d795383 |
| b9b8cfd1ca |
| 6b98771187 |
| afa17b2a1b |
| 3b88460eb5 |
| a52df7696a |
| 105a1c866b |
| 69c1d0faad |
| 809fe9becf |
| 7b6974e595 |
| caf8742dcd |
| 94caf8ee61 |
| 2f7a372429 |
| e547c13716 |
| 44c39405c7 |
| 96faedf481 |
| 3cfc11c6d2 |
| 80f1cf0546 |
| 691b672a1e |
| 7206f020cb |
| 2e0842573d |
| 7537c9896e |
| 88271ddf14 |
| b0f1e1843b |
| 00f57c183d |
| dc76c4227d |
| f4195aa435 |
| 1ebee26c30 |
| 1db60e22b9 |
| e8cfeef26c |
| 0c4c15c4ce |
| 175c54acf1 |
| 22a9c96ffd |
| 95b04ec0a1 |
| 2c84c5ad8a |
| d163a30e15 |
| ec6b786608 |
| 781ab7ca20 |
| 6bb522b6c3 |
| 9d515255a6 |
| 012e143601 |
| 880ad4d07c |
| b6d5367093 |
| 4895e707ef |
| 0616dd6690 |
| 53048a42fd |
| a41b9de37c |
| 745f0a6f61 |
| 8ca60af43a |
| a66f0b5475 |
| 25b8dba6df |
| f5365c96f6 |
| a05febe069 |
| 47dc455dc2 |
| 9ea0c95ca5 |
| 91374a75f2 |
| b4ca3412dd |
| 7fdbdad140 |
| e65b785995 |
| 8482814343 |
| 8df4ea3f51 |
| f60211aba3 |
| 2808da68d8 |
| a3f44f60c5 |
| 5925816a9a |
| 0beff465c0 |
| b40d60b8a4 |
| a1910c3963 |
| 4c39660624 |
| 2fce1541d8 |
| 66b6145835 |
| e0b6bf5b4f |
| 56eef24660 |
| fab166f70a |
| 5dd08db8c4 |
| a973ce9bca |
| 8974d840d0 |
| c5d7bb8375 |
| 2fa997dc7e |
| 5aba27bfe3 |
| ef04d2c51c |
| 2ff652ed86 |
| 6a5ae712d6 |
| 2544995940 |
| 88863c1107 |
| 141b28a656 |
| 497ee6563f |
| c0b133d157 |
| 92b762f6a1 |
| c98a0c16b2 |
| 15036bbed3 |
| 52592bb097 |
.gitignore (vendored): 6 lines changed

@@ -1 +1,5 @@
/wmi_exporter.exe
*.exe
VERSION
*.swp
*.un~
output/
.promu.yml (new file): 18 lines

@@ -0,0 +1,18 @@
repository:
  path: github.com/martinlindhe/wmi_exporter
build:
  binaries:
    - name: wmi_exporter
  ldflags: |
    -X {{repoPath}}/vendor/github.com/prometheus/common/version.Version={{.Version}}
    -X {{repoPath}}/vendor/github.com/prometheus/common/version.Revision={{.Revision}}
    -X {{repoPath}}/vendor/github.com/prometheus/common/version.Branch={{.Branch}}
    -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
    -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
tarball:
  files:
    - LICENSE
crossbuild:
  platforms:
    - windows/amd64
    - windows/386
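The `-X` flags above stamp version metadata into the vendored `github.com/prometheus/common/version` package at build time. Below is a minimal sketch of where those values surface, assuming the standard (non-vendored) import path and the `version.Print` helper from that package; it is illustrative and not part of the repository:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/version"
)

func main() {
	// Version, Revision, Branch, BuildUser and BuildDate are the package
	// variables targeted by the ldflags in .promu.yml; without those flags
	// they print as empty/default values.
	fmt.Println(version.Print("wmi_exporter"))
}
```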
AUTHORS.md (new file): 4 lines

@@ -0,0 +1,4 @@
Contributors in alphabetical order

* [Martin Lindhe](https://github.com/martinlindhe)
* [Calle Pettersson](https://github.com/carlpett)
Gopkg.lock (generated, new file): 175 lines
@@ -0,0 +1,175 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:3ccf8ba7afe02fd470c4f07d6eea4d0e6875da3d129f95b925f2003ce5dd2024"
|
||||
name = "github.com/StackExchange/wmi"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338"
|
||||
version = "1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:f3793f8a708522400cef1dba23385e901aede5519f68971fd69938ef330b07a1"
|
||||
name = "github.com/alecthomas/template"
|
||||
packages = [
|
||||
".",
|
||||
"parse",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:fdd419e104ec26bb5bd63cc62637c640453ed2929a7453f3afadbd9a0223da66"
|
||||
name = "github.com/alecthomas/units"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:cb0535f5823b47df7dcb9768ebb6c000b79ad115472910c70efe93c9ed9b2315"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
pruneopts = "NUT"
|
||||
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f9adc21a937e5da643ea14a3488cb7506788876737a5e205394e508627a6eec8"
|
||||
name = "github.com/dimchansky/utfbom"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:cb4e216bd9f58866f42dc65893455b24f879b026fdaa1ecc3aafff625fdb5a66"
|
||||
name = "github.com/go-ole/go-ole"
|
||||
packages = [
|
||||
".",
|
||||
"oleutil",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506"
|
||||
version = "v1.2.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:9f35c1344b56e5868d511d231f215edd0650aa572664f856444affdd256e43e4"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
pruneopts = "NUT"
|
||||
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
pruneopts = "NUT"
|
||||
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7"
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = [
|
||||
"prometheus",
|
||||
"prometheus/promhttp",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:32d10bdfa8f09ecf13598324dba86ab891f11db3c538b6a34d1c3b5b99d7c36b"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
pruneopts = "NUT"
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:ce98e83b2b9486b6a9ce5e44fd4097c64e8f2f0eaa6c5041a8f12d3aaa5c17b3"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"log",
|
||||
"model",
|
||||
"version",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "e4aa40a9169a88835b849a6efb71e05dc04b88f0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:61a95e8d3e39e94207fba1b56d3c2182a356a1e41017aa647f523ae964b6bb0c"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "54d17b57dd7d4a3aa092476596b3f8a933bde349"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6989062eb7ccf25cf38bf4fe3dba097ee209f896cda42cefdca3927047bef7b6"
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
|
||||
version = "v1.0.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
pruneopts = "NUT"
|
||||
revision = "182114d582623c1caa54f73de9c7224e23a48487"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:ea69008276e11262595a1f9a279ffd51d93e21c32c13b0f81856e962c6f607dd"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows",
|
||||
"windows/registry",
|
||||
"windows/svc",
|
||||
"windows/svc/eventlog",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "8c0ece68c28377f4c326d85b94f8df0dace46f80"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:22b2dee6f30bc8601f087449a2a819df8388e54e9547349c658f14d8f8c590f2"
|
||||
name = "gopkg.in/alecthomas/kingpin.v2"
|
||||
packages = ["."]
|
||||
pruneopts = "NUT"
|
||||
revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
|
||||
version = "v2.2.6"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"github.com/StackExchange/wmi",
|
||||
"github.com/dimchansky/utfbom",
|
||||
"github.com/prometheus/client_golang/prometheus",
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp",
|
||||
"github.com/prometheus/client_model/go",
|
||||
"github.com/prometheus/common/expfmt",
|
||||
"github.com/prometheus/common/log",
|
||||
"github.com/prometheus/common/version",
|
||||
"golang.org/x/sys/windows/registry",
|
||||
"golang.org/x/sys/windows/svc",
|
||||
"gopkg.in/alecthomas/kingpin.v2",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
Gopkg.toml (new file): 4 lines

@@ -0,0 +1,4 @@
[prune]
  non-go = true
  go-tests = true
  unused-packages = true
Makefile (new file): 19 lines

@@ -0,0 +1,19 @@
export GOOS=windows

build:
	promu build -v

test:
	go test -v ./...

lint:
	gometalinter --vendor --config gometalinter.config ./...

fmt:
	gofmt -l -w -s .

crossbuild:
	# The prometheus/golang-builder image for promu crossbuild doesn't exist
	# on Windows, so for now, we'll just build twice
	GOARCH=amd64 promu build --prefix=output/amd64
	GOARCH=386 promu build --prefix=output/386
README.md: 71 lines changed

@@ -4,52 +4,97 @@

Prometheus exporter for Windows machines, using WMI (Windows Management Instrumentation).

**EXPERIMENTAL, use at your own risk!**

## Collectors

Name | Description
---------|-------------
os | [Win32_OperatingSystem](https://msdn.microsoft.com/en-us/library/aa394239) metrics (memory, processes, users)
logical_disk | [Win32_PerfRawData_PerfDisk_LogicalDisk](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71)) metrics (disk I/O)
iis | [Win32_PerfRawData_W3SVC_WebService](https://msdn.microsoft.com/en-us/library/aa394345) IIS metrics

Name | Description | Enabled by default
---------|-------------|--------------------
[ad](docs/collector.ad.md) | Active Directory Domain Services |
[cpu](docs/collector.cpu.md) | CPU usage | ✓
[cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) | ✓
[dns](docs/collector.dns.md) | DNS Server |
[hyperv](docs/collector.hyperv.md) | Hyper-V hosts |
[iis](docs/collector.iis.md) | IIS sites and applications |
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | ✓
[memory](docs/collector.memory.md) | Memory usage metrics |
[msmq](docs/collector.msmq.md) | MSMQ queues |
[mssql](docs/collector.mssql.md) | [SQL Server Performance Objects](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects#SQLServerPOs) metrics |
[netframework_clrexceptions](docs/collector.netframework_clrexceptions.md) | .NET Framework CLR Exceptions |
[netframework_clrinterop](docs/collector.netframework_clrinterop.md) | .NET Framework Interop Metrics |
[netframework_clrjit](docs/collector.netframework_clrjit.md) | .NET Framework JIT metrics |
[netframework_clrloading](docs/collector.netframework_clrloading.md) | .NET Framework CLR Loading metrics |
[netframework_clrlocksandthreads](docs/collector.netframework_clrlocksandthreads.md) | .NET Framework locks and threads metrics |
[netframework_clrmemory](docs/collector.netframework_clrmemory.md) | .NET Framework Memory metrics |
[netframework_clrremoting](docs/collector.netframework_clrremoting.md) | .NET Framework Remoting metrics |
[netframework_clrsecurity](docs/collector.netframework_clrsecurity.md) | .NET Framework Security Check metrics |
[net](docs/collector.net.md) | Network interface I/O | ✓
[os](docs/collector.os.md) | OS metrics (memory, processes, users) | ✓
[process](docs/collector.process.md) | Per-process metrics |
[service](docs/collector.service.md) | Service state metrics | ✓
[system](docs/collector.system.md) | System calls | ✓
[tcp](docs/collector.tcp.md) | TCP connections |
[textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | ✓
[vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent |

The HELP texts show the WMI data source; please see the MSDN documentation for details.
See the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples.

## Installation
The latest release can be downloaded from the [releases page](https://github.com/martinlindhe/wmi_exporter/releases).

Each release provides a .msi installer. The installer will set up the WMI Exporter as a Windows service, as well as create an exception in the Windows Firewall.

If the installer is run without any parameters, the exporter will run with default settings for enabled collectors, ports, etc. The following parameters are available:

Name | Description
-----|------------
`ENABLED_COLLECTORS` | As the `-collectors.enabled` flag, provide a comma-separated list of enabled collectors
`ENABLED_COLLECTORS` | As the `--collectors.enabled` flag, provide a comma-separated list of enabled collectors
`LISTEN_ADDR` | The IP address to bind to. Defaults to 0.0.0.0
`LISTEN_PORT` | The port to bind to. Defaults to 9182.
`METRICS_PATH` | The path at which to serve metrics. Defaults to `/metrics`
`TEXTFILE_DIR` | As the `--collector.textfile.directory` flag, provide a directory to read text files with metrics from
`EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string.

Parameters are sent to the installer via `msiexec`. Example invocation:
Parameters are sent to the installer via `msiexec`. Example invocations:

```powershell
msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,iis LISTEN_PORT=5000
```

Example service collector with a custom query.
```powershell
msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,service --% EXTRA_FLAGS="--collector.service.services-where ""Name LIKE 'sql%'"""
```

## Roadmap

See [Wiki](https://github.com/martinlindhe/wmi_exporter/wiki/TODO)
See [open issues](https://github.com/martinlindhe/wmi_exporter/issues)

## Usage

    go get -u github.com/kardianos/govendor
    go get -u github.com/golang/dep
    go get -u github.com/prometheus/promu
    go get -u github.com/martinlindhe/wmi_exporter
    cd $env:GOPATH/src/github.com/martinlindhe/wmi_exporter
    govendor build +local
    promu build -v .
    .\wmi_exporter.exe

The Prometheus metrics will be exposed on [localhost:9182](http://localhost:9182)

## Examples

### Enable only service collector and specify a custom query

    .\wmi_exporter.exe --collectors.enabled "service" --collector.service.services-where "Name='wmi_exporter'"

### Enable only process collector and specify a custom query

    .\wmi_exporter.exe --collectors.enabled "process" --collector.process.processes-where "Name LIKE 'firefox%'"

When there are multiple processes with the same name, WMI represents those after the first instance as `process-name#index`. So to get them all, rather than just the first one, the query needs to be a wildcard search using a `%` character.

Please note that in Windows batch scripts (and when using the `cmd` command prompt), the `%` character is reserved, so it has to be escaped with another `%`. For example, the wildcard syntax for searching for all firefox processes is `firefox%%`.

## License
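As a quick check that a locally built exporter is serving data, the metrics endpoint mentioned under Usage can be fetched programmatically. This is a minimal sketch, assuming the exporter is running with the default listen port (9182) and metrics path (`/metrics`); it is not part of the repository:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Defaults documented in the README: LISTEN_PORT=9182, METRICS_PATH=/metrics.
	resp, err := http.Get("http://localhost:9182/metrics")
	if err != nil {
		log.Fatalf("exporter not reachable: %v", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading metrics: %v", err)
	}
	fmt.Printf("status %s, %d bytes of metrics\n", resp.Status, len(body))
}
```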
appveyor.yml: 112 lines changed
@@ -1,46 +1,66 @@
|
||||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
GOVERSION: 1.7
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter
|
||||
|
||||
install:
|
||||
- go version
|
||||
- set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
|
||||
- rmdir c:\go /s /q
|
||||
- appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.zip
|
||||
- 7z x go%GOVERSION%.windows-amd64.zip -y -oC:\ > NUL
|
||||
- go version
|
||||
- go env
|
||||
- go get -u github.com/kardianos/govendor
|
||||
|
||||
build_script:
|
||||
- govendor build -v +p
|
||||
- govendor test -v +local
|
||||
- ps: |
|
||||
if($env:APPVEYOR_REPO_TAG -eq "True") {
|
||||
# The MSI version is not semver compliant, so just take the numerical parts
|
||||
$Version = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
|
||||
Write-Verbose "Setting msi version to $Version"
|
||||
.\installer\build.ps1 -PathToExecutable .\wmi_exporter.exe -Version $Version -Arch "amd64"
|
||||
Push-AppveyorArtifact installer\Output\wmi_exporter-$Version-amd64.msi -DeploymentName Installer
|
||||
}
|
||||
|
||||
artifacts:
|
||||
- name: Executable
|
||||
path: wmi_exporter.exe
|
||||
|
||||
deploy:
|
||||
- provider: GitHub
|
||||
description: WMI Exporter version $(appveyor_build_version)
|
||||
artifact: Executable,Installer
|
||||
auth_token:
|
||||
secure: 'CrXWeTf7qONUOEki5olFfGEUPMLDeHj61koDXV3OVEaLgtACmnVHsKUub9POflda'
|
||||
draft: false
|
||||
prerelease: false
|
||||
on:
|
||||
appveyor_repo_tag: true
|
||||
version: "{build}"
|
||||
|
||||
os: Visual Studio 2017
|
||||
build: off
|
||||
stack: go 1.10
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter
|
||||
|
||||
install:
|
||||
- set PATH=%GOPATH%\bin;%PATH%
|
||||
- set PATH=%PATH%;C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin
|
||||
- go get -u github.com/prometheus/promu
|
||||
- go get -u github.com/alecthomas/gometalinter && gometalinter --install
|
||||
- choco install gitversion.portable make -y
|
||||
|
||||
test_script:
|
||||
- make test
|
||||
|
||||
after_test:
|
||||
- make lint
|
||||
|
||||
build_script:
|
||||
- ps: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
|
||||
$Version = Get-Content VERSION
|
||||
make crossbuild
|
||||
# GH requires all files to have different names, so add version/arch to differentiate
|
||||
foreach($Arch in "amd64","386") {
|
||||
Rename-Item output\$Arch\wmi_exporter.exe -NewName wmi_exporter-$Version-$Arch.exe
|
||||
}
|
||||
|
||||
after_build:
|
||||
- ps: |
|
||||
# Build installer packages only on tagged releases
|
||||
if($env:APPVEYOR_REPO_TAG -ne "True") {
|
||||
return
|
||||
}
|
||||
$ErrorActionPreference = "Stop"
|
||||
$BuildVersion = Get-Content VERSION
|
||||
# The MSI version is not semver compliant, so just take the numerical parts
|
||||
$MSIVersion = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
|
||||
foreach($Arch in "amd64","386") {
|
||||
Write-Verbose "Building wmi_exporter $MSIVersion msi for $Arch"
|
||||
.\installer\build.ps1 -PathToExecutable .\output\$Arch\wmi_exporter-$BuildVersion-$Arch.exe -Version $MSIVersion -Arch "$Arch"
|
||||
Move-Item installer\Output\wmi_exporter-$MSIVersion-$Arch.msi output\$Arch\
|
||||
}
|
||||
- promu checksum output\
|
||||
|
||||
artifacts:
|
||||
- name: Artifacts
|
||||
path: output\**\*
|
||||
|
||||
deploy:
|
||||
- provider: GitHub
|
||||
description: WMI Exporter version $(appveyor_build_version)
|
||||
artifact: Artifacts
|
||||
auth_token:
|
||||
secure: 'CrXWeTf7qONUOEki5olFfGEUPMLDeHj61koDXV3OVEaLgtACmnVHsKUub9POflda'
|
||||
draft: false
|
||||
prerelease: false
|
||||
on:
|
||||
appveyor_repo_tag: true
|
||||
|
||||
collector/ad.go (new file): 1314 lines (file diff suppressed because it is too large)
collector/collector.go (new file): 39 lines

@@ -0,0 +1,39 @@
package collector

import (
	"github.com/leoluk/perflib_exporter/perflib"
	"github.com/prometheus/client_golang/prometheus"
)

// ...
const (
	// TODO: Make package-local
	Namespace = "wmi"

	// Conversion factors
	ticksToSecondsScaleFactor = 1 / 1e7
	windowsEpoch              = 116444736000000000
)

// Factories ...
var Factories = make(map[string]func() (Collector, error))

// Collector is the interface a collector has to implement.
type Collector interface {
	// Get new metrics and expose them via prometheus registry.
	Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (err error)
}

type ScrapeContext struct {
	perfObjects map[string]*perflib.PerfObject
}

// PrepareScrapeContext creates a ScrapeContext to be used during a single scrape
func PrepareScrapeContext() (*ScrapeContext, error) {
	objs, err := getPerflibSnapshot()
	if err != nil {
		return nil, err
	}

	return &ScrapeContext{objs}, nil
}
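The `Factories` map and `Collector` interface above are the extension points used by every collector in this diff: a file registers a constructor in `init()` and implements `Collect`. Below is a minimal sketch of that pattern with a hypothetical `example` collector exporting a single constant gauge; the collector name and metric are invented for illustration and mirror the structure of the real collectors later in this diff (for instance collector/cs.go):

```go
package collector

import "github.com/prometheus/client_golang/prometheus"

func init() {
	// Hypothetical collector name, registered the same way as "cs" or "dns".
	Factories["example"] = newExampleCollector
}

type exampleCollector struct {
	up *prometheus.Desc
}

func newExampleCollector() (Collector, error) {
	return &exampleCollector{
		up: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "example", "up"),
			"Illustrative constant gauge exported by the example collector.",
			nil,
			nil,
		),
	}, nil
}

// Collect satisfies the Collector interface defined above.
func (c *exampleCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	ch <- prometheus.MustNewConstMetric(c.up, prometheus.GaugeValue, 1)
	return nil
}
```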
collector/container.go (new file): 282 lines
@@ -0,0 +1,282 @@
|
||||
// +build windows,cgo
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["container"] = NewContainerMetricsCollector
|
||||
}
|
||||
|
||||
// A ContainerMetricsCollector is a Prometheus collector for containers metrics
|
||||
type ContainerMetricsCollector struct {
|
||||
// Presence
|
||||
ContainerAvailable *prometheus.Desc
|
||||
|
||||
// Number of containers
|
||||
ContainersCount *prometheus.Desc
|
||||
// memory
|
||||
UsageCommitBytes *prometheus.Desc
|
||||
UsageCommitPeakBytes *prometheus.Desc
|
||||
UsagePrivateWorkingSetBytes *prometheus.Desc
|
||||
|
||||
// CPU
|
||||
RuntimeTotal *prometheus.Desc
|
||||
RuntimeUser *prometheus.Desc
|
||||
RuntimeKernel *prometheus.Desc
|
||||
|
||||
// Network
|
||||
BytesReceived *prometheus.Desc
|
||||
BytesSent *prometheus.Desc
|
||||
PacketsReceived *prometheus.Desc
|
||||
PacketsSent *prometheus.Desc
|
||||
DroppedPacketsIncoming *prometheus.Desc
|
||||
DroppedPacketsOutgoing *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewContainerMetricsCollector constructs a new ContainerMetricsCollector
|
||||
func NewContainerMetricsCollector() (Collector, error) {
|
||||
const subsystem = "container"
|
||||
return &ContainerMetricsCollector{
|
||||
ContainerAvailable: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "available"),
|
||||
"Available",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
ContainersCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "count"),
|
||||
"Number of containers",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
UsageCommitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_bytes"),
|
||||
"Memory Usage Commit Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsageCommitPeakBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_peak_bytes"),
|
||||
"Memory Usage Commit Peak Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsagePrivateWorkingSetBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_private_working_set_bytes"),
|
||||
"Memory Usage Private Working Set Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_total"),
|
||||
"Total Run time in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeUser: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_usermode"),
|
||||
"Run Time in User mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeKernel: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_kernelmode"),
|
||||
"Run time in Kernel mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
BytesReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_bytes_total"),
|
||||
"Bytes Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
BytesSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_bytes_total"),
|
||||
"Bytes Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_total"),
|
||||
"Packets Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_total"),
|
||||
"Packets Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsIncoming: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_dropped_total"),
|
||||
"Dropped Incoming Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsOutgoing: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_dropped_total"),
|
||||
"Dropped Outgoing Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ContainerMetricsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting ContainerMetricsCollector metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// containerClose closes the container resource
|
||||
func containerClose(c hcsshim.Container) {
|
||||
err := c.Close()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
|
||||
// Types Container is passed to get the containers compute systems only
|
||||
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
|
||||
if err != nil {
|
||||
log.Error("Err in Getting containers:", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
count := len(containers)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainersCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(count),
|
||||
)
|
||||
if count == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
for _, containerDetails := range containers {
|
||||
containerId := containerDetails.ID
|
||||
|
||||
container, err := hcsshim.OpenContainer(containerId)
|
||||
if container != nil {
|
||||
defer containerClose(container)
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("err in opening container: ", containerId, err)
|
||||
continue
|
||||
}
|
||||
|
||||
cstats, err := container.Statistics()
|
||||
if err != nil {
|
||||
log.Error("err in fetching container Statistics: ", containerId, err)
|
||||
continue
|
||||
}
|
||||
// HCS V1 is for docker runtime. Add the docker:// prefix on container_id
|
||||
containerId = "docker://" + containerId
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainerAvailable,
|
||||
prometheus.CounterValue,
|
||||
1,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitPeakBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitPeakBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsagePrivateWorkingSetBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsagePrivateWorkingSetBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.TotalRuntime100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeUser,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeUser100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeKernel,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeKernel100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
|
||||
if len(cstats.Network) == 0 {
|
||||
log.Info("No Network Stats for container: ", containerId)
|
||||
continue
|
||||
}
|
||||
|
||||
networkStats := cstats.Network
|
||||
|
||||
for _, networkInterface := range networkStats {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesReceived),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesSent),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsReceived),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsSent),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsIncoming,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsIncoming),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsOutgoing,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsOutgoing),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
collector/cpu.go (new file): 397 lines
@@ -0,0 +1,397 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/windows/registry"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["cpu"] = newCPUCollector
|
||||
}
|
||||
|
||||
// A function to get Windows version from registry
|
||||
func getWindowsVersion() float64 {
|
||||
k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
|
||||
if err != nil {
|
||||
log.Warn("Couldn't open registry", err)
|
||||
return 0
|
||||
}
|
||||
defer func() {
|
||||
err = k.Close()
|
||||
if err != nil {
|
||||
log.Warnf("Failed to close registry key: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
currentv, _, err := k.GetStringValue("CurrentVersion")
|
||||
if err != nil {
|
||||
log.Warn("Couldn't open registry to determine current Windows version:", err)
|
||||
return 0
|
||||
}
|
||||
|
||||
currentv_flt, err := strconv.ParseFloat(currentv, 64)
|
||||
|
||||
log.Debugf("Detected Windows version %f\n", currentv_flt)
|
||||
|
||||
return currentv_flt
|
||||
}
|
||||
|
||||
type cpuCollectorBasic struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
}
|
||||
type cpuCollectorFull struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
ClockInterruptsTotal *prometheus.Desc
|
||||
IdleBreakEventsTotal *prometheus.Desc
|
||||
ParkingStatus *prometheus.Desc
|
||||
ProcessorFrequencyMHz *prometheus.Desc
|
||||
ProcessorMaxFrequencyMHz *prometheus.Desc
|
||||
ProcessorPerformance *prometheus.Desc
|
||||
}
|
||||
|
||||
// newCPUCollector constructs a new cpuCollector, appropriate for the running OS
|
||||
func newCPUCollector() (Collector, error) {
|
||||
const subsystem = "cpu"
|
||||
|
||||
version := getWindowsVersion()
|
||||
// Windows version by number https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version
|
||||
// For Windows 2008 or earlier Windows version is 6.0 or lower, where we only have the older "Processor" counters
|
||||
// For Windows 2008 R2 or later Windows version is 6.1 or higher, so we can use "ProcessorInformation" counters
|
||||
// Value 6.05 was selected just to split between Windows versions
|
||||
if version < 6.05 {
|
||||
return &cpuCollectorBasic{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
[]string{"core", "state"},
|
||||
nil,
|
||||
),
|
||||
TimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "time_total"),
|
||||
"Time that processor spent in different modes (idle, user, system, ...)",
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
DPCsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dpcs_total"),
|
||||
"Total number of received and serviced deferred procedure calls (DPCs)",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &cpuCollectorFull{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
[]string{"core", "state"},
|
||||
nil,
|
||||
),
|
||||
TimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "time_total"),
|
||||
"Time that processor spent in different modes (idle, user, system, ...)",
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
DPCsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dpcs_total"),
|
||||
"Total number of received and serviced deferred procedure calls (DPCs)",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ClockInterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "clock_interrupts_total"),
|
||||
"Total number of received and serviced clock tick interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
IdleBreakEventsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "idle_break_events_total"),
|
||||
"Total number of time processor was woken from idle",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ParkingStatus: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "parking_status"),
|
||||
"Parking Status represents whether a processor is parked or not",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorFrequencyMHz: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "core_frequency_mhz"),
|
||||
"Core frequency in megahertz",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorPerformance: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processor_performance"),
|
||||
"Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type perflibProcessor struct {
|
||||
Name string
|
||||
C1Transitions float64 `perflib:"C1 Transitions/sec"`
|
||||
C2Transitions float64 `perflib:"C2 Transitions/sec"`
|
||||
C3Transitions float64 `perflib:"C3 Transitions/sec"`
|
||||
DPCRate float64 `perflib:"DPC Rate"`
|
||||
DPCsQueued float64 `perflib:"DPCs Queued/sec"`
|
||||
Interrupts float64 `perflib:"Interrupts/sec"`
|
||||
PercentC2Time float64 `perflib:"% C1 Time"`
|
||||
PercentC3Time float64 `perflib:"% C2 Time"`
|
||||
PercentC1Time float64 `perflib:"% C3 Time"`
|
||||
PercentDPCTime float64 `perflib:"% DPC Time"`
|
||||
PercentIdleTime float64 `perflib:"% Idle Time"`
|
||||
PercentInterruptTime float64 `perflib:"% Interrupt Time"`
|
||||
PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
|
||||
PercentProcessorTime float64 `perflib:"% Processor Time"`
|
||||
PercentUserTime float64 `perflib:"% User Time"`
|
||||
}
|
||||
|
||||
func (c *cpuCollectorBasic) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessor, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC1Time,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC2Time,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentC3Time,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentIdleTime,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentInterruptTime,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentDPCTime,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentPrivilegedTime,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PercentUserTime,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.Interrupts,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCsQueued,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type perflibProcessorInformation struct {
|
||||
Name string
|
||||
C1TimeSeconds float64 `perflib:"% C1 Time"`
|
||||
C2TimeSeconds float64 `perflib:"% C2 Time"`
|
||||
C3TimeSeconds float64 `perflib:"% C3 Time"`
|
||||
C1TransitionsTotal float64 `perflib:"C1 Transitions/sec"`
|
||||
C2TransitionsTotal float64 `perflib:"C2 Transitions/sec"`
|
||||
C3TransitionsTotal float64 `perflib:"C3 Transitions/sec"`
|
||||
ClockInterruptsTotal float64 `perflib:"Clock Interrupts/sec"`
|
||||
DPCsQueuedTotal float64 `perflib:"DPCs Queued/sec"`
|
||||
DPCTimeSeconds float64 `perflib:"% DPC Time"`
|
||||
IdleBreakEventsTotal float64 `perflib:"Idle Break Events/sec"`
|
||||
IdleTimeSeconds float64 `perflib:"% Idle Time"`
|
||||
InterruptsTotal float64 `perflib:"Interrupts/sec"`
|
||||
InterruptTimeSeconds float64 `perflib:"% Interrupt Time"`
|
||||
ParkingStatus float64 `perflib:"Parking Status"`
|
||||
PerformanceLimitPercent float64 `perflib:"% Performance Limit"`
|
||||
PriorityTimeSeconds float64 `perflib:"% Priority Time"`
|
||||
PrivilegedTimeSeconds float64 `perflib:"% Privileged Time"`
|
||||
PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
|
||||
ProcessorFrequencyMHz float64 `perflib:"Processor Frequency"`
|
||||
ProcessorPerformance float64 `perflib:"% Processor Performance"`
|
||||
ProcessorTimeSeconds float64 `perflib:"% Processor Time"`
|
||||
ProcessorUtilityRate float64 `perflib:"% Processor Utility"`
|
||||
UserTimeSeconds float64 `perflib:"% User Time"`
|
||||
}
|
||||
|
||||
func (c *cpuCollectorFull) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessorInformation, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor Information"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C1TimeSeconds,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C2TimeSeconds,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C3TimeSeconds,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleTimeSeconds,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptTimeSeconds,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCTimeSeconds,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PrivilegedTimeSeconds,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.UserTimeSeconds,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCsQueuedTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ClockInterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.ClockInterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IdleBreakEventsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleBreakEventsTotal,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ParkingStatus,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ParkingStatus,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorFrequencyMHz,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorFrequencyMHz,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorPerformance,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorPerformance,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
collector/cs.go (new file): 83 lines

@@ -0,0 +1,83 @@
// +build windows

package collector

import (
	"errors"

	"github.com/StackExchange/wmi"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
)

func init() {
	Factories["cs"] = NewCSCollector
}

// A CSCollector is a Prometheus collector for WMI metrics
type CSCollector struct {
	PhysicalMemoryBytes *prometheus.Desc
	LogicalProcessors   *prometheus.Desc
}

// NewCSCollector ...
func NewCSCollector() (Collector, error) {
	const subsystem = "cs"

	return &CSCollector{
		LogicalProcessors: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "logical_processors"),
			"ComputerSystem.NumberOfLogicalProcessors",
			nil,
			nil,
		),
		PhysicalMemoryBytes: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, subsystem, "physical_memory_bytes"),
			"ComputerSystem.TotalPhysicalMemory",
			nil,
			nil,
		),
	}, nil
}

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *CSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Error("failed collecting cs metrics:", desc, err)
		return err
	}
	return nil
}

// Win32_ComputerSystem docs:
// - https://msdn.microsoft.com/en-us/library/aa394102
type Win32_ComputerSystem struct {
	NumberOfLogicalProcessors uint32
	TotalPhysicalMemory       uint64
}

func (c *CSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []Win32_ComputerSystem
	q := queryAll(&dst)
	if err := wmi.Query(q, &dst); err != nil {
		return nil, err
	}
	if len(dst) == 0 {
		return nil, errors.New("WMI query returned empty result set")
	}

	ch <- prometheus.MustNewConstMetric(
		c.LogicalProcessors,
		prometheus.GaugeValue,
		float64(dst[0].NumberOfLogicalProcessors),
	)

	ch <- prometheus.MustNewConstMetric(
		c.PhysicalMemoryBytes,
		prometheus.GaugeValue,
		float64(dst[0].TotalPhysicalMemory),
	)

	return nil, nil
}
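The cs collector above shows the WMI pattern shared by several collectors in this diff: build a query (here via the package's `queryAll` helper), let `wmi.Query` unmarshal the rows into a struct whose fields match the WMI property names, then emit constant metrics. Below is a standalone sketch of the same query outside the collector framework, with the query string written by hand instead of generated by `queryAll`; it only runs on Windows and is not part of the repository:

```go
package main

import (
	"fmt"
	"log"

	"github.com/StackExchange/wmi"
)

// Field names must match the WMI property names, as in the collector's
// Win32_ComputerSystem struct.
type computerSystem struct {
	NumberOfLogicalProcessors uint32
	TotalPhysicalMemory       uint64
}

func main() {
	var dst []computerSystem
	// Hand-written query; the exporter generates this from the struct instead.
	q := "SELECT NumberOfLogicalProcessors, TotalPhysicalMemory FROM Win32_ComputerSystem"
	if err := wmi.Query(q, &dst); err != nil {
		log.Fatal(err)
	}
	for _, cs := range dst {
		fmt.Printf("logical processors: %d, physical memory: %d bytes\n",
			cs.NumberOfLogicalProcessors, cs.TotalPhysicalMemory)
	}
}
```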
collector/dns.go (new file): 498 lines
@@ -0,0 +1,498 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["dns"] = NewDNSCollector
|
||||
}
|
||||
|
||||
// A DNSCollector is a Prometheus collector for WMI Win32_PerfRawData_DNS_DNS metrics
|
||||
type DNSCollector struct {
|
||||
ZoneTransferRequestsReceived *prometheus.Desc
|
||||
ZoneTransferRequestsSent *prometheus.Desc
|
||||
ZoneTransferResponsesReceived *prometheus.Desc
|
||||
ZoneTransferSuccessReceived *prometheus.Desc
|
||||
ZoneTransferSuccessSent *prometheus.Desc
|
||||
ZoneTransferFailures *prometheus.Desc
|
||||
MemoryUsedBytes *prometheus.Desc
|
||||
DynamicUpdatesQueued *prometheus.Desc
|
||||
DynamicUpdatesReceived *prometheus.Desc
|
||||
DynamicUpdatesFailures *prometheus.Desc
|
||||
NotifyReceived *prometheus.Desc
|
||||
NotifySent *prometheus.Desc
|
||||
SecureUpdateFailures *prometheus.Desc
|
||||
SecureUpdateReceived *prometheus.Desc
|
||||
Queries *prometheus.Desc
|
||||
Responses *prometheus.Desc
|
||||
RecursiveQueries *prometheus.Desc
|
||||
RecursiveQueryFailures *prometheus.Desc
|
||||
RecursiveQuerySendTimeouts *prometheus.Desc
|
||||
WinsQueries *prometheus.Desc
|
||||
WinsResponses *prometheus.Desc
|
||||
UnmatchedResponsesReceived *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewDNSCollector ...
|
||||
func NewDNSCollector() (Collector, error) {
|
||||
const subsystem = "dns"
|
||||
return &DNSCollector{
|
||||
ZoneTransferRequestsReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "zone_transfer_requests_received_total"),
|
||||
"Number of zone transfer requests (AXFR/IXFR) received by the master DNS server",
|
||||
[]string{"qtype"},
|
||||
nil,
|
||||
),
|
||||
ZoneTransferRequestsSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "zone_transfer_requests_sent_total"),
|
||||
"Number of zone transfer requests (AXFR/IXFR) sent by the secondary DNS server",
|
||||
[]string{"qtype"},
|
||||
nil,
|
||||
),
|
||||
ZoneTransferResponsesReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "zone_transfer_response_received_total"),
|
||||
"Number of zone transfer responses (AXFR/IXFR) received by the secondary DNS server",
|
||||
[]string{"qtype"},
|
||||
nil,
|
||||
),
|
||||
ZoneTransferSuccessReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "zone_transfer_success_received_total"),
|
||||
"Number of successful zone transfers (AXFR/IXFR) received by the secondary DNS server",
|
||||
[]string{"qtype", "protocol"},
|
||||
nil,
|
||||
),
|
||||
ZoneTransferSuccessSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "zone_transfer_success_sent_total"),
|
||||
"Number of successful zone transfers (AXFR/IXFR) of the master DNS server",
|
||||
[]string{"qtype"},
|
||||
nil,
|
||||
),
|
||||
ZoneTransferFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "zone_transfer_failures_total"),
|
||||
"Number of failed zone transfers of the master DNS server",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemoryUsedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_used_bytes_total"),
|
||||
"Total memory used by DNS server",
|
||||
[]string{"area"},
|
||||
nil,
|
||||
),
|
||||
DynamicUpdatesQueued: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dynamic_updates_queued"),
|
||||
"Number of dynamic updates queued by the DNS server",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DynamicUpdatesReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dynamic_updates_received_total"),
|
||||
"Number of secure update requests received by the DNS server",
|
||||
[]string{"operation"},
|
||||
nil,
|
||||
),
|
||||
DynamicUpdatesFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dynamic_updates_failures_total"),
|
||||
"Number of dynamic updates which timed out or were rejected by the DNS server",
|
||||
[]string{"reason"},
|
||||
nil,
|
||||
),
|
||||
NotifyReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "notify_received_total"),
|
||||
"Number of notifies received by the secondary DNS server",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
NotifySent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "notify_sent_total"),
|
||||
"Number of notifies sent by the master DNS server",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SecureUpdateFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "secure_update_failures_total"),
|
||||
"Number of secure updates that failed on the DNS server",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SecureUpdateReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "secure_update_received_total"),
|
||||
"Number of secure update requests received by the DNS server",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
Queries: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "queries_total"),
|
||||
"Number of queries received by DNS server",
|
||||
[]string{"protocol"},
|
||||
nil,
|
||||
),
|
||||
Responses: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "responses_total"),
|
||||
"Number of reponses sent by DNS server",
|
||||
[]string{"protocol"},
|
||||
nil,
|
||||
),
|
||||
RecursiveQueries: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "recursive_queries_total"),
|
||||
"Number of recursive queries received by DNS server",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
RecursiveQueryFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "recursive_query_failures_total"),
|
||||
"Number of recursive query failures",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
RecursiveQuerySendTimeouts: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "recursive_query_send_timeouts_total"),
|
||||
"Number of recursive query sending timeouts",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
WinsQueries: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "wins_queries_total"),
|
||||
"Number of WINS lookup requests received by the server",
|
||||
[]string{"direction"},
|
||||
nil,
|
||||
),
|
||||
WinsResponses: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "wins_responses_total"),
|
||||
"Number of WINS lookup responses sent by the server",
|
||||
[]string{"direction"},
|
||||
nil,
|
||||
),
|
||||
UnmatchedResponsesReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "unmatched_responses_total"),
|
||||
"Number of response packets received by the DNS server that do not match any outstanding remote query",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *DNSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting dns metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_DNS_DNS docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/ms803992.aspx?f=255&MSPPError=-2147217396
|
||||
// - https://technet.microsoft.com/en-us/library/cc977686.aspx
|
||||
type Win32_PerfRawData_DNS_DNS struct {
|
||||
AXFRRequestReceived uint32
|
||||
AXFRRequestSent uint32
|
||||
AXFRResponseReceived uint32
|
||||
AXFRSuccessReceived uint32
|
||||
AXFRSuccessSent uint32
|
||||
CachingMemory uint32
|
||||
DatabaseNodeMemory uint32
|
||||
DynamicUpdateNoOperation uint32
|
||||
DynamicUpdateQueued uint32
|
||||
DynamicUpdateRejected uint32
|
||||
DynamicUpdateTimeOuts uint32
|
||||
DynamicUpdateWrittentoDatabase uint32
|
||||
IXFRRequestReceived uint32
|
||||
IXFRRequestSent uint32
|
||||
IXFRResponseReceived uint32
|
||||
IXFRSuccessSent uint32
|
||||
IXFRTCPSuccessReceived uint32
|
||||
IXFRUDPSuccessReceived uint32
|
||||
NbstatMemory uint32
|
||||
NotifyReceived uint32
|
||||
NotifySent uint32
|
||||
RecordFlowMemory uint32
|
||||
RecursiveQueries uint32
|
||||
RecursiveQueryFailure uint32
|
||||
RecursiveSendTimeOuts uint32
|
||||
SecureUpdateFailure uint32
|
||||
SecureUpdateReceived uint32
|
||||
TCPMessageMemory uint32
|
||||
TCPQueryReceived uint32
|
||||
TCPResponseSent uint32
|
||||
UDPMessageMemory uint32
|
||||
UDPQueryReceived uint32
|
||||
UDPResponseSent uint32
|
||||
UnmatchedResponsesReceived uint32
|
||||
WINSLookupReceived uint32
|
||||
WINSResponseSent uint32
|
||||
WINSReverseLookupReceived uint32
|
||||
WINSReverseResponseSent uint32
|
||||
ZoneTransferFailure uint32
|
||||
ZoneTransferSOARequestSent uint32
|
||||
}
|
||||
|
||||
func (c *DNSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_DNS_DNS
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferRequestsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AXFRRequestReceived),
|
||||
"full",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferRequestsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRRequestReceived),
|
||||
"incremental",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferRequestsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AXFRRequestSent),
|
||||
"full",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferRequestsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRRequestSent),
|
||||
"incremental",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferRequestsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ZoneTransferSOARequestSent),
|
||||
"soa",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferResponsesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AXFRResponseReceived),
|
||||
"full",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferResponsesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRResponseReceived),
|
||||
"incremental",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferSuccessReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AXFRSuccessReceived),
|
||||
"full",
|
||||
"tcp",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferSuccessReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRTCPSuccessReceived),
|
||||
"incremental",
|
||||
"tcp",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferSuccessReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRUDPSuccessReceived),
|
||||
"incremental",
|
||||
"udp",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferSuccessSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].AXFRSuccessSent),
|
||||
"full",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferSuccessSent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].IXFRSuccessSent),
|
||||
"incremental",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ZoneTransferFailure),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CachingMemory),
|
||||
"caching",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].DatabaseNodeMemory),
|
||||
"database_node",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].NbstatMemory),
|
||||
"nbstat",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].RecordFlowMemory),
|
||||
"record_flow",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].TCPMessageMemory),
|
||||
"tcp_message",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemoryUsedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].UDPMessageMemory),
|
||||
"udp_message",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DynamicUpdatesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].DynamicUpdateNoOperation),
|
||||
"noop",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DynamicUpdatesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].DynamicUpdateWrittentoDatabase),
|
||||
"written",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DynamicUpdatesQueued,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].DynamicUpdateQueued),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DynamicUpdatesFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].DynamicUpdateRejected),
|
||||
"rejected",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DynamicUpdatesFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].DynamicUpdateTimeOuts),
|
||||
"timeout",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NotifyReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].NotifyReceived),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NotifySent,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].NotifySent),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RecursiveQueries,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].RecursiveQueries),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RecursiveQueryFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].RecursiveQueryFailure),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RecursiveQuerySendTimeouts,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].RecursiveSendTimeOuts),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Queries,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].TCPQueryReceived),
|
||||
"tcp",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Queries,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].UDPQueryReceived),
|
||||
"udp",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Responses,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].TCPResponseSent),
|
||||
"tcp",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Responses,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].UDPResponseSent),
|
||||
"udp",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UnmatchedResponsesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].UnmatchedResponsesReceived),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WinsQueries,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].WINSLookupReceived),
|
||||
"forward",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WinsQueries,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].WINSReverseLookupReceived),
|
||||
"reverse",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WinsResponses,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].WINSResponseSent),
|
||||
"forward",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WinsResponses,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].WINSReverseResponseSent),
|
||||
"reverse",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SecureUpdateFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].SecureUpdateFailure),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SecureUpdateReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].SecureUpdateReceived),
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
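The collect functions above build their WQL queries through queryAll and queryAllWhere, which are not part of this diff. A minimal sketch of what such helpers could look like, assuming they are thin wrappers around wmi.CreateQuery from github.com/StackExchange/wmi (names and behaviour are inferred from the call sites, not confirmed here):

// Hypothetical sketch only: queryAll/queryAllWhere as thin wrappers around
// wmi.CreateQuery. The real helpers are defined elsewhere in the repository.
package collector

import "github.com/StackExchange/wmi"

// queryAll builds a "SELECT <fields> FROM <class>" query for the struct
// slice pointed to by dst.
func queryAll(dst interface{}) string {
	return wmi.CreateQuery(dst, "")
}

// queryAllWhere appends a user-supplied condition, as used by the MSMQ
// collector's --collector.msmq.msmq-where flag (assumes the flag value does
// not already contain the WHERE keyword).
func queryAllWhere(dst interface{}, where string) string {
	if where == "" {
		return wmi.CreateQuery(dst, "")
	}
	return wmi.CreateQuery(dst, "WHERE "+where)
}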
collector/hyperv.go (new file, 1423 lines; diff suppressed because it is too large)
collector/iis.go (1478 lines; diff suppressed because it is too large)
collector/logical_disk.go
@@ -1,30 +1,30 @@
|
||||
// returns data points from Win32_PerfRawData_PerfDisk_LogicalDisk
|
||||
// https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
|
||||
// https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"regexp"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["logical_disk"] = NewLogicalDiskCollector
|
||||
}
|
||||
|
||||
const (
|
||||
ticksToSecondsScaleFactor = 1 / 1e7
|
||||
)
|
||||
|
||||
var (
|
||||
volumeWhitelist = flag.String("collector.logical_disk.volume-whitelist", ".+", "Regexp of volumes to whitelist. Volume name must both match whitelist and not match blacklist to be included.")
|
||||
volumeBlacklist = flag.String("collector.logical_disk.volume-blacklist", "", "Regexp of volumes to blacklist. Volume name must both match whitelist and not match blacklist to be included.")
|
||||
volumeWhitelist = kingpin.Flag(
|
||||
"collector.logical_disk.volume-whitelist",
|
||||
"Regexp of volumes to whitelist. Volume name must both match whitelist and not match blacklist to be included.",
|
||||
).Default(".+").String()
|
||||
volumeBlacklist = kingpin.Flag(
|
||||
"collector.logical_disk.volume-blacklist",
|
||||
"Regexp of volumes to blacklist. Volume name must both match whitelist and not match blacklist to be included.",
|
||||
).Default("").String()
|
||||
)
|
||||
|
||||
// A LogicalDiskCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfDisk_LogicalDisk metrics
|
||||
@@ -134,14 +134,17 @@ func NewLogicalDiskCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *LogicalDiskCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *LogicalDiskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Println("[ERROR] failed collecting logical_disk metrics:", desc, err)
|
||||
log.Error("failed collecting logical_disk metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_PerfDisk_LogicalDisk docs:
|
||||
// - https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
|
||||
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
|
||||
type Win32_PerfRawData_PerfDisk_LogicalDisk struct {
|
||||
Name string
|
||||
CurrentDiskQueueLength uint32
|
||||
@@ -159,7 +162,7 @@ type Win32_PerfRawData_PerfDisk_LogicalDisk struct {
|
||||
|
||||
func (c *LogicalDiskCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfDisk_LogicalDisk
|
||||
q := wmi.CreateQuery(&dst, "")
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
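The ticksToSecondsScaleFactor constant introduced above converts raw performance-counter values measured in 100-nanosecond ticks into seconds (one tick is 1e-7 s). A small illustrative calculation with a made-up raw value:

// Illustrative only: converting a hypothetical raw 100ns-tick counter to seconds.
package main

import "fmt"

const ticksToSecondsScaleFactor = 1 / 1e7 // one tick is 100 ns, i.e. 1e-7 seconds

func main() {
	var rawTicks uint64 = 123456789 // made-up counter value
	seconds := float64(rawTicks) * ticksToSecondsScaleFactor
	fmt.Printf("%.4f seconds\n", seconds) // prints 12.3457 seconds
}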
collector/memory.go (new file, 504 lines)
@@ -0,0 +1,504 @@
|
||||
// returns data points from Win32_PerfRawData_PerfOS_Memory
|
||||
// <add link to documentation here> - Win32_PerfRawData_PerfOS_Memory class
|
||||
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["memory"] = NewMemoryCollector
|
||||
}
|
||||
|
||||
// A MemoryCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfOS_Memory metrics
|
||||
type MemoryCollector struct {
|
||||
AvailableBytes *prometheus.Desc
|
||||
CacheBytes *prometheus.Desc
|
||||
CacheBytesPeak *prometheus.Desc
|
||||
CacheFaultsTotal *prometheus.Desc
|
||||
CommitLimit *prometheus.Desc
|
||||
CommittedBytes *prometheus.Desc
|
||||
DemandZeroFaultsTotal *prometheus.Desc
|
||||
FreeAndZeroPageListBytes *prometheus.Desc
|
||||
FreeSystemPageTableEntries *prometheus.Desc
|
||||
ModifiedPageListBytes *prometheus.Desc
|
||||
PageFaultsTotal *prometheus.Desc
|
||||
SwapPageReadsTotal *prometheus.Desc
|
||||
SwapPagesReadTotal *prometheus.Desc
|
||||
SwapPagesWrittenTotal *prometheus.Desc
|
||||
SwapPageOperationsTotal *prometheus.Desc
|
||||
SwapPageWritesTotal *prometheus.Desc
|
||||
PoolNonpagedAllocsTotal *prometheus.Desc
|
||||
PoolNonpagedBytes *prometheus.Desc
|
||||
PoolPagedAllocsTotal *prometheus.Desc
|
||||
PoolPagedBytes *prometheus.Desc
|
||||
PoolPagedResidentBytes *prometheus.Desc
|
||||
StandbyCacheCoreBytes *prometheus.Desc
|
||||
StandbyCacheNormalPriorityBytes *prometheus.Desc
|
||||
StandbyCacheReserveBytes *prometheus.Desc
|
||||
SystemCacheResidentBytes *prometheus.Desc
|
||||
SystemCodeResidentBytes *prometheus.Desc
|
||||
SystemCodeTotalBytes *prometheus.Desc
|
||||
SystemDriverResidentBytes *prometheus.Desc
|
||||
SystemDriverTotalBytes *prometheus.Desc
|
||||
TransitionFaultsTotal *prometheus.Desc
|
||||
TransitionPagesRepurposedTotal *prometheus.Desc
|
||||
WriteCopiesTotal *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewMemoryCollector ...
|
||||
func NewMemoryCollector() (Collector, error) {
|
||||
const subsystem = "memory"
|
||||
|
||||
return &MemoryCollector{
|
||||
AvailableBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "available_bytes"),
|
||||
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
|
||||
" the standby (cached), free and zero page lists (AvailableBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CacheBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cache_bytes"),
|
||||
"(CacheBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CacheBytesPeak: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cache_bytes_peak"),
|
||||
"(CacheBytesPeak)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CacheFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cache_faults_total"),
|
||||
"(CacheFaultsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CommitLimit: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "commit_limit"),
|
||||
"(CommitLimit)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CommittedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "committed_bytes"),
|
||||
"(CommittedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DemandZeroFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "demand_zero_faults_total"),
|
||||
"The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security"+
|
||||
" feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space (DemandZeroFaults)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FreeAndZeroPageListBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "free_and_zero_page_list_bytes"),
|
||||
"(FreeAndZeroPageListBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FreeSystemPageTableEntries: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "free_system_page_table_entries"),
|
||||
"(FreeSystemPageTableEntries)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ModifiedPageListBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "modified_page_list_bytes"),
|
||||
"(ModifiedPageListBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PageFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "page_faults_total"),
|
||||
"(PageFaultsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPageReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_page_reads_total"),
|
||||
"Number of disk page reads (a single read operation reading several pages is still only counted once) (PageReadsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPagesReadTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_pages_read_total"),
|
||||
"Number of pages read across all page reads (ie counting all pages read even if they are read in a single operation) (PagesInputPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPagesWrittenTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_pages_written_total"),
|
||||
"Number of pages written across all page writes (ie counting all pages written even if they are written in a single operation) (PagesOutputPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPageOperationsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_page_operations_total"),
|
||||
"Total number of swap page read and writes (PagesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPageWritesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_page_writes_total"),
|
||||
"Number of disk page writes (a single write operation writing several pages is still only counted once) (PageWritesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolNonpagedAllocsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_nonpaged_allocs_total"),
|
||||
"The number of calls to allocate space in the nonpaged pool. The nonpaged pool is an area of system memory area for objects that cannot be written"+
|
||||
" to disk, and must remain in physical memory as long as they are allocated (PoolNonpagedAllocs)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolNonpagedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_nonpaged_bytes_total"),
|
||||
"(PoolNonpagedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolPagedAllocsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_allocs_total"),
|
||||
"(PoolPagedAllocs)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolPagedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_bytes"),
|
||||
"(PoolPagedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolPagedResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_resident_bytes"),
|
||||
"(PoolPagedResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
StandbyCacheCoreBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_core_bytes"),
|
||||
"(StandbyCacheCoreBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
StandbyCacheNormalPriorityBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_normal_priority_bytes"),
|
||||
"(StandbyCacheNormalPriorityBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
StandbyCacheReserveBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_reserve_bytes"),
|
||||
"(StandbyCacheReserveBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemCacheResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_cache_resident_bytes"),
|
||||
"(SystemCacheResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemCodeResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_code_resident_bytes"),
|
||||
"(SystemCodeResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemCodeTotalBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_code_total_bytes"),
|
||||
"(SystemCodeTotalBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemDriverResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_driver_resident_bytes"),
|
||||
"(SystemDriverResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemDriverTotalBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_driver_total_bytes"),
|
||||
"(SystemDriverTotalBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
TransitionFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transition_faults_total"),
|
||||
"(TransitionFaultsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
TransitionPagesRepurposedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transition_pages_repurposed_total"),
|
||||
"(TransitionPagesRePurposedPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
WriteCopiesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "write_copies_total"),
|
||||
"The number of page faults caused by attempting to write that were satisfied by copying the page from elsewhere in physical memory (WriteCopiesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting memory metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_PerfOS_Memory struct {
|
||||
AvailableBytes uint64
|
||||
AvailableKBytes uint64
|
||||
AvailableMBytes uint64
|
||||
CacheBytes uint64
|
||||
CacheBytesPeak uint64
|
||||
CacheFaultsPersec uint32
|
||||
CommitLimit uint64
|
||||
CommittedBytes uint64
|
||||
DemandZeroFaultsPersec uint32
|
||||
FreeAndZeroPageListBytes uint64
|
||||
FreeSystemPageTableEntries uint32
|
||||
ModifiedPageListBytes uint64
|
||||
PageFaultsPersec uint32
|
||||
PageReadsPersec uint32
|
||||
PagesInputPersec uint32
|
||||
PagesOutputPersec uint32
|
||||
PagesPersec uint32
|
||||
PageWritesPersec uint32
|
||||
PoolNonpagedAllocs uint32
|
||||
PoolNonpagedBytes uint64
|
||||
PoolPagedAllocs uint32
|
||||
PoolPagedBytes uint64
|
||||
PoolPagedResidentBytes uint64
|
||||
StandbyCacheCoreBytes uint64
|
||||
StandbyCacheNormalPriorityBytes uint64
|
||||
StandbyCacheReserveBytes uint64
|
||||
SystemCacheResidentBytes uint64
|
||||
SystemCodeResidentBytes uint64
|
||||
SystemCodeTotalBytes uint64
|
||||
SystemDriverResidentBytes uint64
|
||||
SystemDriverTotalBytes uint64
|
||||
TransitionFaultsPersec uint32
|
||||
TransitionPagesRePurposedPersec uint32
|
||||
WriteCopiesPersec uint32
|
||||
}
|
||||
|
||||
func (c *MemoryCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfOS_Memory
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AvailableBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].AvailableBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CacheBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheBytesPeak,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CacheBytesPeak),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CacheFaultsPersec),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CommitLimit,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CommitLimit),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CommittedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CommittedBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DemandZeroFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].DemandZeroFaultsPersec),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FreeAndZeroPageListBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].FreeAndZeroPageListBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FreeSystemPageTableEntries,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].FreeSystemPageTableEntries),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ModifiedPageListBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].ModifiedPageListBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PageFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PageFaultsPersec),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageReadsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PageReadsPersec),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPagesReadTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PagesInputPersec),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPagesWrittenTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PagesOutputPersec),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageOperationsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PagesPersec),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageWritesTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PageWritesPersec),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolNonpagedAllocsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PoolNonpagedAllocs),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolNonpagedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PoolNonpagedBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedAllocsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PoolPagedAllocs),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PoolPagedBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].PoolPagedResidentBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheCoreBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].StandbyCacheCoreBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheNormalPriorityBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].StandbyCacheNormalPriorityBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheReserveBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].StandbyCacheReserveBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCacheResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SystemCacheResidentBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCodeResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SystemCodeResidentBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCodeTotalBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SystemCodeTotalBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemDriverResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SystemDriverResidentBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemDriverTotalBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SystemDriverTotalBytes),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransitionFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].TransitionFaultsPersec),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransitionPagesRepurposedTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].TransitionPagesRePurposedPersec),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteCopiesTotal,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].WriteCopiesPersec),
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
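All of these collectors follow the same pattern of writing constant metrics to a channel, so they can be exercised without a Prometheus registry. A hedged sketch of a smoke test for the memory collector above (the test name and channel size are arbitrary; it requires a Windows host with WMI available and assumes passing a nil *ScrapeContext is acceptable because this collector does not use it):

// +build windows

package collector

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
)

// Hypothetical smoke test: run the memory collector once and check that it
// emits at least one metric without returning an error.
func TestMemoryCollectorSmoke(t *testing.T) {
	c, err := NewMemoryCollector()
	if err != nil {
		t.Fatal(err)
	}
	ch := make(chan prometheus.Metric, 128) // buffered so collect never blocks
	if err := c.Collect(nil, ch); err != nil {
		t.Fatalf("collect failed: %v", err)
	}
	close(ch)
	n := 0
	for range ch {
		n++
	}
	if n == 0 {
		t.Error("expected at least one metric")
	}
}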
collector/msmq.go (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["msmq"] = NewMSMQCollector
|
||||
}
|
||||
|
||||
var (
|
||||
msmqWhereClause = kingpin.Flag("collector.msmq.msmq-where", "WQL 'where' clause to use in WMI metrics query. Limits the response to the msmqs you specify and reduces the size of the response.").String()
|
||||
)
|
||||
|
||||
// A Win32_PerfRawData_MSMQ_MSMQQueueCollector is a Prometheus collector for WMI Win32_PerfRawData_MSMQ_MSMQQueue metrics
|
||||
type Win32_PerfRawData_MSMQ_MSMQQueueCollector struct {
|
||||
BytesinJournalQueue *prometheus.Desc
|
||||
BytesinQueue *prometheus.Desc
|
||||
MessagesinJournalQueue *prometheus.Desc
|
||||
MessagesinQueue *prometheus.Desc
|
||||
|
||||
queryWhereClause string
|
||||
}
|
||||
|
||||
// NewMSMQCollector ...
|
||||
func NewMSMQCollector() (Collector, error) {
|
||||
const subsystem = "msmq"
|
||||
|
||||
if *msmqWhereClause == "" {
|
||||
log.Warn("No where-clause specified for msmq collector. This will generate a very large number of metrics!")
|
||||
}
|
||||
|
||||
return &Win32_PerfRawData_MSMQ_MSMQQueueCollector{
|
||||
BytesinJournalQueue: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bytes_in_journal_queue"),
|
||||
"Size of queue journal in bytes",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
BytesinQueue: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bytes_in_queue"),
|
||||
"Size of queue in bytes",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
MessagesinJournalQueue: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_in_journal_queue"),
|
||||
"Count messages in queue journal",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
MessagesinQueue: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "messages_in_queue"),
|
||||
"Count messages in queue",
|
||||
[]string{"name"},
|
||||
nil,
|
||||
),
|
||||
queryWhereClause: *msmqWhereClause,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting msmq metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_MSMQ_MSMQQueue struct {
|
||||
Name string
|
||||
|
||||
BytesinJournalQueue uint64
|
||||
BytesinQueue uint64
|
||||
MessagesinJournalQueue uint64
|
||||
MessagesinQueue uint64
|
||||
}
|
||||
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_MSMQ_MSMQQueue
|
||||
q := queryAllWhere(&dst, c.queryWhereClause)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, msmq := range dst {
|
||||
|
||||
if msmq.Name == "Computer Queues" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesinJournalQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.BytesinJournalQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesinQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.BytesinQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MessagesinJournalQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.MessagesinJournalQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MessagesinQueue,
|
||||
prometheus.GaugeValue,
|
||||
float64(msmq.MessagesinQueue),
|
||||
strings.ToLower(msmq.Name),
|
||||
)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
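Each collector registers itself through Factories in its init function. The surrounding plumbing is not part of this diff; a sketch of what it presumably looks like, inferred purely from the call sites above (in particular, the fields of ScrapeContext are not visible here):

// Hypothetical sketch of the shared collector plumbing, inferred from usage.
package collector

import "github.com/prometheus/client_golang/prometheus"

// ScrapeContext carries per-scrape state; its fields are not shown in this diff.
type ScrapeContext struct{}

// Collector is implemented by every per-subsystem collector in this package.
type Collector interface {
	Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error
}

// Factories maps a collector name (e.g. "msmq", "net") to its constructor so
// the exporter can instantiate only the collectors enabled at startup.
var Factories = make(map[string]func() (Collector, error))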
collector/mssql.go (new file, 3560 lines; diff suppressed because it is too large)
collector/net.go (new file, 262 lines)
@@ -0,0 +1,262 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["net"] = NewNetworkCollector
|
||||
}
|
||||
|
||||
var (
|
||||
nicWhitelist = kingpin.Flag(
|
||||
"collector.net.nic-whitelist",
|
||||
"Regexp of NIC:s to whitelist. NIC name must both match whitelist and not match blacklist to be included.",
|
||||
).Default(".+").String()
|
||||
nicBlacklist = kingpin.Flag(
|
||||
"collector.net.nic-blacklist",
|
||||
"Regexp of NIC:s to blacklist. NIC name must both match whitelist and not match blacklist to be included.",
|
||||
).Default("").String()
|
||||
nicNameToUnderscore = regexp.MustCompile("[^a-zA-Z0-9]")
|
||||
)
|
||||
|
||||
// A NetworkCollector is a Prometheus collector for WMI Win32_PerfRawData_Tcpip_NetworkInterface metrics
|
||||
type NetworkCollector struct {
|
||||
BytesReceivedTotal *prometheus.Desc
|
||||
BytesSentTotal *prometheus.Desc
|
||||
BytesTotal *prometheus.Desc
|
||||
PacketsOutboundDiscarded *prometheus.Desc
|
||||
PacketsOutboundErrors *prometheus.Desc
|
||||
PacketsTotal *prometheus.Desc
|
||||
PacketsReceivedDiscarded *prometheus.Desc
|
||||
PacketsReceivedErrors *prometheus.Desc
|
||||
PacketsReceivedTotal *prometheus.Desc
|
||||
PacketsReceivedUnknown *prometheus.Desc
|
||||
PacketsSentTotal *prometheus.Desc
|
||||
CurrentBandwidth *prometheus.Desc
|
||||
|
||||
nicWhitelistPattern *regexp.Regexp
|
||||
nicBlacklistPattern *regexp.Regexp
|
||||
}
|
||||
|
||||
// NewNetworkCollector ...
|
||||
func NewNetworkCollector() (Collector, error) {
|
||||
const subsystem = "net"
|
||||
|
||||
return &NetworkCollector{
|
||||
BytesReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bytes_received_total"),
|
||||
"(Network.BytesReceivedPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
BytesSentTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bytes_sent_total"),
|
||||
"(Network.BytesSentPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
BytesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "bytes_total"),
|
||||
"(Network.BytesTotalPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
PacketsOutboundDiscarded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_outbound_discarded"),
|
||||
"(Network.PacketsOutboundDiscarded)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
PacketsOutboundErrors: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_outbound_errors"),
|
||||
"(Network.PacketsOutboundErrors)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceivedDiscarded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_received_discarded"),
|
||||
"(Network.PacketsReceivedDiscarded)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceivedErrors: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_received_errors"),
|
||||
"(Network.PacketsReceivedErrors)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_received_total"),
|
||||
"(Network.PacketsReceivedPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceivedUnknown: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_received_unknown"),
|
||||
"(Network.PacketsReceivedUnknown)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
PacketsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_total"),
|
||||
"(Network.PacketsPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
PacketsSentTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "packets_sent_total"),
|
||||
"(Network.PacketsSentPerSec)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
CurrentBandwidth: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "current_bandwidth"),
|
||||
"(Network.CurrentBandwidth)",
|
||||
[]string{"nic"},
|
||||
nil,
|
||||
),
|
||||
|
||||
nicWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *nicWhitelist)),
|
||||
nicBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *nicBlacklist)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting net metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mangleNetworkName mangles Network Adapter name (non-alphanumeric to _)
|
||||
// that is used in Win32_PerfRawData_Tcpip_NetworkInterface.
|
||||
func mangleNetworkName(name string) string {
|
||||
return nicNameToUnderscore.ReplaceAllString(name, "_")
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_Tcpip_NetworkInterface docs:
|
||||
// - https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)
|
||||
type Win32_PerfRawData_Tcpip_NetworkInterface struct {
|
||||
BytesReceivedPerSec uint64
|
||||
BytesSentPerSec uint64
|
||||
BytesTotalPerSec uint64
|
||||
Name string
|
||||
PacketsOutboundDiscarded uint64
|
||||
PacketsOutboundErrors uint64
|
||||
PacketsPerSec uint64
|
||||
PacketsReceivedDiscarded uint64
|
||||
PacketsReceivedErrors uint64
|
||||
PacketsReceivedPerSec uint64
|
||||
PacketsReceivedUnknown uint64
|
||||
PacketsSentPerSec uint64
|
||||
CurrentBandwidth uint64
|
||||
}
|
||||
|
||||
func (c *NetworkCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_Tcpip_NetworkInterface
|
||||
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, nic := range dst {
|
||||
if c.nicBlacklistPattern.MatchString(nic.Name) ||
|
||||
!c.nicWhitelistPattern.MatchString(nic.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
name := mangleNetworkName(nic.Name)
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Counters
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesReceivedPerSec),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesSentTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesSentPerSec),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesTotalPerSec),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsOutboundDiscarded,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsOutboundDiscarded),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsOutboundErrors,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsOutboundErrors),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsPerSec),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedDiscarded,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedDiscarded),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedErrors,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedErrors),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedPerSec),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedUnknown,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedUnknown),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsSentTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsSentPerSec),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentBandwidth,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.CurrentBandwidth),
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
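The NIC whitelist and blacklist patterns are wrapped in ^(?:...)$ so that a user-supplied expression must match the whole adapter name. An illustrative snippet with made-up flag values:

// Illustrative only: how the anchored whitelist/blacklist matching behaves.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same anchoring as the net collector: the pattern must cover the whole name.
	whitelist := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", "Intel.*"))  // made-up flag value
	blacklist := regexp.MustCompile(fmt.Sprintf("^(?:%s)$", "isatap.*")) // made-up flag value

	for _, name := range []string{"Intel[R] Dual Band Wireless-AC 8260", "isatap interface"} {
		keep := whitelist.MatchString(name) && !blacklist.MatchString(name)
		fmt.Printf("%-40s kept=%v\n", name, keep)
	}
}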
collector/net_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestNetworkToInstanceName(t *testing.T) {
|
||||
data := map[string]string{
|
||||
"Intel[R] Dual Band Wireless-AC 8260": "Intel_R__Dual_Band_Wireless_AC_8260",
|
||||
}
|
||||
for in, out := range data {
|
||||
got := mangleNetworkName(in)
|
||||
if got != out {
|
||||
t.Error("expected", out, "got", got)
|
||||
}
|
||||
}
|
||||
}
|
||||
collector/netframework_clrexceptions.go (new file, 117 lines)
@@ -0,0 +1,117 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrexceptions"] = NewNETFramework_NETCLRExceptionsCollector
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRExceptionsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics
|
||||
type NETFramework_NETCLRExceptionsCollector struct {
|
||||
NumberofExcepsThrown *prometheus.Desc
|
||||
NumberofFilters *prometheus.Desc
|
||||
NumberofFinallys *prometheus.Desc
|
||||
ThrowToCatchDepth *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRExceptionsCollector ...
|
||||
func NewNETFramework_NETCLRExceptionsCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrexceptions"
|
||||
return &NETFramework_NETCLRExceptionsCollector{
|
||||
NumberofExcepsThrown: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "exceptions_thrown_total"),
|
||||
"Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofFilters: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "exceptions_filters_total"),
|
||||
"Displays the total number of .NET exception filters executed. An exception filter evaluates regardless of whether an exception is handled.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofFinallys: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "exceptions_finallys_total"),
|
||||
"Displays the total number of finally blocks executed. Only the finally blocks executed for an exception are counted; finally blocks on normal code paths are not counted by this counter.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
ThrowToCatchDepth: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "throw_to_catch_depth_total"),
|
||||
"Displays the total number of stack frames traversed, from the frame that threw the exception to the frame that handled the exception.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRExceptions struct {
|
||||
Name string
|
||||
|
||||
NumberofExcepsThrown uint32
|
||||
NumberofExcepsThrownPersec uint32
|
||||
NumberofFiltersPersec uint32
|
||||
NumberofFinallysPersec uint32
|
||||
ThrowToCatchDepthPersec uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRExceptions
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofExcepsThrown,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofExcepsThrown),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofFilters,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofFiltersPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofFinallys,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofFinallysPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ThrowToCatchDepth,
|
||||
prometheus.CounterValue,
|
||||
float64(process.ThrowToCatchDepthPersec),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
collector/netframework_clrinterop.go (new file, 103 lines)
@@ -0,0 +1,103 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrinterop"] = NewNETFramework_NETCLRInteropCollector
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRInteropCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRInterop metrics
|
||||
type NETFramework_NETCLRInteropCollector struct {
|
||||
NumberofCCWs *prometheus.Desc
|
||||
Numberofmarshalling *prometheus.Desc
|
||||
NumberofStubs *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRInteropCollector ...
|
||||
func NewNETFramework_NETCLRInteropCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrinterop"
|
||||
return &NETFramework_NETCLRInteropCollector{
|
||||
NumberofCCWs: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "com_callable_wrappers_total"),
|
||||
"Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
Numberofmarshalling: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interop_marshalling_total"),
|
||||
"Displays the total number of times arguments and return values have been marshaled from managed to unmanaged code, and vice versa, since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofStubs: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interop_stubs_created_total"),
|
||||
"Displays the current number of stubs created by the common language runtime. Stubs are responsible for marshaling arguments and return values from managed to unmanaged code, and vice versa, during a COM interop call or a platform invoke call.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRInteropCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrinterop metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRInterop struct {
|
||||
Name string
|
||||
|
||||
NumberofCCWs uint32
|
||||
Numberofmarshalling uint32
|
||||
NumberofStubs uint32
|
||||
NumberofTLBexportsPersec uint32
|
||||
NumberofTLBimportsPersec uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRInteropCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRInterop
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofCCWs,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofCCWs),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Numberofmarshalling,
|
||||
prometheus.CounterValue,
|
||||
float64(process.Numberofmarshalling),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofStubs,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofStubs),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
collector/netframework_clrjit.go (new file, 119 lines)
@@ -0,0 +1,119 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrjit"] = NewNETFramework_NETCLRJitCollector
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRJitCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRJit metrics
|
||||
type NETFramework_NETCLRJitCollector struct {
|
||||
NumberofMethodsJitted *prometheus.Desc
|
||||
TimeinJit *prometheus.Desc
|
||||
StandardJitFailures *prometheus.Desc
|
||||
TotalNumberofILBytesJitted *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRJitCollector ...
|
||||
func NewNETFramework_NETCLRJitCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrjit"
|
||||
return &NETFramework_NETCLRJitCollector{
|
||||
NumberofMethodsJitted: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "jit_methods_total"),
|
||||
"Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TimeinJit: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "jit_time_percent"),
|
||||
"Displays the percentage of time spent in JIT compilation. This counter is updated at the end of every JIT compilation phase. A JIT compilation phase occurs when a method and its dependencies are compiled.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
StandardJitFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "jit_standard_failures_total"),
|
||||
"Displays the peak number of methods the JIT compiler has failed to compile since the application started. This failure can occur if the MSIL cannot be verified or if there is an internal error in the JIT compiler.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalNumberofILBytesJitted: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "jit_il_bytes_total"),
|
||||
"Displays the total number of Microsoft intermediate language (MSIL) bytes compiled by the just-in-time (JIT) compiler since the application started",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRJitCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrjit metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRJit struct {
|
||||
Name string
|
||||
|
||||
Frequency_PerfTime uint32
|
||||
ILBytesJittedPersec uint32
|
||||
NumberofILBytesJitted uint32
|
||||
NumberofMethodsJitted uint32
|
||||
PercentTimeinJit uint32
|
||||
StandardJitFailures uint32
|
||||
TotalNumberofILBytesJitted uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRJitCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRJit
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofMethodsJitted,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberofMethodsJitted),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeinJit,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PercentTimeinJit)/float64(process.Frequency_PerfTime),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandardJitFailures,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.StandardJitFailures),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalNumberofILBytesJitted,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalNumberofILBytesJitted),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
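The jit_time_percent gauge above is produced by dividing the raw PercentTimeinJit counter by Frequency_PerfTime. A tiny illustrative calculation with made-up values:

// Illustrative only: made-up raw values for the JIT time fraction.
package main

import "fmt"

func main() {
	var percentTimeinJit uint32 = 2500000   // hypothetical raw counter
	var frequencyPerfTime uint32 = 10000000 // hypothetical performance frequency
	fmt.Println(float64(percentTimeinJit) / float64(frequencyPerfTime)) // 0.25
}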
collector/netframework_clrloading.go (new file, 198 lines)
@@ -0,0 +1,198 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrloading"] = NewNETFramework_NETCLRLoadingCollector
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRLoadingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLoading metrics
|
||||
type NETFramework_NETCLRLoadingCollector struct {
|
||||
BytesinLoaderHeap *prometheus.Desc
|
||||
Currentappdomains *prometheus.Desc
|
||||
CurrentAssemblies *prometheus.Desc
|
||||
CurrentClassesLoaded *prometheus.Desc
|
||||
TotalAppdomains *prometheus.Desc
|
||||
Totalappdomainsunloaded *prometheus.Desc
|
||||
TotalAssemblies *prometheus.Desc
|
||||
TotalClassesLoaded *prometheus.Desc
|
||||
TotalNumberofLoadFailures *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRLoadingCollector ...
|
||||
func NewNETFramework_NETCLRLoadingCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrloading"
|
||||
return &NETFramework_NETCLRLoadingCollector{
|
||||
BytesinLoaderHeap: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "loader_heap_size_bytes"),
|
||||
"Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
Currentappdomains: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "appdomains_loaded_current"),
|
||||
"Displays the current number of application domains loaded in this application.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
CurrentAssemblies: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "assemblies_loaded_current"),
|
||||
"Displays the current number of assemblies loaded across all application domains in the currently running application. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
CurrentClassesLoaded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "classes_loaded_current"),
|
||||
"Displays the current number of classes loaded in all assemblies.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalAppdomains: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "appdomains_loaded_total"),
|
||||
"Displays the peak number of application domains loaded since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
Totalappdomainsunloaded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "appdomains_unloaded_total"),
|
||||
"Displays the total number of application domains unloaded since the application started. If an application domain is loaded and unloaded multiple times, this counter increments each time the application domain is unloaded.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalAssemblies: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "assemblies_loaded_total"),
|
||||
"Displays the total number of assemblies loaded since the application started. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalClassesLoaded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "classes_loaded_total"),
|
||||
"Displays the cumulative number of classes loaded in all assemblies since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalNumberofLoadFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "class_load_failures_total"),
|
||||
"Displays the peak number of classes that have failed to load since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRLoadingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrloading metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRLoading struct {
|
||||
Name string
|
||||
|
||||
AssemblySearchLength uint32
|
||||
BytesinLoaderHeap uint64
|
||||
Currentappdomains uint32
|
||||
CurrentAssemblies uint32
|
||||
CurrentClassesLoaded uint32
|
||||
PercentTimeLoading uint64
|
||||
Rateofappdomains uint32
|
||||
Rateofappdomainsunloaded uint32
|
||||
RateofAssemblies uint32
|
||||
RateofClassesLoaded uint32
|
||||
RateofLoadFailures uint32
|
||||
TotalAppdomains uint32
|
||||
Totalappdomainsunloaded uint32
|
||||
TotalAssemblies uint32
|
||||
TotalClassesLoaded uint32
|
||||
TotalNumberofLoadFailures uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRLoadingCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRLoading
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesinLoaderHeap,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.BytesinLoaderHeap),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Currentappdomains,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Currentappdomains),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentAssemblies,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.CurrentAssemblies),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentClassesLoaded,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.CurrentClassesLoaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalAppdomains,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalAppdomains),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Totalappdomainsunloaded,
|
||||
prometheus.CounterValue,
|
||||
float64(process.Totalappdomainsunloaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalAssemblies,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalAssemblies),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalClassesLoaded,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalClassesLoaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalNumberofLoadFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalNumberofLoadFailures),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
164
collector/netframework_clrlocksandthreads.go
Normal file
@@ -0,0 +1,164 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrlocksandthreads"] = NewNETFramework_NETCLRLocksAndThreadsCollector
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRLocksAndThreadsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads metrics
|
||||
type NETFramework_NETCLRLocksAndThreadsCollector struct {
|
||||
CurrentQueueLength *prometheus.Desc
|
||||
NumberofcurrentlogicalThreads *prometheus.Desc
|
||||
NumberofcurrentphysicalThreads *prometheus.Desc
|
||||
Numberofcurrentrecognizedthreads *prometheus.Desc
|
||||
Numberoftotalrecognizedthreads *prometheus.Desc
|
||||
QueueLengthPeak *prometheus.Desc
|
||||
TotalNumberofContentions *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRLocksAndThreadsCollector ...
|
||||
func NewNETFramework_NETCLRLocksAndThreadsCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrlocksandthreads"
|
||||
return &NETFramework_NETCLRLocksAndThreadsCollector{
|
||||
CurrentQueueLength: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "current_queue_length"),
|
||||
"Displays the total number of threads that are currently waiting to acquire a managed lock in the application.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofcurrentlogicalThreads: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "current_logical_threads"),
|
||||
"Displays the number of current managed thread objects in the application. This counter maintains the count of both running and stopped threads. ",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofcurrentphysicalThreads: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "physical_threads_current"),
|
||||
"Displays the number of native operating system threads created and owned by the common language runtime to act as underlying threads for managed thread objects. This counter's value does not include the threads used by the runtime in its internal operations; it is a subset of the threads in the operating system process.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
Numberofcurrentrecognizedthreads: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "recognized_threads_current"),
|
||||
"Displays the number of threads that are currently recognized by the runtime. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
Numberoftotalrecognizedthreads: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "recognized_threads_total"),
|
||||
"Displays the total number of threads that have been recognized by the runtime since the application started. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
QueueLengthPeak: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "queue_length_total"),
|
||||
"Displays the total number of threads that waited to acquire a managed lock since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalNumberofContentions: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "contentions_total"),
|
||||
"Displays the total number of times that threads in the runtime have attempted to acquire a managed lock unsuccessfully.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads struct {
|
||||
Name string
|
||||
|
||||
ContentionRatePersec uint32
|
||||
CurrentQueueLength uint32
|
||||
NumberofcurrentlogicalThreads uint32
|
||||
NumberofcurrentphysicalThreads uint32
|
||||
Numberofcurrentrecognizedthreads uint32
|
||||
Numberoftotalrecognizedthreads uint32
|
||||
QueueLengthPeak uint32
|
||||
QueueLengthPersec uint32
|
||||
RateOfRecognizedThreadsPersec uint32
|
||||
TotalNumberofContentions uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRLocksAndThreadsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.CurrentQueueLength),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofcurrentlogicalThreads,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberofcurrentlogicalThreads),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofcurrentphysicalThreads,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberofcurrentphysicalThreads),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Numberofcurrentrecognizedthreads,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Numberofcurrentrecognizedthreads),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Numberoftotalrecognizedthreads,
|
||||
prometheus.CounterValue,
|
||||
float64(process.Numberoftotalrecognizedthreads),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.QueueLengthPeak,
|
||||
prometheus.CounterValue,
|
||||
float64(process.QueueLengthPeak),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalNumberofContentions,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalNumberofContentions),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
302
collector/netframework_clrmemory.go
Normal file
@@ -0,0 +1,302 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrmemory"] = NewNETFramework_NETCLRMemoryCollector
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRMemoryCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRMemory metrics
|
||||
type NETFramework_NETCLRMemoryCollector struct {
|
||||
AllocatedBytes *prometheus.Desc
|
||||
FinalizationSurvivors *prometheus.Desc
|
||||
HeapSize *prometheus.Desc
|
||||
PromotedBytes *prometheus.Desc
|
||||
NumberGCHandles *prometheus.Desc
|
||||
NumberCollections *prometheus.Desc
|
||||
NumberInducedGC *prometheus.Desc
|
||||
NumberofPinnedObjects *prometheus.Desc
|
||||
NumberofSinkBlocksinuse *prometheus.Desc
|
||||
NumberTotalCommittedBytes *prometheus.Desc
|
||||
NumberTotalreservedBytes *prometheus.Desc
|
||||
TimeinGC *prometheus.Desc
|
||||
PromotedFinalizationMemoryfromGen0 *prometheus.Desc
|
||||
PromotedMemoryfromGen0 *prometheus.Desc
|
||||
PromotedMemoryfromGen1 *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRMemoryCollector ...
|
||||
func NewNETFramework_NETCLRMemoryCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrmemory"
|
||||
return &NETFramework_NETCLRMemoryCollector{
|
||||
AllocatedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "allocated_bytes_total"),
|
||||
"Displays the total number of bytes allocated on the garbage collection heap.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
FinalizationSurvivors: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "finalization_survivors"),
|
||||
"Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
HeapSize: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "heap_size_bytes"),
|
||||
"Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated.",
|
||||
[]string{"process", "area"},
|
||||
nil,
|
||||
),
|
||||
PromotedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "promoted_bytes"),
|
||||
"Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection.",
|
||||
[]string{"process", "area"},
|
||||
nil,
|
||||
),
|
||||
NumberGCHandles: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "number_gc_handles"),
|
||||
"Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberCollections: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "collections_total"),
|
||||
"Displays the number of times the generation objects are garbage collected since the application started.",
|
||||
[]string{"process", "area"},
|
||||
nil,
|
||||
),
|
||||
NumberInducedGC: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "induced_gc_total"),
|
||||
"Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofPinnedObjects: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "number_pinned_objects"),
|
||||
"Displays the number of pinned objects encountered in the last garbage collection.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberofSinkBlocksinuse: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "number_sink_blocksinuse"),
|
||||
"Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberTotalCommittedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "committed_bytes"),
|
||||
"Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
NumberTotalreservedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "reserved_bytes"),
|
||||
"Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TimeinGC: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "gc_time_percent"),
|
||||
"Displays the percentage of time that was spent performing a garbage collection in the last sample.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRMemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrmemory metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRMemory struct {
|
||||
Name string
|
||||
|
||||
AllocatedBytesPersec uint64
|
||||
FinalizationSurvivors uint64
|
||||
Frequency_PerfTime uint64
|
||||
Gen0heapsize uint64
|
||||
Gen0PromotedBytesPerSec uint64
|
||||
Gen1heapsize uint64
|
||||
Gen1PromotedBytesPerSec uint64
|
||||
Gen2heapsize uint64
|
||||
LargeObjectHeapsize uint64
|
||||
NumberBytesinallHeaps uint64
|
||||
NumberGCHandles uint64
|
||||
NumberGen0Collections uint64
|
||||
NumberGen1Collections uint64
|
||||
NumberGen2Collections uint64
|
||||
NumberInducedGC uint64
|
||||
NumberofPinnedObjects uint64
|
||||
NumberofSinkBlocksinuse uint64
|
||||
NumberTotalcommittedBytes uint64
|
||||
NumberTotalreservedBytes uint64
|
||||
PercentTimeinGC uint32
|
||||
ProcessID uint64
|
||||
PromotedFinalizationMemoryfromGen0 uint64
|
||||
PromotedMemoryfromGen0 uint64
|
||||
PromotedMemoryfromGen1 uint64
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRMemoryCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRMemory
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AllocatedBytes,
|
||||
prometheus.CounterValue,
|
||||
float64(process.AllocatedBytesPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FinalizationSurvivors,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.FinalizationSurvivors),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.HeapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen0heapsize),
|
||||
process.Name,
|
||||
"Gen0",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PromotedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen0PromotedBytesPerSec),
|
||||
process.Name,
|
||||
"Gen0",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.HeapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen1heapsize),
|
||||
process.Name,
|
||||
"Gen1",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PromotedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen1PromotedBytesPerSec),
|
||||
process.Name,
|
||||
"Gen1",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.HeapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Gen2heapsize),
|
||||
process.Name,
|
||||
"Gen2",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.HeapSize,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.LargeObjectHeapsize),
|
||||
process.Name,
|
||||
"LOH",
|
||||
)
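// For illustration: heap_size_bytes is emitted once per "area" label value
// (Gen0, Gen1, Gen2, LOH) for every process, so a per-process total can be
// approximated in PromQL (assuming the Namespace constant is "wmi") with
// sum by (process) (wmi_netframework_clrmemory_heap_size_bytes).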
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberGCHandles,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberGCHandles),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberCollections,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberGen0Collections),
|
||||
process.Name,
|
||||
"Gen0",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberCollections,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberGen1Collections),
|
||||
process.Name,
|
||||
"Gen1",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberCollections,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberGen2Collections),
|
||||
process.Name,
|
||||
"Gen2",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberInducedGC,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberInducedGC),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofPinnedObjects,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberofPinnedObjects),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberofSinkBlocksinuse,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberofSinkBlocksinuse),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberTotalCommittedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberTotalcommittedBytes),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberTotalreservedBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.NumberTotalreservedBytes),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeinGC,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PercentTimeinGC)/float64(process.Frequency_PerfTime),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
147
collector/netframework_clrremoting.go
Normal file
@@ -0,0 +1,147 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrremoting"] = NewNETFramework_NETCLRRemotingCollector
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRRemotingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRRemoting metrics
|
||||
type NETFramework_NETCLRRemotingCollector struct {
|
||||
Channels *prometheus.Desc
|
||||
ContextBoundClassesLoaded *prometheus.Desc
|
||||
ContextBoundObjects *prometheus.Desc
|
||||
ContextProxies *prometheus.Desc
|
||||
Contexts *prometheus.Desc
|
||||
TotalRemoteCalls *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRRemotingCollector ...
|
||||
func NewNETFramework_NETCLRRemotingCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrremoting"
|
||||
return &NETFramework_NETCLRRemotingCollector{
|
||||
Channels: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "channels_total"),
|
||||
"Displays the total number of remoting channels registered across all application domains since application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
ContextBoundClassesLoaded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "context_bound_classes_loaded"),
|
||||
"Displays the current number of context-bound classes that are loaded.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
ContextBoundObjects: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "context_bound_objects_total"),
|
||||
"Displays the total number of context-bound objects allocated.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
ContextProxies: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "context_proxies_total"),
|
||||
"Displays the total number of remoting proxy objects in this process since it started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
Contexts: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "contexts"),
|
||||
"Displays the current number of remoting contexts in the application.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalRemoteCalls: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "remote_calls_total"),
|
||||
"Displays the total number of remote procedure calls invoked since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRRemotingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrremoting metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRRemoting struct {
|
||||
Name string
|
||||
|
||||
Channels uint32
|
||||
ContextBoundClassesLoaded uint32
|
||||
ContextBoundObjectsAllocPersec uint32
|
||||
ContextProxies uint32
|
||||
Contexts uint32
|
||||
RemoteCallsPersec uint32
|
||||
TotalRemoteCalls uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRRemotingCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRRemoting
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Channels,
|
||||
prometheus.CounterValue,
|
||||
float64(process.Channels),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContextBoundClassesLoaded,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.ContextBoundClassesLoaded),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContextBoundObjects,
|
||||
prometheus.CounterValue,
|
||||
float64(process.ContextBoundObjectsAllocPersec),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContextProxies,
|
||||
prometheus.CounterValue,
|
||||
float64(process.ContextProxies),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Contexts,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.Contexts),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalRemoteCalls,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalRemoteCalls),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
118
collector/netframework_clrsecurity.go
Normal file
@@ -0,0 +1,118 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrsecurity"] = NewNETFramework_NETCLRSecurityCollector
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRSecurityCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRSecurity metrics
|
||||
type NETFramework_NETCLRSecurityCollector struct {
|
||||
NumberLinkTimeChecks *prometheus.Desc
|
||||
TimeinRTchecks *prometheus.Desc
|
||||
StackWalkDepth *prometheus.Desc
|
||||
TotalRuntimeChecks *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewNETFramework_NETCLRSecurityCollector ...
|
||||
func NewNETFramework_NETCLRSecurityCollector() (Collector, error) {
|
||||
const subsystem = "netframework_clrsecurity"
|
||||
return &NETFramework_NETCLRSecurityCollector{
|
||||
NumberLinkTimeChecks: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "link_time_checks_total"),
|
||||
"Displays the total number of link-time code access security checks since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TimeinRTchecks: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "rt_checks_time_percent"),
|
||||
"Displays the percentage of time spent performing runtime code access security checks in the last sample.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
StackWalkDepth: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "stack_walk_depth"),
|
||||
"Displays the depth of the stack during that last runtime code access security check.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
TotalRuntimeChecks: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "runtime_checks_total"),
|
||||
"Displays the total number of runtime code access security checks performed since the application started.",
|
||||
[]string{"process"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRSecurityCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_NETFramework_NETCLRSecurity struct {
|
||||
Name string
|
||||
|
||||
Frequency_PerfTime uint32
|
||||
NumberLinkTimeChecks uint32
|
||||
PercentTimeinRTchecks uint32
|
||||
PercentTimeSigAuthenticating uint64
|
||||
StackWalkDepth uint32
|
||||
TotalRuntimeChecks uint32
|
||||
}
|
||||
|
||||
func (c *NETFramework_NETCLRSecurityCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_NETFramework_NETCLRSecurity
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Global_" {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.NumberLinkTimeChecks,
|
||||
prometheus.CounterValue,
|
||||
float64(process.NumberLinkTimeChecks),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeinRTchecks,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PercentTimeinRTchecks)/float64(process.Frequency_PerfTime),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StackWalkDepth,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.StackWalkDepth),
|
||||
process.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalRuntimeChecks,
|
||||
prometheus.CounterValue,
|
||||
float64(process.TotalRuntimeChecks),
|
||||
process.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
108
collector/os.go
@@ -1,106 +1,110 @@
|
||||
// returns data points from Win32_OperatingSystem
|
||||
// https://msdn.microsoft.com/en-us/library/aa394239 - Win32_OperatingSystem class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"log"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["os"] = NewOSCollector
|
||||
}
|
||||
|
||||
// A OSCollector is a Prometheus collector for WMI Win32_OperatingSystem metrics
|
||||
// A OSCollector is a Prometheus collector for WMI metrics
|
||||
type OSCollector struct {
|
||||
PhysicalMemoryFreeBytes *prometheus.Desc
|
||||
PagingFreeBytes *prometheus.Desc
|
||||
VirtualMemoryFreeBytes *prometheus.Desc
|
||||
ProcessesMax *prometheus.Desc
|
||||
ProcessMemoryMaxBytes *prometheus.Desc
|
||||
ProcessesLimit *prometheus.Desc
|
||||
ProcessMemoryLimitBytes *prometheus.Desc
|
||||
Processes *prometheus.Desc
|
||||
Users *prometheus.Desc
|
||||
PagingMaxBytes *prometheus.Desc
|
||||
PagingLimitBytes *prometheus.Desc
|
||||
VirtualMemoryBytes *prometheus.Desc
|
||||
VisibleMemoryBytes *prometheus.Desc
|
||||
Time *prometheus.Desc
|
||||
Timezone *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewOSCollector ...
|
||||
func NewOSCollector() (Collector, error) {
|
||||
|
||||
const subsystem = "os"
|
||||
|
||||
return &OSCollector{
|
||||
|
||||
PagingMaxBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "paging_max_bytes"),
|
||||
"SizeStoredInPagingFiles",
|
||||
PagingLimitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "paging_limit_bytes"),
|
||||
"OperatingSystem.SizeStoredInPagingFiles",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
|
||||
PagingFreeBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "paging_free_bytes"),
|
||||
"FreeSpaceInPagingFiles",
|
||||
"OperatingSystem.FreeSpaceInPagingFiles",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
|
||||
PhysicalMemoryFreeBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "physical_memory_free_bytes"),
|
||||
"FreePhysicalMemory",
|
||||
"OperatingSystem.FreePhysicalMemory",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
|
||||
Time: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "time"),
|
||||
"OperatingSystem.LocalDateTime",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
Timezone: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "timezone"),
|
||||
"OperatingSystem.LocalDateTime",
|
||||
[]string{"timezone"},
|
||||
nil,
|
||||
),
|
||||
Processes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processes"),
|
||||
"NumberOfProcesses",
|
||||
"OperatingSystem.NumberOfProcesses",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
|
||||
ProcessesMax: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processes_max"),
|
||||
"MaxNumberOfProcesses",
|
||||
ProcessesLimit: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processes_limit"),
|
||||
"OperatingSystem.MaxNumberOfProcesses",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
|
||||
ProcessMemoryMaxBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "process_memory_max_bytes"),
|
||||
"MaxProcessMemorySize",
|
||||
ProcessMemoryLimitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "process_memory_limix_bytes"),
|
||||
"OperatingSystem.MaxProcessMemorySize",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
|
||||
Users: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "users"),
|
||||
"NumberOfUsers",
|
||||
"OperatingSystem.NumberOfUsers",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
|
||||
VirtualMemoryBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "virtual_memory_bytes"),
|
||||
"TotalVirtualMemorySize",
|
||||
"OperatingSystem.TotalVirtualMemorySize",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
|
||||
VisibleMemoryBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "visible_memory_bytes"),
|
||||
"TotalVisibleMemorySize",
|
||||
"OperatingSystem.TotalVisibleMemorySize",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
|
||||
VirtualMemoryFreeBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "virtual_memory_free_bytes"),
|
||||
"FreeVirtualMemory",
|
||||
"OperatingSystem.FreeVirtualMemory",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
@@ -109,14 +113,16 @@ func NewOSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *OSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *OSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Println("[ERROR] failed collecting os metrics:", desc, err)
|
||||
log.Error("failed collecting os metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_OperatingSystem docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394239 - Win32_OperatingSystem class
|
||||
type Win32_OperatingSystem struct {
|
||||
FreePhysicalMemory uint64
|
||||
FreeSpaceInPagingFiles uint64
|
||||
@@ -128,21 +134,43 @@ type Win32_OperatingSystem struct {
|
||||
SizeStoredInPagingFiles uint64
|
||||
TotalVirtualMemorySize uint64
|
||||
TotalVisibleMemorySize uint64
|
||||
LocalDateTime time.Time
|
||||
}
|
||||
|
||||
func (c *OSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_OperatingSystem
|
||||
q := wmi.CreateQuery(&dst, "")
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PhysicalMemoryFreeBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].FreePhysicalMemory*1024), // KiB -> bytes
|
||||
)
|
||||
|
||||
time := dst[0].LocalDateTime
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Time,
|
||||
prometheus.GaugeValue,
|
||||
float64(time.Unix()),
|
||||
)
|
||||
|
||||
timezoneName, _ := time.Zone()
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Timezone,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
timezoneName,
|
||||
)
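// For illustration, assuming the Namespace constant is "wmi": the two samples
// above surface as a Unix-timestamp gauge (wmi_os_time) and an "info"-style
// constant series, wmi_os_timezone{timezone="<local zone name>"} 1, whose
// only payload is the label value.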
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PagingFreeBytes,
|
||||
prometheus.GaugeValue,
|
||||
@@ -156,13 +184,13 @@ func (c *OSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, er
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessesMax,
|
||||
c.ProcessesLimit,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].MaxNumberOfProcesses),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessMemoryMaxBytes,
|
||||
c.ProcessMemoryLimitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].MaxProcessMemorySize*1024), // KiB -> bytes
|
||||
)
|
||||
@@ -180,7 +208,7 @@ func (c *OSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, er
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PagingMaxBytes,
|
||||
c.PagingLimitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].SizeStoredInPagingFiles*1024), // KiB -> bytes
|
||||
)
|
||||
|
||||
89
collector/perflib.go
Normal file
@@ -0,0 +1,89 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
perflibCollector "github.com/leoluk/perflib_exporter/collector"
|
||||
"github.com/leoluk/perflib_exporter/perflib"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func getPerflibSnapshot() (map[string]*perflib.PerfObject, error) {
|
||||
objects, err := perflib.QueryPerformanceData("Global")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
indexed := make(map[string]*perflib.PerfObject)
|
||||
for _, obj := range objects {
|
||||
indexed[obj.Name] = obj
|
||||
}
|
||||
return indexed, nil
|
||||
}
|
||||
|
||||
func unmarshalObject(obj *perflib.PerfObject, vs interface{}) error {
|
||||
if obj == nil {
|
||||
return fmt.Errorf("counter not found")
|
||||
}
|
||||
rv := reflect.ValueOf(vs)
|
||||
if rv.Kind() != reflect.Ptr || rv.IsNil() {
|
||||
return fmt.Errorf("%v is nil or not a pointer to slice", reflect.TypeOf(vs))
|
||||
}
|
||||
ev := rv.Elem()
|
||||
if ev.Kind() != reflect.Slice {
|
||||
return fmt.Errorf("%v is not slice", reflect.TypeOf(vs))
|
||||
}
|
||||
|
||||
// Ensure sufficient length
|
||||
if ev.Cap() < len(obj.Instances) {
|
||||
nvs := reflect.MakeSlice(ev.Type(), len(obj.Instances), len(obj.Instances))
|
||||
ev.Set(nvs)
|
||||
}
|
||||
|
||||
for idx, instance := range obj.Instances {
|
||||
target := ev.Index(idx)
|
||||
rt := target.Type()
|
||||
|
||||
counters := make(map[string]*perflib.PerfCounter, len(instance.Counters))
|
||||
for _, ctr := range instance.Counters {
|
||||
if ctr.Def.IsBaseValue && !ctr.Def.IsNanosecondCounter {
|
||||
counters[ctr.Def.Name+"_Base"] = ctr
|
||||
} else {
|
||||
counters[ctr.Def.Name] = ctr
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < target.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
tag := f.Tag.Get("perflib")
|
||||
if tag == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
ctr, found := counters[tag]
|
||||
if !found {
|
||||
log.Debugf("missing counter %q, has %v", tag, counters)
|
||||
return fmt.Errorf("could not find counter %q on instance", tag)
|
||||
}
|
||||
if !target.Field(i).CanSet() {
|
||||
return fmt.Errorf("tagged field %v cannot be written to", f)
|
||||
}
|
||||
|
||||
switch ctr.Def.CounterType {
|
||||
case perflibCollector.PERF_ELAPSED_TIME:
|
||||
target.Field(i).SetFloat(float64(ctr.Value-windowsEpoch) / float64(obj.Frequency))
|
||||
case perflibCollector.PERF_100NSEC_TIMER, perflibCollector.PERF_PRECISION_100NS_TIMER:
|
||||
target.Field(i).SetFloat(float64(ctr.Value) * ticksToSecondsScaleFactor)
|
||||
default:
|
||||
target.Field(i).SetFloat(float64(ctr.Value))
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Name != "" && target.FieldByName("Name").CanSet() {
|
||||
target.FieldByName("Name").SetString(instance.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
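// A minimal usage sketch (an illustration, not part of the original file): a
// collector declares a struct whose float64 fields carry `perflib:"..."` tags
// matching counter names, takes a snapshot, and lets unmarshalObject fill one
// slice element per instance. The object name "System" and counter name
// "Processes" below are assumptions for the example.

type exampleSystemCounters struct {
	Name      string
	Processes float64 `perflib:"Processes"`
}

func exampleScrape() error {
	objects, err := getPerflibSnapshot()
	if err != nil {
		return err
	}
	var rows []exampleSystemCounters
	// One element per perflib instance; tagged fields are set via reflection.
	return unmarshalObject(objects["System"], &rows)
}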
|
||||
401
collector/process.go
Normal file
@@ -0,0 +1,401 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["process"] = NewProcessCollector
|
||||
}
|
||||
|
||||
var (
|
||||
processWhereClause = kingpin.Flag(
|
||||
"collector.process.processes-where",
|
||||
"WQL 'where' clause to use in WMI metrics query. Limits the response to the processes you specify and reduces the size of the response.",
|
||||
).Default("").String()
|
||||
)
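// For illustration (the binary name here is an assumption), the flag above is
// passed as a WQL predicate against Win32_PerfRawData_PerfProc_Process, e.g.:
//
//	wmi_exporter.exe --collector.process.processes-where="Name LIKE 'firefox%'"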
|
||||
|
||||
// A ProcessCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfProc_Process metrics
|
||||
type ProcessCollector struct {
|
||||
StartTime *prometheus.Desc
|
||||
CPUTimeTotal *prometheus.Desc
|
||||
HandleCount *prometheus.Desc
|
||||
IOBytesTotal *prometheus.Desc
|
||||
IOOperationsTotal *prometheus.Desc
|
||||
PageFaultsTotal *prometheus.Desc
|
||||
PageFileBytes *prometheus.Desc
|
||||
PoolBytes *prometheus.Desc
|
||||
PriorityBase *prometheus.Desc
|
||||
PrivateBytes *prometheus.Desc
|
||||
ThreadCount *prometheus.Desc
|
||||
VirtualBytes *prometheus.Desc
|
||||
WorkingSet *prometheus.Desc
|
||||
|
||||
queryWhereClause string
|
||||
}
|
||||
|
||||
// NewProcessCollector ...
|
||||
func NewProcessCollector() (Collector, error) {
|
||||
const subsystem = "process"
|
||||
|
||||
if *processWhereClause == "" {
|
||||
log.Warn("No where-clause specified for process collector. This will generate a very large number of metrics!")
|
||||
}
|
||||
|
||||
return &ProcessCollector{
|
||||
StartTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "start_time"),
|
||||
"Time of process start.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
CPUTimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_time_total"),
|
||||
"Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user). An instruction is the basic unit of execution in a computer, a thread is the object that executes instructions, and a process is the object created when a program is run. Code executed to handle some hardware interrupts and trap conditions is included in this count.",
|
||||
[]string{"process", "process_id", "creating_process_id", "mode"},
|
||||
nil,
|
||||
),
|
||||
HandleCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "handle_count"),
|
||||
"Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
IOBytesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "io_bytes_total"),
|
||||
"Bytes issued to I/O operations in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. ",
|
||||
[]string{"process", "process_id", "creating_process_id", "mode"},
|
||||
nil,
|
||||
),
|
||||
IOOperationsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "io_operations_total"),
|
||||
"I/O operations issued in different modes (read, write, other). This property counts all I/O activity generated by the process to include file, network, and device I/Os. Read and write mode includes data operations; other mode includes those that do not involve data, such as control operations. ",
|
||||
[]string{"process", "process_id", "creating_process_id", "mode"},
|
||||
nil,
|
||||
),
|
||||
PageFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "page_faults_total"),
|
||||
"Page faults by the threads executing in this process. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. This can cause the page not to be fetched from disk if it is on the standby list and hence already in main memory, or if it is in use by another process with which the page is shared.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
PageFileBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "page_file_bytes"),
|
||||
"Current number of bytes this process has used in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and lack of space in paging files can prevent other processes from allocating memory.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
PoolBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_bytes"),
|
||||
"Pool Bytes is the last observed number of bytes in the paged or nonpaged pool. The nonpaged pool is an area of system memory (physical memory used by the operating system) for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. The paged pool is an area of system memory (physical memory used by the operating system) for objects that can be written to disk when they are not being used. Nonpaged pool bytes is calculated differently than paged pool bytes, so it might not equal the total of paged pool bytes.",
|
||||
[]string{"process", "process_id", "creating_process_id", "pool"},
|
||||
nil,
|
||||
),
|
||||
PriorityBase: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "priority_base"),
|
||||
"Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
PrivateBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "private_bytes"),
|
||||
"Current number of bytes this process has allocated that cannot be shared with other processes.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
ThreadCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "thread_count"),
|
||||
"Number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
VirtualBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "virtual_bytes"),
|
||||
"Current size, in bytes, of the virtual address space that the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite and, by using too much, the process can limit its ability to load libraries.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
WorkingSet: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "working_set"),
|
||||
"Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the working set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from working sets. If they are needed, they are then soft-faulted back into the working set before they leave main memory.",
|
||||
[]string{"process", "process_id", "creating_process_id"},
|
||||
nil,
|
||||
),
|
||||
queryWhereClause: *processWhereClause,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ProcessCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting process metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_PerfProc_Process docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx
|
||||
type Win32_PerfRawData_PerfProc_Process struct {
|
||||
Name string
|
||||
CreatingProcessID uint32
|
||||
ElapsedTime uint64
|
||||
Frequency_Object uint64
|
||||
HandleCount uint32
|
||||
IDProcess uint32
|
||||
IODataBytesPersec uint64
|
||||
IODataOperationsPersec uint64
|
||||
IOOtherBytesPersec uint64
|
||||
IOOtherOperationsPersec uint64
|
||||
IOReadBytesPersec uint64
|
||||
IOReadOperationsPersec uint64
|
||||
IOWriteBytesPersec uint64
|
||||
IOWriteOperationsPersec uint64
|
||||
PageFaultsPersec uint32
|
||||
PageFileBytes uint64
|
||||
PageFileBytesPeak uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentUserTime uint64
|
||||
PoolNonpagedBytes uint32
|
||||
PoolPagedBytes uint32
|
||||
PriorityBase uint32
|
||||
PrivateBytes uint64
|
||||
ThreadCount uint32
|
||||
Timestamp_Object uint64
|
||||
VirtualBytes uint64
|
||||
VirtualBytesPeak uint64
|
||||
WorkingSet uint64
|
||||
WorkingSetPeak uint64
|
||||
WorkingSetPrivate uint64
|
||||
}
|
||||
|
||||
type WorkerProcess struct {
|
||||
AppPoolName string
|
||||
ProcessId uint32
|
||||
}
|
||||
|
||||
func (c *ProcessCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfProc_Process
|
||||
q := queryAllWhere(&dst, c.queryWhereClause)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var dst_wp []WorkerProcess
|
||||
q_wp := queryAll(&dst_wp)
|
||||
if err := wmi.QueryNamespace(q_wp, &dst_wp, "root\\WebAdministration"); err != nil {
|
||||
log.Debugf("Could not query WebAdministration namespace for IIS worker processes: %v. Skipping", err)
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Total" {
|
||||
continue
|
||||
}
|
||||
// Duplicate process names are suffixed with "#" and an index number. Remove that suffix.
|
||||
processName := strings.Split(process.Name, "#")[0]
|
||||
pid := strconv.FormatUint(uint64(process.IDProcess), 10)
|
||||
cpid := strconv.FormatUint(uint64(process.CreatingProcessID), 10)
|
||||
|
||||
for _, wp := range dst_wp {
|
||||
if wp.ProcessId == process.IDProcess {
|
||||
processName = strings.Join([]string{processName, wp.AppPoolName}, "_")
|
||||
break
|
||||
}
|
||||
}
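// For illustration: an IIS worker process (w3wp) serving an application pool
// named "MyPool" (a name assumed for the example) is therefore reported with
// the process label "w3wp_MyPool" instead of a bare "w3wp".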
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StartTime,
|
||||
prometheus.GaugeValue,
|
||||
// convert from Windows timestamp (1 jan 1601) to unix timestamp (1 jan 1970)
|
||||
float64(process.ElapsedTime-116444736000000000)/float64(process.Frequency_Object),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
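// A quick check on the offset above: 1601-01-01 to 1970-01-01 is
// 11,644,473,600 seconds, i.e. 116,444,736,000,000,000 in 100 ns units, which
// matches the constant. The division by Frequency_Object then yields Unix
// seconds, assuming the usual object frequency of 10,000,000 Hz (100 ns ticks).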
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.HandleCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.HandleCount),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CPUTimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(process.PercentPrivilegedTime)*ticksToSecondsScaleFactor,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"privileged",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CPUTimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(process.PercentUserTime)*ticksToSecondsScaleFactor,
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(process.IOOtherBytesPersec),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"other",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOOperationsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(process.IOOtherOperationsPersec),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"other",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(process.IOReadBytesPersec),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"read",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOOperationsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(process.IOReadOperationsPersec),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"read",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(process.IOWriteBytesPersec),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"write",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IOOperationsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(process.IOWriteOperationsPersec),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"write",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PageFaultsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(process.PageFaultsPersec),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PageFileBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PageFileBytes),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PoolNonpagedBytes),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"nonpaged",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PoolPagedBytes),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
"paged",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PriorityBase,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PriorityBase),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PrivateBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.PrivateBytes),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ThreadCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.ThreadCount),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.VirtualBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.VirtualBytes),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WorkingSet,
|
||||
prometheus.GaugeValue,
|
||||
float64(process.WorkingSet),
|
||||
processName,
|
||||
pid,
|
||||
cpid,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
collector/service.go (new file, 169 lines)
@@ -0,0 +1,169 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["service"] = NewserviceCollector
|
||||
}
|
||||
|
||||
var (
|
||||
serviceWhereClause = kingpin.Flag(
|
||||
"collector.service.services-where",
|
||||
"WQL 'where' clause to use in WMI metrics query. Limits the response to the services you specify and reduces the size of the response.",
|
||||
).Default("").String()
|
||||
)
|
||||
|
||||
// A serviceCollector is a Prometheus collector for WMI Win32_Service metrics
|
||||
type serviceCollector struct {
|
||||
State *prometheus.Desc
|
||||
StartMode *prometheus.Desc
|
||||
Status *prometheus.Desc
|
||||
|
||||
queryWhereClause string
|
||||
}
|
||||
|
||||
// NewserviceCollector ...
|
||||
func NewserviceCollector() (Collector, error) {
|
||||
const subsystem = "service"
|
||||
|
||||
if *serviceWhereClause == "" {
|
||||
log.Warn("No where-clause specified for service collector. This will generate a very large number of metrics!")
|
||||
}
|
||||
|
||||
return &serviceCollector{
|
||||
State: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "state"),
|
||||
"The state of the service (State)",
|
||||
[]string{"name", "state"},
|
||||
nil,
|
||||
),
|
||||
StartMode: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "start_mode"),
|
||||
"The start mode of the service (StartMode)",
|
||||
[]string{"name", "start_mode"},
|
||||
nil,
|
||||
),
|
||||
Status: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "status"),
|
||||
"The status of the service (Status)",
|
||||
[]string{"name", "status"},
|
||||
nil,
|
||||
),
|
||||
queryWhereClause: *serviceWhereClause,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting service metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_Service docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx
|
||||
type Win32_Service struct {
|
||||
Name string
|
||||
State string
|
||||
Status string
|
||||
StartMode string
|
||||
}
|
||||
|
||||
var (
|
||||
allStates = []string{
|
||||
"stopped",
|
||||
"start pending",
|
||||
"stop pending",
|
||||
"running",
|
||||
"continue pending",
|
||||
"pause pending",
|
||||
"paused",
|
||||
"unknown",
|
||||
}
|
||||
allStartModes = []string{
|
||||
"boot",
|
||||
"system",
|
||||
"auto",
|
||||
"manual",
|
||||
"disabled",
|
||||
}
|
||||
allStatuses = []string{
|
||||
"ok",
|
||||
"error",
|
||||
"degraded",
|
||||
"unknown",
|
||||
"pred fail",
|
||||
"starting",
|
||||
"stopping",
|
||||
"service",
|
||||
"stressed",
|
||||
"nonrecover",
|
||||
"no contact",
|
||||
"lost comm",
|
||||
}
|
||||
)
|
||||
|
||||
func (c *serviceCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_Service
|
||||
q := queryAllWhere(&dst, c.queryWhereClause)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, service := range dst {
|
||||
for _, state := range allStates {
|
||||
isCurrentState := 0.0
|
||||
if state == strings.ToLower(service.State) {
|
||||
isCurrentState = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.State,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentState,
|
||||
strings.ToLower(service.Name),
|
||||
state,
|
||||
)
|
||||
}
|
||||
|
||||
for _, startMode := range allStartModes {
|
||||
isCurrentStartMode := 0.0
|
||||
if startMode == strings.ToLower(service.StartMode) {
|
||||
isCurrentStartMode = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StartMode,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentStartMode,
|
||||
strings.ToLower(service.Name),
|
||||
startMode,
|
||||
)
|
||||
}
|
||||
|
||||
for _, status := range allStatuses {
|
||||
isCurrentStatus := 0.0
|
||||
if status == strings.ToLower(service.Status) {
|
||||
isCurrentStatus = 1.0
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Status,
|
||||
prometheus.GaugeValue,
|
||||
isCurrentStatus,
|
||||
strings.ToLower(service.Name),
|
||||
status,
|
||||
)
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
collector/system.go (new file, 135 lines)
@@ -0,0 +1,135 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["system"] = NewSystemCollector
|
||||
}
|
||||
|
||||
// A SystemCollector is a Prometheus collector for WMI metrics
|
||||
type SystemCollector struct {
|
||||
ContextSwitchesTotal *prometheus.Desc
|
||||
ExceptionDispatchesTotal *prometheus.Desc
|
||||
ProcessorQueueLength *prometheus.Desc
|
||||
SystemCallsTotal *prometheus.Desc
|
||||
SystemUpTime *prometheus.Desc
|
||||
Threads *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewSystemCollector ...
|
||||
func NewSystemCollector() (Collector, error) {
|
||||
const subsystem = "system"
|
||||
|
||||
return &SystemCollector{
|
||||
ContextSwitchesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "context_switches_total"),
|
||||
"Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ExceptionDispatchesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "exception_dispatches_total"),
|
||||
"Total number of exceptions dispatched (WMI source: PerfOS_System.ExceptionDispatchesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ProcessorQueueLength: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processor_queue_length"),
|
||||
"Length of processor queue (WMI source: PerfOS_System.ProcessorQueueLength)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemCallsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_calls_total"),
|
||||
"Total number of system calls (WMI source: PerfOS_System.SystemCallsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemUpTime: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_up_time"),
|
||||
"System boot time (WMI source: PerfOS_System.SystemUpTime)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
Threads: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "threads"),
|
||||
"Current number of threads (WMI source: PerfOS_System.Threads)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *SystemCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting system metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_PerfOS_System docs:
|
||||
// - https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp
|
||||
type Win32_PerfRawData_PerfOS_System struct {
|
||||
ContextSwitchesPersec uint32
|
||||
ExceptionDispatchesPersec uint32
|
||||
Frequency_Object uint64
|
||||
ProcessorQueueLength uint32
|
||||
SystemCallsPersec uint32
|
||||
SystemUpTime uint64
|
||||
Threads uint32
|
||||
Timestamp_Object uint64
|
||||
}
|
||||
|
||||
func (c *SystemCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfOS_System
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContextSwitchesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ContextSwitchesPersec),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ExceptionDispatchesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ExceptionDispatchesPersec),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].ProcessorQueueLength),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCallsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].SystemCallsPersec),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemUpTime,
|
||||
prometheus.GaugeValue,
|
||||
// convert from Windows timestamp (1 jan 1601) to unix timestamp (1 jan 1970)
|
||||
float64(dst[0].SystemUpTime-116444736000000000)/float64(dst[0].Frequency_Object),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Threads,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].Threads),
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
collector/tcp.go (new file, 174 lines)
@@ -0,0 +1,174 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["tcp"] = NewTCPCollector
|
||||
}
|
||||
|
||||
// A TCPCollector is a Prometheus collector for WMI Win32_PerfRawData_Tcpip_TCPv4 metrics
|
||||
type TCPCollector struct {
|
||||
ConnectionFailures *prometheus.Desc
|
||||
ConnectionsActive *prometheus.Desc
|
||||
ConnectionsEstablished *prometheus.Desc
|
||||
ConnectionsPassive *prometheus.Desc
|
||||
ConnectionsReset *prometheus.Desc
|
||||
SegmentsTotal *prometheus.Desc
|
||||
SegmentsReceivedTotal *prometheus.Desc
|
||||
SegmentsRetransmittedTotal *prometheus.Desc
|
||||
SegmentsSentTotal *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewTCPCollector ...
|
||||
func NewTCPCollector() (Collector, error) {
|
||||
const subsystem = "tcp"
|
||||
|
||||
return &TCPCollector{
|
||||
ConnectionFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connection_failures"),
|
||||
"(TCP.ConnectionFailures)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ConnectionsActive: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connections_active"),
|
||||
"(TCP.ConnectionsActive)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ConnectionsEstablished: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connections_established"),
|
||||
"(TCP.ConnectionsEstablished)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ConnectionsPassive: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connections_passive"),
|
||||
"(TCP.ConnectionsPassive)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ConnectionsReset: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "connections_reset"),
|
||||
"(TCP.ConnectionsReset)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SegmentsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "segments_total"),
|
||||
"(TCP.SegmentsTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SegmentsReceivedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "segments_received_total"),
|
||||
"(TCP.SegmentsReceivedTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SegmentsRetransmittedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "segments_retransmitted_total"),
|
||||
"(TCP.SegmentsRetransmittedTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SegmentsSentTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "segments_sent_total"),
|
||||
"(TCP.SegmentsSentTotal)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *TCPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting tcp metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_Tcpip_TCPv4 docs
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx
|
||||
type Win32_PerfRawData_Tcpip_TCPv4 struct {
|
||||
ConnectionFailures uint64
|
||||
ConnectionsActive uint64
|
||||
ConnectionsEstablished uint64
|
||||
ConnectionsPassive uint64
|
||||
ConnectionsReset uint64
|
||||
SegmentsPersec uint64
|
||||
SegmentsReceivedPersec uint64
|
||||
SegmentsRetransmittedPersec uint64
|
||||
SegmentsSentPersec uint64
|
||||
}
|
||||
|
||||
func (c *TCPCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_Tcpip_TCPv4
|
||||
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
// Counters
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionFailures,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ConnectionFailures),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionsActive,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ConnectionsActive),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionsEstablished,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ConnectionsEstablished),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionsPassive,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ConnectionsPassive),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionsReset,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ConnectionsReset),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SegmentsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].SegmentsPersec),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SegmentsReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].SegmentsReceivedPersec),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SegmentsRetransmittedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].SegmentsRetransmittedPersec),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SegmentsSentTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].SegmentsSentPersec),
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
collector/textfile.go (new file, 299 lines)
@@ -0,0 +1,299 @@
|
||||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build !notextfile
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/dimchansky/utfbom"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
"github.com/prometheus/common/log"
|
||||
kingpin "gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
textFileDirectory = kingpin.Flag(
|
||||
"collector.textfile.directory",
|
||||
"Directory to read text files with metrics from.",
|
||||
).Default("C:\\Program Files\\wmi_exporter\\textfile_inputs").String()
|
||||
|
||||
mtimeDesc = prometheus.NewDesc(
|
||||
"wmi_textfile_mtime_seconds",
|
||||
"Unixtime mtime of textfiles successfully read.",
|
||||
[]string{"file"},
|
||||
nil,
|
||||
)
|
||||
)
|
||||
|
||||
type textFileCollector struct {
|
||||
path string
|
||||
// Only set for testing to get predictable output.
|
||||
mtime *float64
|
||||
}
|
||||
|
||||
func init() {
|
||||
Factories["textfile"] = NewTextFileCollector
|
||||
}
|
||||
|
||||
// NewTextFileCollector returns a new Collector exposing metrics read from files
|
||||
// in the given textfile directory.
|
||||
func NewTextFileCollector() (Collector, error) {
|
||||
return &textFileCollector{
|
||||
path: *textFileDirectory,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func convertMetricFamily(metricFamily *dto.MetricFamily, ch chan<- prometheus.Metric) {
|
||||
var valType prometheus.ValueType
|
||||
var val float64
|
||||
|
||||
allLabelNames := map[string]struct{}{}
|
||||
for _, metric := range metricFamily.Metric {
|
||||
labels := metric.GetLabel()
|
||||
for _, label := range labels {
|
||||
if _, ok := allLabelNames[label.GetName()]; !ok {
|
||||
allLabelNames[label.GetName()] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, metric := range metricFamily.Metric {
|
||||
if metric.TimestampMs != nil {
|
||||
log.Warnf("Ignoring unsupported custom timestamp on textfile collector metric %v", metric)
|
||||
}
|
||||
|
||||
labels := metric.GetLabel()
|
||||
var names []string
|
||||
var values []string
|
||||
for _, label := range labels {
|
||||
names = append(names, label.GetName())
|
||||
values = append(values, label.GetValue())
|
||||
}
|
||||
|
||||
for k := range allLabelNames {
|
||||
present := false
|
||||
for _, name := range names {
|
||||
if k == name {
|
||||
present = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !present {
|
||||
names = append(names, k)
|
||||
values = append(values, "")
|
||||
}
|
||||
}
|
||||
|
||||
metricType := metricFamily.GetType()
|
||||
switch metricType {
|
||||
case dto.MetricType_COUNTER:
|
||||
valType = prometheus.CounterValue
|
||||
val = metric.Counter.GetValue()
|
||||
|
||||
case dto.MetricType_GAUGE:
|
||||
valType = prometheus.GaugeValue
|
||||
val = metric.Gauge.GetValue()
|
||||
|
||||
case dto.MetricType_UNTYPED:
|
||||
valType = prometheus.UntypedValue
|
||||
val = metric.Untyped.GetValue()
|
||||
|
||||
case dto.MetricType_SUMMARY:
|
||||
quantiles := map[float64]float64{}
|
||||
for _, q := range metric.Summary.Quantile {
|
||||
quantiles[q.GetQuantile()] = q.GetValue()
|
||||
}
|
||||
ch <- prometheus.MustNewConstSummary(
|
||||
prometheus.NewDesc(
|
||||
*metricFamily.Name,
|
||||
metricFamily.GetHelp(),
|
||||
names, nil,
|
||||
),
|
||||
metric.Summary.GetSampleCount(),
|
||||
metric.Summary.GetSampleSum(),
|
||||
quantiles, values...,
|
||||
)
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
buckets := map[float64]uint64{}
|
||||
for _, b := range metric.Histogram.Bucket {
|
||||
buckets[b.GetUpperBound()] = b.GetCumulativeCount()
|
||||
}
|
||||
ch <- prometheus.MustNewConstHistogram(
|
||||
prometheus.NewDesc(
|
||||
*metricFamily.Name,
|
||||
metricFamily.GetHelp(),
|
||||
names, nil,
|
||||
),
|
||||
metric.Histogram.GetSampleCount(),
|
||||
metric.Histogram.GetSampleSum(),
|
||||
buckets, values...,
|
||||
)
|
||||
default:
|
||||
log.Errorf("unknown metric type for file")
|
||||
continue
|
||||
}
|
||||
if metricType == dto.MetricType_GAUGE || metricType == dto.MetricType_COUNTER || metricType == dto.MetricType_UNTYPED {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
prometheus.NewDesc(
|
||||
*metricFamily.Name,
|
||||
metricFamily.GetHelp(),
|
||||
names, nil,
|
||||
),
|
||||
valType, val, values...,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *textFileCollector) exportMTimes(mtimes map[string]time.Time, ch chan<- prometheus.Metric) {
|
||||
// Export the mtimes of the successful files.
|
||||
if len(mtimes) > 0 {
|
||||
// Sorting is needed for predictable output comparison in tests.
|
||||
filenames := make([]string, 0, len(mtimes))
|
||||
for filename := range mtimes {
|
||||
filenames = append(filenames, filename)
|
||||
}
|
||||
sort.Strings(filenames)
|
||||
|
||||
for _, filename := range filenames {
|
||||
mtime := float64(mtimes[filename].UnixNano() / 1e9)
|
||||
if c.mtime != nil {
|
||||
mtime = *c.mtime
|
||||
}
|
||||
ch <- prometheus.MustNewConstMetric(mtimeDesc, prometheus.GaugeValue, mtime, filename)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type carriageReturnFilteringReader struct {
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
// Read returns data from the underlying io.Reader, but with \r filtered out
|
||||
func (cr carriageReturnFilteringReader) Read(p []byte) (int, error) {
|
||||
buf := make([]byte, len(p))
|
||||
n, err := cr.r.Read(buf)
|
||||
|
||||
if err != nil && err != io.EOF {
|
||||
return n, err
|
||||
}
|
||||
|
||||
pi := 0
|
||||
for i := 0; i < n; i++ {
|
||||
if buf[i] != '\r' {
|
||||
p[pi] = buf[i]
|
||||
pi++
|
||||
}
|
||||
}
|
||||
|
||||
return pi, err
|
||||
}
|
||||
|
||||
// Collect implements the Collector interface.
|
||||
func (c *textFileCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
error := 0.0
|
||||
mtimes := map[string]time.Time{}
|
||||
|
||||
// Iterate over files and accumulate their metrics.
|
||||
files, err := ioutil.ReadDir(c.path)
|
||||
if err != nil && c.path != "" {
|
||||
log.Errorf("Error reading textfile collector directory %q: %s", c.path, err)
|
||||
error = 1.0
|
||||
}
|
||||
|
||||
fileLoop:
|
||||
for _, f := range files {
|
||||
if !strings.HasSuffix(f.Name(), ".prom") {
|
||||
continue
|
||||
}
|
||||
path := filepath.Join(c.path, f.Name())
|
||||
log.Debugf("Processing file %q", path)
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
log.Errorf("Error opening %q: %v", path, err)
|
||||
error = 1.0
|
||||
continue
|
||||
}
|
||||
var parser expfmt.TextParser
|
||||
r, encoding := utfbom.Skip(carriageReturnFilteringReader{r: file})
|
||||
if err = checkBOM(encoding); err != nil {
|
||||
log.Errorf("Invalid file encoding detected in %s: %s - file must be UTF8", path, err.Error())
|
||||
error = 1.0
|
||||
continue
|
||||
}
|
||||
parsedFamilies, err := parser.TextToMetricFamilies(r)
|
||||
closeErr := file.Close()
|
||||
if closeErr != nil {
|
||||
log.Warnf("Error closing file: %v", err)
|
||||
}
|
||||
if err != nil {
|
||||
log.Errorf("Error parsing %q: %v", path, err)
|
||||
error = 1.0
|
||||
continue
|
||||
}
|
||||
for _, mf := range parsedFamilies {
|
||||
for _, m := range mf.Metric {
|
||||
if m.TimestampMs != nil {
|
||||
log.Errorf("Textfile %q contains unsupported client-side timestamps, skipping entire file", path)
|
||||
error = 1.0
|
||||
continue fileLoop
|
||||
}
|
||||
}
|
||||
if mf.Help == nil {
|
||||
help := fmt.Sprintf("Metric read from %s", path)
|
||||
mf.Help = &help
|
||||
}
|
||||
}
|
||||
|
||||
// Only set this once it has been parsed and validated, so that
|
||||
// a failed parse does not still look like a freshly read file.
|
||||
mtimes[f.Name()] = f.ModTime()
|
||||
|
||||
for _, mf := range parsedFamilies {
|
||||
convertMetricFamily(mf, ch)
|
||||
}
|
||||
}
|
||||
|
||||
c.exportMTimes(mtimes, ch)
|
||||
|
||||
// Export if there were errors.
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
prometheus.NewDesc(
|
||||
"wmi_textfile_scrape_error",
|
||||
"1 if there was an error opening or reading a file, 0 otherwise",
|
||||
nil, nil,
|
||||
),
|
||||
prometheus.GaugeValue, error,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkBOM(encoding utfbom.Encoding) error {
|
||||
if encoding == utfbom.Unknown || encoding == utfbom.UTF8 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf(encoding.String())
|
||||
}
|
||||
collector/textfile_test.go (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/dimchansky/utfbom"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCRFilter(t *testing.T) {
|
||||
sr := strings.NewReader("line 1\r\nline 2")
|
||||
cr := carriageReturnFilteringReader{r: sr}
|
||||
b, err := ioutil.ReadAll(cr)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if string(b) != "line 1\nline 2" {
|
||||
t.Errorf("Unexpected output %q", b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckBOM(t *testing.T) {
|
||||
testdata := []struct {
|
||||
encoding utfbom.Encoding
|
||||
err string
|
||||
}{
|
||||
{utfbom.Unknown, ""},
|
||||
{utfbom.UTF8, ""},
|
||||
{utfbom.UTF16BigEndian, "UTF16BigEndian"},
|
||||
{utfbom.UTF16LittleEndian, "UTF16LittleEndian"},
|
||||
{utfbom.UTF32BigEndian, "UTF32BigEndian"},
|
||||
{utfbom.UTF32LittleEndian, "UTF32LittleEndian"},
|
||||
}
|
||||
for _, d := range testdata {
|
||||
err := checkBOM(d.encoding)
|
||||
if d.err == "" && err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if d.err != "" && err == nil {
|
||||
t.Errorf("Missing expected error %s", d.err)
|
||||
}
|
||||
if err != nil && !strings.Contains(err.Error(), d.err) {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
collector/vmware.go (new file, 344 lines)
@@ -0,0 +1,344 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["vmware"] = NewVmwareCollector
|
||||
}
|
||||
|
||||
// A VmwareCollector is a Prometheus collector for WMI Win32_PerfRawData_vmGuestLib_VMem/Win32_PerfRawData_vmGuestLib_VCPU metrics
|
||||
type VmwareCollector struct {
|
||||
MemActive *prometheus.Desc
|
||||
MemBallooned *prometheus.Desc
|
||||
MemLimit *prometheus.Desc
|
||||
MemMapped *prometheus.Desc
|
||||
MemOverhead *prometheus.Desc
|
||||
MemReservation *prometheus.Desc
|
||||
MemShared *prometheus.Desc
|
||||
MemSharedSaved *prometheus.Desc
|
||||
MemShares *prometheus.Desc
|
||||
MemSwapped *prometheus.Desc
|
||||
MemTargetSize *prometheus.Desc
|
||||
MemUsed *prometheus.Desc
|
||||
|
||||
CpuLimitMHz *prometheus.Desc
|
||||
CpuReservationMHz *prometheus.Desc
|
||||
CpuShares *prometheus.Desc
|
||||
CpuStolenTotal *prometheus.Desc
|
||||
CpuTimeTotal *prometheus.Desc
|
||||
EffectiveVMSpeedMHz *prometheus.Desc
|
||||
HostProcessorSpeedMHz *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewVmwareCollector constructs a new VmwareCollector
|
||||
func NewVmwareCollector() (Collector, error) {
|
||||
const subsystem = "vmware"
|
||||
return &VmwareCollector{
|
||||
MemActive: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_active_bytes"),
|
||||
"(MemActiveMB)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemBallooned: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_ballooned_bytes"),
|
||||
"(MemBalloonedMB)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemLimit: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_limit_bytes"),
|
||||
"(MemLimitMB)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemMapped: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_mapped_bytes"),
|
||||
"(MemMappedMB)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemOverhead: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_overhead_bytes"),
|
||||
"(MemOverheadMB)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemReservation: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_reservation_bytes"),
|
||||
"(MemReservationMB)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemShared: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_shared_bytes"),
|
||||
"(MemSharedMB)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemSharedSaved: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_shared_saved_bytes"),
|
||||
"(MemSharedSavedMB)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemShares: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_shares"),
|
||||
"(MemShares)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemSwapped: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_swapped_bytes"),
|
||||
"(MemSwappedMB)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemTargetSize: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_target_size_bytes"),
|
||||
"(MemTargetSizeMB)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
MemUsed: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "mem_used_bytes"),
|
||||
"(MemUsedMB)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
|
||||
CpuLimitMHz: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_limit_mhz"),
|
||||
"(CpuLimitMHz)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CpuReservationMHz: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_reservation_mhz"),
|
||||
"(CpuReservationMHz)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CpuShares: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_shares"),
|
||||
"(CpuShares)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CpuStolenTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_stolen_seconds_total"),
|
||||
"(CpuStolenMs)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CpuTimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_time_seconds_total"),
|
||||
"(CpuTimePercents)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
EffectiveVMSpeedMHz: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "effective_vm_speed_mhz"),
|
||||
"(EffectiveVMSpeedMHz)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
HostProcessorSpeedMHz: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "host_processor_speed_mhz"),
|
||||
"(HostProcessorSpeedMHz)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *VmwareCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectMem(ch); err != nil {
|
||||
log.Error("failed collecting vmware memory metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
if desc, err := c.collectCpu(ch); err != nil {
|
||||
log.Error("failed collecting vmware cpu metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_vmGuestLib_VMem struct {
|
||||
MemActiveMB uint64
|
||||
MemBalloonedMB uint64
|
||||
MemLimitMB uint64
|
||||
MemMappedMB uint64
|
||||
MemOverheadMB uint64
|
||||
MemReservationMB uint64
|
||||
MemSharedMB uint64
|
||||
MemSharedSavedMB uint64
|
||||
MemShares uint64
|
||||
MemSwappedMB uint64
|
||||
MemTargetSizeMB uint64
|
||||
MemUsedMB uint64
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_vmGuestLib_VCPU struct {
|
||||
CpuLimitMHz uint64
|
||||
CpuReservationMHz uint64
|
||||
CpuShares uint64
|
||||
CpuStolenMs uint64
|
||||
CpuTimePercents uint64
|
||||
EffectiveVMSpeedMHz uint64
|
||||
HostProcessorSpeedMHz uint64
|
||||
}
|
||||
|
||||
func (c *VmwareCollector) collectMem(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_vmGuestLib_VMem
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemActive,
|
||||
prometheus.GaugeValue,
|
||||
mbToBytes(dst[0].MemActiveMB),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemBallooned,
|
||||
prometheus.GaugeValue,
|
||||
mbToBytes(dst[0].MemBalloonedMB),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemLimit,
|
||||
prometheus.GaugeValue,
|
||||
mbToBytes(dst[0].MemLimitMB),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemMapped,
|
||||
prometheus.GaugeValue,
|
||||
mbToBytes(dst[0].MemMappedMB),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemOverhead,
|
||||
prometheus.GaugeValue,
|
||||
mbToBytes(dst[0].MemOverheadMB),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemReservation,
|
||||
prometheus.GaugeValue,
|
||||
mbToBytes(dst[0].MemReservationMB),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemShared,
|
||||
prometheus.GaugeValue,
|
||||
mbToBytes(dst[0].MemSharedMB),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemSharedSaved,
|
||||
prometheus.GaugeValue,
|
||||
mbToBytes(dst[0].MemSharedSavedMB),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemShares,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].MemShares),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemSwapped,
|
||||
prometheus.GaugeValue,
|
||||
mbToBytes(dst[0].MemSwappedMB),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemTargetSize,
|
||||
prometheus.GaugeValue,
|
||||
mbToBytes(dst[0].MemTargetSizeMB),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemUsed,
|
||||
prometheus.GaugeValue,
|
||||
mbToBytes(dst[0].MemUsedMB),
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func mbToBytes(mb uint64) float64 {
|
||||
return float64(mb * 1024 * 1024)
|
||||
}
|
||||
|
||||
func (c *VmwareCollector) collectCpu(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_vmGuestLib_VCPU
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CpuLimitMHz,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CpuLimitMHz),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CpuReservationMHz,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CpuReservationMHz),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CpuShares,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].CpuShares),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CpuStolenTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].CpuStolenMs)*ticksToSecondsScaleFactor,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CpuTimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].CpuTimePercents)*ticksToSecondsScaleFactor,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.EffectiveVMSpeedMHz,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].EffectiveVMSpeedMHz),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.HostProcessorSpeedMHz,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].HostProcessorSpeedMHz),
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
collector/wmi.go (modified)
@@ -1,17 +1,63 @@
|
||||
package collector
|
||||
|
||||
import "github.com/prometheus/client_golang/prometheus"
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
|
||||
// ...
|
||||
const (
|
||||
Namespace = "wmi"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
// Factories ...
|
||||
var Factories = make(map[string]func() (Collector, error))
|
||||
|
||||
// Collector is the interface a collector has to implement.
|
||||
type Collector interface {
|
||||
// Get new metrics and expose them via prometheus registry.
|
||||
Collect(ch chan<- prometheus.Metric) (err error)
|
||||
func className(src interface{}) string {
|
||||
s := reflect.Indirect(reflect.ValueOf(src))
|
||||
t := s.Type()
|
||||
if s.Kind() == reflect.Slice {
|
||||
t = t.Elem()
|
||||
}
|
||||
return t.Name()
|
||||
}
|
||||
|
||||
func queryAll(src interface{}) string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("SELECT * FROM ")
|
||||
b.WriteString(className(src))
|
||||
|
||||
log.Debugf("Generated WMI query %s", b.String())
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func queryAllForClass(src interface{}, class string) string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("SELECT * FROM ")
|
||||
b.WriteString(class)
|
||||
|
||||
log.Debugf("Generated WMI query %s", b.String())
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func queryAllWhere(src interface{}, where string) string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("SELECT * FROM ")
|
||||
b.WriteString(className(src))
|
||||
|
||||
if where != "" {
|
||||
b.WriteString(" WHERE ")
|
||||
b.WriteString(where)
|
||||
}
|
||||
|
||||
log.Debugf("Generated WMI query %s", b.String())
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func queryAllForClassWhere(src interface{}, class string, where string) string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("SELECT * FROM ")
|
||||
b.WriteString(class)
|
||||
|
||||
if where != "" {
|
||||
b.WriteString(" WHERE ")
|
||||
b.WriteString(where)
|
||||
}
|
||||
|
||||
log.Debugf("Generated WMI query %s", b.String())
|
||||
return b.String()
|
||||
}
|
||||
|
||||
collector/wmi_test.go (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
type fakeWmiClass struct {
|
||||
Name string
|
||||
SomeProperty int
|
||||
}
|
||||
|
||||
var (
|
||||
mapQueryAll = func(src interface{}, class string, where string) string {
|
||||
return queryAll(src)
|
||||
}
|
||||
mapQueryAllWhere = func(src interface{}, class string, where string) string {
|
||||
return queryAllWhere(src, where)
|
||||
}
|
||||
mapQueryAllForClass = func(src interface{}, class string, where string) string {
|
||||
return queryAllForClass(src, class)
|
||||
}
|
||||
mapQueryAllForClassWhere = func(src interface{}, class string, where string) string {
|
||||
return queryAllForClassWhere(src, class, where)
|
||||
}
|
||||
)
|
||||
|
||||
type queryFunc func(src interface{}, class string, where string) string
|
||||
|
||||
func TestCreateQuery(t *testing.T) {
|
||||
cases := []struct {
|
||||
desc string
|
||||
dst interface{}
|
||||
class string
|
||||
where string
|
||||
queryFunc queryFunc
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
desc: "queryAll on single instance",
|
||||
dst: fakeWmiClass{},
|
||||
queryFunc: mapQueryAll,
|
||||
expected: "SELECT * FROM fakeWmiClass",
|
||||
},
|
||||
{
|
||||
desc: "queryAll on slice",
|
||||
dst: []fakeWmiClass{},
|
||||
queryFunc: mapQueryAll,
|
||||
expected: "SELECT * FROM fakeWmiClass",
|
||||
},
|
||||
{
|
||||
desc: "queryAllWhere on single instance",
|
||||
dst: fakeWmiClass{},
|
||||
where: "foo = bar",
|
||||
queryFunc: mapQueryAllWhere,
|
||||
expected: "SELECT * FROM fakeWmiClass WHERE foo = bar",
|
||||
},
|
||||
{
|
||||
desc: "queryAllWhere on slice",
|
||||
dst: []fakeWmiClass{},
|
||||
where: "foo = bar",
|
||||
queryFunc: mapQueryAllWhere,
|
||||
expected: "SELECT * FROM fakeWmiClass WHERE foo = bar",
|
||||
},
|
||||
{
|
||||
desc: "queryAllWhere on single instance with empty where",
|
||||
dst: fakeWmiClass{},
|
||||
queryFunc: mapQueryAllWhere,
|
||||
expected: "SELECT * FROM fakeWmiClass",
|
||||
},
|
||||
{
|
||||
desc: "queryAllForClass on single instance",
|
||||
dst: fakeWmiClass{},
|
||||
class: "someClass",
|
||||
queryFunc: mapQueryAllForClass,
|
||||
expected: "SELECT * FROM someClass",
|
||||
},
|
||||
{
|
||||
desc: "queryAllForClass on slice",
|
||||
dst: []fakeWmiClass{},
|
||||
class: "someClass",
|
||||
queryFunc: mapQueryAllForClass,
|
||||
expected: "SELECT * FROM someClass",
|
||||
},
|
||||
{
|
||||
desc: "queryAllForClassWhere on single instance",
|
||||
dst: fakeWmiClass{},
|
||||
class: "someClass",
|
||||
where: "foo = bar",
|
||||
queryFunc: mapQueryAllForClassWhere,
|
||||
expected: "SELECT * FROM someClass WHERE foo = bar",
|
||||
},
|
||||
{
|
||||
desc: "queryAllForClassWhere on slice",
|
||||
dst: []fakeWmiClass{},
|
||||
class: "someClass",
|
||||
where: "foo = bar",
|
||||
queryFunc: mapQueryAllForClassWhere,
|
||||
expected: "SELECT * FROM someClass WHERE foo = bar",
|
||||
},
|
||||
{
|
||||
desc: "queryAllForClassWhere on single instance with empty where",
|
||||
dst: fakeWmiClass{},
|
||||
class: "someClass",
|
||||
queryFunc: mapQueryAllForClassWhere,
|
||||
expected: "SELECT * FROM someClass",
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.desc, func(t *testing.T) {
|
||||
if q := c.queryFunc(c.dst, c.class, c.where); q != c.expected {
|
||||
t.Errorf("Case %q failed: Expected %q, got %q", c.desc, c.expected, q)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
contrib/console_templates/wmi-overview.html (new file, 140 lines)
@@ -0,0 +1,140 @@
|
||||
{{ template "head" . }}
|
||||
|
||||
{{ template "prom_content_head" . }}
|
||||
<h1>Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>
|
||||
|
||||
<h3>CPU Usage</h3>
|
||||
<div id="cpuGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#cpuGraph"),
|
||||
expr: "sum by (mode)(irate(wmi_cpu_time_total{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))",
|
||||
renderer: 'area',
|
||||
max: {{ with printf "count(count by (cpu)(wmi_cpu_time_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yTitle: 'Cores'
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Network Utilization</h3>
|
||||
<div id="networkioGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#networkioGraph"),
|
||||
expr: [
|
||||
"irate(wmi_net_bytes_sent_total{job='node',instance='{{ .Params.instance }}',nic!~'^isatap_ec2_internal'}[5m])",
|
||||
"irate(wmi_net_bytes_received_total{job='node',instance='{{ .Params.instance }}',nic!~'^isatap_ec2_internal'}[5m])",
|
||||
],
|
||||
min: 0,
|
||||
name: [ 'sent', 'received' ],
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "B",
|
||||
yTitle: 'Network IO'
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Disk I/O Utilization</h3>
|
||||
<div id="diskioGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#diskioGraph"),
|
||||
expr: [
|
||||
"100 - irate(wmi_logical_disk_idle_seconds_total{job='node',instance='{{ .Params.instance }}',volume!~'^HarddiskVolume.*$'}[5m]) * 100",
|
||||
],
|
||||
min: 0,
|
||||
name: '[[ volume ]]',
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "%",
|
||||
yTitle: 'Disk I/O Utilization'
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Memory</h3>
|
||||
<div id="memoryGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#memoryGraph"),
|
||||
renderer: 'area',
|
||||
expr: [
|
||||
"wmi_cs_physical_memory_bytes{job='node',instance='{{ .Params.instance }}'}",
|
||||
"wmi_os_physical_memory_free_bytes{job='node',instance='{{ .Params.instance }}'}",
|
||||
"wmi_cs_physical_memory__bytes{job='node',instance='{{ .Params.instance }}'} - wmi_os_physical_memory_free_bytes{job='node',instance='{{.Params.instance}}'}",
|
||||
"wmi_os_virtual_memory_bytes{job='node',instance='{{ .Params.instance }}'}",
|
||||
],
|
||||
name: ["Physical", "Free", "Used", "Virtual"],
|
||||
min: 0,
|
||||
yUnits: "B",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize1024,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize1024,
|
||||
yTitle: 'Memory'
|
||||
})
|
||||
</script>
|
||||
|
||||
{{ template "prom_right_table_head" }}
|
||||
<tr><th colspan="2">Overview</th></tr>
|
||||
<tr>
|
||||
<td>User CPU</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(wmi_cpu_time_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(wmi_cpu_time_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Privileged CPU</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(wmi_cpu_time_total{job='node',instance='%s',mode='privileged'}[5m])) * 100 / count(count by (cpu)(wmi_cpu_time_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Memory Total</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "wmi_cs_physical_memory_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Memory Free</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "wmi_os_physical_memory_free_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th colspan="2">Network</th>
|
||||
</tr>
|
||||
{{ range printf "wmi_net_bytes_received_total{job='node',instance='%s',nic!='isatap_ec2_internal'}" .Params.instance | query | sortByLabel "nic" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.nic }} Received</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(wmi_net_bytes_received_total{job='node',instance='%s',nic='%s'}[5m])" .Labels.instance .Labels.nic) "B/s" "humanize") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>{{ .Labels.nic }} Transmitted</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(wmi_net_bytes_sent_total{job='node',instance='%s',nic='%s'}[5m])" .Labels.instance .Labels.nic) "B/s" "humanize") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tr>
|
||||
<tr>
|
||||
<th colspan="2">Disks</th>
|
||||
</tr>
|
||||
{{ range printf "wmi_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.volume }} Utilization</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "100 - irate(wmi_logical_disk_idle_seconds_total{job='node',instance='%s',volume='%s'}[5m]) * 100" .Labels.instance .Labels.volume) "%" "printf.1f") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
{{ range printf "wmi_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.volume }} Throughput</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(wmi_logical_disk_read_bytes_total{job='node',instance='%s',volume='%s'}[5m]) + irate(wmi_logical_disk_write_bytes_total{job='node',instance='%s',volume='%s'}[5m])" .Labels.instance .Labels.volume .Labels.instance .Labels.volume) "B/s" "humanize") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
<tr>
|
||||
<th colspan="2">Filesystem Fullness</th>
|
||||
</tr>
|
||||
{{ define "roughlyNearZero" }}
|
||||
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
|
||||
{{ end }}
|
||||
{{ range printf "wmi_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.volume }}</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "100 - wmi_logical_disk_free_bytes{job='node',instance='%s',volume='%s'} / wmi_logical_disk_size_bytes{job='node'} * 100" .Labels.instance .Labels.volume) "%" "roughlyNearZero") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tr>
|
||||
{{ template "prom_right_table_tail" }}
|
||||
|
||||
{{ template "prom_content_tail" . }}
|
||||
|
||||
{{ template "tail" }}
|
||||
docs/README.md (new file, 30 lines)
@@ -0,0 +1,30 @@
|
||||
# Documentation
|
||||
This directory contains documentation of the collectors in the WMI exporter, with information such as what metrics are exported, any flags for additional configuration, and some example usage in alerts and queries.
|
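A blank page layout for new collector documentation is available in [`collector-template.md`](collector-template.md).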
||||
|
||||
# Collectors
|
||||
- [`ad`](collector.ad.md)
|
||||
- [`cpu`](collector.cpu.md)
|
||||
- [`cs`](collector.cs.md)
|
||||
- [`dns`](collector.dns.md)
|
||||
- [`hyperv`](collector.hyperv.md)
|
||||
- [`iis`](collector.iis.md)
|
||||
- [`logical_disk`](collector.logical_disk.md)
|
||||
- [`memory`](collector.memory.md)
|
||||
- [`msmq`](collector.msmq.md)
|
||||
- [`mssql`](collector.mssql.md)
|
||||
- [`netframework_clrexceptions`](collector.netframework_clrexceptions.md)
|
||||
- [`netframework_clrinterop`](collector.netframework_clrinterop.md)
|
||||
- [`netframework_clrjit`](collector.netframework_clrjit.md)
|
||||
- [`netframework_clrloading`](collector.netframework_clrloading.md)
|
||||
- [`netframework_clrlocksandthreads`](collector.netframework_clrlocksandthreads.md)
|
||||
- [`netframework_clrmemory`](collector.netframework_clrmemory.md)
|
||||
- [`netframework_clrremoting`](collector.netframework_clrremoting.md)
|
||||
- [`netframework_clrsecurity`](collector.netframework_clrsecurity.md)
|
||||
- [`net`](collector.net.md)
|
||||
- [`os`](collector.os.md)
|
||||
- [`process`](collector.process.md)
|
||||
- [`service`](collector.service.md)
|
||||
- [`system`](collector.system.md)
|
||||
- [`tcp`](collector.tcp.md)
|
||||
- [`textfile`](collector.textfile.md)
|
||||
- [`vmware`](collector.vmware.md)
|
||||
docs/collector-template.md (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
# %name% collector
|
||||
|
||||
The %name% collector exposes metrics about ...
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `%name%`
|
||||
Classes | [`...`](https://msdn.microsoft.com/en-us/library/...)
|
||||
Enabled by default? | Yes/No
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector....`
|
||||
|
||||
Add description...
|
||||
|
||||
Example: `--collector....`
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_...` | ... | counter/gauge/histogram/summary | ...
|
||||
|
||||
### Example metric
|
||||
|
||||
...:
|
||||
|
||||
`wmi_...{...} 1`
|
||||
|
||||
## Useful queries
|
||||
### Add queries...
|
||||
|
||||
`...`
|
||||
|
||||
## Alerting examples
|
||||
### Add alerts...
|
||||
|
||||
```yaml
|
||||
- alert: ""
|
||||
expr: ""
|
||||
for: ""
|
||||
labels:
|
||||
urgency: ""
|
||||
annotations:
|
||||
summary: ""
|
||||
```
|
||||
docs/collector.ad.md (new file, 88 lines)
@@ -0,0 +1,88 @@
|
||||
# ad collector
|
||||
|
||||
The ad collector exposes metrics about an Active Directory Domain Services domain controller.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `ad`
|
||||
Classes | [`Win32_PerfRawData_DirectoryServices_DirectoryServices`](https://msdn.microsoft.com/en-us/library/ms803980.aspx)
|
||||
Enabled by default? | No
|
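Because the collector is not enabled by default, it has to be added to the exporter's list of enabled collectors at startup (for example `--collectors.enabled="cpu,cs,os,service,ad"`; the exact flag name can differ between exporter versions, so check the main README for your release).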
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_ad_address_book_operations_total` | _Not yet documented_ | counter | `operation`
|
||||
`wmi_ad_address_book_client_sessions` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_approximate_highest_distinguished_name_tag` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_atq_estimated_delay_seconds` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_atq_outstanding_requests` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_atq_average_request_latency` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_atq_current_threads` | _Not yet documented_ | gauge | `service`
|
||||
`wmi_ad_searches_total` | _Not yet documented_ | counter | `scope`
|
||||
`wmi_ad_database_operations_total` | _Not yet documented_ | counter | `operation`
|
||||
`wmi_ad_binds_total` | _Not yet documented_ | counter | `bind_method`
|
||||
`wmi_ad_replication_highest_usn` | _Not yet documented_ | counter | `state`
|
||||
`wmi_ad_replication_data_intrasite_bytes_total` | _Not yet documented_ | counter | `direction`
|
||||
`wmi_ad_replication_data_intersite_bytes_total` | _Not yet documented_ | counter | `direction`
|
||||
`wmi_ad_replication_inbound_sync_objects_remaining` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_replication_inbound_link_value_updates_remaining` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_replication_inbound_objects_updated_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_inbound_objects_filtered_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_inbound_properties_updated_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_inbound_properties_filtered_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_pending_operations` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_replication_pending_synchronizations` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_replication_sync_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_sync_requests_success_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_sync_requests_schema_mismatch_failure_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_name_translations_total` | _Not yet documented_ | counter | `target_name`
|
||||
`wmi_ad_change_monitors_registered` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_change_monitor_updates_pending` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_name_cache_hits_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_name_cache_lookups_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_directory_operations_total` | _Not yet documented_ | counter | `operation`, `origin`
|
||||
`wmi_ad_directory_search_suboperations_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_security_descriptor_propagation_events_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_security_descriptor_propagation_events_queued` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_security_descriptor_propagation_access_wait_total_seconds` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_security_descriptor_propagation_items_queued_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_directory_service_threads` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_ldap_closed_connections_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_ldap_opened_connections_total` | _Not yet documented_ | counter | `type`
|
||||
`wmi_ad_ldap_active_threads` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_ldap_last_bind_time_seconds` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_ldap_searches_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_ldap_udp_operations_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_ldap_writes_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_link_values_cleaned_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_phantom_objects_cleaned_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_phantom_objects_visited_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_group_membership_evaluations_total` | _Not yet documented_ | counter | `group_type`
|
||||
`wmi_ad_sam_group_membership_global_catalog_evaluations_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_group_membership_evaluations_nontransitive_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_group_membership_evaluations_transitive_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_group_evaluation_latency` | _Not yet documented_ | gauge | `evaluation_type`
|
||||
`wmi_ad_sam_computer_creation_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_computer_creation_successful_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_user_creation_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_user_creation_successful_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_query_display_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_enumerations_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_membership_changes_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_password_changes_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_tombstoned_objects_collected_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_tombstoned_objects_visited_total` | _Not yet documented_ | counter | None

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
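As a starting point, one query that may be useful is the per-second rate of directory operations, broken down by operation type and origin. This is only a sketch built from the metric names listed above:

```
sum by (operation, origin) (rate(wmi_ad_directory_operations_total[5m]))
```

Summing by `operation` alone gives a rough picture of read versus write load on the domain controller.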

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

42  docs/collector.container.md  Normal file
@@ -0,0 +1,42 @@
# container collector

The container collector exposes metrics about containers running on the system

|||
-|-
Metric name prefix | `container`
Enabled by default? | No

## Flags

None

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_container_available` | Available | counter | `container_id`
`wmi_container_count` | Number of containers | gauge | `container_id`
`wmi_container_cpu_usage_seconds_kernelmode` | Run time in kernel mode in seconds | counter | `container_id`
`wmi_container_cpu_usage_seconds_usermode` | Run time in user mode in seconds | counter | `container_id`
`wmi_container_cpu_usage_seconds_total` | Total run time in seconds | counter | `container_id`
`wmi_container_memory_usage_commit_bytes` | Memory usage commit bytes | gauge | `container_id`
`wmi_container_memory_usage_commit_peak_bytes` | Memory usage commit peak bytes | gauge | `container_id`
`wmi_container_memory_usage_private_working_set_bytes` | Memory usage private working set bytes | gauge | `container_id`
`wmi_container_network_receive_bytes_total` | Bytes received on interface | counter | `container_id`, `interface`
`wmi_container_network_receive_packets_total` | Packets received on interface | counter | `container_id`, `interface`
`wmi_container_network_receive_packets_dropped_total` | Dropped incoming packets on interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_bytes_total` | Bytes sent on interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_packets_total` | Packets sent on interface | counter | `container_id`, `interface`
`wmi_container_network_transmit_packets_dropped_total` | Dropped outgoing packets on interface | counter | `container_id`, `interface`

### Example metric
`wmi_container_network_receive_bytes_total{container_id="docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e",interface="822179E7-002C-4280-ABBA-28BCFE401826"} 9.3305343e+07`

This metric shows that a total of 9.3305343e+07 bytes have been received on interface `822179E7-002C-4280-ABBA-28BCFE401826` of container `docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e`.

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
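One query that may serve as a starting point, sketched from the metric names above, is the per-container network receive rate:

```
sum by (container_id) (rate(wmi_container_network_receive_bytes_total[5m]))
```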

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

43  docs/collector.cpu.md  Normal file
@@ -0,0 +1,43 @@
# cpu collector

The cpu collector exposes metrics about CPU usage

|||
-|-
Metric name prefix | `cpu`
Data source | Perflib
Counters | `ProcessorInformation` (Windows Server 2008R2 and later) or `Processor` (older versions)
Enabled by default? | Yes

## Flags

None

## Metrics
These metrics are available on all versions of Windows:

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_cpu_cstate_seconds_total` | Time spent in low-power idle states | counter | `core`, `state`
`wmi_cpu_time_total` | Time that processor spent in different modes (idle, user, system, ...) | counter | `core`, `mode`
`wmi_cpu_interrupts_total` | Total number of received and serviced hardware interrupts | counter | `core`
`wmi_cpu_dpcs_total` | Total number of received and serviced deferred procedure calls (DPCs) | counter | `core`

These metrics are only exposed on Windows Server 2008R2 and later:

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_cpu_clock_interrupts_total` | Total number of received and serviced clock tick interrupts | counter | `core`
`wmi_cpu_idle_break_events_total` | Total number of times the processor was woken from idle | counter | `core`
`wmi_cpu_parking_status` | Parking Status represents whether a processor is parked or not | gauge | `core`
`wmi_cpu_core_frequency_mhz` | Core frequency in megahertz | gauge | `core`
`wmi_cpu_processor_performance` | Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100% | gauge | `core`

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
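A common starting point, sketched here using the `mode` label documented above, is per-core CPU utilisation derived from the idle counter:

```
1 - rate(wmi_cpu_time_total{mode="idle"}[5m])
```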

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

29  docs/collector.cs.md  Normal file
@@ -0,0 +1,29 @@
# cs collector

The cs collector exposes metrics detailing the hardware of the computer system

|||
-|-
Metric name prefix | `cs`
Classes | [`Win32_ComputerSystem`](https://msdn.microsoft.com/en-us/library/aa394102)
Enabled by default? | Yes

## Flags

None

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_cs_logical_processors` | Number of installed logical processors | gauge | None
`wmi_cs_physical_memory_bytes` | Total installed physical memory | gauge | None

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
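For instance, a simple sketch converting the installed physical memory to gibibytes:

```
wmi_cs_physical_memory_bytes / 1024 / 1024 / 1024
```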

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

49  docs/collector.dns.md  Normal file
@@ -0,0 +1,49 @@
# dns collector

The dns collector exposes metrics about the DNS server

|||
-|-
Metric name prefix | `dns`
Classes | [`Win32_PerfRawData_DNS_DNS`](https://technet.microsoft.com/en-us/library/cc977686.aspx)
Enabled by default? | No

## Flags

None

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_dns_zone_transfer_requests_received_total` | _Not yet documented_ | counter | `qtype`
`wmi_dns_zone_transfer_requests_sent_total` | _Not yet documented_ | counter | `qtype`
`wmi_dns_zone_transfer_response_received_total` | _Not yet documented_ | counter | `qtype`
`wmi_dns_zone_transfer_success_received_total` | _Not yet documented_ | counter | `qtype`, `protocol`
`wmi_dns_zone_transfer_success_sent_total` | _Not yet documented_ | counter | `qtype`
`wmi_dns_zone_transfer_failures_total` | _Not yet documented_ | counter | None
`wmi_dns_memory_used_bytes_total` | _Not yet documented_ | gauge | `area`
`wmi_dns_dynamic_updates_queued` | _Not yet documented_ | gauge | None
`wmi_dns_dynamic_updates_received_total` | _Not yet documented_ | counter | `operation`
`wmi_dns_dynamic_updates_failures_total` | _Not yet documented_ | counter | `reason`
`wmi_dns_notify_received_total` | _Not yet documented_ | counter | None
`wmi_dns_notify_sent_total` | _Not yet documented_ | counter | None
`wmi_dns_secure_update_failures_total` | _Not yet documented_ | counter | None
`wmi_dns_secure_update_received_total` | _Not yet documented_ | counter | None
`wmi_dns_queries_total` | _Not yet documented_ | counter | `protocol`
`wmi_dns_responses_total` | _Not yet documented_ | counter | `protocol`
`wmi_dns_recursive_queries_total` | _Not yet documented_ | counter | None
`wmi_dns_recursive_query_failures_total` | _Not yet documented_ | counter | None
`wmi_dns_recursive_query_send_timeouts_total` | _Not yet documented_ | counter | None
`wmi_dns_wins_queries_total` | _Not yet documented_ | counter | `direction`
`wmi_dns_wins_responses_total` | _Not yet documented_ | counter | `direction`
`wmi_dns_unmatched_responses_total` | _Not yet documented_ | counter | None

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
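A starting point, sketched from the metric names above, is the rate of DNS queries handled, broken down by protocol:

```
sum by (protocol) (rate(wmi_dns_queries_total[5m]))
```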

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

102  docs/collector.hyperv.md  Normal file
@@ -0,0 +1,102 @@
# hyperv collector

The hyperv collector exposes metrics about the Hyper-V hypervisor

|||
-|-
Metric name prefix | `hyperv`
Classes | `Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary`<br/>`Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisor`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor`<br/>`Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch`<br/>`Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter`<br/>`Win32_PerfRawData_Counters_HyperVVirtualStorageDevice`<br/>`Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter`
|
||||
Enabled by default? | No

## Flags

None

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_hyper_health_critical` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_health_ok` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_vid_physical_pages_allocated` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyper_vid_preferred_numa_node_index` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyper_vid_remote_physical_pages` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyper_root_partition_address_spaces` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_attached_devices` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_deposited_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_device_dma_errors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_device_interrupt_errors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_device_interrupt_mappings` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_device_interrupt_throttle_events` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_preferred_numa_node_index` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_gpa_space_modifications` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_io_tlb_flush_cost` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_io_tlb_flush` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_recommended_virtual_tlb_size` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_physical_pages_allocated` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_1G_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_1G_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_2M_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_2M_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_4K_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_4K_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_virtual_tlb_flush_entires` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_root_partition_virtual_tlb_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_hypervisor_virtual_processors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_hypervisor_logical_processors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyper_host_cpu_guest_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyper_host_cpu_hypervisor_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyper_host_cpu_remote_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyper_host_cpu_total_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyper_vm_cpu_guest_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyper_vm_cpu_hypervisor_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyper_vm_cpu_remote_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyper_vm_cpu_total_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyper_vswitch_broadcast_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_broadcast_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_bytes_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_bytes_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_bytes_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_directed_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_directed_packets_send_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_dropped_packets_incoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_dropped_packets_outcoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_extensions_dropped_packets_incoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_extensions_dropped_packets_outcoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_learned_mac_addresses_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_multicast_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_multicast_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_number_of_send_channel_moves_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_number_of_vmq_moves_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_packets_flooded_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_packets_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_vswitch_purged_mac_addresses_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyper_ethernet_bytes_dropped` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_ethernet_bytes_received` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_ethernet_bytes_sent` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_ethernet_frames_dropped` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_ethernet_frames_received` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_ethernet_frames_sent` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyper_vm_device_error_count` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_device_queue_length` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_device_bytes_read` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_device_operations_read` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_device_bytes_written` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_device_operations_written` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyper_vm_interface_bytes_received` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyper_vm_interface_bytes_sent` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyper_vm_interface_packets_incoming_dropped` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyper_vm_interface_packets_outgoing_dropped` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyper_vm_interface_packets_received` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyper_vm_interface_packets_sent` | _Not yet documented_ | counter | `vm_interface`

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
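As a starting point, sketched from the metric names in the table above, the traffic through each virtual switch can be expressed as a byte rate:

```
sum by (vswitch) (rate(wmi_hyper_vswitch_bytes_total[5m]))
```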

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

140  docs/collector.iis.md  Normal file
@@ -0,0 +1,140 @@
# iis collector

The iis collector exposes metrics about the IIS server

|||
-|-
Metric name prefix | `iis`
Classes | `Win32_PerfRawData_W3SVC_WebService`<br/>`Win32_PerfRawData_APPPOOLCountersProvider_APPPOOLWAS`<br/>`Win32_PerfRawData_W3SVCW3WPCounterProvider_W3SVCW3WP`<br/>`Win32_PerfRawData_W3SVC_WebServiceCache`
Enabled by default? | No

## Flags

### `--collector.iis.site-whitelist`

If given, a site needs to match the whitelist regexp in order for the corresponding metrics to be reported.

### `--collector.iis.site-blacklist`

If given, a site needs to *not* match the blacklist regexp in order for the corresponding metrics to be reported.

### `--collector.iis.app-whitelist`

If given, an application needs to match the whitelist regexp in order for the corresponding metrics to be reported.

### `--collector.iis.app-blacklist`

If given, an application needs to *not* match the blacklist regexp in order for the corresponding metrics to be reported.
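Example (the site patterns are purely hypothetical; a whitelist and a blacklist can be combined, since a site has to match the former and must not match the latter):

```
--collector.iis.site-whitelist "prod-.*" --collector.iis.site-blacklist "prod-test.*"
```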

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_iis_current_anonymous_users` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_blocked_async_io_requests` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_cgi_requests` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_connections` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_isapi_extension_requests` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_non_anonymous_users` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_received_bytes_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_sent_bytes_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_anonymous_users_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_blocked_async_io_requests_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_cgi_requests_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_connection_attempts_all_instances_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_requests_total` | _Not yet documented_ | counter | `site`, `method`
|
||||
`wmi_iis_files_received_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_files_sent_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_ipapi_extension_requests_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_locked_errors_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_logon_attempts_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_non_anonymous_users_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_not_found_errors_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_rejected_async_io_requests_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_application_pool_state` | _Not yet documented_ | counter | `app`, `state`
|
||||
`wmi_iis_current_application_pool_start_time` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_current_worker_processes` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_maximum_worker_processes` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_recent_worker_process_failures` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_time_since_last_worker_process_failure` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_application_pool_recycles` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_application_pool_start_time` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_worker_processes_created` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_worker_process_failures` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_worker_process_ping_failures` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_worker_process_shutdown_failures` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_worker_process_startup_failures` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_worker_cache_active_flushed_entries` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_memory_bytes` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_max_memory_bytes` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_flushes_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_queries_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_hits_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_items` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_items_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_items_flushed_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_flushes_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_queries_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_hits_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_items` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_items_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_items_flushed_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_items` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_flushes_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_queries_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_hits_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_items_cached_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_items_flushed_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_active_flushed_items` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_items` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_memory_bytes` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_queries_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_hits_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_items_flushed_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_flushes_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_threads` | _Not yet documented_ | counter | `app`, `pid`, `state`
|
||||
`wmi_iis_worker_max_threads` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_requests_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_current_requests` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_request_errors_total` | _Not yet documented_ | counter | `app`, `pid`, `status_code`
|
||||
`wmi_iis_worker_current_websocket_requests` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_websocket_connection_attempts_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_websocket_connection_accepted_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_websocket_connection_rejected_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_server_cache_active_flushed_entries` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_memory_bytes` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_max_memory_bytes` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_flushes_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_queries_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_hits_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_items` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_items_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_items_flushed_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_uri_cache_flushes_total` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_uri_cache_queries_total` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_uri_cache_hits_total` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_uri_cache_items` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_uri_cache_items_total` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_uri_cache_items_flushed_total` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_metadata_cache_items` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_metadata_cache_flushes_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_metadata_cache_queries_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_metadata_cache_hits_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_metadata_cache_items_cached_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_metadata_cache_items_flushed_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_active_flushed_items` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_items` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_memory_bytes` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_queries_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_hits_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_items_flushed_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_flushes_total` | _Not yet documented_ | counter | None

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
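A query that may be a useful starting point, sketched from the metric names above, is the request rate per site and HTTP method:

```
sum by (site, method) (rate(wmi_iis_requests_total[5m]))
```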

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

44  docs/collector.logical_disk.md  Normal file
@@ -0,0 +1,44 @@
# logical_disk collector

The logical_disk collector exposes metrics about logical disks (in contrast to physical disks)

|||
-|-
Metric name prefix | `logical_disk`
Classes | [`Win32_PerfRawData_PerfDisk_LogicalDisk`](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71))
Enabled by default? | Yes

## Flags

### `--collector.logical_disk.volume-whitelist`

If given, a disk needs to match the whitelist regexp in order for the corresponding disk metrics to be reported

### `--collector.logical_disk.volume-blacklist`

If given, a disk needs to *not* match the blacklist regexp in order for the corresponding disk metrics to be reported

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_logical_disk_requests_queued` | _Not yet documented_ | gauge | `volume`
`wmi_logical_disk_read_bytes_total` | _Not yet documented_ | counter | `volume`
`wmi_logical_disk_reads_total` | _Not yet documented_ | counter | `volume`
`wmi_logical_disk_write_bytes_total` | _Not yet documented_ | counter | `volume`
`wmi_logical_disk_writes_total` | _Not yet documented_ | counter | `volume`
`wmi_logical_disk_read_seconds_total` | _Not yet documented_ | counter | `volume`
`wmi_logical_disk_write_seconds_total` | _Not yet documented_ | counter | `volume`
`wmi_logical_disk_free_bytes` | _Not yet documented_ | gauge | `volume`
`wmi_logical_disk_size_bytes` | _Not yet documented_ | gauge | `volume`
`wmi_logical_disk_idle_seconds_total` | _Not yet documented_ | counter | `volume`
`wmi_logical_disk_split_ios_total` | _Not yet documented_ | counter | `volume`

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
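A query that is often a useful starting point, sketched from the metrics above, is the percentage of free space per volume:

```
100 * wmi_logical_disk_free_bytes / wmi_logical_disk_size_bytes
```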

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

61  docs/collector.memory.md  Normal file
@@ -0,0 +1,61 @@
# memory collector

The memory collector exposes metrics about system memory usage

|||
-|-
Metric name prefix | `memory`
Classes | `Win32_PerfRawData_PerfOS_Memory`
Enabled by default? | Yes

## Flags

None

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_memory_available_bytes` | The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to the standby (cached), free and zero page lists | gauge | None
`wmi_memory_cache_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_cache_bytes_peak` | _Not yet documented_ | gauge | None
`wmi_memory_cache_faults_total` | _Not yet documented_ | gauge | None
`wmi_memory_commit_limit` | _Not yet documented_ | gauge | None
`wmi_memory_committed_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_demand_zero_faults_total` | The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space | gauge | None
`wmi_memory_free_and_zero_page_list_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_free_system_page_table_entries` | _Not yet documented_ | gauge | None
`wmi_memory_modified_page_list_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_page_faults_total` | _Not yet documented_ | gauge | None
`wmi_memory_swap_page_reads_total` | Number of disk page reads (a single read operation reading several pages is still only counted once) | gauge | None
`wmi_memory_swap_pages_read_total` | Number of pages read across all page reads (i.e. counting all pages read even if they are read in a single operation) | gauge | None
`wmi_memory_swap_pages_written_total` | Number of pages written across all page writes (i.e. counting all pages written even if they are written in a single operation) | gauge | None
`wmi_memory_swap_page_operations_total` | Total number of swap page reads and writes (PagesPersec) | gauge | None
`wmi_memory_swap_page_writes_total` | Number of disk page writes (a single write operation writing several pages is still only counted once) | gauge | None
`wmi_memory_pool_nonpaged_allocs_total` | The number of calls to allocate space in the nonpaged pool. The nonpaged pool is an area of system memory for objects that cannot be written to disk, and must remain in physical memory as long as they are allocated | gauge | None
`wmi_memory_pool_nonpaged_bytes_total` | _Not yet documented_ | gauge | None
`wmi_memory_pool_paged_allocs_total` | _Not yet documented_ | gauge | None
`wmi_memory_pool_paged_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_pool_paged_resident_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_standby_cache_core_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_standby_cache_normal_priority_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_standby_cache_reserve_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_system_cache_resident_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_system_code_resident_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_system_code_total_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_system_driver_resident_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_system_driver_total_bytes` | _Not yet documented_ | gauge | None
`wmi_memory_transition_faults_total` | _Not yet documented_ | gauge | None
`wmi_memory_transition_pages_repurposed_total` | _Not yet documented_ | gauge | None
`wmi_memory_write_copies_total` | The number of page faults caused by attempting to write that were satisfied by copying the page from elsewhere in physical memory | gauge | None

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
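A query that may serve as a starting point is the fraction of installed memory that is still available; this is only a sketch and combines this collector with `wmi_cs_physical_memory_bytes` from the cs collector documented earlier:

```
wmi_memory_available_bytes / wmi_cs_physical_memory_bytes
```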

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

33  docs/collector.msmq.md  Normal file
@@ -0,0 +1,33 @@
# msmq collector

The msmq collector exposes metrics about the queues on an MSMQ server

|||
-|-
Metric name prefix | `msmq`
Classes | `Win32_PerfRawData_MSMQ_MSMQQueue`
Enabled by default? | No

## Flags

### `--collector.msmq.msmq-where`

A WMI filter on which queues to include. `%` is a wildcard, and can be used to match on substrings.
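For example, to restrict collection to queues whose name contains `orders` (a purely hypothetical name, assuming the standard `Name` property of the `Win32_PerfRawData_MSMQ_MSMQQueue` class), a filter along these lines could be used:

```
--collector.msmq.msmq-where "Name LIKE '%orders%'"
```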

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_msmq_bytes_in_journal_queue` | Size of the journal queue in bytes | gauge | `name`
`wmi_msmq_bytes_in_queue` | Size of the queue in bytes | gauge | `name`
`wmi_msmq_messages_in_journal_queue` | Number of messages in the journal queue | gauge | `name`
`wmi_msmq_messages_in_queue` | Number of messages in the queue | gauge | `name`

### Example metric
_This collector does not yet have explained examples, we would appreciate your help adding them!_

## Useful queries
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
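As a starting point, a simple sketch showing the five queues with the largest backlog:

```
topk(5, wmi_msmq_messages_in_queue)
```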

## Alerting examples
_This collector does not yet have alerting examples, we would appreciate your help adding them!_

241  docs/collector.mssql.md  Normal file
@@ -0,0 +1,241 @@
# mssql collector

The mssql collector exposes metrics about the MSSQL server

|||
-|-
Metric name prefix | `mssql`
Classes | [`Win32_PerfRawData_MSSQLSERVER_SQLServerAccessMethods`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerAvailabilityReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-availability-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerBufferManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabaseReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-database-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabases`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerGeneralStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerLocks`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerMemoryManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerSQLStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object)
|
||||
Enabled by default? | No

## Flags

### `--collectors.mssql.classes-enabled`

Comma-separated list of MSSQL WMI classes to use. Supported values are `accessmethods`, `availreplica`, `bufman`, `databases`, `dbreplica`, `genstats`, `locks`, `memmgr` and `sqlstats`.
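For example (a sketch only, with class names taken from the supported list above), collection could be limited to the buffer manager and general statistics counters:

```
--collectors.mssql.classes-enabled "bufman,genstats"
```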

### `--collectors.mssql.class-print`

If true, print available mssql WMI classes and exit. Only displays if the mssql collector is enabled.

## Metrics

Name | Description | Type | Labels
-----|-------------|------|-------
`wmi_mssql_collector_duration_seconds` | The time taken for each sub-collector to return | counter | `collector`, `instance`
|
||||
`wmi_mssql_collector_success` | 1 if sub-collector succeeded, 0 otherwise | counter | `collector`, `instance`
|
||||
`wmi_mssql_accessmethods_au_batch_cleanups` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_au_cleanups` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_by_reference_lob_creates` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_by_reference_lob_uses` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_read_aheads` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_column_value_pulls` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_column_value_pushes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_deferred_dropped_aus` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_deferred_dropped_rowsets` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_dropped_rowset_cleanups` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_dropped_rowset_skips` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_extent_deallocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_extent_allocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_au_batch_cleanup_failures` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_leaf_page_cookie_failures` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_tree_page_cookie_failures` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_forwarded_records` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_free_space_page_fetches` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_free_space_scans` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_full_scans` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_index_searches` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_insysxact_waits` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_handle_creates` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_handle_destroys` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_creates` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_destroys` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_truncations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_mixed_page_allocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_compression_attempts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_deallocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_allocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_compressions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_splits` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_probe_scans` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_range_scans` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_scan_point_revalidations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_ghost_record_skips` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_table_lock_escalations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_leaf_page_cookie_uses` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_tree_page_cookie_uses` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_workfile_creates` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_worktables_creates` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_accessmethods_worktables_from_cache_ratio` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_availreplica_received_from_replica_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sent_to_replica_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sent_to_transport_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_initiated_flow_controls` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_flow_control_wait_seconds` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_receives_from_replica` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_resent_messages` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sends_to_replica` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sends_to_transport` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_bufman_background_writer_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_buffer_cache_hit_ratio` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_checkpoint_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_database_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_allocated_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_free_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_in_use_as_percentage` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_outstanding_io` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_evictions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_reads` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_unreferenced_seconds` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_writes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_free_list_stalls` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_integral_controller_slope` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_lazywrites` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_page_life_expectancy_seconds` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_page_lookups` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_page_reads` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_page_writes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_read_ahead_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_read_ahead_issuing_seconds` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_target_pages` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_dbreplica_database_flow_control_wait_seconds` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_database_initiated_flow_controls` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_received_file_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_group_commits` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_group_commit_stall_seconds` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_apply_pending_queue` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_apply_ready_queue` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_compressed_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_decompressed_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_received_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_compression_cachehits` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_compression_cachemisses` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_compressions` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_decompressions` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_remaining_for_undo` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_send_queue` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_mirrored_write_transactions` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_recovery_queue_records` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redo_blocks` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redo_remaining_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redone_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redones` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_total_log_requiring_undo` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_transaction_delay_seconds` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_databases_active_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_backup_restore_operations` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_bulk_copy_rows` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_bulk_copy_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_commit_table_entries` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_data_files_size_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_dbcc_logical_scan_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_group_commit_stall_seconds` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flushed_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_cache_hit_ratio` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_cache_reads` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_files_size_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_files_used_size_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_waits` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_wait_seconds` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_write_seconds` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_growths` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_cache_misses` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_disk_reads` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_hash_deletes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_hash_inserts` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_invalid_hash_entries` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_log_scan_pushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_log_writer_pushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_empty_free_pool_pushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_low_memory_pushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_no_free_buffer_pushes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_req_behind_trunc` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_requests_old_vlf` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_requests` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_total_active_log_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_total_shared_pool_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_shrinks` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_truncations` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_used_percent` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_pending_repl_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_repl_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_shrink_data_movement_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_tracked_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_write_transactions` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_dlc_fetch_latency_seconds` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_dlc_peak_latency_seconds` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_log_processed_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_memory_used_bytes` | _Not yet documented_ | counter | `instance`, `database`
|
||||
`wmi_mssql_genstats_active_temp_tables` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_connection_resets` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_event_notifications_delayed_drop` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_http_authenticated_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_logical_connections` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_logins` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_logouts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_mars_deadlocks` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_non_atomic_yields` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_blocked_processes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_empty_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_method_invocations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_session_initiate_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_session_terminate_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soapsql_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_soapwsdl_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_sql_trace_io_provider_lock_waits` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_tempdb_recovery_unit_ids_generated` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_tempdb_rowset_ids_generated` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_temp_tables_creations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_temp_tables_awaiting_destruction` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_trace_event_notification_queue_size` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_transactions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_genstats_user_connections` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_locks_average_wait_seconds` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_requests` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_timeouts` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_timeouts_excluding_NOWAIT` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_waits` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_wait_seconds` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_deadlocks` | _Not yet documented_ | counter | `instance`, `resource`
|
||||
`wmi_mssql_memmgr_connection_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_database_cache_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_external_benefit_of_memory` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_free_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_granted_workspace_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_blocks` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_allocated_lock_blocks` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_owner_blocks` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_allocated_lock_owner_blocks` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_log_pool_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_maximum_workspace_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_outstanding_memory_grants` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_pending_memory_grants` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_optimizer_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_reserved_server_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_sql_cache_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_stolen_server_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_target_server_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_total_server_memory_bytes` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_auto_parameterization_attempts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_batch_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_failed_auto_parameterization_attempts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_forced_parameterizations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_guided_plan_executions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_misguided_plan_executions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_safe_auto_parameterization_attempts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_attentions` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_compilations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_recompilations` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_unsafe_auto_parameterization_attempts` | _Not yet documented_ | counter | `instance`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
45
docs/collector.net.md
Normal file
@@ -0,0 +1,45 @@
|
||||
# net collector
|
||||
|
||||
The net collector exposes metrics about network interfaces
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `net`
|
||||
Classes | [`Win32_PerfRawData_Tcpip_NetworkInterface`](https://technet.microsoft.com/en-us/security/aa394340(v=vs.80))
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector.net.nic-whitelist`
|
||||
|
||||
If given, an interface name must match this regular expression for its metrics to be reported.
|
||||
|
||||
### `--collector.net.nic-blacklist`
|
||||
|
||||
If given, an interface name must *not* match this regular expression for its metrics to be reported.
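Both flags can be combined to narrow reporting down to the interfaces of interest. A minimal sketch, assuming the exporter binary is `wmi_exporter.exe` in the current directory and is started from PowerShell; the interface-name patterns are illustrative only:

```Powershell
# Keep the default collectors, report all NICs except ISATAP/Teredo pseudo-interfaces.
# Binary path and patterns are examples only.
.\wmi_exporter.exe `
    '--collectors.enabled=[defaults]' `
    '--collector.net.nic-whitelist=.+' `
    '--collector.net.nic-blacklist=isatap.*|Teredo.*'
```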
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_net_bytes_received_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_bytes_sent_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_bytes_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_outbound_discarded` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_outbound_errors` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_received_discarded` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_received_errors` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_received_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_received_unknown` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_packets_sent_total` | _Not yet documented_ | counter | `nic`
|
||||
`wmi_net_current_bandwidth` | _Not yet documented_ | counter | `nic`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
32
docs/collector.netframework_clrexceptions.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# netframework_clrexceptions collector
|
||||
|
||||
The netframework_clrexceptions collector exposes metrics about CLR exceptions in the dotnet framework.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrexceptions`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRExceptions`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
|
||||
`wmi_netframework_clrexceptions_exceptions_thrown_total` | Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions. | counter | `process`
|
||||
`wmi_netframework_clrexceptions_exceptions_filters_total` | Displays the total number of .NET exception filters executed. An exception filter evaluates regardless of whether an exception is handled. | counter | `process`
|
||||
`wmi_netframework_clrexceptions_exceptions_finallys_total` | Displays the total number of finally blocks executed. Only the finally blocks executed for an exception are counted; finally blocks on normal code paths are not counted by this counter. | counter | `process`
|
||||
`wmi_netframework_clrexceptions_throw_to_catch_depth_total` | Displays the total number of stack frames traversed, from the frame that threw the exception to the frame that handled the exception. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
31
docs/collector.netframework_clrinterop.md
Normal file
@@ -0,0 +1,31 @@
|
||||
# netframework_clrinterop collector
|
||||
|
||||
The netframework_clrinterop collector exposes metrics about interop between the dotnet framework and outside components.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrinterop`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRInterop`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
|
||||
`wmi_netframework_clrinterop_com_callable_wrappers_total` | Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client. | counter | `process`
|
||||
`wmi_netframework_clrinterop_interop_marshalling_total` | Displays the total number of times arguments and return values have been marshaled from managed to unmanaged code, and vice versa, since the application started. | counter | `process`
|
||||
`wmi_netframework_clrinterop_interop_stubs_created_total` | Displays the current number of stubs created by the common language runtime. Stubs are responsible for marshaling arguments and return values from managed to unmanaged code, and vice versa, during a COM interop call or a platform invoke call. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
32
docs/collector.netframework_clrjit.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# netframework_clrjit collector
|
||||
|
||||
The netframework_clrjit collector exposes metrics about the dotnet Just-in-Time compiler.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrjit`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRJit`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
|
||||
`wmi_netframework_clrjit_jit_methods_total` | Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods. | counter | `process`
|
||||
`wmi_netframework_clrjit_jit_time_percent` | Displays the percentage of time spent in JIT compilation. This counter is updated at the end of every JIT compilation phase. A JIT compilation phase occurs when a method and its dependencies are compiled. | gauge | `process`
|
||||
`wmi_netframework_clrjit_jit_standard_failures_total` | Displays the peak number of methods the JIT compiler has failed to compile since the application started. This failure can occur if the MSIL cannot be verified or if there is an internal error in the JIT compiler. | counter | `process`
|
||||
`wmi_netframework_clrjit_jit_il_bytes_total` | Displays the total number of Microsoft intermediate language (MSIL) bytes compiled by the just-in-time (JIT) compiler since the application started. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
37
docs/collector.netframework_clrloading.md
Normal file
@@ -0,0 +1,37 @@
|
||||
# netframework_clrloading collector
|
||||
|
||||
The netframework_clrloading collector exposes metrics about the dotnet loader.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrloading`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRLoading`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
|
||||
`wmi_netframework_clrloading_loader_heap_size_bytes` | Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file. | gauge | `process`
|
||||
`wmi_netframework_clrloading_appdomains_loaded_current` | Displays the current number of application domains loaded in this application. | gauge | `process`
|
||||
`wmi_netframework_clrloading_assemblies_loaded_current` | Displays the current number of assemblies loaded across all application domains in the currently running application. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once. | gauge | `process`
|
||||
`wmi_netframework_clrloading_classes_loaded_current` | Displays the current number of classes loaded in all assemblies. | gauge | `process`
|
||||
`wmi_netframework_clrloading_appdomains_loaded_total` | Displays the peak number of application domains loaded since the application started. | counter | `process`
|
||||
`wmi_netframework_clrloading_appdomains_unloaded_total` | Displays the total number of application domains unloaded since the application started. If an application domain is loaded and unloaded multiple times, this counter increments each time the application domain is unloaded. | counter | `process`
|
||||
`wmi_netframework_clrloading_assemblies_loaded_total` | Displays the total number of assemblies loaded since the application started. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once. | counter | `process`
|
||||
`wmi_netframework_clrloading_classes_loaded_total` | Displays the cumulative number of classes loaded in all assemblies since the application started. | counter | `process`
|
||||
`wmi_netframework_clrloading_class_load_failures_total` | Displays the peak number of classes that have failed to load since the application started. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
35
docs/collector.netframework_clrlocksandthreads.md
Normal file
@@ -0,0 +1,35 @@
|
||||
# netframework_clrlocksandthreads collector
|
||||
|
||||
The netframework_clrlocksandthreads collector exposes metrics about locks and threads in dotnet applications.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrlocksandthreads`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
|
||||
`wmi_netframework_clrlocksandthreads_current_queue_length` | Displays the total number of threads that are currently waiting to acquire a managed lock in the application. | gauge | `process`
|
||||
`wmi_netframework_clrlocksandthreads_current_logical_threads` | Displays the number of current managed thread objects in the application. This counter maintains the count of both running and stopped threads. | gauge | `process`
|
||||
`wmi_netframework_clrlocksandthreads_physical_threads_current` | Displays the number of native operating system threads created and owned by the common language runtime to act as underlying threads for managed thread objects. This counter's value does not include the threads used by the runtime in its internal operations; it is a subset of the threads in the operating system process. | gauge | `process`
|
||||
`wmi_netframework_clrlocksandthreads_recognized_threads_current` | Displays the number of threads that are currently recognized by the runtime. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once. | gauge | `process`
|
||||
`wmi_netframework_clrlocksandthreads_recognized_threads_total` | Displays the total number of threads that have been recognized by the runtime since the application started. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once. | counter | `process`
|
||||
`wmi_netframework_clrlocksandthreads_queue_length_total` | Displays the total number of threads that waited to acquire a managed lock since the application started. | counter | `process`
|
||||
`wmi_netframework_clrlocksandthreads_contentions_total` | Displays the total number of times that threads in the runtime have attempted to acquire a managed lock unsuccessfully. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
40
docs/collector.netframework_clrmemory.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# netframework_clrmemory collector
|
||||
|
||||
The netframework_clrmemory collector exposes metrics about memory in dotnet applications.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrmemory`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRMemory`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
|
||||
`wmi_netframework_clrmemory_allocated_bytes_total` | Displays the total number of bytes allocated on the garbage collection heap. | counter | `process`
|
||||
`wmi_netframework_clrmemory_finalization_survivors` | Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_heap_size_bytes` | Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_promoted_bytes` | Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_number_gc_handles` | Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_collections_total` | Displays the number of times the generation objects are garbage collected since the application started. | counter | `process`
|
||||
`wmi_netframework_clrmemory_induced_gc_total` | Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect. | counter | `process`
|
||||
`wmi_netframework_clrmemory_number_pinned_objects` | Displays the number of pinned objects encountered in the last garbage collection. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_number_sink_blocksinuse` | Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_committed_bytes` | Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_reserved_bytes` | Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_gc_time_percent` | Displays the percentage of time that was spent performing a garbage collection in the last sample. | gauge | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
33
docs/collector.netframework_clrremoting.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# netframework_clrremoting collector
|
||||
|
||||
The netframework_clrremoting collector exposes metrics about dotnet remoting.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrremoting`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRRemoting`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_netframework_clrremoting_channels_total` | Displays the total number of remoting channels registered across all application domains since the application started. | counter | `process`
|
||||
`wmi_netframework_clrremoting_context_bound_classes_loaded` | Displays the current number of context-bound classes that are loaded. | gauge | `process`
|
||||
`wmi_netframework_clrremoting_context_bound_objects_total` | Displays the total number of context-bound objects allocated. | counter | `process`
|
||||
`wmi_netframework_clrremoting_context_proxies_total` | Displays the total number of remoting proxy objects in this process since it started. | counter | `process`
|
||||
`wmi_netframework_clrremoting_contexts` | Displays the current number of remoting contexts in the application. | gauge | `process`
|
||||
`wmi_netframework_clrremoting_remote_calls_total` | Displays the total number of remote procedure calls invoked since the application started. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
31
docs/collector.netframework_clrsecurity.md
Normal file
@@ -0,0 +1,31 @@
|
||||
# netframework_clrsecurity collector
|
||||
|
||||
The netframework_clrsecurity collector exposes metrics about security checks in dotnet applications
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrsecurity`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRSecurity`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_netframework_clrsecurity_link_time_checks_total` | Displays the total number of link-time code access security checks since the application started. | counter | `process`
|
||||
`wmi_netframework_clrsecurity_rt_checks_time_percent` | Displays the percentage of time spent performing runtime code access security checks in the last sample. | gauge | `process`
|
||||
`wmi_netframework_clrsecurity_stack_walk_depth` | Displays the depth of the stack during the last runtime code access security check. | gauge | `process`
|
||||
`wmi_netframework_clrsecurity_runtime_checks_total` | Displays the total number of runtime code access security checks performed since the application started. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
39
docs/collector.os.md
Normal file
@@ -0,0 +1,39 @@
|
||||
# os collector
|
||||
|
||||
The os collector exposes metrics about the operating system
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `os`
|
||||
Classes | [`Win32_OperatingSystem`](https://msdn.microsoft.com/en-us/library/aa394239)
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_os_paging_limit_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_paging_free_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_physical_memory_free_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_time` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_timezone` | _Not yet documented_ | gauge | `timezone`
|
||||
`wmi_os_processes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_processes_limit` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_process_memory_limix_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_users` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_virtual_memory_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_visible_memory_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_os_virtual_memory_free_bytes` | _Not yet documented_ | gauge | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
46
docs/collector.process.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# process collector
|
||||
|
||||
The process collector exposes metrics about processes
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `process`
|
||||
Classes | [`Win32_PerfRawData_PerfProc_Process`](https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector.process.processes-where`
|
||||
|
||||
A WMI `WHERE` clause selecting which processes to include. Using a filter is recommended, as it keeps the number of returned metrics down.
|
||||
|
||||
`%` is a wildcard, and can be used to match on substrings.
|
||||
|
||||
Example: `--collector.process.processes-where="Name LIKE 'firefox%'"`
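When the exporter is started from PowerShell, wrapping the whole flag in single quotes keeps the inner quoting intact on its way to WMI. A minimal sketch (binary path and process name pattern are examples only; note that `process` must also be added to the enabled collectors, since it is off by default):

```Powershell
# Doubled single quotes ('') inside a single-quoted PowerShell string yield one
# literal quote, so the exporter receives: Name LIKE 'firefox%'
.\wmi_exporter.exe `
    '--collectors.enabled=[defaults],process' `
    '--collector.process.processes-where=Name LIKE ''firefox%'''
```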
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_process_start_time` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_cpu_time_total` | _Not yet documented_ | counter | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_handle_count` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_io_bytes_total` | _Not yet documented_ | counter | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_io_operations_total` | _Not yet documented_ | counter | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_page_faults_total` | _Not yet documented_ | counter | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_page_file_bytes` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_pool_bytes` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_priority_base` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_private_bytes` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_thread_count` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_virtual_bytes` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_working_set` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
75
docs/collector.service.md
Normal file
@@ -0,0 +1,75 @@
|
||||
# service collector
|
||||
|
||||
The service collector exposes metrics about Windows Services
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `service`
|
||||
Classes | [`Win32_Service`](https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx)
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector.service.services-where`
|
||||
|
||||
A WMI `WHERE` clause selecting which services to include. Using a filter is recommended, as it keeps the number of returned metrics down.
|
||||
|
||||
Example: `--collector.service.services-where="Name='wmi_exporter'"`
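Since the filter is passed straight through to WMI, a candidate `WHERE` clause can be validated against `Win32_Service` before it is handed to the exporter, for example with `Get-CimInstance` (a sketch, reusing the service name from the example above):

```Powershell
# Check what the WHERE clause matches before using it with --collector.service.services-where.
Get-CimInstance -ClassName Win32_Service -Filter "Name='wmi_exporter'" |
    Select-Object -Property Name, State, StartMode, Status
```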
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_service_state` | 1 if the service is in the state named by the `state` label, 0 otherwise | gauge | `name`, `state`
|
||||
`wmi_service_start_mode` | 1 if the service has the start mode named by the `start_mode` label, 0 otherwise | gauge | `name`, `start_mode`
|
||||
`wmi_service_status` | 1 if the service reports the status named by the `status` label, 0 otherwise | gauge | `name`, `status`
|
||||
|
||||
For the values of the `state`, `start_mode` and `status` labels, see below.
|
||||
|
||||
### States
|
||||
|
||||
A service can be in the following states:
|
||||
- `stopped`
|
||||
- `start pending`
|
||||
- `stop pending`
|
||||
- `running`
|
||||
- `continue pending`
|
||||
- `pause pending`
|
||||
- `paused`
|
||||
- `unknown`
|
||||
|
||||
### Start modes
|
||||
|
||||
A service can have the following start modes:
|
||||
- `boot`
|
||||
- `system`
|
||||
- `auto`
|
||||
- `manual`
|
||||
- `disabled`
|
||||
|
||||
### Status
|
||||
|
||||
A service can have any of the following statuses:
|
||||
- `ok`
|
||||
- `error`
|
||||
- `degraded`
|
||||
- `unknown`
|
||||
- `pred fail`
|
||||
- `starting`
|
||||
- `stopping`
|
||||
- `service`
|
||||
- `stressed`
|
||||
- `nonrecover`
|
||||
- `no contact`
|
||||
- `lost comm`
|
||||
|
||||
Note that there is some overlap with service state.
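Because each service/state combination is exported as its own 0/1 series, the current state of a service can be read straight off the `/metrics` endpoint. A minimal PowerShell sketch, assuming the default listen address `:9182`; `wuauserv` is used purely as an example service name:

```Powershell
# Fetch the metrics page and show the one-hot wmi_service_state series for one service.
$resp = Invoke-WebRequest -UseBasicParsing -Uri 'http://localhost:9182/metrics'
$resp.Content -split "`n" |
    Select-String -SimpleMatch 'wmi_service_state{name="wuauserv"'
```

The series whose value is `1` indicates the state the service is currently in.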
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
33
docs/collector.system.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# system collector
|
||||
|
||||
The system collector exposes metrics about system-level Windows performance counters such as context switches, system calls, the processor queue length, threads and uptime.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `system`
|
||||
Classes | [`Win32_PerfRawData_PerfOS_System`](https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp)
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_system_context_switches_total` | _Not yet documented_ | counter | None
|
||||
`wmi_system_exception_dispatches_total` | _Not yet documented_ | counter | None
|
||||
`wmi_system_processor_queue_length` | _Not yet documented_ | gauge | None
|
||||
`wmi_system_system_calls_total` | _Not yet documented_ | counter | None
|
||||
`wmi_system_system_up_time` | _Not yet documented_ | gauge | None
|
||||
`wmi_system_threads` | _Not yet documented_ | gauge | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
36
docs/collector.tcp.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# tcp collector
|
||||
|
||||
The tcp collector exposes metrics about the TCP/IPv4 network stack.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `tcp`
|
||||
Classes | [`Win32_PerfRawData_Tcpip_TCPv4`](https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_tcp_connection_failures` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_connections_active` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_connections_established` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_connections_passive` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_connections_reset` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_segments_total` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_segments_received_total` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_segments_retransmitted_total` | _Not yet documented_ | counter | None
|
||||
`wmi_tcp_segments_sent_total` | _Not yet documented_ | counter | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
56
docs/collector.textfile.md
Normal file
@@ -0,0 +1,56 @@
|
||||
# textfile collector
|
||||
|
||||
The textfile collector exposes metrics from files written by other processes.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `textfile`
|
||||
Classes | None
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector.textfile.directory`
|
||||
|
||||
The directory containing the files to be ingested. Only files with the extension `.prom` are read.
|
||||
|
||||
Default value: `C:\Program Files\wmi_exporter\textfile_inputs`
|
||||
|
||||
Required: No
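A minimal sketch of pointing the collector at a non-default directory when starting the exporter from PowerShell; the path and binary location are examples only:

```Powershell
# Read *.prom files from a custom directory instead of the default one.
.\wmi_exporter.exe '--collector.textfile.directory=C:\custom_metrics'
```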
|
||||
|
||||
## Metrics
|
||||
|
||||
Metrics will primarily come from the files on disk. The below listed metrics
|
||||
are collected to give information about the reading of the metrics themselves.
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_textfile_scrape_error` | 1 if there was an error opening or reading a file, 0 otherwise | gauge | None
|
||||
`wmi_textfile_mtime_seconds` | Unix epoch-formatted mtime (modified time) of textfiles successfully read | gauge | file
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
|
||||
# Example use
|
||||
This Powershell script, when run in the `collector.textfile.directory` (default `C:\Program Files\wmi_exporter\textfile_inputs`), generates a valid `.prom` file that should be successfully ingested by wmi_exporter.
|
||||
|
||||
```Powershell
|
||||
$alpha = 42
|
||||
$beta = @{ left=3.1415; right=2.718281828; }
|
||||
|
||||
Set-Content -Path test1.prom -Encoding Ascii -NoNewline -Value ""
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# HELP test_alpha_total Some random metric.`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# TYPE test_alpha_total counter`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "test_alpha_total ${alpha}`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# HELP test_beta_bytes Some other metric.`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# TYPE test_beta_bytes gauge`n"
|
||||
foreach ($k in $beta.Keys) {
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "test_beta_bytes{spin=""${k}""} $( $beta[$k] )`n"
|
||||
}
|
||||
```
|
||||
46
docs/collector.vmware.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# vmware collector
|
||||
|
||||
The vmware collector exposes metrics about a VMware guest VM
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `vmware`
|
||||
Classes | `Win32_PerfRawData_vmGuestLib_VMem`, `Win32_PerfRawData_vmGuestLib_VCPU`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_vmware_mem_active_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_ballooned_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_limit_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_mapped_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_overhead_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_reservation_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_shared_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_shared_saved_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_shares` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_swapped_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_target_size_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_used_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_cpu_limit_mhz` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_cpu_reservation_mhz` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_cpu_shares` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_cpu_stolen_seconds_total` | _Not yet documented_ | counter | None
|
||||
`wmi_vmware_cpu_time_seconds_total` | _Not yet documented_ | counter | None
|
||||
`wmi_vmware_effective_vm_speed_mhz` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_host_processor_speed_mhz` | _Not yet documented_ | gauge | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
260
exporter.go
@@ -1,10 +1,10 @@
|
||||
// +build windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -12,54 +12,143 @@ import (
|
||||
|
||||
"golang.org/x/sys/windows/svc"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/martinlindhe/wmi_exporter/collector"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/prometheus/common/log"
|
||||
"github.com/prometheus/common/version"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
// WmiCollector implements the prometheus.Collector interface.
|
||||
type WmiCollector struct {
|
||||
collectors map[string]collector.Collector
|
||||
maxScrapeDuration time.Duration
|
||||
collectors map[string]collector.Collector
|
||||
}
|
||||
|
||||
const (
|
||||
defaultCollectors = "logical_disk,os"
|
||||
serviceName = "wmi_exporter"
|
||||
defaultCollectors = "cpu,cs,logical_disk,net,os,service,system,textfile"
|
||||
defaultCollectorsPlaceholder = "[defaults]"
|
||||
serviceName = "wmi_exporter"
|
||||
)
|
||||
|
||||
var (
|
||||
scrapeDurations = prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Namespace: collector.Namespace,
|
||||
Subsystem: "exporter",
|
||||
Name: "scrape_duration_seconds",
|
||||
Help: "wmi_exporter: Duration of a scrape job.",
|
||||
},
|
||||
[]string{"collector", "result"},
|
||||
scrapeDurationDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(collector.Namespace, "exporter", "collector_duration_seconds"),
|
||||
"wmi_exporter: Duration of a collection.",
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
)
|
||||
scrapeSuccessDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(collector.Namespace, "exporter", "collector_success"),
|
||||
"wmi_exporter: Whether the collector was successful.",
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
)
|
||||
scrapeTimeoutDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(collector.Namespace, "exporter", "collector_timeout"),
|
||||
"wmi_exporter: Whether the collector timed out.",
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
)
|
||||
|
||||
// This can be removed when client_golang exposes this on Windows
|
||||
// (See https://github.com/prometheus/client_golang/issues/376)
|
||||
startTime = float64(time.Now().Unix())
|
||||
startTimeDesc = prometheus.NewDesc(
|
||||
"process_start_time_seconds",
|
||||
"Start time of the process since unix epoch in seconds.",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
)
|
||||
|
||||
// Describe sends all the descriptors of the collectors included to
|
||||
// the provided channel.
|
||||
func (coll WmiCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
scrapeDurations.Describe(ch)
|
||||
ch <- scrapeDurationDesc
|
||||
ch <- scrapeSuccessDesc
|
||||
}
|
||||
|
||||
// Collect sends the collected metrics from each of the collectors to
|
||||
// prometheus. Collect could be called several times concurrently
|
||||
// and thus its run is protected by a single mutex.
|
||||
// prometheus.
|
||||
func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
scrapeContext, err := collector.PrepareScrapeContext()
|
||||
if err != nil {
|
||||
ch <- prometheus.NewInvalidMetric(scrapeSuccessDesc, fmt.Errorf("failed to prepare scrape: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
remainingCollectors := make(map[string]bool)
|
||||
for name := range coll.collectors {
|
||||
remainingCollectors[name] = true
|
||||
}
|
||||
|
||||
metricsBuffer := make(chan prometheus.Metric)
|
||||
allDone := make(chan struct{})
|
||||
stopped := false
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case m := <-metricsBuffer:
|
||||
if !stopped {
|
||||
ch <- m
|
||||
}
|
||||
case <-allDone:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(coll.collectors))
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(allDone)
|
||||
close(metricsBuffer)
|
||||
}()
|
||||
|
||||
for name, c := range coll.collectors {
|
||||
go func(name string, c collector.Collector) {
|
||||
execute(name, c, ch)
|
||||
execute(name, c, scrapeContext, metricsBuffer)
|
||||
wg.Done()
|
||||
delete(remainingCollectors, name)
|
||||
}(name, c)
|
||||
}
|
||||
wg.Wait()
|
||||
scrapeDurations.Collect(ch)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
startTimeDesc,
|
||||
prometheus.CounterValue,
|
||||
startTime,
|
||||
)
|
||||
|
||||
select {
|
||||
case <-allDone:
|
||||
stopped = true
|
||||
return
|
||||
case <-time.After(coll.maxScrapeDuration):
|
||||
stopped = true
|
||||
remainingCollectorNames := make([]string, 0, len(remainingCollectors))
|
||||
for rc := range remainingCollectors {
|
||||
remainingCollectorNames = append(remainingCollectorNames, rc)
|
||||
}
|
||||
log.Warn("Collection timed out, still waiting for ", remainingCollectorNames)
|
||||
for name := range remainingCollectors {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeSuccessDesc,
|
||||
prometheus.GaugeValue,
|
||||
0.0,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeTimeoutDesc,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
name,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func filterAvailableCollectors(collectors string) string {
|
||||
@@ -73,25 +162,60 @@ func filterAvailableCollectors(collectors string) string {
|
||||
return strings.Join(availableCollectors, ",")
|
||||
}
|
||||
|
||||
func execute(name string, c collector.Collector, ch chan<- prometheus.Metric) {
|
||||
func execute(name string, c collector.Collector, ctx *collector.ScrapeContext, ch chan<- prometheus.Metric) {
|
||||
begin := time.Now()
|
||||
err := c.Collect(ch)
|
||||
err := c.Collect(ctx, ch)
|
||||
duration := time.Since(begin)
|
||||
var result string
|
||||
var success float64
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("ERROR: %s collector failed after %fs: %s", name, duration.Seconds(), err)
|
||||
result = "error"
|
||||
log.Errorf("collector %s failed after %fs: %s", name, duration.Seconds(), err)
|
||||
success = 0
|
||||
} else {
|
||||
log.Debugf("OK: %s collector succeeded after %fs.", name, duration.Seconds())
|
||||
result = "success"
|
||||
log.Debugf("collector %s succeeded after %fs.", name, duration.Seconds())
|
||||
success = 1
|
||||
}
|
||||
scrapeDurations.WithLabelValues(name, result).Observe(duration.Seconds())
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeDurationDesc,
|
||||
prometheus.GaugeValue,
|
||||
duration.Seconds(),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeSuccessDesc,
|
||||
prometheus.GaugeValue,
|
||||
success,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeTimeoutDesc,
|
||||
prometheus.GaugeValue,
|
||||
0.0,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
func expandEnabledCollectors(enabled string) []string {
|
||||
expanded := strings.Replace(enabled, defaultCollectorsPlaceholder, defaultCollectors, -1)
|
||||
separated := strings.Split(expanded, ",")
|
||||
unique := map[string]bool{}
|
||||
for _, s := range separated {
|
||||
if s != "" {
|
||||
unique[s] = true
|
||||
}
|
||||
}
|
||||
result := make([]string, 0, len(unique))
|
||||
for s := range unique {
|
||||
result = append(result, s)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func loadCollectors(list string) (map[string]collector.Collector, error) {
|
||||
collectors := map[string]collector.Collector{}
|
||||
for _, name := range strings.Split(list, ",") {
|
||||
enabled := expandEnabledCollectors(list)
|
||||
|
||||
for _, name := range enabled {
|
||||
fn, ok := collector.Factories[name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("collector '%s' not available", name)
|
||||
@@ -109,20 +233,47 @@ func init() {
|
||||
prometheus.MustRegister(version.NewCollector("wmi_exporter"))
|
||||
}
|
||||
|
||||
func initWbem() {
|
||||
// This initialization prevents a memory leak on WMF 5+. See
|
||||
// https://github.com/martinlindhe/wmi_exporter/issues/77 and linked issues
|
||||
// for details.
|
||||
log.Debugf("Initializing SWbemServices")
|
||||
s, err := wmi.InitializeSWbemServices(wmi.DefaultClient)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
wmi.DefaultClient.AllowMissingFields = true
|
||||
wmi.DefaultClient.SWbemServicesClient = s
|
||||
}
|
||||
|
||||
func main() {
|
||||
var (
|
||||
showVersion = flag.Bool("version", false, "Print version information.")
|
||||
listenAddress = flag.String("telemetry.addr", ":9182", "host:port for WMI exporter.")
|
||||
metricsPath = flag.String("telemetry.path", "/metrics", "URL path for surfacing collected metrics.")
|
||||
enabledCollectors = flag.String("collectors.enabled", filterAvailableCollectors(defaultCollectors), "Comma-separated list of collectors to use.")
|
||||
printCollectors = flag.Bool("collectors.print", false, "If true, print available collectors and exit.")
|
||||
listenAddress = kingpin.Flag(
|
||||
"telemetry.addr",
|
||||
"host:port for WMI exporter.",
|
||||
).Default(":9182").String()
|
||||
metricsPath = kingpin.Flag(
|
||||
"telemetry.path",
|
||||
"URL path for surfacing collected metrics.",
|
||||
).Default("/metrics").String()
|
||||
enabledCollectors = kingpin.Flag(
|
||||
"collectors.enabled",
|
||||
"Comma-separated list of collectors to use. Use '[defaults]' as a placeholder for all the collectors enabled by default.").
|
||||
Default(filterAvailableCollectors(defaultCollectors)).String()
|
||||
printCollectors = kingpin.Flag(
|
||||
"collectors.print",
|
||||
"If true, print available collectors and exit.",
|
||||
).Bool()
|
||||
maxScrapeDuration = kingpin.Flag(
|
||||
"scrape.max-duration",
|
||||
"Time after which collectors are aborted during a scrape",
|
||||
).Default("30s").Duration()
|
||||
)
|
||||
flag.Parse()
|
||||
|
||||
if *showVersion {
|
||||
fmt.Fprintln(os.Stdout, version.Print("wmi_exporter"))
|
||||
os.Exit(0)
|
||||
}
|
||||
log.AddFlags(kingpin.CommandLine)
|
||||
kingpin.Version(version.Print("wmi_exporter"))
|
||||
kingpin.HelpFlag.Short('h')
|
||||
kingpin.Parse()
|
||||
|
||||
if *printCollectors {
|
||||
collectorNames := make(sort.StringSlice, 0, len(collector.Factories))
|
||||
@@ -137,6 +288,8 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
initWbem()
|
||||
|
||||
isInteractive, err := svc.IsAnInteractiveSession()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
@@ -144,7 +297,12 @@ func main() {
|
||||
|
||||
stopCh := make(chan bool)
|
||||
if !isInteractive {
|
||||
go svc.Run(serviceName, &wmiExporterService{stopCh: stopCh})
|
||||
go func() {
|
||||
err = svc.Run(serviceName, &wmiExporterService{stopCh: stopCh})
|
||||
if err != nil {
|
||||
log.Errorf("Failed to start service: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
collectors, err := loadCollectors(*enabledCollectors)
|
||||
@@ -154,10 +312,14 @@ func main() {
|
||||
|
||||
log.Infof("Enabled collectors: %v", strings.Join(keys(collectors), ", "))
|
||||
|
||||
nodeCollector := WmiCollector{collectors: collectors}
|
||||
prometheus.MustRegister(nodeCollector)
|
||||
exporter := WmiCollector{
|
||||
collectors: collectors,
|
||||
maxScrapeDuration: *maxScrapeDuration,
|
||||
}
|
||||
prometheus.MustRegister(exporter)
|
||||
|
||||
http.Handle(*metricsPath, prometheus.Handler())
|
||||
http.Handle(*metricsPath, promhttp.Handler())
|
||||
http.HandleFunc("/health", healthCheck)
|
||||
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Redirect(w, r, *metricsPath, http.StatusMovedPermanently)
|
||||
})
|
||||
@@ -167,9 +329,7 @@ func main() {
|
||||
|
||||
go func() {
|
||||
log.Infoln("Starting server on", *listenAddress)
|
||||
if err := http.ListenAndServe(*listenAddress, nil); err != nil {
|
||||
log.Fatalf("cannot start WMI exporter: %s", err)
|
||||
}
|
||||
log.Fatalf("cannot start WMI exporter: %s", http.ListenAndServe(*listenAddress, nil))
|
||||
}()
|
||||
|
||||
for {
|
||||
@@ -180,9 +340,17 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
func healthCheck(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_, err := fmt.Fprintln(w, `{"status":"ok"}`)
|
||||
if err != nil {
|
||||
log.Debugf("Failed to write to stream: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func keys(m map[string]collector.Collector) []string {
|
||||
ret := make([]string, 0, len(m))
|
||||
for key, _ := range m {
|
||||
for key := range m {
|
||||
ret = append(ret, key)
|
||||
}
|
||||
return ret
|
||||
|
||||
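For orientation, here is a minimal sketch of the placeholder expansion that the `--collectors.enabled` help text above describes. Only the de-duplication tail of `expandEnabledCollectors` is visible in this hunk, so the body below is an assumption kept consistent with the test cases in exporter_test.go further down; the `defaultCollectors` value shown is a made-up example, and the real constants are defined elsewhere in exporter.go.

```go
package main

import "strings"

// Hypothetical stand-in values; the real constants live in exporter.go.
const (
	defaultCollectorsPlaceholder = "[defaults]"
	defaultCollectors            = "cpu,cs,logical_disk,net,os,service,system"
)

// expandEnabledCollectors replaces the "[defaults]" placeholder with the
// default collector list, splits the result on commas, and de-duplicates
// the entries (the de-duplication loop is the code shown in the hunk above).
func expandEnabledCollectors(enabled string) []string {
	expanded := strings.Replace(enabled, defaultCollectorsPlaceholder, defaultCollectors, -1)
	separated := strings.Split(expanded, ",")
	unique := map[string]bool{}
	for _, s := range separated {
		if s != "" {
			unique[s] = true
		}
	}
	result := make([]string, 0, len(unique))
	for s := range unique {
		result = append(result, s)
	}
	return result
}
```

With this behaviour, a flag value such as `[defaults],textfile` would enable the default set plus one extra collector without having to spell out every default by hand.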
exporter_test.go (Normal file, 51 lines)
@@ -0,0 +1,51 @@
// +build windows

package main

import (
    "sort"
    "strings"
    "testing"
)

type expansionTestCase struct {
    input          string
    expectedOutput []string
}

func TestExpandEnabled(t *testing.T) {
    expansionTests := []expansionTestCase{
        {"", []string{}},
        // Default case
        {"cs,os", []string{"cs", "os"}},
        // Placeholder expansion
        {defaultCollectorsPlaceholder, strings.Split(defaultCollectors, ",")},
        // De-duplication
        {"cs,cs", []string{"cs"}},
        // De-duplicate placeholder
        {defaultCollectorsPlaceholder + "," + defaultCollectorsPlaceholder, strings.Split(defaultCollectors, ",")},
        // Composite case
        {"foo," + defaultCollectorsPlaceholder + ",bar", append(strings.Split(defaultCollectors, ","), "foo", "bar")},
    }

    for _, testCase := range expansionTests {
        output := expandEnabledCollectors(testCase.input)
        sort.Strings(output)

        success := true
        if len(output) != len(testCase.expectedOutput) {
            success = false
        } else {
            sort.Strings(testCase.expectedOutput)
            for idx := range output {
                if output[idx] != testCase.expectedOutput[idx] {
                    success = false
                    break
                }
            }
        }
        if !success {
            t.Error("For", testCase.input, "expected", testCase.expectedOutput, "got", output)
        }
    }
}

gometalinter.config (Normal file, 28 lines)
@@ -0,0 +1,28 @@
{
    "Disable": [
        "goconst",
        "gocyclo",
        "gosec",
        "maligned",
        "megacheck"
    ],
    "Enable": [
        "deadcode",
        "errcheck",
        "golint",
        "gotype",
        "gotypex",
        "ineffassign",
        "interfacer",
        "structcheck",
        "unconvert",
        "varcheck",
        "vet",
        "vetshadow"
    ],
    "Exclude": [
        "don't use underscores in Go names",
        "exported type .+ should have comment or be unexported",
        "should be"
    ]
}

@@ -1,12 +1,12 @@
[CmdletBinding()]
Param (
    [Parameter(Mandatory=$true)]
    [String] $PathToExecutable,
    [Parameter(Mandatory=$true)]
    [String] $Version,
    [Parameter(Mandatory=$false)]
    [ValidateSet("amd64","386")]
    [String] $Arch = "amd64"
    [Parameter(Mandatory = $true)]
    [String] $PathToExecutable,
    [Parameter(Mandatory = $true)]
    [String] $Version,
    [Parameter(Mandatory = $false)]
    [ValidateSet("amd64", "386")]
    [String] $Arch = "amd64"
)
$ErrorActionPreference = "Stop"

@@ -15,47 +15,46 @@ $PathToExecutable = Resolve-Path $PathToExecutable
# Set working dir to this directory, reset previous on exit
Push-Location $PSScriptRoot
Trap {
    # Reset working dir on error
    Pop-Location
    # Reset working dir on error
    Pop-Location
}

if ($PSVersionTable.PSVersion.Major -lt 5) {
    Write-Error "Powershell version 5 required"
    exit 1
    Write-Error "Powershell version 5 required"
    exit 1
}

$wc = New-Object System.Net.WebClient
function Get-FileIfNotExists {
    Param (
        $Url,
        $Destination
    )
    if(-not (Test-Path $Destination)) {
        Write-Verbose "Downloading $Url"
        $wc.DownloadFile($Url, $Destination)
    }
    else {
        Write-Verbose "${Destination} already exists. Skipping."
    }
    Param (
        $Url,
        $Destination
    )
    if (-not (Test-Path $Destination)) {
        Write-Verbose "Downloading $Url"
        $wc.DownloadFile($Url, $Destination)
    }
    else {
        Write-Verbose "${Destination} already exists. Skipping."
    }
}

$sourceDir = mkdir -Force Source
mkdir -Force Work,Output | Out-Null
mkdir -Force Work, Output | Out-Null

Write-Verbose "Downloading files"
# Somewhat obscure url, points to WiX 3.10 binary release
Write-Verbose "Downloading WiX..."
Get-FileIfNotExists "http://download-codeplex.sec.s-msft.com/Download/Release?ProjectName=wix&DownloadId=1504735&FileTime=130906491728530000&Build=21031" "$sourceDir\wix-binaries.zip"
[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12
Get-FileIfNotExists "https://github.com/wixtoolset/wix3/releases/download/wix311rtm/wix311-binaries.zip" "$sourceDir\wix-binaries.zip"
mkdir -Force WiX | Out-Null
Expand-Archive -Path "${sourceDir}\wix-binaries.zip" -DestinationPath WiX -Force

Copy-Item -Force $PathToExecutable Work/wmi_exporter.exe

Write-Verbose "Creating wmi_exporter-${Version}-${Arch}.msi"
$wixArch = @{"amd64"="x64"; "386"="x86"}[$Arch]
$wixOpts = "-ext WixFirewallExtension"
$wixArch = @{"amd64" = "x64"; "386" = "x86"}[$Arch]
$wixOpts = "-ext WixFirewallExtension -ext WixUtilExtension"
Invoke-Expression "WiX\candle.exe -nologo -arch $wixArch $wixOpts -out Work\wmi_exporter.wixobj -dVersion=`"$Version`" wmi_exporter.wxs"
Invoke-Expression "WiX\light.exe -nologo -spdb $wixOpts -out `"Output\wmi_exporter-${Version}-${Arch}.msi`" Work\wmi_exporter.wixobj"

Write-Verbose "Done!"
Pop-Location
Pop-Location

@@ -1,6 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi"
     xmlns:fw="http://schemas.microsoft.com/wix/FirewallExtension">
     xmlns:fw="http://schemas.microsoft.com/wix/FirewallExtension"
     xmlns:util="http://schemas.microsoft.com/wix/UtilExtension">
  <?if $(sys.BUILDARCH)=x64 ?>
  <?define PlatformProgramFiles = "ProgramFiles64Folder" ?>
  <?else ?>
@@ -16,28 +17,42 @@
    <MajorUpgrade Schedule="afterInstallExecute" DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit." />

    <Property Id="ENABLED_COLLECTORS" Secure="yes"/>
    <SetProperty Id="CollectorsFlag" After="InstallFiles" Sequence="execute" Value="-collectors.enabled [ENABLED_COLLECTORS]">ENABLED_COLLECTORS</SetProperty>
    <SetProperty Id="CollectorsFlag" After="InstallFiles" Sequence="execute" Value="--collectors.enabled [ENABLED_COLLECTORS]">ENABLED_COLLECTORS</SetProperty>

    <Property Id="EXTRA_FLAGS" Secure="yes"/>
    <SetProperty Id="ExtraFlags" After="InstallFiles" Sequence="execute" Value="[EXTRA_FLAGS]">EXTRA_FLAGS</SetProperty>

    <Property Id="LISTEN_ADDR" Secure="yes" />
    <Property Id="LISTEN_PORT" Secure="yes" Value="9182" />
    <SetProperty Id="ListenFlag" After="InstallFiles" Sequence="execute" Value="-telemetry.addr [LISTEN_ADDR]:[LISTEN_PORT]">LISTEN_ADDR OR LISTEN_PORT</SetProperty>
    <SetProperty Id="ListenFlag" After="InstallFiles" Sequence="execute" Value="--telemetry.addr [LISTEN_ADDR]:[LISTEN_PORT]">LISTEN_ADDR OR LISTEN_PORT</SetProperty>

    <Property Id="METRICS_PATH" Secure="yes"/>
    <SetProperty Id="MetricsPathFlag" After="InstallFiles" Sequence="execute" Value="-telemetry.path [METRICS_PATH]">METRICS_PATH</SetProperty>
    <SetProperty Id="MetricsPathFlag" After="InstallFiles" Sequence="execute" Value="--telemetry.path [METRICS_PATH]">METRICS_PATH</SetProperty>

    <Directory Id="TARGETDIR" Name="SourceDir">
      <Directory Id="$(var.PlatformProgramFiles)">
        <Directory Id="APPLICATIONROOTDIRECTORY" Name="wmi_exporter" />
        <Directory Id="APPLICATIONROOTDIRECTORY" Name="wmi_exporter">
          <Directory Id="textfile_inputs" Name="textfile_inputs" />
        </Directory>
      </Directory>
    </Directory>

    <ComponentGroup Id="Files" Directory="APPLICATIONROOTDIRECTORY">
      <Component>
    <Property Id="TEXTFILE_DIR" Secure="yes"/>
    <SetProperty Id="TextfileDirFlag" After="InstallFiles" Sequence="execute" Value="--collector.textfile.directory [TEXTFILE_DIR]">TEXTFILE_DIR</SetProperty>

    <ComponentGroup Id="Files">
      <Component Directory="APPLICATIONROOTDIRECTORY">
        <File Id="wmi_exporter.exe" Name="wmi_exporter.exe" Source="Work\wmi_exporter.exe" KeyPath="yes">
          <fw:FirewallException Id="MetricsEndpoint" Name="WMI Exporter (HTTP [LISTEN_PORT])" Description="WMI Exporter HTTP endpoint" Port="[LISTEN_PORT]" Protocol="tcp" Scope="any" />
          <fw:FirewallException Id="MetricsEndpoint" Name="WMI Exporter (HTTP [LISTEN_PORT])" Description="WMI Exporter HTTP endpoint" Port="[LISTEN_PORT]" Protocol="tcp" Scope="any" IgnoreFailure="yes" />
        </File>
        <ServiceInstall Id="InstallExporterService" Name="wmi_exporter" ErrorControl="normal" Start="auto" Type="ownProcess" Arguments="-log.format logger:eventlog?name=wmi_exporter [CollectorsFlag] [ListenFlag] [MetricsPathFlag]" />
        <ServiceInstall Id="InstallExporterService" Name="wmi_exporter" DisplayName="WMI exporter" Description="Exports Prometheus metrics from WMI queries" ErrorControl="normal" Start="auto" Type="ownProcess" Arguments="--log.format logger:eventlog?name=wmi_exporter [CollectorsFlag] [ListenFlag] [MetricsPathFlag] [TextfileDirFlag] [ExtraFlags]">
          <util:ServiceConfig FirstFailureActionType="restart" SecondFailureActionType="restart" ThirdFailureActionType="restart" RestartServiceDelayInSeconds="5" />
        </ServiceInstall>
        <ServiceControl Id="ServiceStateControl" Name="wmi_exporter" Remove="uninstall" Start="install" Stop="both" />
        <util:EventSource Log="Application" Name="wmi_exporter" EventMessageFile="%SystemRoot%\System32\EventCreate.exe" />
      </Component>
      <Component Id="CreateTextfileDirectory" Directory="textfile_inputs" Guid="d03ef58a-9cbf-4165-ad39-d143e9b27e14">
        <CreateFolder />
      </Component>
    </ComponentGroup>

@@ -45,4 +60,4 @@
      <ComponentGroupRef Id="Files" />
    </Feature>
  </Product>
</Wix>
</Wix>

tools/collector-generator/New-Collector.ps1 (Normal file, 31 lines)
@@ -0,0 +1,31 @@
Param(
    [Parameter(Mandatory=$true)]
    $Class,
    [Parameter(Mandatory=$false)]
    $CollectorName = ($Class -replace 'Win32_PerfRawData_Perf',''),
    [Parameter(Mandatory=$false)]
    $ComputerName = "localhost",
    [Parameter(Mandatory=$false)]
    $Credential
)
$ErrorActionPreference = "Stop"

if($Credential -ne $null) {
    $wmiObject = Get-WMIObject -ComputerName $ComputerName -Credential $Credential -Class $Class
}
else {
    $wmiObject = Get-WMIObject -ComputerName $ComputerName -Class $Class
}

$members = $wmiObject `
    | Get-Member -MemberType Properties `
    | Where-Object { $_.Definition -Match '^u?int' -and $_.Name -NotMatch '_' } `
    | Select-Object Name, @{Name="Type";Expression={$_.Definition.Split(" ")[0]}}
$input = @{
    "Class"=$Class;
    "CollectorName"=$CollectorName;
    "Members"=$members
} | ConvertTo-Json
$outFileName = "..\..\collector\$CollectorName.go".ToLower()
$input | .\collector-generator.exe | Out-File -NoClobber -Encoding UTF8 $outFileName
go fmt $outFileName

tools/collector-generator/README.md (Normal file, 17 lines)
@@ -0,0 +1,17 @@
# Collector generator
Generates a collector skeleton implementation from a WMI class.

## Usage
Build the generator:

```bash
go build .
```

Run the script to query the WMI service and send the output to the generator:

```powershell
.\New-Collector.ps1 -Class Win32_PerfRawData_PerfOS_Processor
```

This will generate a collector. The collector name is generated by first removing `Win32_PerfRawData_Perf` and lower-casing, so `Win32_PerfRawData_PerfOS_Processor` will generate `os_processor.go`. This can be overridden by passing `-CollectorName` to the script.

tools/collector-generator/collector.template (Normal file, 61 lines)
@@ -0,0 +1,61 @@
package collector
import (
    "github.com/StackExchange/wmi"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/common/log"
)
func init() {
    Factories["{{ .CollectorName | toLower }}"] = New{{ .CollectorName }}Collector
}
// A {{ .CollectorName }}Collector is a Prometheus collector for WMI {{ .Class }} metrics
type {{ .CollectorName }}Collector struct {
    {{- range $m := .Members }}
    {{ $m.Name }} *prometheus.Desc
    {{- end }}
}
// New{{ .CollectorName }}Collector ...
func New{{ .CollectorName }}Collector() (Collector, error) {
    const subsystem = "{{ .CollectorName | toLower }}"
    return &{{ .CollectorName }}Collector{
        {{- range $m := .Members }}
        {{ $m.Name }}: prometheus.NewDesc(
            prometheus.BuildFQName(Namespace, subsystem, "{{ $m.Name | toSnakeCase }}"),
            "({{ $m.Name }})",
            nil,
            nil,
        ),
        {{- end }}
    }, nil
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *{{ .CollectorName }}Collector) Collect(ch chan<- prometheus.Metric) error {
    if desc, err := c.collect(ch); err != nil {
        log.Error("failed collecting {{ .CollectorName | toLower }} metrics:", desc, err)
        return err
    }
    return nil
}
// {{ .Class }} docs:
// - <add link to documentation here>
type {{ .Class }} struct {
    Name string
    {{ range $m := .Members }}
    {{ $m.Name }} {{ $m.Type }}
    {{- end }}
}
func (c *{{ .CollectorName }}Collector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
    var dst []{{ .Class }}
    q := queryAll(&dst)
    if err := wmi.Query(q, &dst); err != nil {
        return nil, err
    }
    {{ range $m := .Members }}
    ch <- prometheus.MustNewConstMetric(
        c.{{ $m.Name }},
        prometheus.GaugeValue,
        float64(dst[0].{{ $m.Name }}),
    )
    {{ end }}
    return nil, nil
}

tools/collector-generator/generate-collector.go (Normal file, 60 lines)
@@ -0,0 +1,60 @@
package main

import (
    "encoding/json"
    "io/ioutil"
    "os"
    "strings"
    "text/template"
    "unicode"
)

type TemplateData struct {
    CollectorName string
    Class         string
    Members       []Member
}
type Member struct {
    Name string
    Type string
}

func main() {
    bytes, err := ioutil.ReadAll(os.Stdin)
    if err != nil {
        panic(err)
    }
    var data TemplateData
    if err = json.Unmarshal(bytes, &data); err != nil {
        panic(err)
    }

    funcs := template.FuncMap{
        "toLower":     strings.ToLower,
        "toSnakeCase": toSnakeCase,
    }
    tmpl, err := template.New("template").Funcs(funcs).ParseFiles("collector.template")
    if err != nil {
        panic(err)
    }
    err = tmpl.ExecuteTemplate(os.Stdout, "collector.template", data)
    if err != nil {
        panic(err)
    }
}

// https://gist.github.com/elwinar/14e1e897fdbe4d3432e1
func toSnakeCase(in string) string {
    runes := []rune(in)
    length := len(runes)

    var out []rune
    for i := 0; i < length; i++ {
        if i > 0 && unicode.IsUpper(runes[i]) && ((i+1 < length && unicode.IsLower(runes[i+1])) || unicode.IsLower(runes[i-1])) {
            out = append(out, '_')
        }
        out = append(out, unicode.ToLower(runes[i]))
    }

    return string(out)
}

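To make the naming rule concrete, here is a small, hypothetical test (not part of the change above) that exercises the `toSnakeCase` helper from the generator. The expected strings follow from the loop shown above, which inserts an underscore before an upper-case rune that starts a new word while keeping runs of capitals together.

```go
package main

import "testing"

// TestToSnakeCase is an illustrative sketch only; it assumes it sits in the
// same package as generate-collector.go so that toSnakeCase is visible.
func TestToSnakeCase(t *testing.T) {
	cases := map[string]string{
		"PercentProcessorTime": "percent_processor_time",
		"AvgDiskBytesPerRead":  "avg_disk_bytes_per_read",
		"IOReadBytesPerSec":    "io_read_bytes_per_sec", // consecutive capitals stay in one word
	}
	for in, want := range cases {
		if got := toSnakeCase(in); got != want {
			t.Errorf("toSnakeCase(%q) = %q, want %q", in, got, want)
		}
	}
}
```

These are the metric name fragments the template above feeds into prometheus.BuildFQName for each WMI property.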
1
vendor/github.com/Microsoft/go-winio/.gitignore
generated
vendored
Normal file
1
vendor/github.com/Microsoft/go-winio/.gitignore
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*.exe
|
||||
22
vendor/github.com/Microsoft/go-winio/LICENSE
generated
vendored
Normal file
22
vendor/github.com/Microsoft/go-winio/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Microsoft
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
22
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
Normal file
22
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
# go-winio
|
||||
|
||||
This repository contains utilities for efficiently performing Win32 IO operations in
|
||||
Go. Currently, this is focused on accessing named pipes and other file handles, and
|
||||
for using named pipes as a net transport.
|
||||
|
||||
This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go
|
||||
to reuse the thread to schedule another goroutine. This limits support to Windows Vista and
|
||||
newer operating systems. This is similar to the implementation of network sockets in Go's net
|
||||
package.
|
||||
|
||||
Please see the LICENSE file for licensing information.
|
||||
|
||||
This project has adopted the [Microsoft Open Source Code of
|
||||
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
|
||||
see the [Code of Conduct
|
||||
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
|
||||
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
|
||||
questions or comments.
|
||||
|
||||
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
|
||||
for another named pipe implementation.
|
||||
27
vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
generated
vendored
Normal file
27
vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
344
vendor/github.com/Microsoft/go-winio/archive/tar/common.go
generated
vendored
Normal file
344
vendor/github.com/Microsoft/go-winio/archive/tar/common.go
generated
vendored
Normal file
@@ -0,0 +1,344 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package tar implements access to tar archives.
|
||||
// It aims to cover most of the variations, including those produced
|
||||
// by GNU and BSD tars.
|
||||
//
|
||||
// References:
|
||||
// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
|
||||
// http://www.gnu.org/software/tar/manual/html_node/Standard.html
|
||||
// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
blockSize = 512
|
||||
|
||||
// Types
|
||||
TypeReg = '0' // regular file
|
||||
TypeRegA = '\x00' // regular file
|
||||
TypeLink = '1' // hard link
|
||||
TypeSymlink = '2' // symbolic link
|
||||
TypeChar = '3' // character device node
|
||||
TypeBlock = '4' // block device node
|
||||
TypeDir = '5' // directory
|
||||
TypeFifo = '6' // fifo node
|
||||
TypeCont = '7' // reserved
|
||||
TypeXHeader = 'x' // extended header
|
||||
TypeXGlobalHeader = 'g' // global extended header
|
||||
TypeGNULongName = 'L' // Next file has a long name
|
||||
TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name
|
||||
TypeGNUSparse = 'S' // sparse file
|
||||
)
|
||||
|
||||
// A Header represents a single header in a tar archive.
|
||||
// Some fields may not be populated.
|
||||
type Header struct {
|
||||
Name string // name of header file entry
|
||||
Mode int64 // permission and mode bits
|
||||
Uid int // user id of owner
|
||||
Gid int // group id of owner
|
||||
Size int64 // length in bytes
|
||||
ModTime time.Time // modified time
|
||||
Typeflag byte // type of header entry
|
||||
Linkname string // target name of link
|
||||
Uname string // user name of owner
|
||||
Gname string // group name of owner
|
||||
Devmajor int64 // major number of character or block device
|
||||
Devminor int64 // minor number of character or block device
|
||||
AccessTime time.Time // access time
|
||||
ChangeTime time.Time // status change time
|
||||
CreationTime time.Time // creation time
|
||||
Xattrs map[string]string
|
||||
Winheaders map[string]string
|
||||
}
|
||||
|
||||
// File name constants from the tar spec.
|
||||
const (
|
||||
fileNameSize = 100 // Maximum number of bytes in a standard tar name.
|
||||
fileNamePrefixSize = 155 // Maximum number of ustar extension bytes.
|
||||
)
|
||||
|
||||
// FileInfo returns an os.FileInfo for the Header.
|
||||
func (h *Header) FileInfo() os.FileInfo {
|
||||
return headerFileInfo{h}
|
||||
}
|
||||
|
||||
// headerFileInfo implements os.FileInfo.
|
||||
type headerFileInfo struct {
|
||||
h *Header
|
||||
}
|
||||
|
||||
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
|
||||
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
|
||||
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
|
||||
func (fi headerFileInfo) Sys() interface{} { return fi.h }
|
||||
|
||||
// Name returns the base name of the file.
|
||||
func (fi headerFileInfo) Name() string {
|
||||
if fi.IsDir() {
|
||||
return path.Base(path.Clean(fi.h.Name))
|
||||
}
|
||||
return path.Base(fi.h.Name)
|
||||
}
|
||||
|
||||
// Mode returns the permission and mode bits for the headerFileInfo.
|
||||
func (fi headerFileInfo) Mode() (mode os.FileMode) {
|
||||
// Set file permission bits.
|
||||
mode = os.FileMode(fi.h.Mode).Perm()
|
||||
|
||||
// Set setuid, setgid and sticky bits.
|
||||
if fi.h.Mode&c_ISUID != 0 {
|
||||
// setuid
|
||||
mode |= os.ModeSetuid
|
||||
}
|
||||
if fi.h.Mode&c_ISGID != 0 {
|
||||
// setgid
|
||||
mode |= os.ModeSetgid
|
||||
}
|
||||
if fi.h.Mode&c_ISVTX != 0 {
|
||||
// sticky
|
||||
mode |= os.ModeSticky
|
||||
}
|
||||
|
||||
// Set file mode bits.
|
||||
// clear perm, setuid, setgid and sticky bits.
|
||||
m := os.FileMode(fi.h.Mode) &^ 07777
|
||||
if m == c_ISDIR {
|
||||
// directory
|
||||
mode |= os.ModeDir
|
||||
}
|
||||
if m == c_ISFIFO {
|
||||
// named pipe (FIFO)
|
||||
mode |= os.ModeNamedPipe
|
||||
}
|
||||
if m == c_ISLNK {
|
||||
// symbolic link
|
||||
mode |= os.ModeSymlink
|
||||
}
|
||||
if m == c_ISBLK {
|
||||
// device file
|
||||
mode |= os.ModeDevice
|
||||
}
|
||||
if m == c_ISCHR {
|
||||
// Unix character device
|
||||
mode |= os.ModeDevice
|
||||
mode |= os.ModeCharDevice
|
||||
}
|
||||
if m == c_ISSOCK {
|
||||
// Unix domain socket
|
||||
mode |= os.ModeSocket
|
||||
}
|
||||
|
||||
switch fi.h.Typeflag {
|
||||
case TypeSymlink:
|
||||
// symbolic link
|
||||
mode |= os.ModeSymlink
|
||||
case TypeChar:
|
||||
// character device node
|
||||
mode |= os.ModeDevice
|
||||
mode |= os.ModeCharDevice
|
||||
case TypeBlock:
|
||||
// block device node
|
||||
mode |= os.ModeDevice
|
||||
case TypeDir:
|
||||
// directory
|
||||
mode |= os.ModeDir
|
||||
case TypeFifo:
|
||||
// fifo node
|
||||
mode |= os.ModeNamedPipe
|
||||
}
|
||||
|
||||
return mode
|
||||
}
|
||||
|
||||
// sysStat, if non-nil, populates h from system-dependent fields of fi.
|
||||
var sysStat func(fi os.FileInfo, h *Header) error
|
||||
|
||||
// Mode constants from the tar spec.
|
||||
const (
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
c_ISDIR = 040000 // Directory
|
||||
c_ISFIFO = 010000 // FIFO
|
||||
c_ISREG = 0100000 // Regular file
|
||||
c_ISLNK = 0120000 // Symbolic link
|
||||
c_ISBLK = 060000 // Block special file
|
||||
c_ISCHR = 020000 // Character special file
|
||||
c_ISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
// Keywords for the PAX Extended Header
|
||||
const (
|
||||
paxAtime = "atime"
|
||||
paxCharset = "charset"
|
||||
paxComment = "comment"
|
||||
paxCtime = "ctime" // please note that ctime is not a valid pax header.
|
||||
paxCreationTime = "LIBARCHIVE.creationtime"
|
||||
paxGid = "gid"
|
||||
paxGname = "gname"
|
||||
paxLinkpath = "linkpath"
|
||||
paxMtime = "mtime"
|
||||
paxPath = "path"
|
||||
paxSize = "size"
|
||||
paxUid = "uid"
|
||||
paxUname = "uname"
|
||||
paxXattr = "SCHILY.xattr."
|
||||
paxWindows = "MSWINDOWS."
|
||||
paxNone = ""
|
||||
)
|
||||
|
||||
// FileInfoHeader creates a partially-populated Header from fi.
|
||||
// If fi describes a symlink, FileInfoHeader records link as the link target.
|
||||
// If fi describes a directory, a slash is appended to the name.
|
||||
// Because os.FileInfo's Name method returns only the base name of
|
||||
// the file it describes, it may be necessary to modify the Name field
|
||||
// of the returned header to provide the full path name of the file.
|
||||
func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
|
||||
if fi == nil {
|
||||
return nil, errors.New("tar: FileInfo is nil")
|
||||
}
|
||||
fm := fi.Mode()
|
||||
h := &Header{
|
||||
Name: fi.Name(),
|
||||
ModTime: fi.ModTime(),
|
||||
Mode: int64(fm.Perm()), // or'd with c_IS* constants later
|
||||
}
|
||||
switch {
|
||||
case fm.IsRegular():
|
||||
h.Mode |= c_ISREG
|
||||
h.Typeflag = TypeReg
|
||||
h.Size = fi.Size()
|
||||
case fi.IsDir():
|
||||
h.Typeflag = TypeDir
|
||||
h.Mode |= c_ISDIR
|
||||
h.Name += "/"
|
||||
case fm&os.ModeSymlink != 0:
|
||||
h.Typeflag = TypeSymlink
|
||||
h.Mode |= c_ISLNK
|
||||
h.Linkname = link
|
||||
case fm&os.ModeDevice != 0:
|
||||
if fm&os.ModeCharDevice != 0 {
|
||||
h.Mode |= c_ISCHR
|
||||
h.Typeflag = TypeChar
|
||||
} else {
|
||||
h.Mode |= c_ISBLK
|
||||
h.Typeflag = TypeBlock
|
||||
}
|
||||
case fm&os.ModeNamedPipe != 0:
|
||||
h.Typeflag = TypeFifo
|
||||
h.Mode |= c_ISFIFO
|
||||
case fm&os.ModeSocket != 0:
|
||||
h.Mode |= c_ISSOCK
|
||||
default:
|
||||
return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
|
||||
}
|
||||
if fm&os.ModeSetuid != 0 {
|
||||
h.Mode |= c_ISUID
|
||||
}
|
||||
if fm&os.ModeSetgid != 0 {
|
||||
h.Mode |= c_ISGID
|
||||
}
|
||||
if fm&os.ModeSticky != 0 {
|
||||
h.Mode |= c_ISVTX
|
||||
}
|
||||
// If possible, populate additional fields from OS-specific
|
||||
// FileInfo fields.
|
||||
if sys, ok := fi.Sys().(*Header); ok {
|
||||
// This FileInfo came from a Header (not the OS). Use the
|
||||
// original Header to populate all remaining fields.
|
||||
h.Uid = sys.Uid
|
||||
h.Gid = sys.Gid
|
||||
h.Uname = sys.Uname
|
||||
h.Gname = sys.Gname
|
||||
h.AccessTime = sys.AccessTime
|
||||
h.ChangeTime = sys.ChangeTime
|
||||
if sys.Xattrs != nil {
|
||||
h.Xattrs = make(map[string]string)
|
||||
for k, v := range sys.Xattrs {
|
||||
h.Xattrs[k] = v
|
||||
}
|
||||
}
|
||||
if sys.Typeflag == TypeLink {
|
||||
// hard link
|
||||
h.Typeflag = TypeLink
|
||||
h.Size = 0
|
||||
h.Linkname = sys.Linkname
|
||||
}
|
||||
}
|
||||
if sysStat != nil {
|
||||
return h, sysStat(fi, h)
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
var zeroBlock = make([]byte, blockSize)
|
||||
|
||||
// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values.
|
||||
// We compute and return both.
|
||||
func checksum(header []byte) (unsigned int64, signed int64) {
|
||||
for i := 0; i < len(header); i++ {
|
||||
if i == 148 {
|
||||
// The chksum field (header[148:156]) is special: it should be treated as space bytes.
|
||||
unsigned += ' ' * 8
|
||||
signed += ' ' * 8
|
||||
i += 7
|
||||
continue
|
||||
}
|
||||
unsigned += int64(header[i])
|
||||
signed += int64(int8(header[i]))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type slicer []byte
|
||||
|
||||
func (sp *slicer) next(n int) (b []byte) {
|
||||
s := *sp
|
||||
b, *sp = s[0:n], s[n:]
|
||||
return
|
||||
}
|
||||
|
||||
func isASCII(s string) bool {
|
||||
for _, c := range s {
|
||||
if c >= 0x80 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toASCII(s string) string {
|
||||
if isASCII(s) {
|
||||
return s
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
for _, c := range s {
|
||||
if c < 0x80 {
|
||||
buf.WriteByte(byte(c))
|
||||
}
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// isHeaderOnlyType checks if the given type flag is of the type that has no
|
||||
// data section even if a size is specified.
|
||||
func isHeaderOnlyType(flag byte) bool {
|
||||
switch flag {
|
||||
case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
80
vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go
generated
vendored
Normal file
80
vendor/github.com/Microsoft/go-winio/archive/tar/example_test.go
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar_test
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
func Example() {
|
||||
// Create a buffer to write our archive to.
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
// Create a new tar archive.
|
||||
tw := tar.NewWriter(buf)
|
||||
|
||||
// Add some files to the archive.
|
||||
var files = []struct {
|
||||
Name, Body string
|
||||
}{
|
||||
{"readme.txt", "This archive contains some text files."},
|
||||
{"gopher.txt", "Gopher names:\nGeorge\nGeoffrey\nGonzo"},
|
||||
{"todo.txt", "Get animal handling license."},
|
||||
}
|
||||
for _, file := range files {
|
||||
hdr := &tar.Header{
|
||||
Name: file.Name,
|
||||
Mode: 0600,
|
||||
Size: int64(len(file.Body)),
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
if _, err := tw.Write([]byte(file.Body)); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
// Make sure to check the error on Close.
|
||||
if err := tw.Close(); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
// Open the tar archive for reading.
|
||||
r := bytes.NewReader(buf.Bytes())
|
||||
tr := tar.NewReader(r)
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
// end of tar archive
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
fmt.Printf("Contents of %s:\n", hdr.Name)
|
||||
if _, err := io.Copy(os.Stdout, tr); err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
|
||||
// Output:
|
||||
// Contents of readme.txt:
|
||||
// This archive contains some text files.
|
||||
// Contents of gopher.txt:
|
||||
// Gopher names:
|
||||
// George
|
||||
// Geoffrey
|
||||
// Gonzo
|
||||
// Contents of todo.txt:
|
||||
// Get animal handling license.
|
||||
}
|
||||
1002
vendor/github.com/Microsoft/go-winio/archive/tar/reader.go
generated
vendored
Normal file
1002
vendor/github.com/Microsoft/go-winio/archive/tar/reader.go
generated
vendored
Normal file
File diff suppressed because it is too large
1125
vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go
generated
vendored
Normal file
1125
vendor/github.com/Microsoft/go-winio/archive/tar/reader_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
20
vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go
generated
vendored
Normal file
20
vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux dragonfly openbsd solaris
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func statAtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Atim.Unix())
|
||||
}
|
||||
|
||||
func statCtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Ctim.Unix())
|
||||
}
|
||||
20
vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go
generated
vendored
Normal file
20
vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin freebsd netbsd
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func statAtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Atimespec.Unix())
|
||||
}
|
||||
|
||||
func statCtime(st *syscall.Stat_t) time.Time {
|
||||
return time.Unix(st.Ctimespec.Unix())
|
||||
}
|
||||
32
vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go
generated
vendored
Normal file
32
vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux darwin dragonfly freebsd openbsd netbsd solaris
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func init() {
|
||||
sysStat = statUnix
|
||||
}
|
||||
|
||||
func statUnix(fi os.FileInfo, h *Header) error {
|
||||
sys, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
h.Uid = int(sys.Uid)
|
||||
h.Gid = int(sys.Gid)
|
||||
// TODO(bradfitz): populate username & group. os/user
|
||||
// doesn't cache LookupId lookups, and lacks group
|
||||
// lookup functions.
|
||||
h.AccessTime = statAtime(sys)
|
||||
h.ChangeTime = statCtime(sys)
|
||||
// TODO(bradfitz): major/minor device numbers?
|
||||
return nil
|
||||
}
|
||||
325
vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go
generated
vendored
Normal file
325
vendor/github.com/Microsoft/go-winio/archive/tar/tar_test.go
generated
vendored
Normal file
@@ -0,0 +1,325 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestFileInfoHeader(t *testing.T) {
|
||||
fi, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
h, err := FileInfoHeader(fi, "")
|
||||
if err != nil {
|
||||
t.Fatalf("FileInfoHeader: %v", err)
|
||||
}
|
||||
if g, e := h.Name, "small.txt"; g != e {
|
||||
t.Errorf("Name = %q; want %q", g, e)
|
||||
}
|
||||
if g, e := h.Mode, int64(fi.Mode().Perm())|c_ISREG; g != e {
|
||||
t.Errorf("Mode = %#o; want %#o", g, e)
|
||||
}
|
||||
if g, e := h.Size, int64(5); g != e {
|
||||
t.Errorf("Size = %v; want %v", g, e)
|
||||
}
|
||||
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
|
||||
t.Errorf("ModTime = %v; want %v", g, e)
|
||||
}
|
||||
// FileInfoHeader should error when passing nil FileInfo
|
||||
if _, err := FileInfoHeader(nil, ""); err == nil {
|
||||
t.Fatalf("Expected error when passing nil to FileInfoHeader")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileInfoHeaderDir(t *testing.T) {
|
||||
fi, err := os.Stat("testdata")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
h, err := FileInfoHeader(fi, "")
|
||||
if err != nil {
|
||||
t.Fatalf("FileInfoHeader: %v", err)
|
||||
}
|
||||
if g, e := h.Name, "testdata/"; g != e {
|
||||
t.Errorf("Name = %q; want %q", g, e)
|
||||
}
|
||||
// Ignoring c_ISGID for golang.org/issue/4867
|
||||
if g, e := h.Mode&^c_ISGID, int64(fi.Mode().Perm())|c_ISDIR; g != e {
|
||||
t.Errorf("Mode = %#o; want %#o", g, e)
|
||||
}
|
||||
if g, e := h.Size, int64(0); g != e {
|
||||
t.Errorf("Size = %v; want %v", g, e)
|
||||
}
|
||||
if g, e := h.ModTime, fi.ModTime(); !g.Equal(e) {
|
||||
t.Errorf("ModTime = %v; want %v", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileInfoHeaderSymlink(t *testing.T) {
|
||||
h, err := FileInfoHeader(symlink{}, "some-target")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if g, e := h.Name, "some-symlink"; g != e {
|
||||
t.Errorf("Name = %q; want %q", g, e)
|
||||
}
|
||||
if g, e := h.Linkname, "some-target"; g != e {
|
||||
t.Errorf("Linkname = %q; want %q", g, e)
|
||||
}
|
||||
}
|
||||
|
||||
type symlink struct{}
|
||||
|
||||
func (symlink) Name() string { return "some-symlink" }
|
||||
func (symlink) Size() int64 { return 0 }
|
||||
func (symlink) Mode() os.FileMode { return os.ModeSymlink }
|
||||
func (symlink) ModTime() time.Time { return time.Time{} }
|
||||
func (symlink) IsDir() bool { return false }
|
||||
func (symlink) Sys() interface{} { return nil }
|
||||
|
||||
func TestRoundTrip(t *testing.T) {
|
||||
data := []byte("some file contents")
|
||||
|
||||
var b bytes.Buffer
|
||||
tw := NewWriter(&b)
|
||||
hdr := &Header{
|
||||
Name: "file.txt",
|
||||
Uid: 1 << 21, // too big for 8 octal digits
|
||||
Size: int64(len(data)),
|
||||
ModTime: time.Now(),
|
||||
}
|
||||
// tar only supports second precision.
|
||||
hdr.ModTime = hdr.ModTime.Add(-time.Duration(hdr.ModTime.Nanosecond()) * time.Nanosecond)
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
t.Fatalf("tw.WriteHeader: %v", err)
|
||||
}
|
||||
if _, err := tw.Write(data); err != nil {
|
||||
t.Fatalf("tw.Write: %v", err)
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
t.Fatalf("tw.Close: %v", err)
|
||||
}
|
||||
|
||||
// Read it back.
|
||||
tr := NewReader(&b)
|
||||
rHdr, err := tr.Next()
|
||||
if err != nil {
|
||||
t.Fatalf("tr.Next: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(rHdr, hdr) {
|
||||
t.Errorf("Header mismatch.\n got %+v\nwant %+v", rHdr, hdr)
|
||||
}
|
||||
rData, err := ioutil.ReadAll(tr)
|
||||
if err != nil {
|
||||
t.Fatalf("Read: %v", err)
|
||||
}
|
||||
if !bytes.Equal(rData, data) {
|
||||
t.Errorf("Data mismatch.\n got %q\nwant %q", rData, data)
|
||||
}
|
||||
}
|
||||
|
||||
type headerRoundTripTest struct {
|
||||
h *Header
|
||||
fm os.FileMode
|
||||
}
|
||||
|
||||
func TestHeaderRoundTrip(t *testing.T) {
|
||||
golden := []headerRoundTripTest{
|
||||
// regular file.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "test.txt",
|
||||
Mode: 0644 | c_ISREG,
|
||||
Size: 12,
|
||||
ModTime: time.Unix(1360600916, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0644,
|
||||
},
|
||||
// symbolic link.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "link.txt",
|
||||
Mode: 0777 | c_ISLNK,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360600852, 0),
|
||||
Typeflag: TypeSymlink,
|
||||
},
|
||||
fm: 0777 | os.ModeSymlink,
|
||||
},
|
||||
// character device node.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dev/null",
|
||||
Mode: 0666 | c_ISCHR,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360578951, 0),
|
||||
Typeflag: TypeChar,
|
||||
},
|
||||
fm: 0666 | os.ModeDevice | os.ModeCharDevice,
|
||||
},
|
||||
// block device node.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dev/sda",
|
||||
Mode: 0660 | c_ISBLK,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360578954, 0),
|
||||
Typeflag: TypeBlock,
|
||||
},
|
||||
fm: 0660 | os.ModeDevice,
|
||||
},
|
||||
// directory.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dir/",
|
||||
Mode: 0755 | c_ISDIR,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360601116, 0),
|
||||
Typeflag: TypeDir,
|
||||
},
|
||||
fm: 0755 | os.ModeDir,
|
||||
},
|
||||
// fifo node.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "dev/initctl",
|
||||
Mode: 0600 | c_ISFIFO,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360578949, 0),
|
||||
Typeflag: TypeFifo,
|
||||
},
|
||||
fm: 0600 | os.ModeNamedPipe,
|
||||
},
|
||||
// setuid.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "bin/su",
|
||||
Mode: 0755 | c_ISREG | c_ISUID,
|
||||
Size: 23232,
|
||||
ModTime: time.Unix(1355405093, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0755 | os.ModeSetuid,
|
||||
},
|
||||
// setguid.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "group.txt",
|
||||
Mode: 0750 | c_ISREG | c_ISGID,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1360602346, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0750 | os.ModeSetgid,
|
||||
},
|
||||
// sticky.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "sticky.txt",
|
||||
Mode: 0600 | c_ISREG | c_ISVTX,
|
||||
Size: 7,
|
||||
ModTime: time.Unix(1360602540, 0),
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0600 | os.ModeSticky,
|
||||
},
|
||||
// hard link.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "hard.txt",
|
||||
Mode: 0644 | c_ISREG,
|
||||
Size: 0,
|
||||
Linkname: "file.txt",
|
||||
ModTime: time.Unix(1360600916, 0),
|
||||
Typeflag: TypeLink,
|
||||
},
|
||||
fm: 0644,
|
||||
},
|
||||
// More information.
|
||||
{
|
||||
h: &Header{
|
||||
Name: "info.txt",
|
||||
Mode: 0600 | c_ISREG,
|
||||
Size: 0,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
ModTime: time.Unix(1360602540, 0),
|
||||
Uname: "slartibartfast",
|
||||
Gname: "users",
|
||||
Typeflag: TypeReg,
|
||||
},
|
||||
fm: 0600,
|
||||
},
|
||||
}
|
||||
|
||||
for i, g := range golden {
|
||||
fi := g.h.FileInfo()
|
||||
h2, err := FileInfoHeader(fi, "")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(fi.Name(), "/") {
|
||||
t.Errorf("FileInfo of %q contains slash: %q", g.h.Name, fi.Name())
|
||||
}
|
||||
name := path.Base(g.h.Name)
|
||||
if fi.IsDir() {
|
||||
name += "/"
|
||||
}
|
||||
if got, want := h2.Name, name; got != want {
|
||||
t.Errorf("i=%d: Name: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.Size, g.h.Size; got != want {
|
||||
t.Errorf("i=%d: Size: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.Uid, g.h.Uid; got != want {
|
||||
t.Errorf("i=%d: Uid: got %d, want %d", i, got, want)
|
||||
}
|
||||
if got, want := h2.Gid, g.h.Gid; got != want {
|
||||
t.Errorf("i=%d: Gid: got %d, want %d", i, got, want)
|
||||
}
|
||||
if got, want := h2.Uname, g.h.Uname; got != want {
|
||||
t.Errorf("i=%d: Uname: got %q, want %q", i, got, want)
|
||||
}
|
||||
if got, want := h2.Gname, g.h.Gname; got != want {
|
||||
t.Errorf("i=%d: Gname: got %q, want %q", i, got, want)
|
||||
}
|
||||
if got, want := h2.Linkname, g.h.Linkname; got != want {
|
||||
t.Errorf("i=%d: Linkname: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.Typeflag, g.h.Typeflag; got != want {
|
||||
t.Logf("%#v %#v", g.h, fi.Sys())
|
||||
t.Errorf("i=%d: Typeflag: got %q, want %q", i, got, want)
|
||||
}
|
||||
if got, want := h2.Mode, g.h.Mode; got != want {
|
||||
t.Errorf("i=%d: Mode: got %o, want %o", i, got, want)
|
||||
}
|
||||
if got, want := fi.Mode(), g.fm; got != want {
|
||||
t.Errorf("i=%d: fi.Mode: got %o, want %o", i, got, want)
|
||||
}
|
||||
if got, want := h2.AccessTime, g.h.AccessTime; got != want {
|
||||
t.Errorf("i=%d: AccessTime: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.ChangeTime, g.h.ChangeTime; got != want {
|
||||
t.Errorf("i=%d: ChangeTime: got %v, want %v", i, got, want)
|
||||
}
|
||||
if got, want := h2.ModTime, g.h.ModTime; got != want {
|
||||
t.Errorf("i=%d: ModTime: got %v, want %v", i, got, want)
|
||||
}
|
||||
if sysh, ok := fi.Sys().(*Header); !ok || sysh != g.h {
|
||||
t.Errorf("i=%d: Sys didn't return original *Header", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
444
vendor/github.com/Microsoft/go-winio/archive/tar/writer.go
generated
vendored
Normal file
444
vendor/github.com/Microsoft/go-winio/archive/tar/writer.go
generated
vendored
Normal file
@@ -0,0 +1,444 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
// TODO(dsymonds):
|
||||
// - catch more errors (no first header, etc.)
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrWriteTooLong = errors.New("archive/tar: write too long")
|
||||
ErrFieldTooLong = errors.New("archive/tar: header field too long")
|
||||
ErrWriteAfterClose = errors.New("archive/tar: write after close")
|
||||
errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values")
|
||||
)
|
||||
|
||||
// A Writer provides sequential writing of a tar archive in POSIX.1 format.
|
||||
// A tar archive consists of a sequence of files.
|
||||
// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
|
||||
// writing at most hdr.Size bytes in total.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
err error
|
||||
nb int64 // number of unwritten bytes for current file entry
|
||||
pad int64 // amount of padding to write after current file entry
|
||||
closed bool
|
||||
usedBinary bool // whether the binary numeric field extension was used
|
||||
preferPax bool // use pax header instead of binary numeric header
|
||||
hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header
|
||||
paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
|
||||
}
|
||||
|
||||
type formatter struct {
|
||||
err error // Last error seen
|
||||
}
|
||||
|
||||
// NewWriter creates a new Writer writing to w.
|
||||
func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
|
||||
|
||||
// Flush finishes writing the current file (optional).
|
||||
func (tw *Writer) Flush() error {
|
||||
if tw.nb > 0 {
|
||||
tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
|
||||
return tw.err
|
||||
}
|
||||
|
||||
n := tw.nb + tw.pad
|
||||
for n > 0 && tw.err == nil {
|
||||
nr := n
|
||||
if nr > blockSize {
|
||||
nr = blockSize
|
||||
}
|
||||
var nw int
|
||||
nw, tw.err = tw.w.Write(zeroBlock[0:nr])
|
||||
n -= int64(nw)
|
||||
}
|
||||
tw.nb = 0
|
||||
tw.pad = 0
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// Write s into b, terminating it with a NUL if there is room.
|
||||
func (f *formatter) formatString(b []byte, s string) {
|
||||
if len(s) > len(b) {
|
||||
f.err = ErrFieldTooLong
|
||||
return
|
||||
}
|
||||
ascii := toASCII(s)
|
||||
copy(b, ascii)
|
||||
if len(ascii) < len(b) {
|
||||
b[len(ascii)] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Encode x as an octal ASCII string and write it into b with leading zeros.
|
||||
func (f *formatter) formatOctal(b []byte, x int64) {
|
||||
s := strconv.FormatInt(x, 8)
|
||||
// leading zeros, but leave room for a NUL.
|
||||
for len(s)+1 < len(b) {
|
||||
s = "0" + s
|
||||
}
|
||||
f.formatString(b, s)
|
||||
}
|
||||
|
||||
// fitsInBase256 reports whether x can be encoded into n bytes using base-256
|
||||
// encoding. Unlike octal encoding, base-256 encoding does not require that the
|
||||
// string ends with a NUL character. Thus, all n bytes are available for output.
|
||||
//
|
||||
// If operating in binary mode, this assumes strict GNU binary mode; which means
|
||||
// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
|
||||
// equivalent to the sign bit in two's complement form.
|
||||
func fitsInBase256(n int, x int64) bool {
|
||||
var binBits = uint(n-1) * 8
|
||||
return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
|
||||
}
|
||||
|
||||
// Write x into b, as binary (GNUtar/star extension).
|
||||
func (f *formatter) formatNumeric(b []byte, x int64) {
|
||||
if fitsInBase256(len(b), x) {
|
||||
for i := len(b) - 1; i >= 0; i-- {
|
||||
b[i] = byte(x)
|
||||
x >>= 8
|
||||
}
|
||||
b[0] |= 0x80 // Highest bit indicates binary format
|
||||
return
|
||||
}
|
||||
|
||||
f.formatOctal(b, 0) // Last resort, just write zero
|
||||
f.err = ErrFieldTooLong
|
||||
}
|
||||
|
||||
var (
|
||||
minTime = time.Unix(0, 0)
|
||||
// There is room for 11 octal digits (33 bits) of mtime.
|
||||
maxTime = minTime.Add((1<<33 - 1) * time.Second)
|
||||
)
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
func (tw *Writer) WriteHeader(hdr *Header) error {
|
||||
return tw.writeHeader(hdr, true)
|
||||
}
|
||||
|
||||
// WriteHeader writes hdr and prepares to accept the file's contents.
|
||||
// WriteHeader calls Flush if it is not the first header.
|
||||
// Calling after a Close will return ErrWriteAfterClose.
|
||||
// As this method is called internally by writePax header to allow it to
|
||||
// suppress writing the pax header.
|
||||
func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
|
||||
if tw.closed {
|
||||
return ErrWriteAfterClose
|
||||
}
|
||||
if tw.err == nil {
|
||||
tw.Flush()
|
||||
}
|
||||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// a map to hold pax header records, if any are needed
|
||||
paxHeaders := make(map[string]string)
|
||||
|
||||
// TODO(shanemhansen): we might want to use PAX headers for
|
||||
// subsecond time resolution, but for now let's just capture
|
||||
// too long fields or non ascii characters
|
||||
|
||||
var f formatter
|
||||
var header []byte
|
||||
|
||||
// We need to select which scratch buffer to use carefully,
|
||||
// since this method is called recursively to write PAX headers.
|
||||
// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
|
||||
// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
|
||||
// already being used by the non-recursive call, so we must use paxHdrBuff.
|
||||
header = tw.hdrBuff[:]
|
||||
if !allowPax {
|
||||
header = tw.paxHdrBuff[:]
|
||||
}
|
||||
copy(header, zeroBlock)
|
||||
s := slicer(header)
|
||||
|
||||
// Wrappers around formatter that automatically sets paxHeaders if the
|
||||
// argument extends beyond the capacity of the input byte slice.
|
||||
var formatString = func(b []byte, s string, paxKeyword string) {
|
||||
needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
|
||||
if needsPaxHeader {
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
f.formatString(b, s)
|
||||
}
|
||||
var formatNumeric = func(b []byte, x int64, paxKeyword string) {
|
||||
// Try octal first.
|
||||
s := strconv.FormatInt(x, 8)
|
||||
if len(s) < len(b) {
|
||||
f.formatOctal(b, x)
|
||||
return
|
||||
}
|
||||
|
||||
// If it is too long for octal, and PAX is preferred, use a PAX header.
|
||||
if paxKeyword != paxNone && tw.preferPax {
|
||||
f.formatOctal(b, 0)
|
||||
s := strconv.FormatInt(x, 10)
|
||||
paxHeaders[paxKeyword] = s
|
||||
return
|
||||
}
|
||||
|
||||
tw.usedBinary = true
|
||||
f.formatNumeric(b, x)
|
||||
}
|
||||
var formatTime = func(b []byte, t time.Time, paxKeyword string) {
|
||||
var unixTime int64
|
||||
if !t.Before(minTime) && !t.After(maxTime) {
|
||||
unixTime = t.Unix()
|
||||
}
|
||||
formatNumeric(b, unixTime, paxNone)
|
||||
|
||||
// Write a PAX header if the time didn't fit precisely.
|
||||
if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) {
|
||||
paxHeaders[paxKeyword] = formatPAXTime(t)
|
||||
}
|
||||
}
|
||||
|
||||
// keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax
|
||||
pathHeaderBytes := s.next(fileNameSize)
|
||||
|
||||
formatString(pathHeaderBytes, hdr.Name, paxPath)
|
||||
|
||||
f.formatOctal(s.next(8), hdr.Mode) // 100:108
|
||||
formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116
|
||||
formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124
|
||||
formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136
|
||||
formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148
|
||||
s.next(8) // chksum (148:156)
|
||||
s.next(1)[0] = hdr.Typeflag // 156:157
|
||||
|
||||
formatString(s.next(100), hdr.Linkname, paxLinkpath)
|
||||
|
||||
copy(s.next(8), []byte("ustar\x0000")) // 257:265
|
||||
formatString(s.next(32), hdr.Uname, paxUname) // 265:297
|
||||
formatString(s.next(32), hdr.Gname, paxGname) // 297:329
|
||||
formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337
|
||||
formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345
|
||||
|
||||
// Keep a reference to the prefix field so it can be overwritten later if we detect that ustar longnames can be used instead of PAX.
|
||||
prefixHeaderBytes := s.next(155)
|
||||
formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix
|
||||
|
||||
// Use the GNU magic instead of POSIX magic if we used any GNU extensions.
|
||||
if tw.usedBinary {
|
||||
copy(header[257:265], []byte("ustar \x00"))
|
||||
}
|
||||
|
||||
_, paxPathUsed := paxHeaders[paxPath]
|
||||
// try to use a ustar header when only the name is too long
|
||||
if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
|
||||
prefix, suffix, ok := splitUSTARPath(hdr.Name)
|
||||
if ok {
|
||||
// Since we can encode in USTAR format, disable PAX header.
|
||||
delete(paxHeaders, paxPath)
|
||||
|
||||
// Update the path fields
|
||||
formatString(pathHeaderBytes, suffix, paxNone)
|
||||
formatString(prefixHeaderBytes, prefix, paxNone)
|
||||
}
|
||||
}
|
||||
|
||||
// The chksum field is terminated by a NUL and a space.
|
||||
// This is different from the other octal fields.
|
||||
chksum, _ := checksum(header)
|
||||
f.formatOctal(header[148:155], chksum) // Never fails
|
||||
header[155] = ' '
|
||||
|
||||
// Check if there were any formatting errors.
|
||||
if f.err != nil {
|
||||
tw.err = f.err
|
||||
return tw.err
|
||||
}
|
||||
|
||||
if allowPax {
|
||||
if !hdr.AccessTime.IsZero() {
|
||||
paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime)
|
||||
}
|
||||
if !hdr.ChangeTime.IsZero() {
|
||||
paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime)
|
||||
}
|
||||
if !hdr.CreationTime.IsZero() {
|
||||
paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime)
|
||||
}
|
||||
for k, v := range hdr.Xattrs {
|
||||
paxHeaders[paxXattr+k] = v
|
||||
}
|
||||
for k, v := range hdr.Winheaders {
|
||||
paxHeaders[paxWindows+k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(paxHeaders) > 0 {
|
||||
if !allowPax {
|
||||
return errInvalidHeader
|
||||
}
|
||||
if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
tw.nb = int64(hdr.Size)
|
||||
tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
|
||||
|
||||
_, tw.err = tw.w.Write(header)
|
||||
return tw.err
|
||||
}
|
||||
|
||||
func formatPAXTime(t time.Time) string {
|
||||
sec := t.Unix()
|
||||
usec := t.Nanosecond()
|
||||
s := strconv.FormatInt(sec, 10)
|
||||
if usec != 0 {
|
||||
s = fmt.Sprintf("%s.%09d", s, usec)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
|
||||
// If the path is not splittable, then it will return ("", "", false).
|
||||
func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
|
||||
length := len(name)
|
||||
if length <= fileNameSize || !isASCII(name) {
|
||||
return "", "", false
|
||||
} else if length > fileNamePrefixSize+1 {
|
||||
length = fileNamePrefixSize + 1
|
||||
} else if name[length-1] == '/' {
|
||||
length--
|
||||
}
|
||||
|
||||
i := strings.LastIndex(name[:length], "/")
|
||||
nlen := len(name) - i - 1 // nlen is length of suffix
|
||||
plen := i // plen is length of prefix
|
||||
if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
|
||||
return "", "", false
|
||||
}
|
||||
return name[:i], name[i+1:], true
|
||||
}
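
The prefix/suffix rule above is easiest to see with a concrete value. The following self-contained sketch re-implements the same split purely as an illustration; it is not part of the vendored package and omits the additional ASCII check that splitUSTARPath performs:

package main

import (
    "fmt"
    "strings"
)

// splitUSTAR mirrors the rule used by splitUSTARPath above: the suffix must
// fit in the 100-byte name field, the prefix in the 155-byte prefix field,
// and the cut must land on a '/' boundary.
func splitUSTAR(name string) (prefix, suffix string, ok bool) {
    const nameSize, prefixSize = 100, 155
    length := len(name)
    if length <= nameSize {
        return "", "", false
    }
    if length > prefixSize+1 {
        length = prefixSize + 1
    } else if name[length-1] == '/' {
        length--
    }
    i := strings.LastIndex(name[:length], "/")
    nlen := len(name) - i - 1 // length of the suffix
    if i <= 0 || nlen > nameSize || nlen == 0 || i > prefixSize {
        return "", "", false
    }
    return name[:i], name[i+1:], true
}

func main() {
    long := strings.Repeat("a", 120) + "/file.txt"
    p, s, ok := splitUSTAR(long)
    fmt.Println(ok, len(p), s) // true 120 file.txt
}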
|
||||
|
||||
// writePAXHeader writes an extended PAX header to the archive.
|
||||
func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
|
||||
// Prepare extended header
|
||||
ext := new(Header)
|
||||
ext.Typeflag = TypeXHeader
|
||||
// Setting ModTime is required for reader parsing to
|
||||
// succeed, and seems harmless enough.
|
||||
ext.ModTime = hdr.ModTime
|
||||
// The spec asks that we namespace our pseudo files
|
||||
// with the current pid. However, this results in differing outputs
|
||||
// for identical inputs. As such, the constant 0 is now used instead.
|
||||
// golang.org/issue/12358
|
||||
dir, file := path.Split(hdr.Name)
|
||||
fullName := path.Join(dir, "PaxHeaders.0", file)
|
||||
|
||||
ascii := toASCII(fullName)
|
||||
if len(ascii) > 100 {
|
||||
ascii = ascii[:100]
|
||||
}
|
||||
ext.Name = ascii
|
||||
// Construct the body
|
||||
var buf bytes.Buffer
|
||||
|
||||
// Keys are sorted before writing to body to allow deterministic output.
|
||||
var keys []string
|
||||
for k := range paxHeaders {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
|
||||
}
|
||||
|
||||
ext.Size = int64(len(buf.Bytes()))
|
||||
if err := tw.writeHeader(ext, false); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := tw.Write(buf.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := tw.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// formatPAXRecord formats a single PAX record, prefixing it with the
|
||||
// appropriate length.
|
||||
func formatPAXRecord(k, v string) string {
|
||||
const padding = 3 // Extra padding for ' ', '=', and '\n'
|
||||
size := len(k) + len(v) + padding
|
||||
size += len(strconv.Itoa(size))
|
||||
record := fmt.Sprintf("%d %s=%s\n", size, k, v)
|
||||
|
||||
// Final adjustment if adding size field increased the record size.
|
||||
if len(record) != size {
|
||||
size = len(record)
|
||||
record = fmt.Sprintf("%d %s=%s\n", size, k, v)
|
||||
}
|
||||
return record
|
||||
}
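
The self-referential length prefix is the subtle part of this format: the decimal length counts the whole record, including its own digits and the trailing newline, so one adjustment pass may be needed. A minimal sketch that mirrors the computation (illustrative only, not part of the vendored package):

package main

import (
    "fmt"
    "strconv"
)

// paxRecord mirrors formatPAXRecord above: the decimal length prefix counts
// the entire record, including the prefix digits and the trailing newline.
func paxRecord(k, v string) string {
    size := len(k) + len(v) + 3 // ' ', '=', '\n'
    size += len(strconv.Itoa(size))
    record := fmt.Sprintf("%d %s=%s\n", size, k, v)
    // Adjust once if adding the size field itself grew the record.
    if len(record) != size {
        record = fmt.Sprintf("%d %s=%s\n", len(record), k, v)
    }
    return record
}

func main() {
    fmt.Printf("%q\n", paxRecord("path", "/etc/hosts")) // "19 path=/etc/hosts\n"
}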
|
||||
|
||||
// Write writes to the current entry in the tar archive.
|
||||
// Write returns the error ErrWriteTooLong if more than
|
||||
// hdr.Size bytes are written after WriteHeader.
|
||||
func (tw *Writer) Write(b []byte) (n int, err error) {
|
||||
if tw.closed {
|
||||
err = ErrWriteAfterClose
|
||||
return
|
||||
}
|
||||
overwrite := false
|
||||
if int64(len(b)) > tw.nb {
|
||||
b = b[0:tw.nb]
|
||||
overwrite = true
|
||||
}
|
||||
n, err = tw.w.Write(b)
|
||||
tw.nb -= int64(n)
|
||||
if err == nil && overwrite {
|
||||
err = ErrWriteTooLong
|
||||
return
|
||||
}
|
||||
tw.err = err
|
||||
return
|
||||
}
|
||||
|
||||
// Close closes the tar archive, flushing any unwritten
|
||||
// data to the underlying writer.
|
||||
func (tw *Writer) Close() error {
|
||||
if tw.err != nil || tw.closed {
|
||||
return tw.err
|
||||
}
|
||||
tw.Flush()
|
||||
tw.closed = true
|
||||
if tw.err != nil {
|
||||
return tw.err
|
||||
}
|
||||
|
||||
// trailer: two zero blocks
|
||||
for i := 0; i < 2; i++ {
|
||||
_, tw.err = tw.w.Write(zeroBlock)
|
||||
if tw.err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
return tw.err
|
||||
}
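
The Writer above keeps the WriteHeader/Write/Close contract of the standard library's archive/tar package. A minimal end-to-end sketch of that contract, written against the standard package purely to stay self-contained (the vendored fork is used the same way):

package main

import (
    "archive/tar"
    "bytes"
    "fmt"
    "log"
)

func main() {
    var buf bytes.Buffer
    tw := tar.NewWriter(&buf)

    // WriteHeader prepares the entry; Write must then supply exactly Size bytes.
    contents := []byte("hello\n")
    hdr := &tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(contents))}
    if err := tw.WriteHeader(hdr); err != nil {
        log.Fatal(err)
    }
    if _, err := tw.Write(contents); err != nil {
        log.Fatal(err)
    }
    // Close flushes the last entry and writes the two zero-block trailer.
    if err := tw.Close(); err != nil {
        log.Fatal(err)
    }
    fmt.Println("archive size:", buf.Len()) // header block + data block + 2 trailer blocks
}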
|
||||
739
vendor/github.com/Microsoft/go-winio/archive/tar/writer_test.go
generated
vendored
Normal file
@@ -0,0 +1,739 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tar
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/iotest"
|
||||
"time"
|
||||
)
|
||||
|
||||
type writerTestEntry struct {
|
||||
header *Header
|
||||
contents string
|
||||
}
|
||||
|
||||
type writerTest struct {
|
||||
file string // filename of expected output
|
||||
entries []*writerTestEntry
|
||||
}
|
||||
|
||||
var writerTests = []*writerTest{
|
||||
// The writer test file was produced with this command:
|
||||
// tar (GNU tar) 1.26
|
||||
// ln -s small.txt link.txt
|
||||
// tar -b 1 --format=ustar -c -f writer.tar small.txt small2.txt link.txt
|
||||
{
|
||||
file: "testdata/writer.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: "small.txt",
|
||||
Mode: 0640,
|
||||
Uid: 73025,
|
||||
Gid: 5000,
|
||||
Size: 5,
|
||||
ModTime: time.Unix(1246508266, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "dsymonds",
|
||||
Gname: "eng",
|
||||
},
|
||||
contents: "Kilts",
|
||||
},
|
||||
{
|
||||
header: &Header{
|
||||
Name: "small2.txt",
|
||||
Mode: 0640,
|
||||
Uid: 73025,
|
||||
Gid: 5000,
|
||||
Size: 11,
|
||||
ModTime: time.Unix(1245217492, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "dsymonds",
|
||||
Gname: "eng",
|
||||
},
|
||||
contents: "Google.com\n",
|
||||
},
|
||||
{
|
||||
header: &Header{
|
||||
Name: "link.txt",
|
||||
Mode: 0777,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1314603082, 0),
|
||||
Typeflag: '2',
|
||||
Linkname: "small.txt",
|
||||
Uname: "strings",
|
||||
Gname: "strings",
|
||||
},
|
||||
// no contents
|
||||
},
|
||||
},
|
||||
},
|
||||
// The truncated test file was produced using these commands:
|
||||
// dd if=/dev/zero bs=1048576 count=16384 > /tmp/16gig.txt
|
||||
// tar -b 1 -c -f- /tmp/16gig.txt | dd bs=512 count=8 > writer-big.tar
|
||||
{
|
||||
file: "testdata/writer-big.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: "tmp/16gig.txt",
|
||||
Mode: 0640,
|
||||
Uid: 73025,
|
||||
Gid: 5000,
|
||||
Size: 16 << 30,
|
||||
ModTime: time.Unix(1254699560, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "dsymonds",
|
||||
Gname: "eng",
|
||||
},
|
||||
// fake contents
|
||||
contents: strings.Repeat("\x00", 4<<10),
|
||||
},
|
||||
},
|
||||
},
|
||||
// The truncated test file was produced using these commands:
|
||||
// dd if=/dev/zero bs=1048576 count=16384 > (longname/)*15 /16gig.txt
|
||||
// tar -b 1 -c -f- (longname/)*15 /16gig.txt | dd bs=512 count=8 > writer-big-long.tar
|
||||
{
|
||||
file: "testdata/writer-big-long.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: strings.Repeat("longname/", 15) + "16gig.txt",
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 1000,
|
||||
Size: 16 << 30,
|
||||
ModTime: time.Unix(1399583047, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "guillaume",
|
||||
Gname: "guillaume",
|
||||
},
|
||||
// fake contents
|
||||
contents: strings.Repeat("\x00", 4<<10),
|
||||
},
|
||||
},
|
||||
},
|
||||
// This file was produced using gnu tar 1.17
|
||||
// gnutar -b 4 --format=ustar (longname/)*15 + file.txt
|
||||
{
|
||||
file: "testdata/ustar.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: strings.Repeat("longname/", 15) + "file.txt",
|
||||
Mode: 0644,
|
||||
Uid: 0765,
|
||||
Gid: 024,
|
||||
Size: 06,
|
||||
ModTime: time.Unix(1360135598, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "shane",
|
||||
Gname: "staff",
|
||||
},
|
||||
contents: "hello\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
// This file was produced using gnu tar 1.26
|
||||
// echo "Slartibartfast" > file.txt
|
||||
// ln file.txt hard.txt
|
||||
// tar -b 1 --format=ustar -c -f hardlink.tar file.txt hard.txt
|
||||
{
|
||||
file: "testdata/hardlink.tar",
|
||||
entries: []*writerTestEntry{
|
||||
{
|
||||
header: &Header{
|
||||
Name: "file.txt",
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 100,
|
||||
Size: 15,
|
||||
ModTime: time.Unix(1425484303, 0),
|
||||
Typeflag: '0',
|
||||
Uname: "vbatts",
|
||||
Gname: "users",
|
||||
},
|
||||
contents: "Slartibartfast\n",
|
||||
},
|
||||
{
|
||||
header: &Header{
|
||||
Name: "hard.txt",
|
||||
Mode: 0644,
|
||||
Uid: 1000,
|
||||
Gid: 100,
|
||||
Size: 0,
|
||||
ModTime: time.Unix(1425484303, 0),
|
||||
Typeflag: '1',
|
||||
Linkname: "file.txt",
|
||||
Uname: "vbatts",
|
||||
Gname: "users",
|
||||
},
|
||||
// no contents
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// bytestr renders a byte slice as space-separated two-character hexadecimal values (printable ASCII shown literally), prefixed with the offset, for easy visual inspection.
|
||||
func bytestr(offset int, b []byte) string {
|
||||
const rowLen = 32
|
||||
s := fmt.Sprintf("%04x ", offset)
|
||||
for _, ch := range b {
|
||||
switch {
|
||||
case '0' <= ch && ch <= '9', 'A' <= ch && ch <= 'Z', 'a' <= ch && ch <= 'z':
|
||||
s += fmt.Sprintf(" %c", ch)
|
||||
default:
|
||||
s += fmt.Sprintf(" %02x", ch)
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Render a pseudo-diff between two blocks of bytes.
|
||||
func bytediff(a []byte, b []byte) string {
|
||||
const rowLen = 32
|
||||
s := fmt.Sprintf("(%d bytes vs. %d bytes)\n", len(a), len(b))
|
||||
for offset := 0; len(a)+len(b) > 0; offset += rowLen {
|
||||
na, nb := rowLen, rowLen
|
||||
if na > len(a) {
|
||||
na = len(a)
|
||||
}
|
||||
if nb > len(b) {
|
||||
nb = len(b)
|
||||
}
|
||||
sa := bytestr(offset, a[0:na])
|
||||
sb := bytestr(offset, b[0:nb])
|
||||
if sa != sb {
|
||||
s += fmt.Sprintf("-%v\n+%v\n", sa, sb)
|
||||
}
|
||||
a = a[na:]
|
||||
b = b[nb:]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func TestWriter(t *testing.T) {
|
||||
testLoop:
|
||||
for i, test := range writerTests {
|
||||
expected, err := ioutil.ReadFile(test.file)
|
||||
if err != nil {
|
||||
t.Errorf("test %d: Unexpected error: %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
tw := NewWriter(iotest.TruncateWriter(buf, 4<<10)) // only catch the first 4 KB
|
||||
big := false
|
||||
for j, entry := range test.entries {
|
||||
big = big || entry.header.Size > 1<<10
|
||||
if err := tw.WriteHeader(entry.header); err != nil {
|
||||
t.Errorf("test %d, entry %d: Failed writing header: %v", i, j, err)
|
||||
continue testLoop
|
||||
}
|
||||
if _, err := io.WriteString(tw, entry.contents); err != nil {
|
||||
t.Errorf("test %d, entry %d: Failed writing contents: %v", i, j, err)
|
||||
continue testLoop
|
||||
}
|
||||
}
|
||||
// Only interested in Close failures for the small tests.
|
||||
if err := tw.Close(); err != nil && !big {
|
||||
t.Errorf("test %d: Failed closing archive: %v", i, err)
|
||||
continue testLoop
|
||||
}
|
||||
|
||||
actual := buf.Bytes()
|
||||
if !bytes.Equal(expected, actual) {
|
||||
t.Errorf("test %d: Incorrect result: (-=expected, +=actual)\n%v",
|
||||
i, bytediff(expected, actual))
|
||||
}
|
||||
if testing.Short() { // The second test is expensive.
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPax(t *testing.T) {
|
||||
// Create an archive with a large name
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat: %v", err)
|
||||
}
|
||||
// Force a PAX long name to be written
|
||||
longName := strings.Repeat("ab", 100)
|
||||
contents := strings.Repeat(" ", int(hdr.Size))
|
||||
hdr.Name = longName
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Name != longName {
|
||||
t.Fatal("Couldn't recover long file name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxSymlink(t *testing.T) {
|
||||
// Create an archive with a large linkname
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeSymlink
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
// Force a PAX long linkname to be written
|
||||
longLinkname := strings.Repeat("1234567890/1234567890", 10)
|
||||
hdr.Linkname = longLinkname
|
||||
|
||||
hdr.Size = 0
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Linkname != longLinkname {
|
||||
t.Fatal("Couldn't recover long link name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxNonAscii(t *testing.T) {
|
||||
// Create an archive with non-ASCII names. These should trigger a PAX header
// because PAX headers have a defined UTF-8 encoding.
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
|
||||
// some sample data
|
||||
chineseFilename := "文件名"
|
||||
chineseGroupname := "組"
|
||||
chineseUsername := "用戶名"
|
||||
|
||||
hdr.Name = chineseFilename
|
||||
hdr.Gname = chineseGroupname
|
||||
hdr.Uname = chineseUsername
|
||||
|
||||
contents := strings.Repeat(" ", int(hdr.Size))
|
||||
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Name != chineseFilename {
|
||||
t.Fatal("Couldn't recover unicode name")
|
||||
}
|
||||
if hdr.Gname != chineseGroupname {
|
||||
t.Fatal("Couldn't recover unicode group")
|
||||
}
|
||||
if hdr.Uname != chineseUsername {
|
||||
t.Fatal("Couldn't recover unicode user")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxXattrs(t *testing.T) {
|
||||
xattrs := map[string]string{
|
||||
"user.key": "value",
|
||||
}
|
||||
|
||||
// Create an archive with an xattr
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat: %v", err)
|
||||
}
|
||||
contents := "Kilts"
|
||||
hdr.Xattrs = xattrs
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Test that we can get the xattrs back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(hdr.Xattrs, xattrs) {
|
||||
t.Fatalf("xattrs did not survive round trip: got %+v, want %+v",
|
||||
hdr.Xattrs, xattrs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPaxHeadersSorted(t *testing.T) {
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat: %v", err)
|
||||
}
|
||||
contents := strings.Repeat(" ", int(hdr.Size))
|
||||
|
||||
hdr.Xattrs = map[string]string{
|
||||
"foo": "foo",
|
||||
"bar": "bar",
|
||||
"baz": "baz",
|
||||
"qux": "qux",
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, err = writer.Write([]byte(contents)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Simple test to make sure PAX extensions are in effect
|
||||
if !bytes.Contains(buf.Bytes(), []byte("PaxHeaders.0")) {
|
||||
t.Fatal("Expected at least one PAX header to be written.")
|
||||
}
|
||||
|
||||
// xattr bar should always appear before others
|
||||
indices := []int{
|
||||
bytes.Index(buf.Bytes(), []byte("bar=bar")),
|
||||
bytes.Index(buf.Bytes(), []byte("baz=baz")),
|
||||
bytes.Index(buf.Bytes(), []byte("foo=foo")),
|
||||
bytes.Index(buf.Bytes(), []byte("qux=qux")),
|
||||
}
|
||||
if !sort.IntsAreSorted(indices) {
|
||||
t.Fatal("PAX headers are not sorted")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUSTARLongName(t *testing.T) {
|
||||
// Create an archive with a path that failed to split with USTAR extension in previous versions.
|
||||
fileinfo, err := os.Stat("testdata/small.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
hdr, err := FileInfoHeader(fileinfo, "")
|
||||
hdr.Typeflag = TypeDir
|
||||
if err != nil {
|
||||
t.Fatalf("os.Stat:1 %v", err)
|
||||
}
|
||||
// Force a PAX long name to be written. The name was taken from a practical example
// that failed, with the characters replaced by digits to anonymize the sample.
|
||||
longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/"
|
||||
hdr.Name = longName
|
||||
|
||||
hdr.Size = 0
|
||||
var buf bytes.Buffer
|
||||
writer := NewWriter(&buf)
|
||||
if err := writer.WriteHeader(hdr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := writer.Close(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// Test that we can get a long name back out of the archive.
|
||||
reader := NewReader(&buf)
|
||||
hdr, err = reader.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if hdr.Name != longName {
|
||||
t.Fatal("Couldn't recover long name")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidTypeflagWithPAXHeader(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
tw := NewWriter(&buffer)
|
||||
|
||||
fileName := strings.Repeat("ab", 100)
|
||||
|
||||
hdr := &Header{
|
||||
Name: fileName,
|
||||
Size: 4,
|
||||
Typeflag: 0,
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
t.Fatalf("Failed to write header: %s", err)
|
||||
}
|
||||
if _, err := tw.Write([]byte("fooo")); err != nil {
|
||||
t.Fatalf("Failed to write the file's data: %s", err)
|
||||
}
|
||||
tw.Close()
|
||||
|
||||
tr := NewReader(&buffer)
|
||||
|
||||
for {
|
||||
header, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read header: %s", err)
|
||||
}
|
||||
if header.Typeflag != 0 {
|
||||
t.Fatalf("Typeflag should've been 0, found %d", header.Typeflag)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteAfterClose(t *testing.T) {
|
||||
var buffer bytes.Buffer
|
||||
tw := NewWriter(&buffer)
|
||||
|
||||
hdr := &Header{
|
||||
Name: "small.txt",
|
||||
Size: 5,
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
t.Fatalf("Failed to write header: %s", err)
|
||||
}
|
||||
tw.Close()
|
||||
if _, err := tw.Write([]byte("Kilts")); err != ErrWriteAfterClose {
|
||||
t.Fatalf("Write: got %v; want ErrWriteAfterClose", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSplitUSTARPath(t *testing.T) {
|
||||
var sr = strings.Repeat
|
||||
|
||||
var vectors = []struct {
|
||||
input string // Input path
|
||||
prefix string // Expected output prefix
|
||||
suffix string // Expected output suffix
|
||||
ok bool // Split success?
|
||||
}{
|
||||
{"", "", "", false},
|
||||
{"abc", "", "", false},
|
||||
{"用戶名", "", "", false},
|
||||
{sr("a", fileNameSize), "", "", false},
|
||||
{sr("a", fileNameSize) + "/", "", "", false},
|
||||
{sr("a", fileNameSize) + "/a", sr("a", fileNameSize), "a", true},
|
||||
{sr("a", fileNamePrefixSize) + "/", "", "", false},
|
||||
{sr("a", fileNamePrefixSize) + "/a", sr("a", fileNamePrefixSize), "a", true},
|
||||
{sr("a", fileNameSize+1), "", "", false},
|
||||
{sr("/", fileNameSize+1), sr("/", fileNameSize-1), "/", true},
|
||||
{sr("a", fileNamePrefixSize) + "/" + sr("b", fileNameSize),
|
||||
sr("a", fileNamePrefixSize), sr("b", fileNameSize), true},
|
||||
{sr("a", fileNamePrefixSize) + "//" + sr("b", fileNameSize), "", "", false},
|
||||
{sr("a/", fileNameSize), sr("a/", 77) + "a", sr("a/", 22), true},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
prefix, suffix, ok := splitUSTARPath(v.input)
|
||||
if prefix != v.prefix || suffix != v.suffix || ok != v.ok {
|
||||
t.Errorf("splitUSTARPath(%q):\ngot (%q, %q, %v)\nwant (%q, %q, %v)",
|
||||
v.input, prefix, suffix, ok, v.prefix, v.suffix, v.ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatPAXRecord(t *testing.T) {
|
||||
var medName = strings.Repeat("CD", 50)
|
||||
var longName = strings.Repeat("AB", 100)
|
||||
|
||||
var vectors = []struct {
|
||||
inputKey string
|
||||
inputVal string
|
||||
output string
|
||||
}{
|
||||
{"k", "v", "6 k=v\n"},
|
||||
{"path", "/etc/hosts", "19 path=/etc/hosts\n"},
|
||||
{"path", longName, "210 path=" + longName + "\n"},
|
||||
{"path", medName, "110 path=" + medName + "\n"},
|
||||
{"foo", "ba", "9 foo=ba\n"},
|
||||
{"foo", "bar", "11 foo=bar\n"},
|
||||
{"foo", "b=\nar=\n==\x00", "18 foo=b=\nar=\n==\x00\n"},
|
||||
{"foo", "hello9 foo=ba\nworld", "27 foo=hello9 foo=ba\nworld\n"},
|
||||
{"☺☻☹", "日a本b語ç", "27 ☺☻☹=日a本b語ç\n"},
|
||||
{"\x00hello", "\x00world", "17 \x00hello=\x00world\n"},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
output := formatPAXRecord(v.inputKey, v.inputVal)
|
||||
if output != v.output {
|
||||
t.Errorf("formatPAXRecord(%q, %q): got %q, want %q",
|
||||
v.inputKey, v.inputVal, output, v.output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFitsInBase256(t *testing.T) {
|
||||
var vectors = []struct {
|
||||
input int64
|
||||
width int
|
||||
ok bool
|
||||
}{
|
||||
{+1, 8, true},
|
||||
{0, 8, true},
|
||||
{-1, 8, true},
|
||||
{1 << 56, 8, false},
|
||||
{(1 << 56) - 1, 8, true},
|
||||
{-1 << 56, 8, true},
|
||||
{(-1 << 56) - 1, 8, false},
|
||||
{121654, 8, true},
|
||||
{-9849849, 8, true},
|
||||
{math.MaxInt64, 9, true},
|
||||
{0, 9, true},
|
||||
{math.MinInt64, 9, true},
|
||||
{math.MaxInt64, 12, true},
|
||||
{0, 12, true},
|
||||
{math.MinInt64, 12, true},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
ok := fitsInBase256(v.width, v.input)
|
||||
if ok != v.ok {
|
||||
t.Errorf("checkNumeric(%d, %d): got %v, want %v", v.input, v.width, ok, v.ok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatNumeric(t *testing.T) {
|
||||
var vectors = []struct {
|
||||
input int64
|
||||
output string
|
||||
ok bool
|
||||
}{
|
||||
// Test base-256 (binary) encoded values.
|
||||
{-1, "\xff", true},
|
||||
{-1, "\xff\xff", true},
|
||||
{-1, "\xff\xff\xff", true},
|
||||
{(1 << 0), "0", false},
|
||||
{(1 << 8) - 1, "\x80\xff", true},
|
||||
{(1 << 8), "0\x00", false},
|
||||
{(1 << 16) - 1, "\x80\xff\xff", true},
|
||||
{(1 << 16), "00\x00", false},
|
||||
{-1 * (1 << 0), "\xff", true},
|
||||
{-1*(1<<0) - 1, "0", false},
|
||||
{-1 * (1 << 8), "\xff\x00", true},
|
||||
{-1*(1<<8) - 1, "0\x00", false},
|
||||
{-1 * (1 << 16), "\xff\x00\x00", true},
|
||||
{-1*(1<<16) - 1, "00\x00", false},
|
||||
{537795476381659745, "0000000\x00", false},
|
||||
{537795476381659745, "\x80\x00\x00\x00\x07\x76\xa2\x22\xeb\x8a\x72\x61", true},
|
||||
{-615126028225187231, "0000000\x00", false},
|
||||
{-615126028225187231, "\xff\xff\xff\xff\xf7\x76\xa2\x22\xeb\x8a\x72\x61", true},
|
||||
{math.MaxInt64, "0000000\x00", false},
|
||||
{math.MaxInt64, "\x80\x00\x00\x00\x7f\xff\xff\xff\xff\xff\xff\xff", true},
|
||||
{math.MinInt64, "0000000\x00", false},
|
||||
{math.MinInt64, "\xff\xff\xff\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
|
||||
{math.MaxInt64, "\x80\x7f\xff\xff\xff\xff\xff\xff\xff", true},
|
||||
{math.MinInt64, "\xff\x80\x00\x00\x00\x00\x00\x00\x00", true},
|
||||
}
|
||||
|
||||
for _, v := range vectors {
|
||||
var f formatter
|
||||
output := make([]byte, len(v.output))
|
||||
f.formatNumeric(output, v.input)
|
||||
ok := (f.err == nil)
|
||||
if ok != v.ok {
|
||||
if v.ok {
|
||||
t.Errorf("formatNumeric(%d): got formatting failure, want success", v.input)
|
||||
} else {
|
||||
t.Errorf("formatNumeric(%d): got formatting success, want failure", v.input)
|
||||
}
|
||||
}
|
||||
if string(output) != v.output {
|
||||
t.Errorf("formatNumeric(%d): got %q, want %q", v.input, output, v.output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFormatPAXTime(t *testing.T) {
|
||||
t1 := time.Date(2000, 1, 1, 11, 0, 0, 0, time.UTC)
|
||||
t2 := time.Date(2000, 1, 1, 11, 0, 0, 100, time.UTC)
|
||||
t3 := time.Date(1960, 1, 1, 11, 0, 0, 0, time.UTC)
|
||||
t4 := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
verify := func(time time.Time, s string) {
|
||||
p := formatPAXTime(time)
|
||||
if p != s {
|
||||
t.Errorf("for %v, expected %s, got %s", time, s, p)
|
||||
}
|
||||
}
|
||||
verify(t1, "946724400")
|
||||
verify(t2, "946724400.000000100")
|
||||
verify(t3, "-315579600")
|
||||
verify(t4, "0")
|
||||
}
|
||||
280
vendor/github.com/Microsoft/go-winio/backup.go
generated
vendored
Normal file
@@ -0,0 +1,280 @@
|
||||
// +build windows
|
||||
|
||||
package winio
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
)
|
||||
|
||||
//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead
|
||||
//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite
|
||||
|
||||
const (
|
||||
BackupData = uint32(iota + 1)
|
||||
BackupEaData
|
||||
BackupSecurity
|
||||
BackupAlternateData
|
||||
BackupLink
|
||||
BackupPropertyData
|
||||
BackupObjectId
|
||||
BackupReparseData
|
||||
BackupSparseBlock
|
||||
BackupTxfsData
|
||||
)
|
||||
|
||||
const (
|
||||
StreamSparseAttributes = uint32(8)
|
||||
)
|
||||
|
||||
const (
|
||||
WRITE_DAC = 0x40000
|
||||
WRITE_OWNER = 0x80000
|
||||
ACCESS_SYSTEM_SECURITY = 0x1000000
|
||||
)
|
||||
|
||||
// BackupHeader represents a backup stream of a file.
|
||||
type BackupHeader struct {
|
||||
Id uint32 // The backup stream ID
|
||||
Attributes uint32 // Stream attributes
|
||||
Size int64 // The size of the stream in bytes
|
||||
Name string // The name of the stream (for BackupAlternateData only).
|
||||
Offset int64 // The offset of the stream in the file (for BackupSparseBlock only).
|
||||
}
|
||||
|
||||
type win32StreamId struct {
|
||||
StreamId uint32
|
||||
Attributes uint32
|
||||
Size uint64
|
||||
NameSize uint32
|
||||
}
|
||||
|
||||
// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series
|
||||
// of BackupHeader values.
|
||||
type BackupStreamReader struct {
|
||||
r io.Reader
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
|
||||
func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
|
||||
return &BackupStreamReader{r, 0}
|
||||
}
|
||||
|
||||
// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
|
||||
// it was not completely read.
|
||||
func (r *BackupStreamReader) Next() (*BackupHeader, error) {
|
||||
if r.bytesLeft > 0 {
|
||||
if s, ok := r.r.(io.Seeker); ok {
|
||||
// Make sure a no-op Seek with io.SeekCurrent succeeds
// before trying the actual seek, since some readers
// implement io.Seeker but cannot actually seek.
|
||||
if _, err := s.Seek(0, io.SeekCurrent); err == nil {
|
||||
if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.bytesLeft = 0
|
||||
}
|
||||
}
|
||||
if _, err := io.Copy(ioutil.Discard, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var wsi win32StreamId
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr := &BackupHeader{
|
||||
Id: wsi.StreamId,
|
||||
Attributes: wsi.Attributes,
|
||||
Size: int64(wsi.Size),
|
||||
}
|
||||
if wsi.NameSize != 0 {
|
||||
name := make([]uint16, int(wsi.NameSize/2))
|
||||
if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Name = syscall.UTF16ToString(name)
|
||||
}
|
||||
if wsi.StreamId == BackupSparseBlock {
|
||||
if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr.Size -= 8
|
||||
}
|
||||
r.bytesLeft = hdr.Size
|
||||
return hdr, nil
|
||||
}
|
||||
|
||||
// Read reads from the current backup stream.
|
||||
func (r *BackupStreamReader) Read(b []byte) (int, error) {
|
||||
if r.bytesLeft == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
if int64(len(b)) > r.bytesLeft {
|
||||
b = b[:r.bytesLeft]
|
||||
}
|
||||
n, err := r.r.Read(b)
|
||||
r.bytesLeft -= int64(n)
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
} else if r.bytesLeft == 0 && err == nil {
|
||||
err = io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
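
Together, NewBackupFileReader and NewBackupStreamReader turn the flat BackupRead byte stream into a sequence of typed streams. A minimal sketch of that loop (the file path is hypothetical; error handling is reduced to log.Fatal):

// +build windows

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "os"

    "github.com/Microsoft/go-winio"
)

func main() {
    // Hypothetical input file; any readable path works.
    f, err := os.Open(`C:\temp\example.txt`)
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // BackupFileReader yields the raw BackupRead byte stream;
    // BackupStreamReader splits it into typed streams with headers.
    r := winio.NewBackupFileReader(f, false)
    defer r.Close()
    br := winio.NewBackupStreamReader(r)

    for {
        hdr, err := br.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            log.Fatal(err)
        }
        switch hdr.Id {
        case winio.BackupData:
            b, _ := ioutil.ReadAll(br) // contents of the default data stream
            fmt.Printf("data: %d bytes\n", len(b))
        case winio.BackupAlternateData:
            fmt.Printf("alternate stream %s (%d bytes)\n", hdr.Name, hdr.Size)
        default:
            fmt.Printf("stream id %d (%d bytes)\n", hdr.Id, hdr.Size)
        }
    }
}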
|
||||
|
||||
// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
|
||||
type BackupStreamWriter struct {
|
||||
w io.Writer
|
||||
bytesLeft int64
|
||||
}
|
||||
|
||||
// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
|
||||
func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
|
||||
return &BackupStreamWriter{w, 0}
|
||||
}
|
||||
|
||||
// WriteHeader writes the next backup stream header and prepares for calls to Write().
|
||||
func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
|
||||
if w.bytesLeft != 0 {
|
||||
return fmt.Errorf("missing %d bytes", w.bytesLeft)
|
||||
}
|
||||
name := utf16.Encode([]rune(hdr.Name))
|
||||
wsi := win32StreamId{
|
||||
StreamId: hdr.Id,
|
||||
Attributes: hdr.Attributes,
|
||||
Size: uint64(hdr.Size),
|
||||
NameSize: uint32(len(name) * 2),
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
// Include space for the int64 block offset
|
||||
wsi.Size += 8
|
||||
}
|
||||
if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(name) != 0 {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if hdr.Id == BackupSparseBlock {
|
||||
if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.bytesLeft = hdr.Size
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes to the current backup stream.
|
||||
func (w *BackupStreamWriter) Write(b []byte) (int, error) {
|
||||
if w.bytesLeft < int64(len(b)) {
|
||||
return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
|
||||
}
|
||||
n, err := w.w.Write(b)
|
||||
w.bytesLeft -= int64(n)
|
||||
return n, err
|
||||
}
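
The write side mirrors the reader: each WriteHeader must be followed by exactly Size bytes before the next header. A minimal sketch that writes a data stream and an alternate data stream (the destination path is hypothetical):

// +build windows

package main

import (
    "log"
    "os"

    "github.com/Microsoft/go-winio"
)

func main() {
    // Hypothetical destination file.
    f, err := os.Create(`C:\temp\restored.txt`)
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    w := winio.NewBackupFileWriter(f, false)
    defer w.Close()
    bw := winio.NewBackupStreamWriter(w)

    // Each WriteHeader must be followed by exactly Size bytes of data.
    data := []byte("testing 1 2 3\n")
    if err := bw.WriteHeader(&winio.BackupHeader{Id: winio.BackupData, Size: int64(len(data))}); err != nil {
        log.Fatal(err)
    }
    if _, err := bw.Write(data); err != nil {
        log.Fatal(err)
    }

    ads := []byte("alternate stream\n")
    hdr := &winio.BackupHeader{Id: winio.BackupAlternateData, Size: int64(len(ads)), Name: ":ads.txt:$DATA"}
    if err := bw.WriteHeader(hdr); err != nil {
        log.Fatal(err)
    }
    if _, err := bw.Write(ads); err != nil {
        log.Fatal(err)
    }
}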
|
||||
|
||||
// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
|
||||
type BackupFileReader struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
|
||||
// Read will attempt to read the security descriptor of the file.
|
||||
func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
|
||||
r := &BackupFileReader{f, includeSecurity, 0}
|
||||
return r
|
||||
}
|
||||
|
||||
// Read reads a backup stream from the file by calling the Win32 API BackupRead().
|
||||
func (r *BackupFileReader) Read(b []byte) (int, error) {
|
||||
var bytesRead uint32
|
||||
err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupRead", r.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(r.f)
|
||||
if bytesRead == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return int(bytesRead), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileReader. It does not close
|
||||
// the underlying file.
|
||||
func (r *BackupFileReader) Close() error {
|
||||
if r.ctx != 0 {
|
||||
backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
|
||||
runtime.KeepAlive(r.f)
|
||||
r.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
|
||||
type BackupFileWriter struct {
|
||||
f *os.File
|
||||
includeSecurity bool
|
||||
ctx uintptr
|
||||
}
|
||||
|
||||
// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
|
||||
// Write() will attempt to restore the security descriptor from the stream.
|
||||
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
|
||||
w := &BackupFileWriter{f, includeSecurity, 0}
|
||||
return w
|
||||
}
|
||||
|
||||
// Write restores a portion of the file using the provided backup stream.
|
||||
func (w *BackupFileWriter) Write(b []byte) (int, error) {
|
||||
var bytesWritten uint32
|
||||
err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
|
||||
if err != nil {
|
||||
return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
|
||||
}
|
||||
runtime.KeepAlive(w.f)
|
||||
if int(bytesWritten) != len(b) {
|
||||
return int(bytesWritten), errors.New("not all bytes could be written")
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
// Close frees Win32 resources associated with the BackupFileWriter. It does not
|
||||
// close the underlying file.
|
||||
func (w *BackupFileWriter) Close() error {
|
||||
if w.ctx != 0 {
|
||||
backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
|
||||
runtime.KeepAlive(w.f)
|
||||
w.ctx = 0
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
|
||||
// or restore privileges have been acquired.
|
||||
//
|
||||
// If the file opened was a directory, it cannot be used with Readdir().
|
||||
func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) {
|
||||
winPath, err := syscall.UTF16FromString(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0)
|
||||
if err != nil {
|
||||
err = &os.PathError{Op: "open", Path: path, Err: err}
|
||||
return nil, err
|
||||
}
|
||||
return os.NewFile(uintptr(h), path), nil
|
||||
}
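
OpenForBackup only bypasses ACL checks once the backup or restore privilege is actually held by the process. A minimal sketch (the path is hypothetical; EnableProcessPrivileges is provided elsewhere in the winio package and is assumed here):

// +build windows

package main

import (
    "log"
    "syscall"

    "github.com/Microsoft/go-winio"
)

func main() {
    // EnableProcessPrivileges lives elsewhere in the winio package; it is
    // assumed here so that OpenForBackup can actually skip access checks.
    if err := winio.EnableProcessPrivileges([]string{"SeBackupPrivilege"}); err != nil {
        log.Fatal(err)
    }

    // Hypothetical path; FILE_FLAG_BACKUP_SEMANTICS is added by OpenForBackup.
    f, err := winio.OpenForBackup(`C:\temp\example.txt`,
        syscall.GENERIC_READ, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    fi, err := f.Stat()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("opened %s (%d bytes) for backup", fi.Name(), fi.Size())
}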
|
||||
255
vendor/github.com/Microsoft/go-winio/backup_test.go
generated
vendored
Normal file
@@ -0,0 +1,255 @@
|
||||
package winio
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"syscall"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var testFileName string
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
f, err := ioutil.TempFile("", "tmp")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
testFileName = f.Name()
|
||||
f.Close()
|
||||
defer os.Remove(testFileName)
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func makeTestFile(makeADS bool) error {
|
||||
os.Remove(testFileName)
|
||||
f, err := os.Create(testFileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = f.Write([]byte("testing 1 2 3\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if makeADS {
|
||||
a, err := os.Create(testFileName + ":ads.txt")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer a.Close()
|
||||
_, err = a.Write([]byte("alternate data stream\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestBackupRead(t *testing.T) {
|
||||
err := makeTestFile(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Open(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewBackupFileReader(f, false)
|
||||
defer r.Close()
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(b) == 0 {
|
||||
t.Fatal("no data")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupStreamRead(t *testing.T) {
|
||||
err := makeTestFile(true)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Open(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewBackupFileReader(f, false)
|
||||
defer r.Close()
|
||||
|
||||
br := NewBackupStreamReader(r)
|
||||
gotData := false
|
||||
gotAltData := false
|
||||
for {
|
||||
hdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
switch hdr.Id {
|
||||
case BackupData:
|
||||
if gotData {
|
||||
t.Fatal("duplicate data")
|
||||
}
|
||||
if hdr.Name != "" {
|
||||
t.Fatalf("unexpected name %s", hdr.Name)
|
||||
}
|
||||
b, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != "testing 1 2 3\n" {
|
||||
t.Fatalf("incorrect data %v", b)
|
||||
}
|
||||
gotData = true
|
||||
case BackupAlternateData:
|
||||
if gotAltData {
|
||||
t.Fatal("duplicate alt data")
|
||||
}
|
||||
if hdr.Name != ":ads.txt:$DATA" {
|
||||
t.Fatalf("incorrect name %s", hdr.Name)
|
||||
}
|
||||
b, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != "alternate data stream\n" {
|
||||
t.Fatalf("incorrect data %v", b)
|
||||
}
|
||||
gotAltData = true
|
||||
default:
|
||||
t.Fatalf("unknown stream ID %d", hdr.Id)
|
||||
}
|
||||
}
|
||||
if !gotData || !gotAltData {
|
||||
t.Fatal("missing stream")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupStreamWrite(t *testing.T) {
|
||||
f, err := os.Create(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
w := NewBackupFileWriter(f, false)
|
||||
defer w.Close()
|
||||
|
||||
data := "testing 1 2 3\n"
|
||||
altData := "alternate stream\n"
|
||||
|
||||
br := NewBackupStreamWriter(w)
|
||||
err = br.WriteHeader(&BackupHeader{Id: BackupData, Size: int64(len(data))})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err := br.Write([]byte(data))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != len(data) {
|
||||
t.Fatal("short write")
|
||||
}
|
||||
|
||||
err = br.WriteHeader(&BackupHeader{Id: BackupAlternateData, Size: int64(len(altData)), Name: ":ads.txt:$DATA"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
n, err = br.Write([]byte(altData))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != len(altData) {
|
||||
t.Fatal("short write")
|
||||
}
|
||||
|
||||
f.Close()
|
||||
|
||||
b, err := ioutil.ReadFile(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != data {
|
||||
t.Fatalf("wrong data %v", b)
|
||||
}
|
||||
|
||||
b, err = ioutil.ReadFile(testFileName + ":ads.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if string(b) != altData {
|
||||
t.Fatalf("wrong data %v", b)
|
||||
}
|
||||
}
|
||||
|
||||
func makeSparseFile() error {
|
||||
os.Remove(testFileName)
|
||||
f, err := os.Create(testFileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
const (
|
||||
FSCTL_SET_SPARSE = 0x000900c4
|
||||
FSCTL_SET_ZERO_DATA = 0x000980c8
|
||||
)
|
||||
|
||||
err = syscall.DeviceIoControl(syscall.Handle(f.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.Write([]byte("testing 1 2 3\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.Seek(1000000, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = f.Write([]byte("more data later\n"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestBackupSparseFile(t *testing.T) {
|
||||
err := makeSparseFile()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Open(testFileName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
r := NewBackupFileReader(f, false)
|
||||
defer r.Close()
|
||||
|
||||
br := NewBackupStreamReader(r)
|
||||
for {
|
||||
hdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
t.Log(hdr)
|
||||
}
|
||||
}
|
||||
4
vendor/github.com/Microsoft/go-winio/backuptar/noop.go
generated
vendored
Normal file
@@ -0,0 +1,4 @@
|
||||
// +build !windows
|
||||
// This file only exists to allow go get on non-Windows platforms.
|
||||
|
||||
package backuptar
|
||||
439
vendor/github.com/Microsoft/go-winio/backuptar/tar.go
generated
vendored
Normal file
@@ -0,0 +1,439 @@
|
||||
// +build windows
|
||||
|
||||
package backuptar
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface
|
||||
)
|
||||
|
||||
const (
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
c_ISDIR = 040000 // Directory
|
||||
c_ISFIFO = 010000 // FIFO
|
||||
c_ISREG = 0100000 // Regular file
|
||||
c_ISLNK = 0120000 // Symbolic link
|
||||
c_ISBLK = 060000 // Block special file
|
||||
c_ISCHR = 020000 // Character special file
|
||||
c_ISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
const (
|
||||
hdrFileAttributes = "fileattr"
|
||||
hdrSecurityDescriptor = "sd"
|
||||
hdrRawSecurityDescriptor = "rawsd"
|
||||
hdrMountPoint = "mountpoint"
|
||||
hdrEaPrefix = "xattr."
|
||||
)
|
||||
|
||||
func writeZeroes(w io.Writer, count int64) error {
|
||||
buf := make([]byte, 8192)
|
||||
c := len(buf)
|
||||
for i := int64(0); i < count; i += int64(c) {
|
||||
if int64(c) > count-i {
|
||||
c = int(count - i)
|
||||
}
|
||||
_, err := w.Write(buf[:c])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
|
||||
curOffset := int64(0)
|
||||
for {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bhdr.Id != winio.BackupSparseBlock {
|
||||
return fmt.Errorf("unexpected stream %d", bhdr.Id)
|
||||
}
|
||||
|
||||
// archive/tar does not support writing sparse files
|
||||
// so just write zeroes to catch up to the current offset.
|
||||
err = writeZeroes(t, bhdr.Offset-curOffset)
if err != nil {
return err
}
if bhdr.Size == 0 {
break
}
|
||||
n, err := io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
curOffset = bhdr.Offset + n
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BasicInfoHeader creates a tar header from basic file information.
|
||||
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
|
||||
hdr := &tar.Header{
|
||||
Name: filepath.ToSlash(name),
|
||||
Size: size,
|
||||
Typeflag: tar.TypeReg,
|
||||
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
|
||||
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
|
||||
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
|
||||
CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()),
|
||||
Winheaders: make(map[string]string),
|
||||
}
|
||||
hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
|
||||
|
||||
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
|
||||
hdr.Mode |= c_ISDIR
|
||||
hdr.Size = 0
|
||||
hdr.Typeflag = tar.TypeDir
|
||||
}
|
||||
return hdr
|
||||
}
|
||||
|
||||
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
|
||||
//
|
||||
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
|
||||
//
|
||||
// The additional Win32 metadata is:
|
||||
//
|
||||
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
|
||||
//
|
||||
// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
|
||||
//
|
||||
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
|
||||
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
|
||||
name = filepath.ToSlash(name)
|
||||
hdr := BasicInfoHeader(name, size, fileInfo)
|
||||
|
||||
// If r supports seeking, then this function is two-pass: pass 1 collects the
// tar header data, and pass 2 copies the data stream. If r cannot seek,
// then some header data (in particular EAs) will be silently lost.
|
||||
var (
|
||||
restartPos int64
|
||||
err error
|
||||
)
|
||||
sr, readTwice := r.(io.Seeker)
|
||||
if readTwice {
|
||||
if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
|
||||
readTwice = false
|
||||
}
|
||||
}
|
||||
|
||||
br := winio.NewBackupStreamReader(r)
|
||||
var dataHdr *winio.BackupHeader
|
||||
for dataHdr == nil {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch bhdr.Id {
|
||||
case winio.BackupData:
|
||||
hdr.Mode |= c_ISREG
|
||||
if !readTwice {
|
||||
dataHdr = bhdr
|
||||
}
|
||||
case winio.BackupSecurity:
|
||||
sd, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
|
||||
|
||||
case winio.BackupReparseData:
|
||||
hdr.Mode |= c_ISLNK
|
||||
hdr.Typeflag = tar.TypeSymlink
|
||||
reparseBuffer, err := ioutil.ReadAll(br)
if err != nil {
return err
}
rp, err := winio.DecodeReparsePoint(reparseBuffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rp.IsMountPoint {
|
||||
hdr.Winheaders[hdrMountPoint] = "1"
|
||||
}
|
||||
hdr.Linkname = rp.Target
|
||||
|
||||
case winio.BackupEaData:
|
||||
eab, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
eas, err := winio.DecodeExtendedAttributes(eab)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ea := range eas {
|
||||
// Use base64 encoding for the binary value. Note that there
|
||||
// is no way to encode the EA's flags, since their use doesn't
|
||||
// make any sense for persisted EAs.
|
||||
hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
|
||||
}
|
||||
|
||||
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
|
||||
// ignore these streams
|
||||
default:
|
||||
return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
|
||||
}
|
||||
}
|
||||
|
||||
err = t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if readTwice {
|
||||
// Get back to the data stream.
|
||||
if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
for dataHdr == nil {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bhdr.Id == winio.BackupData {
|
||||
dataHdr = bhdr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if dataHdr != nil {
|
||||
// A data stream was found. Copy the data.
|
||||
if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
|
||||
if size != dataHdr.Size {
|
||||
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
|
||||
}
|
||||
_, err = io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = copySparse(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Look for streams after the data stream. The only ones we handle are alternate data streams.
|
||||
// Other streams may have metadata that could be serialized, but the tar header has already
|
||||
// been written. In practice, this means that we don't get EA or TXF metadata.
|
||||
for {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch bhdr.Id {
|
||||
case winio.BackupAlternateData:
|
||||
altName := bhdr.Name
|
||||
if strings.HasSuffix(altName, ":$DATA") {
|
||||
altName = altName[:len(altName)-len(":$DATA")]
|
||||
}
|
||||
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
|
||||
hdr = &tar.Header{
|
||||
Name: name + altName,
|
||||
Mode: hdr.Mode,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: bhdr.Size,
|
||||
ModTime: hdr.ModTime,
|
||||
AccessTime: hdr.AccessTime,
|
||||
ChangeTime: hdr.ChangeTime,
|
||||
}
|
||||
err = t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
} else {
|
||||
// Unsupported for now, since the size of the alternate stream is not present
|
||||
// in the backup stream until after the data has been read.
|
||||
return errors.New("tar of sparse alternate data streams is unsupported")
|
||||
}
|
||||
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
|
||||
// ignore these streams
|
||||
default:
|
||||
return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
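
A typical capture path combines OpenForBackup, GetFileBasicInfo (defined elsewhere in the winio package) and a BackupFileReader to feed this function. A minimal sketch for a single regular file (paths are hypothetical):

// +build windows

package main

import (
    "log"
    "os"
    "syscall"

    "github.com/Microsoft/go-winio"
    "github.com/Microsoft/go-winio/archive/tar"
    "github.com/Microsoft/go-winio/backuptar"
)

func main() {
    out, err := os.Create(`C:\temp\file.tar`) // hypothetical output path
    if err != nil {
        log.Fatal(err)
    }
    defer out.Close()
    tw := tar.NewWriter(out)
    defer tw.Close()

    // Open the source with backup semantics and collect its basic info,
    // which supplies the timestamps and attributes for the tar header.
    f, err := winio.OpenForBackup(`C:\temp\example.txt`,
        syscall.GENERIC_READ, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    bi, err := winio.GetFileBasicInfo(f)
    if err != nil {
        log.Fatal(err)
    }
    fi, err := f.Stat()
    if err != nil {
        log.Fatal(err)
    }

    r := winio.NewBackupFileReader(f, true) // true: include the security descriptor
    defer r.Close()
    if err := backuptar.WriteTarFileFromBackupStream(tw, r, "example.txt", fi.Size(), bi); err != nil {
        log.Fatal(err)
    }
}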
|
||||
|
||||
// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
|
||||
// WriteTarFileFromBackupStream.
|
||||
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
|
||||
name = hdr.Name
|
||||
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
|
||||
size = hdr.Size
|
||||
}
|
||||
fileInfo = &winio.FileBasicInfo{
|
||||
LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
|
||||
LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
|
||||
ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
|
||||
CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
|
||||
}
|
||||
if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
|
||||
attr, err := strconv.ParseUint(attrStr, 10, 32)
|
||||
if err != nil {
|
||||
return "", 0, nil, err
|
||||
}
|
||||
fileInfo.FileAttributes = uint32(attr)
|
||||
} else {
|
||||
if hdr.Typeflag == tar.TypeDir {
|
||||
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
// tar entry that was not processed, or io.EOF if there are no more.
|
||||
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
|
||||
bw := winio.NewBackupStreamWriter(w)
|
||||
var sd []byte
|
||||
var err error
|
||||
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
|
||||
// by this library will have raw binary for the security descriptor.
|
||||
if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
|
||||
sd, err = winio.SddlToSecurityDescriptor(sddl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
|
||||
sd, err = base64.StdEncoding.DecodeString(sdraw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if len(sd) != 0 {
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupSecurity,
|
||||
Size: int64(len(sd)),
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(sd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var eas []winio.ExtendedAttribute
|
||||
for k, v := range hdr.Winheaders {
|
||||
if !strings.HasPrefix(k, hdrEaPrefix) {
|
||||
continue
|
||||
}
|
||||
data, err := base64.StdEncoding.DecodeString(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eas = append(eas, winio.ExtendedAttribute{
|
||||
Name: k[len(hdrEaPrefix):],
|
||||
Value: data,
|
||||
})
|
||||
}
|
||||
if len(eas) != 0 {
|
||||
eadata, err := winio.EncodeExtendedAttributes(eas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupEaData,
|
||||
Size: int64(len(eadata)),
|
||||
}
|
||||
err = bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(eadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeSymlink {
|
||||
_, isMountPoint := hdr.Winheaders[hdrMountPoint]
|
||||
rp := winio.ReparsePoint{
|
||||
Target: filepath.FromSlash(hdr.Linkname),
|
||||
IsMountPoint: isMountPoint,
|
||||
}
|
||||
reparse := winio.EncodeReparsePoint(&rp)
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupReparseData,
|
||||
Size: int64(len(reparse)),
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(reparse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupData,
|
||||
Size: hdr.Size,
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = io.Copy(bw, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Copy all the alternate data streams and return the next non-ADS header.
|
||||
for {
|
||||
ahdr, err := t.Next()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
|
||||
return ahdr, nil
|
||||
}
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupAlternateData,
|
||||
Size: ahdr.Size,
|
||||
Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
|
||||
}
|
||||
err = bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = io.Copy(bw, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
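For orientation only (not part of the vendored file): the restore direction combines the two exported functions above. Below is a minimal, hedged sketch that reads an archive produced by WriteTarFileFromBackupStream, recovers the Win32 metadata with FileInfoFromHeader, and re-encodes each entry as a backup stream with WriteBackupStreamFromTarFile. The file name "backup.tar" is hypothetical, and the destination is a plain bytes.Buffer; a real restore would typically write to a backup-capable destination (such as winio.NewBackupFileWriter) instead.

// Sketch: walking a tar archive produced by WriteTarFileFromBackupStream and
// re-encoding each entry as a Win32 backup stream. Windows-only, since the
// go-winio types used here are Windows-specific.
package main

import (
	"bytes"
	"io"
	"log"
	"os"

	"github.com/Microsoft/go-winio/archive/tar"
	"github.com/Microsoft/go-winio/backuptar"
)

func main() {
	f, err := os.Open("backup.tar") // hypothetical archive path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	hdr, err := tr.Next()
	for err == nil {
		// Recover name, size and basic Win32 metadata carried in the tar header.
		name, size, info, ferr := backuptar.FileInfoFromHeader(hdr)
		if ferr != nil {
			log.Fatal(ferr)
		}
		log.Printf("entry %s: %d bytes, attributes 0x%x", name, size, info.FileAttributes)

		// Re-encode this entry (plus any alternate data stream entries that follow it)
		// as a backup stream; hdr advances to the next unprocessed tar entry.
		var buf bytes.Buffer
		hdr, err = backuptar.WriteBackupStreamFromTarFile(&buf, tr, hdr)
	}
	if err != io.EOF {
		log.Fatal(err)
	}
}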
84 vendor/github.com/Microsoft/go-winio/backuptar/tar_test.go generated vendored Normal file
@@ -0,0 +1,84 @@
package backuptar

import (
	"bytes"
	"io/ioutil"
	"os"
	"path/filepath"
	"reflect"
	"testing"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/archive/tar"
)

func ensurePresent(t *testing.T, m map[string]string, keys ...string) {
	for _, k := range keys {
		if _, ok := m[k]; !ok {
			t.Error(k, "not present in tar header")
		}
	}
}

func TestRoundTrip(t *testing.T) {
	f, err := ioutil.TempFile("", "tst")
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	defer os.Remove(f.Name())

	if _, err = f.Write([]byte("testing 1 2 3\n")); err != nil {
		t.Fatal(err)
	}

	if _, err = f.Seek(0, 0); err != nil {
		t.Fatal(err)
	}

	fi, err := f.Stat()
	if err != nil {
		t.Fatal(err)
	}

	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		t.Fatal(err)
	}

	br := winio.NewBackupFileReader(f, true)
	defer br.Close()

	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)

	err = WriteTarFileFromBackupStream(tw, br, f.Name(), fi.Size(), bi)
	if err != nil {
		t.Fatal(err)
	}

	tr := tar.NewReader(&buf)
	hdr, err := tr.Next()
	if err != nil {
		t.Fatal(err)
	}

	name, size, bi2, err := FileInfoFromHeader(hdr)
	if err != nil {
		t.Fatal(err)
	}

	if name != filepath.ToSlash(f.Name()) {
		t.Errorf("got name %s, expected %s", name, filepath.ToSlash(f.Name()))
	}

	if size != fi.Size() {
		t.Errorf("got size %d, expected %d", size, fi.Size())
	}

	if !reflect.DeepEqual(*bi, *bi2) {
		t.Errorf("got %#v, expected %#v", *bi, *bi2)
	}

	ensurePresent(t, hdr.Winheaders, "fileattr", "rawsd")
}

137 vendor/github.com/Microsoft/go-winio/ea.go generated vendored Normal file
@@ -0,0 +1,137 @@
package winio

import (
	"bytes"
	"encoding/binary"
	"errors"
)

type fileFullEaInformation struct {
	NextEntryOffset uint32
	Flags           uint8
	NameLength      uint8
	ValueLength     uint16
}

var (
	fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})

	errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
	errEaNameTooLarge  = errors.New("extended attribute name too large")
	errEaValueTooLarge = errors.New("extended attribute value too large")
)

// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
	Name  string
	Value []byte
	Flags uint8
}

func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
	var info fileFullEaInformation
	err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info)
	if err != nil {
		err = errInvalidEaBuffer
		return
	}

	nameOffset := fileFullEaInformationSize
	nameLen := int(info.NameLength)
	valueOffset := nameOffset + int(info.NameLength) + 1
	valueLen := int(info.ValueLength)
	nextOffset := int(info.NextEntryOffset)
	if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) {
		err = errInvalidEaBuffer
		return
	}

	ea.Name = string(b[nameOffset : nameOffset+nameLen])
	ea.Value = b[valueOffset : valueOffset+valueLen]
	ea.Flags = info.Flags
	if info.NextEntryOffset != 0 {
		nb = b[info.NextEntryOffset:]
	}
	return
}

// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
	for len(b) != 0 {
		ea, nb, err := parseEa(b)
		if err != nil {
			return nil, err
		}

		eas = append(eas, ea)
		b = nb
	}
	return
}

func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error {
	if int(uint8(len(ea.Name))) != len(ea.Name) {
		return errEaNameTooLarge
	}
	if int(uint16(len(ea.Value))) != len(ea.Value) {
		return errEaValueTooLarge
	}
	entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value))
	withPadding := (entrySize + 3) &^ 3
	nextOffset := uint32(0)
	if !last {
		nextOffset = withPadding
	}
	info := fileFullEaInformation{
		NextEntryOffset: nextOffset,
		Flags:           ea.Flags,
		NameLength:      uint8(len(ea.Name)),
		ValueLength:     uint16(len(ea.Value)),
	}

	err := binary.Write(buf, binary.LittleEndian, &info)
	if err != nil {
		return err
	}

	_, err = buf.Write([]byte(ea.Name))
	if err != nil {
		return err
	}

	err = buf.WriteByte(0)
	if err != nil {
		return err
	}

	_, err = buf.Write(ea.Value)
	if err != nil {
		return err
	}

	_, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize])
	if err != nil {
		return err
	}

	return nil
}

// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
// buffer for use with BackupWrite, ZwSetEaFile, etc.
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
	var buf bytes.Buffer
	for i := range eas {
		last := false
		if i == len(eas)-1 {
			last = true
		}

		err := writeEa(&buf, &eas[i], last)
		if err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}
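As a brief illustration (not part of the vendored file), the two exported functions above round-trip cleanly: EncodeExtendedAttributes packs a slice of EAs into a single FILE_FULL_EA_INFORMATION buffer with 4-byte padding between entries, and DecodeExtendedAttributes walks the NextEntryOffset chain back into a slice. The sketch below uses made-up attribute names and values and only exercises the codec, not BackupWrite or ZwSetEaFile; it assumes a Windows build, since package winio is Windows-specific.

// Sketch: encode a couple of illustrative EAs and decode them back.
package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	in := []winio.ExtendedAttribute{
		{Name: "user.comment", Value: []byte("hello")}, // example attribute, not a real convention
		{Name: "user.origin", Value: []byte("example")},
	}

	// Pack into a FILE_FULL_EA_INFORMATION buffer.
	buf, err := winio.EncodeExtendedAttributes(in)
	if err != nil {
		log.Fatal(err)
	}

	// Walk the buffer back into a slice of ExtendedAttribute values.
	out, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		log.Fatal(err)
	}

	for _, ea := range out {
		fmt.Printf("%s = %q (flags %d)\n", ea.Name, ea.Value, ea.Flags)
	}
}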