Mirror of https://github.com/prometheus-community/windows_exporter.git (synced 2026-02-08 05:56:37 +00:00)

Compare commits: v0.3.4 ... v0.10.2-uw (188 commits)
.gitignore (vendored, 3 changes)

@@ -2,3 +2,6 @@
VERSION
*.swp
*.un~
output/
.vscode
.idea
.golangci.yaml (new file, 23 lines)

@@ -0,0 +1,23 @@
linters:
  disable-all: true
  enable:
    - deadcode
    - errcheck
    - golint
    - govet
    - gofmt
    - ineffassign
    - interfacer
    - structcheck
    - unconvert
    - varcheck

issues:
  exclude:
    - don't use underscores in Go names
    - exported type .+ should have comment or be unexported
  exclude-rules:
    - # Golint has many capitalisation complaints on WMI class names
      text: "`?\\w+`? should be `?\\w+`?"
      linters:
        - golint
.promu.yml (14 changes)

@@ -4,11 +4,15 @@ build:
    binaries:
        - name: wmi_exporter
    ldflags: |
        -X {{repoPath}}/vendor/github.com/prometheus/common/version.Version={{.Version}}
        -X {{repoPath}}/vendor/github.com/prometheus/common/version.Revision={{.Revision}}
        -X {{repoPath}}/vendor/github.com/prometheus/common/version.Branch={{.Branch}}
        -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
        -X {{repoPath}}/vendor/github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
        -X github.com/prometheus/common/version.Version={{.Version}}
        -X github.com/prometheus/common/version.Revision={{.Revision}}
        -X github.com/prometheus/common/version.Branch={{.Branch}}
        -X github.com/prometheus/common/version.BuildUser={{user}}@{{host}}
        -X github.com/prometheus/common/version.BuildDate={{date "20060102-15:04:05"}}
tarball:
    files:
        - LICENSE
crossbuild:
    platforms:
        - windows/amd64
        - windows/386
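For reference, the `-X` flags above overwrite plain package variables in `github.com/prometheus/common/version`. A short sketch of how an exporter binary typically surfaces them at startup (this snippet is illustrative and not part of the diff):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/version"
)

func main() {
	// Version, Revision, Branch, BuildUser and BuildDate are package-level
	// variables; the ldflags above replace their defaults at link time.
	fmt.Println(version.Print("wmi_exporter"))
}
```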
Gopkg.lock (generated, 125 deletions)
@@ -1,125 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/StackExchange/wmi"
|
||||
packages = ["."]
|
||||
revision = "5d049714c4a64225c3c79a7cf7d02f7fb5b96338"
|
||||
version = "1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/alecthomas/template"
|
||||
packages = [
|
||||
".",
|
||||
"parse"
|
||||
]
|
||||
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/alecthomas/units"
|
||||
packages = ["."]
|
||||
revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/beorn7/perks"
|
||||
packages = ["quantile"]
|
||||
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-ole/go-ole"
|
||||
packages = [
|
||||
".",
|
||||
"oleutil"
|
||||
]
|
||||
revision = "a41e3c4b706f6ae8dfbff342b06e40fa4d2d0506"
|
||||
version = "v1.2.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/matttproud/golang_protobuf_extensions"
|
||||
packages = ["pbutil"]
|
||||
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/prometheus/client_golang"
|
||||
packages = [
|
||||
"prometheus",
|
||||
"prometheus/promhttp"
|
||||
]
|
||||
revision = "c5b7fccd204277076155f10851dad72b76a49317"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/client_model"
|
||||
packages = ["go"]
|
||||
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/common"
|
||||
packages = [
|
||||
"expfmt",
|
||||
"internal/bitbucket.org/ww/goautoneg",
|
||||
"log",
|
||||
"model",
|
||||
"version"
|
||||
]
|
||||
revision = "e4aa40a9169a88835b849a6efb71e05dc04b88f0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/prometheus/procfs"
|
||||
packages = [
|
||||
".",
|
||||
"internal/util",
|
||||
"nfs",
|
||||
"xfs"
|
||||
]
|
||||
revision = "54d17b57dd7d4a3aa092476596b3f8a933bde349"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc"
|
||||
version = "v1.0.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
revision = "182114d582623c1caa54f73de9c7224e23a48487"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows",
|
||||
"windows/registry",
|
||||
"windows/svc",
|
||||
"windows/svc/eventlog"
|
||||
]
|
||||
revision = "8c0ece68c28377f4c326d85b94f8df0dace46f80"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/alecthomas/kingpin.v2"
|
||||
packages = ["."]
|
||||
revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
|
||||
version = "v2.2.6"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "01d9c528210a99a08cc65af3a40d23a31df0c00193639751917ea1adb7d1ee1c"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
Gopkg.toml (4 deletions)

@@ -1,4 +0,0 @@
[prune]
  non-go = true
  go-tests = true
  unused-packages = true
Makefile (18 changes)

@@ -1,5 +1,19 @@
fmt:
	gofmt -l -w -s .
export GOOS=windows

build:
	promu build -v

test:
	go test -v ./...

lint:
	golangci-lint -c .golangci.yaml run

fmt:
	gofmt -l -w -s .

crossbuild:
	# The prometheus/golang-builder image for promu crossbuild doesn't exist
	# on Windows, so for now, we'll just build twice
	GOARCH=amd64 promu build --prefix=output/amd64
	GOARCH=386 promu build --prefix=output/386
README.md (69 changes)

@@ -1,6 +1,6 @@
# WMI exporter

[](https://ci.appveyor.com/project/martinlindhe/wmi-exporter)
[](https://ci.appveyor.com/project/martinlindhe/wmi-exporter)

Prometheus exporter for Windows machines, using WMI (Windows Management Instrumentation).

@@ -9,24 +9,38 @@ Prometheus exporter for Windows machines, using the WMI (Windows Management Inst

Name | Description | Enabled by default
---------|-------------|--------------------
ad | [Win32_PerfRawData_DirectoryServices_DirectoryServices](https://msdn.microsoft.com/en-us/library/ms803980.aspx) Active Directory |
cpu | [Win32_PerfRawData_PerfOS_Processor](https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx) metrics (cpu usage) | ✓
cs | [Win32_ComputerSystem](https://msdn.microsoft.com/en-us/library/aa394102) metrics (system properties, num cpus/total memory) | ✓
dns | [Win32_PerfRawData_DNS_DNS](https://technet.microsoft.com/en-us/library/cc977686.aspx) metrics (DNS Server) |
hyperv | Performance counters for Hyper-V hosts |
iis | [Win32_PerfRawData_W3SVC_WebService](https://msdn.microsoft.com/en-us/library/aa394345) IIS metrics |
logical_disk | [Win32_PerfRawData_PerfDisk_LogicalDisk](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71)) metrics (disk I/O) | ✓
net | [Win32_PerfRawData_Tcpip_NetworkInterface](https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)) metrics (network interface I/O) | ✓
msmq | [Win32_PerfRawData_MSMQ_MSMQQueue](http://wutils.com/wmi/root/cimv2/win32_perfrawdata_msmq_msmqqueue/) metrics (MSMQ/journal count) |
os | [Win32_OperatingSystem](https://msdn.microsoft.com/en-us/library/aa394239) metrics (memory, processes, users) | ✓
process | [Win32_PerfRawData_PerfProc_Process](https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx) metrics (per-process stats) |
service | [Win32_Service](https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx) metrics (service states) | ✓
system | Win32_PerfRawData_PerfOS_System metrics (system calls) | ✓
tcp | [Win32_PerfRawData_Tcpip_TCPv4](https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx) metrics (tcp connections) |
textfile | Read prometheus metrics from a text file | ✓
vmware | Performance counters installed by the Vmware Guest agent |
[ad](docs/collector.ad.md) | Active Directory Domain Services |
[adfs](docs/collector.adfs.md) | Active Directory Federation Services |
[cpu](docs/collector.cpu.md) | CPU usage | ✓
[cs](docs/collector.cs.md) | "Computer System" metrics (system properties, num cpus/total memory) | ✓
[container](docs/collector.container.md) | Container metrics |
[dns](docs/collector.dns.md) | DNS Server |
[hyperv](docs/collector.hyperv.md) | Hyper-V hosts |
[iis](docs/collector.iis.md) | IIS sites and applications |
[logical_disk](docs/collector.logical_disk.md) | Logical disks, disk I/O | ✓
[logon](docs/collector.logon.md) | User logon sessions |
[memory](docs/collector.memory.md) | Memory usage metrics |
[msmq](docs/collector.msmq.md) | MSMQ queues |
[mssql](docs/collector.mssql.md) | [SQL Server Performance Objects](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/use-sql-server-objects#SQLServerPOs) metrics |
[netframework_clrexceptions](docs/collector.netframework_clrexceptions.md) | .NET Framework CLR Exceptions |
[netframework_clrinterop](docs/collector.netframework_clrinterop.md) | .NET Framework Interop Metrics |
[netframework_clrjit](docs/collector.netframework_clrjit.md) | .NET Framework JIT metrics |
[netframework_clrloading](docs/collector.netframework_clrloading.md) | .NET Framework CLR Loading metrics |
[netframework_clrlocksandthreads](docs/collector.netframework_clrlocksandthreads.md) | .NET Framework locks and threads metrics |
[netframework_clrmemory](docs/collector.netframework_clrmemory.md) | .NET Framework Memory metrics |
[netframework_clrremoting](docs/collector.netframework_clrremoting.md) | .NET Framework Remoting metrics |
[netframework_clrsecurity](docs/collector.netframework_clrsecurity.md) | .NET Framework Security Check metrics |
[net](docs/collector.net.md) | Network interface I/O | ✓
[os](docs/collector.os.md) | OS metrics (memory, processes, users) | ✓
[process](docs/collector.process.md) | Per-process metrics |
[service](docs/collector.service.md) | Service state metrics | ✓
[system](docs/collector.system.md) | System calls | ✓
[tcp](docs/collector.tcp.md) | TCP connections |
[thermalzone](docs/collector.thermalzone.md) | Thermal information |
[textfile](docs/collector.textfile.md) | Read prometheus metrics from a text file | ✓
[vmware](docs/collector.vmware.md) | Performance counters installed by the Vmware Guest agent |

The HELP texts show the WMI data source; please see the MSDN documentation for details.
See the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples.

## Installation
The latest release can be downloaded from the [releases page](https://github.com/martinlindhe/wmi_exporter/releases).

@@ -42,13 +56,24 @@ Name | Description
`LISTEN_PORT` | The port to bind to. Defaults to 9182.
`METRICS_PATH` | The path at which to serve metrics. Defaults to `/metrics`
`TEXTFILE_DIR` | As the `--collector.textfile.directory` flag, provide a directory to read text files with metrics from
`EXTRA_FLAGS` | Allows passing full CLI flags. Defaults to an empty string.

Parameters are sent to the installer via `msiexec`. Example invocation:
Parameters are sent to the installer via `msiexec`. Example invocations:

```powershell
msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,iis LISTEN_PORT=5000
```

Example service collector with a custom query:
```powershell
msiexec /i <path-to-msi-file> ENABLED_COLLECTORS=os,service --% EXTRA_FLAGS="--collector.service.services-where ""Name LIKE 'sql%'"""
```

On some older versions of Windows you may need to surround parameter values with double quotes to get the install command to parse properly:
```powershell
msiexec /i C:\Users\Administrator\Downloads\wmi_exporter.msi ENABLED_COLLECTORS="ad,iis,logon,memory,process,tcp,thermalzone" TEXTFILE_DIR="C:\custom_metrics\"
```
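For illustration, files read from `TEXTFILE_DIR` use the standard Prometheus text exposition format; a file such as `C:\custom_metrics\my_metrics.prom` (file name and metric are made up for this example) could contain:

```
# HELP example_backup_last_success_timestamp_seconds Unix time of the last successful backup (hypothetical metric).
# TYPE example_backup_last_success_timestamp_seconds gauge
example_backup_last_success_timestamp_seconds 1.5741312e+09
```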

## Roadmap

See [open issues](https://github.com/martinlindhe/wmi_exporter/issues)

@@ -56,7 +81,6 @@ See [open issues](https://github.com/martinlindhe/wmi_exporter/issues)

## Usage

    go get -u github.com/golang/dep
    go get -u github.com/prometheus/promu
    go get -u github.com/martinlindhe/wmi_exporter
    cd $env:GOPATH/src/github.com/martinlindhe/wmi_exporter

@@ -75,7 +99,10 @@ The prometheus metrics will be exposed on [localhost:9182](http://localhost:9182

    .\wmi_exporter.exe --collectors.enabled "process" --collector.process.processes-where "Name LIKE 'firefox%'"

When there are multiple processes with the same name, WMI represents those after the first instance as `process-name#index`. So to get them all, rather than just the first one, the query needs to be a wildcard search.
When there are multiple processes with the same name, WMI represents those after the first instance as `process-name#index`. So to get them all, rather than just the first one, the query needs to be a wildcard search using a `%` character.

Please note that in Windows batch scripts (and when using the `cmd` command prompt), the `%` character is reserved, so it has to be escaped with another `%`. For example, the wildcard syntax for searching for all firefox processes is `firefox%%`.
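For example, the firefox query shown above, issued from a `cmd` prompt or a batch script, needs the doubled `%`:

    .\wmi_exporter.exe --collectors.enabled "process" --collector.process.processes-where "Name LIKE 'firefox%%'"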

## License
appveyor.yml (129 changes)
@@ -1,51 +1,78 @@
|
||||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
matrix:
|
||||
- MSI_ARCH: amd64
|
||||
GOARCH: amd64
|
||||
- MSI_ARCH: 386
|
||||
GOARCH: 386
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter
|
||||
|
||||
install:
|
||||
- go version
|
||||
- set PATH=%GOPATH%\bin;c:\go\bin;%GOPATH%\bin\windows_%GOARCH%;%PATH%
|
||||
- go get -u github.com/prometheus/promu
|
||||
- choco install gitversion.portable -y
|
||||
|
||||
build_script:
|
||||
- ps: gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
|
||||
- go test -v ./...
|
||||
- promu build -v
|
||||
- ps: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
if($env:APPVEYOR_REPO_TAG -eq "True") {
|
||||
# The MSI version is not semver compliant, so just take the numerical parts
|
||||
$Version = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
|
||||
Write-Verbose "Setting msi version to $Version"
|
||||
.\installer\build.ps1 -PathToExecutable .\wmi_exporter.exe -Version $Version -Arch "$env:MSI_ARCH"
|
||||
Push-AppveyorArtifact installer\Output\wmi_exporter-$Version-$env:MSI_ARCH.msi -DeploymentName Installer
|
||||
}
|
||||
|
||||
after_build:
|
||||
- 7z a wmi_exporter-%MSI_ARCH%.zip wmi_exporter.exe
|
||||
|
||||
artifacts:
|
||||
- name: Executable
|
||||
path: 'wmi_exporter-*.zip'
|
||||
|
||||
deploy:
|
||||
- provider: GitHub
|
||||
description: WMI Exporter version $(appveyor_build_version)
|
||||
artifact: Executable,Installer
|
||||
auth_token:
|
||||
secure: 'CrXWeTf7qONUOEki5olFfGEUPMLDeHj61koDXV3OVEaLgtACmnVHsKUub9POflda'
|
||||
draft: false
|
||||
prerelease: false
|
||||
on:
|
||||
appveyor_repo_tag: true
|
||||
version: "{build}"
|
||||
|
||||
os: Visual Studio 2017
|
||||
build: off
|
||||
stack: go 1.13
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
GO111MODULE: on
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\martinlindhe\wmi_exporter
|
||||
|
||||
install:
|
||||
- mkdir %GOPATH%\bin
|
||||
- set PATH=%GOPATH%\bin;%PATH%
|
||||
- set PATH=%PATH%;C:\mingw-w64\x86_64-7.2.0-posix-seh-rt_v5-rev1\mingw64\bin
|
||||
- choco install gitversion.portable make -y
|
||||
- ps: |
|
||||
appveyor DownloadFile https://github.com/golangci/golangci-lint/releases/download/v1.21.0/golangci-lint-1.21.0-windows-amd64.zip
|
||||
Expand-Archive golangci-lint-1.21.0-windows-amd64.zip
|
||||
Move-Item golangci-lint-1.21.0-windows-amd64\golangci-lint-1.21.0-windows-amd64\golangci-lint.exe $env:GOPATH\bin\golangci-lint.exe
|
||||
- ps: |
|
||||
$env:GO111MODULE="off"
|
||||
go get -u github.com/prometheus/promu
|
||||
$env:GO111MODULE="on"
|
||||
|
||||
test_script:
|
||||
- make test
|
||||
|
||||
after_test:
|
||||
- make lint
|
||||
|
||||
build_script:
|
||||
- ps: |
|
||||
# go mod download (or, if we don't call it, go build) will write every dependent package name to
|
||||
# stderr, which will be interpreted as an error and abort the build if ErrorActionPreference is Stop,
|
||||
# so we need to run it before setting the preference.
|
||||
go mod download
|
||||
$ErrorActionPreference = "Stop"
|
||||
gitversion /output json /showvariable FullSemVer | Set-Content VERSION -PassThru
|
||||
$Version = Get-Content VERSION
|
||||
make crossbuild
|
||||
# GH requires all files to have different names, so add version/arch to differentiate
|
||||
foreach($Arch in "amd64","386") {
|
||||
Rename-Item output\$Arch\wmi_exporter.exe -NewName wmi_exporter-$Version-$Arch.exe
|
||||
}
|
||||
|
||||
after_build:
|
||||
- ps: |
|
||||
# Build installer packages only on tagged releases
|
||||
if($env:APPVEYOR_REPO_TAG -ne "True") {
|
||||
return
|
||||
}
|
||||
$ErrorActionPreference = "Stop"
|
||||
$BuildVersion = Get-Content VERSION
|
||||
# The MSI version is not semver compliant, so just take the numerical parts
|
||||
$MSIVersion = $env:APPVEYOR_REPO_TAG_NAME -replace '^v?([0-9\.]+).*$','$1'
|
||||
foreach($Arch in "amd64","386") {
|
||||
Write-Verbose "Building wmi_exporter $MSIVersion msi for $Arch"
|
||||
.\installer\build.ps1 -PathToExecutable .\output\$Arch\wmi_exporter-$BuildVersion-$Arch.exe -Version $MSIVersion -Arch "$Arch"
|
||||
Move-Item installer\Output\wmi_exporter-$MSIVersion-$Arch.msi output\$Arch\
|
||||
}
|
||||
- promu checksum output\
|
||||
|
||||
artifacts:
|
||||
- name: Artifacts
|
||||
path: output\**\*
|
||||
|
||||
deploy:
|
||||
- provider: GitHub
|
||||
description: WMI Exporter version $(appveyor_build_version)
|
||||
artifact: Artifacts
|
||||
auth_token:
|
||||
secure: 'CrXWeTf7qONUOEki5olFfGEUPMLDeHj61koDXV3OVEaLgtACmnVHsKUub9POflda'
|
||||
draft: false
|
||||
prerelease: false
|
||||
on:
|
||||
appveyor_repo_tag: true
|
||||
|
||||
collector/ad.go

@@ -1,15 +1,17 @@
// returns data points from Win32_PerfRawData_DirectoryServices_DirectoryServices
// Partial docs: https://msdn.microsoft.com/en-us/library/ms803980.aspx
// +build windows

package collector

import (
	"errors"

	"github.com/StackExchange/wmi"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
)

func init() {
	Factories["ad"] = NewADCollector
	registerCollector("ad", NewADCollector)
}

// A ADCollector is a Prometheus collector for WMI Win32_PerfRawData_DirectoryServices_DirectoryServices metrics

@@ -452,7 +454,7 @@ func NewADCollector() (Collector, error) {

// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *ADCollector) Collect(ch chan<- prometheus.Metric) error {
func (c *ADCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	if desc, err := c.collect(ch); err != nil {
		log.Error("failed collecting ad metrics:", desc, err)
		return err

@@ -460,6 +462,8 @@ func (c *ADCollector) Collect(ch chan<- prometheus.Metric) error {
	return nil
}

// Win32_PerfRawData_DirectoryServices_DirectoryServices docs:
// - https://msdn.microsoft.com/en-us/library/ms803980.aspx
type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
	Name string

@@ -483,8 +487,8 @@ type Win32_PerfRawData_DirectoryServices_DirectoryServices struct {
	DatabasemodifysPersec  uint32
	DatabaserecyclesPersec uint32
	DigestBindsPersec      uint32
	DRAHighestUSNCommittedHighpart uint32
	DRAHighestUSNCommittedLowpart  uint32
	DRAHighestUSNCommittedHighpart uint64
	DRAHighestUSNCommittedLowpart  uint64
	DRAHighestUSNIssuedHighpart    uint64
	DRAHighestUSNIssuedLowpart     uint64
	DRAInboundBytesCompressedBetweenSitesAfterCompressionPersec uint32

@@ -616,6 +620,9 @@ func (c *ADCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, er
	if err := wmi.Query(q, &dst); err != nil {
		return nil, err
	}
	if len(dst) == 0 {
		return nil, errors.New("WMI query returned empty result set")
	}

	ch <- prometheus.MustNewConstMetric(
		c.AddressBookOperationsTotal,
collector/adfs.go (new file, 188 lines)
@@ -0,0 +1,188 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("adfs", newADFSCollector)
|
||||
}
|
||||
|
||||
type adfsCollector struct {
|
||||
adLoginConnectionFailures *prometheus.Desc
|
||||
certificateAuthentications *prometheus.Desc
|
||||
deviceAuthentications *prometheus.Desc
|
||||
extranetAccountLockouts *prometheus.Desc
|
||||
federatedAuthentications *prometheus.Desc
|
||||
passportAuthentications *prometheus.Desc
|
||||
passiveRequests *prometheus.Desc
|
||||
passwordChangeFailed *prometheus.Desc
|
||||
passwordChangeSucceeded *prometheus.Desc
|
||||
tokenRequests *prometheus.Desc
|
||||
windowsIntegratedAuthentications *prometheus.Desc
|
||||
}
|
||||
|
||||
// newADFSCollector constructs a new adfsCollector
|
||||
func newADFSCollector() (Collector, error) {
|
||||
const subsystem = "adfs"
|
||||
|
||||
return &adfsCollector{
|
||||
adLoginConnectionFailures: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "ad_login_connection_failures"),
|
||||
"Total number of connection failures to an Active Directory domain controller",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
certificateAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "certificate_authentications"),
|
||||
"Total number of User Certificate authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
deviceAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "device_authentications"),
|
||||
"Total number of Device authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
extranetAccountLockouts: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "extranet_account_lockouts"),
|
||||
"Total number of Extranet Account Lockouts",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
federatedAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "federated_authentications"),
|
||||
"Total number of authentications from a federated source",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passportAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "passport_authentications"),
|
||||
"Total number of Microsoft Passport SSO authentications",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passiveRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "passive_requests"),
|
||||
"Total number of passive (browser-based) requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passwordChangeFailed: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "password_change_failed"),
|
||||
"Total number of failed password changes",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
passwordChangeSucceeded: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "password_change_succeeded"),
|
||||
"Total number of successful password changes",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
tokenRequests: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "token_requests"),
|
||||
"Total number of token requests",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
windowsIntegratedAuthentications: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "windows_integrated_authentications"),
|
||||
"Total number of Windows integrated authentications (Kerberos/NTLM)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type perflibADFS struct {
|
||||
AdLoginConnectionFailures float64 `perflib:"AD login Connection Failures"`
|
||||
CertificateAuthentications float64 `perflib:"Certificate Authentications"`
|
||||
DeviceAuthentications float64 `perflib:"Device Authentications"`
|
||||
ExtranetAccountLockouts float64 `perflib:"Extranet Account Lockouts"`
|
||||
FederatedAuthentications float64 `perflib:"Federated Authentications"`
|
||||
PassportAuthentications float64 `perflib:"Microsoft Passport Authentications"`
|
||||
PassiveRequests float64 `perflib:"Passive Requests"`
|
||||
PasswordChangeFailed float64 `perflib:"Password Change Failed Requests"`
|
||||
PasswordChangeSucceeded float64 `perflib:"Password Change Successful Requests"`
|
||||
TokenRequests float64 `perflib:"Token Requests"`
|
||||
WindowsIntegratedAuthentications float64 `perflib:"Windows Integrated Authentications"`
|
||||
}
|
||||
|
||||
func (c *adfsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
var adfsData []perflibADFS
|
||||
err := unmarshalObject(ctx.perfObjects["AD FS"], &adfsData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.adLoginConnectionFailures,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].AdLoginConnectionFailures,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.certificateAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].CertificateAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.deviceAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].DeviceAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.extranetAccountLockouts,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].ExtranetAccountLockouts,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.federatedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].FederatedAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passportAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PassportAuthentications,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passiveRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PassiveRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passwordChangeFailed,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PasswordChangeFailed,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.passwordChangeSucceeded,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].PasswordChangeSucceeded,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.tokenRequests,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].TokenRequests,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.windowsIntegratedAuthentications,
|
||||
prometheus.CounterValue,
|
||||
adfsData[0].WindowsIntegratedAuthentications,
|
||||
)
|
||||
return nil
|
||||
}
|
||||
collector/collector.go (new file, 106 lines)

@@ -0,0 +1,106 @@
package collector

import (
	"strconv"
	"strings"

	"github.com/leoluk/perflib_exporter/perflib"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/log"
	"golang.org/x/sys/windows/registry"
)

// ...
const (
	// TODO: Make package-local
	Namespace = "wmi"

	// Conversion factors
	ticksToSecondsScaleFactor = 1 / 1e7
	windowsEpoch              = 116444736000000000
)

// getWindowsVersion reads the version number of the OS from the Registry
// See https://docs.microsoft.com/en-us/windows/desktop/sysinfo/operating-system-version
func getWindowsVersion() float64 {
	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
	if err != nil {
		log.Warn("Couldn't open registry", err)
		return 0
	}
	defer func() {
		err = k.Close()
		if err != nil {
			log.Warnf("Failed to close registry key: %v", err)
		}
	}()

	currentv, _, err := k.GetStringValue("CurrentVersion")
	if err != nil {
		log.Warn("Couldn't open registry to determine current Windows version:", err)
		return 0
	}

	currentv_flt, err := strconv.ParseFloat(currentv, 64)

	log.Debugf("Detected Windows version %f\n", currentv_flt)

	return currentv_flt
}

type collectorBuilder func() (Collector, error)

var (
	builders                = make(map[string]collectorBuilder)
	perfCounterDependencies = make(map[string]string)
)

func registerCollector(name string, builder collectorBuilder, perfCounterNames ...string) {
	builders[name] = builder
	perfIndicies := make([]string, 0, len(perfCounterNames))
	for _, cn := range perfCounterNames {
		perfIndicies = append(perfIndicies, MapCounterToIndex(cn))
	}
	perfCounterDependencies[name] = strings.Join(perfIndicies, " ")
}

func Available() []string {
	cs := make([]string, 0, len(builders))
	for c := range builders {
		cs = append(cs, c)
	}
	return cs
}
func Build(collector string) (Collector, error) {
	return builders[collector]()
}
func getPerfQuery(collectors []string) string {
	parts := make([]string, 0, len(collectors))
	for _, c := range collectors {
		if p := perfCounterDependencies[c]; p != "" {
			parts = append(parts, p)
		}
	}
	return strings.Join(parts, " ")
}

// Collector is the interface a collector has to implement.
type Collector interface {
	// Get new metrics and expose them via prometheus registry.
	Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (err error)
}

type ScrapeContext struct {
	perfObjects map[string]*perflib.PerfObject
}

// PrepareScrapeContext creates a ScrapeContext to be used during a single scrape
func PrepareScrapeContext(collectors []string) (*ScrapeContext, error) {
	q := getPerfQuery(collectors) // TODO: Memoize
	objs, err := getPerflibSnapshot(q)
	if err != nil {
		return nil, err
	}

	return &ScrapeContext{objs}, nil
}
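The registration API above is what each collector file plugs into: `registerCollector` wires a builder together with the perflib objects it depends on, and `PrepareScrapeContext` snapshots those objects once per scrape. A minimal collector following this pattern, modeled on the adfs collector shown earlier, might look like the sketch below (the "Example Object" perf object, its counter, and the metric name are hypothetical):

```go
// +build windows

package collector

import "github.com/prometheus/client_golang/prometheus"

func init() {
	// The variadic arguments name the perflib objects this collector needs;
	// they feed into the query built by getPerfQuery.
	registerCollector("example", newExampleCollector, "Example Object")
}

type exampleCollector struct {
	requestsTotal *prometheus.Desc
}

func newExampleCollector() (Collector, error) {
	return &exampleCollector{
		requestsTotal: prometheus.NewDesc(
			prometheus.BuildFQName(Namespace, "example", "requests_total"),
			"Hypothetical counter used to illustrate the collector pattern",
			nil,
			nil,
		),
	}, nil
}

// perflib struct tags map fields to counter names within the perf object.
type perflibExample struct {
	RequestsPerSec float64 `perflib:"Requests/sec"`
}

func (c *exampleCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
	var data []perflibExample
	// ctx.perfObjects holds the snapshot taken by PrepareScrapeContext.
	if err := unmarshalObject(ctx.perfObjects["Example Object"], &data); err != nil {
		return err
	}
	if len(data) == 0 {
		return nil
	}
	ch <- prometheus.MustNewConstMetric(c.requestsTotal, prometheus.CounterValue, data[0].RequestsPerSec)
	return nil
}
```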
collector/container.go (new file, 282 lines)
@@ -0,0 +1,282 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/Microsoft/hcsshim"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("container", NewContainerMetricsCollector)
|
||||
}
|
||||
|
||||
// A ContainerMetricsCollector is a Prometheus collector for containers metrics
|
||||
type ContainerMetricsCollector struct {
|
||||
// Presence
|
||||
ContainerAvailable *prometheus.Desc
|
||||
|
||||
// Number of containers
|
||||
ContainersCount *prometheus.Desc
|
||||
// memory
|
||||
UsageCommitBytes *prometheus.Desc
|
||||
UsageCommitPeakBytes *prometheus.Desc
|
||||
UsagePrivateWorkingSetBytes *prometheus.Desc
|
||||
|
||||
// CPU
|
||||
RuntimeTotal *prometheus.Desc
|
||||
RuntimeUser *prometheus.Desc
|
||||
RuntimeKernel *prometheus.Desc
|
||||
|
||||
// Network
|
||||
BytesReceived *prometheus.Desc
|
||||
BytesSent *prometheus.Desc
|
||||
PacketsReceived *prometheus.Desc
|
||||
PacketsSent *prometheus.Desc
|
||||
DroppedPacketsIncoming *prometheus.Desc
|
||||
DroppedPacketsOutgoing *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewContainerMetricsCollector constructs a new ContainerMetricsCollector
|
||||
func NewContainerMetricsCollector() (Collector, error) {
|
||||
const subsystem = "container"
|
||||
return &ContainerMetricsCollector{
|
||||
ContainerAvailable: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "available"),
|
||||
"Available",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
ContainersCount: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "count"),
|
||||
"Number of containers",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
UsageCommitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_bytes"),
|
||||
"Memory Usage Commit Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsageCommitPeakBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_commit_peak_bytes"),
|
||||
"Memory Usage Commit Peak Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
UsagePrivateWorkingSetBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "memory_usage_private_working_set_bytes"),
|
||||
"Memory Usage Private Working Set Bytes",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_total"),
|
||||
"Total Run time in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeUser: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_usermode"),
|
||||
"Run Time in User mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
RuntimeKernel: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cpu_usage_seconds_kernelmode"),
|
||||
"Run time in Kernel mode in Seconds",
|
||||
[]string{"container_id"},
|
||||
nil,
|
||||
),
|
||||
BytesReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_bytes_total"),
|
||||
"Bytes Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
BytesSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_bytes_total"),
|
||||
"Bytes Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsReceived: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_total"),
|
||||
"Packets Received on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
PacketsSent: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_total"),
|
||||
"Packets Sent on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsIncoming: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_receive_packets_dropped_total"),
|
||||
"Dropped Incoming Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
DroppedPacketsOutgoing: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "network_transmit_packets_dropped_total"),
|
||||
"Dropped Outgoing Packets on Interface",
|
||||
[]string{"container_id", "interface"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ContainerMetricsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting ContainerMetricsCollector metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// containerClose closes the container resource
|
||||
func containerClose(c hcsshim.Container) {
|
||||
err := c.Close()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ContainerMetricsCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
|
||||
// Types Container is passed to get the containers compute systems only
|
||||
containers, err := hcsshim.GetContainers(hcsshim.ComputeSystemQuery{Types: []string{"Container"}})
|
||||
if err != nil {
|
||||
log.Error("Err in Getting containers:", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
count := len(containers)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainersCount,
|
||||
prometheus.GaugeValue,
|
||||
float64(count),
|
||||
)
|
||||
if count == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
for _, containerDetails := range containers {
|
||||
containerId := containerDetails.ID
|
||||
|
||||
container, err := hcsshim.OpenContainer(containerId)
|
||||
if container != nil {
|
||||
defer containerClose(container)
|
||||
}
|
||||
if err != nil {
|
||||
log.Error("err in opening container: ", containerId, err)
|
||||
continue
|
||||
}
|
||||
|
||||
cstats, err := container.Statistics()
|
||||
if err != nil {
|
||||
log.Error("err in fetching container Statistics: ", containerId, err)
|
||||
continue
|
||||
}
|
||||
// HCS V1 is for docker runtime. Add the docker:// prefix on container_id
|
||||
containerId = "docker://" + containerId
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContainerAvailable,
|
||||
prometheus.CounterValue,
|
||||
1,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsageCommitPeakBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsageCommitPeakBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.UsagePrivateWorkingSetBytes,
|
||||
prometheus.GaugeValue,
|
||||
float64(cstats.Memory.UsagePrivateWorkingSetBytes),
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.TotalRuntime100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeUser,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeUser100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RuntimeKernel,
|
||||
prometheus.CounterValue,
|
||||
float64(cstats.Processor.RuntimeKernel100ns)*ticksToSecondsScaleFactor,
|
||||
containerId,
|
||||
)
|
||||
|
||||
if len(cstats.Network) == 0 {
|
||||
log.Info("No Network Stats for container: ", containerId)
|
||||
continue
|
||||
}
|
||||
|
||||
networkStats := cstats.Network
|
||||
|
||||
for _, networkInterface := range networkStats {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesReceived),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.BytesSent),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceived,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsReceived),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsSent,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.PacketsSent),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsIncoming,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsIncoming),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DroppedPacketsOutgoing,
|
||||
prometheus.CounterValue,
|
||||
float64(networkInterface.DroppedPacketsOutgoing),
|
||||
containerId, networkInterface.EndpointId,
|
||||
)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
collector/cpu.go (390 changes)
@@ -1,30 +1,84 @@
|
||||
// returns data points from Win32_PerfRawData_PerfOS_Processor
|
||||
// https://msdn.microsoft.com/en-us/library/aa394317(v=vs.90).aspx - Win32_PerfRawData_PerfOS_Processor class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["cpu"] = NewCPUCollector
|
||||
var deps string
|
||||
// See below for 6.05 magic value
|
||||
if getWindowsVersion() > 6.05 {
|
||||
deps = "Processor Information"
|
||||
} else {
|
||||
deps = "Processor"
|
||||
}
|
||||
registerCollector("cpu", newCPUCollector, deps)
|
||||
}
|
||||
|
||||
// A CPUCollector is a Prometheus collector for WMI Win32_PerfRawData_PerfOS_Processor metrics
|
||||
type CPUCollector struct {
|
||||
type cpuCollectorBasic struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
}
|
||||
type cpuCollectorFull struct {
|
||||
CStateSecondsTotal *prometheus.Desc
|
||||
TimeTotal *prometheus.Desc
|
||||
InterruptsTotal *prometheus.Desc
|
||||
DPCsTotal *prometheus.Desc
|
||||
ClockInterruptsTotal *prometheus.Desc
|
||||
IdleBreakEventsTotal *prometheus.Desc
|
||||
ParkingStatus *prometheus.Desc
|
||||
ProcessorFrequencyMHz *prometheus.Desc
|
||||
ProcessorMaxFrequencyMHz *prometheus.Desc
|
||||
ProcessorPerformance *prometheus.Desc
|
||||
}
|
||||
|
||||
func NewCPUCollector() (Collector, error) {
|
||||
// newCPUCollector constructs a new cpuCollector, appropriate for the running OS
|
||||
func newCPUCollector() (Collector, error) {
|
||||
const subsystem = "cpu"
|
||||
return &CPUCollector{
|
||||
|
||||
version := getWindowsVersion()
|
||||
// For Windows 2008 (version 6.0) or earlier we only have the "Processor"
|
||||
// class. As of Windows 2008 R2 (version 6.1) the more detailed
|
||||
// "Processor Information" set is available (although some of the counters
|
||||
// are added in later versions, so we aren't guaranteed to get all of
|
||||
// them).
|
||||
// Value 6.05 was selected to split between Windows versions.
|
||||
if version < 6.05 {
|
||||
return &cpuCollectorBasic{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
[]string{"core", "state"},
|
||||
nil,
|
||||
),
|
||||
TimeTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "time_total"),
|
||||
"Time that processor spent in different modes (idle, user, system, ...)",
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
DPCsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "dpcs_total"),
|
||||
"Total number of received and serviced deferred procedure calls (DPCs)",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return &cpuCollectorFull{
|
||||
CStateSecondsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cstate_seconds_total"),
|
||||
"Time spent in low-power idle state",
|
||||
@@ -37,7 +91,6 @@ func NewCPUCollector() (Collector, error) {
|
||||
[]string{"core", "mode"},
|
||||
nil,
|
||||
),
|
||||
|
||||
InterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "interrupts_total"),
|
||||
"Total number of received and serviced hardware interrupts",
|
||||
@@ -50,162 +103,273 @@ func NewCPUCollector() (Collector, error) {
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ClockInterruptsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "clock_interrupts_total"),
|
||||
"Total number of received and serviced clock tick interrupts",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
IdleBreakEventsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "idle_break_events_total"),
|
||||
"Total number of time processor was woken from idle",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ParkingStatus: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "parking_status"),
|
||||
"Parking Status represents whether a processor is parked or not",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorFrequencyMHz: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "core_frequency_mhz"),
|
||||
"Core frequency in megahertz",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
ProcessorPerformance: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "processor_performance"),
|
||||
"Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%",
|
||||
[]string{"core"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *CPUCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting cpu metrics:", desc, err)
|
||||
type perflibProcessor struct {
|
||||
Name string
|
||||
C1Transitions float64 `perflib:"C1 Transitions/sec"`
|
||||
C2Transitions float64 `perflib:"C2 Transitions/sec"`
|
||||
C3Transitions float64 `perflib:"C3 Transitions/sec"`
|
||||
DPCRate float64 `perflib:"DPC Rate"`
|
||||
DPCsQueued float64 `perflib:"DPCs Queued/sec"`
|
||||
Interrupts float64 `perflib:"Interrupts/sec"`
|
||||
PercentC2Time float64 `perflib:"% C1 Time"`
|
||||
PercentC3Time float64 `perflib:"% C2 Time"`
|
||||
PercentC1Time float64 `perflib:"% C3 Time"`
|
||||
PercentDPCTime float64 `perflib:"% DPC Time"`
|
||||
PercentIdleTime float64 `perflib:"% Idle Time"`
|
||||
PercentInterruptTime float64 `perflib:"% Interrupt Time"`
|
||||
PercentPrivilegedTime float64 `perflib:"% Privileged Time"`
|
||||
PercentProcessorTime float64 `perflib:"% Processor Time"`
|
||||
PercentUserTime float64 `perflib:"% User Time"`
|
||||
}
|
||||
|
||||
func (c *cpuCollectorBasic) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessor, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_PerfOS_Processor struct {
|
||||
Name string
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
DPCRate uint32
|
||||
DPCsQueuedPersec uint32
|
||||
InterruptsPersec uint32
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentUserTime uint64
|
||||
}
|
||||
|
||||
/* NOTE: This is an alternative class, but it is not as widely available. Decide which to use
|
||||
type Win32_PerfRawData_Counters_ProcessorInformation struct {
|
||||
Name string
|
||||
AverageIdleTime uint64
|
||||
C1TransitionsPersec uint64
|
||||
C2TransitionsPersec uint64
|
||||
C3TransitionsPersec uint64
|
||||
ClockInterruptsPersec uint64
|
||||
DPCRate uint64
|
||||
DPCsQueuedPersec uint64
|
||||
IdleBreakEventsPersec uint64
|
||||
InterruptsPersec uint64
|
||||
ParkingStatus uint64
|
||||
PercentC1Time uint64
|
||||
PercentC2Time uint64
|
||||
PercentC3Time uint64
|
||||
PercentDPCTime uint64
|
||||
PercentIdleTime uint64
|
||||
PercentInterruptTime uint64
|
||||
PercentofMaximumFrequency uint64
|
||||
PercentPerformanceLimit uint64
|
||||
PercentPriorityTime uint64
|
||||
PercentPrivilegedTime uint64
|
||||
PercentPrivilegedUtility uint64
|
||||
PercentProcessorPerformance uint64
|
||||
PercentProcessorTime uint64
|
||||
PercentProcessorUtility uint64
|
||||
PercentUserTime uint64
|
||||
PerformanceLimitFlags uint64
|
||||
ProcessorFrequency uint64
|
||||
ProcessorStateFlags uint64
|
||||
}*/
|
||||
|
||||
func (c *CPUCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
	var dst []Win32_PerfRawData_PerfOS_Processor
	q := queryAll(&dst)
	if err := wmi.Query(q, &dst); err != nil {
		return nil, err
	}

	for _, data := range dst {
		if strings.Contains(data.Name, "_Total") {
			continue
		}

		core := data.Name

		// These are only available from Win32_PerfRawData_Counters_ProcessorInformation, which is only available from Win2008R2+
		/*ch <- prometheus.MustNewConstMetric(
			c.ProcessorFrequency,
			prometheus.GaugeValue,
			float64(data.ProcessorFrequency),
			socket, core,
		)
		ch <- prometheus.MustNewConstMetric(
			c.MaximumFrequency,
			prometheus.GaugeValue,
			float64(data.PercentofMaximumFrequency)/100*float64(data.ProcessorFrequency),
			socket, core,
		)*/

		ch <- prometheus.MustNewConstMetric(
			c.CStateSecondsTotal,
			prometheus.GaugeValue,
			float64(data.PercentC1Time)*ticksToSecondsScaleFactor,
			core, "c1",
		)
		ch <- prometheus.MustNewConstMetric(
			c.CStateSecondsTotal,
			prometheus.GaugeValue,
			float64(data.PercentC2Time)*ticksToSecondsScaleFactor,
			core, "c2",
		)
		ch <- prometheus.MustNewConstMetric(
			c.CStateSecondsTotal,
			prometheus.GaugeValue,
			float64(data.PercentC3Time)*ticksToSecondsScaleFactor,
			core, "c3",
		)

		ch <- prometheus.MustNewConstMetric(
			c.TimeTotal,
			prometheus.GaugeValue,
			float64(data.PercentIdleTime)*ticksToSecondsScaleFactor,
			core, "idle",
		)
		ch <- prometheus.MustNewConstMetric(
			c.TimeTotal,
			prometheus.GaugeValue,
			float64(data.PercentInterruptTime)*ticksToSecondsScaleFactor,
			core, "interrupt",
		)
		ch <- prometheus.MustNewConstMetric(
			c.TimeTotal,
			prometheus.GaugeValue,
			float64(data.PercentDPCTime)*ticksToSecondsScaleFactor,
			core, "dpc",
		)
		ch <- prometheus.MustNewConstMetric(
			c.TimeTotal,
			prometheus.GaugeValue,
			float64(data.PercentPrivilegedTime)*ticksToSecondsScaleFactor,
			core, "privileged",
		)
		ch <- prometheus.MustNewConstMetric(
			c.TimeTotal,
			prometheus.GaugeValue,
			float64(data.PercentUserTime)*ticksToSecondsScaleFactor,
			core, "user",
		)

		ch <- prometheus.MustNewConstMetric(
			c.InterruptsTotal,
			prometheus.CounterValue,
			float64(data.InterruptsPersec),
			core,
		)
		ch <- prometheus.MustNewConstMetric(
			c.DPCsTotal,
			prometheus.CounterValue,
			float64(data.DPCsQueuedPersec),
			core,
		)
	}

	return nil, nil
}
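The scaling above relies on ticksToSecondsScaleFactor, which is defined elsewhere in the collector package and is not part of this hunk. As a standalone, hedged illustration (the constant's value here is an assumption based on Windows raw "% ... Time" counters being cumulative 100-nanosecond ticks):

// Standalone illustration, not the exporter's code.
package main

import "fmt"

const ticksToSecondsScaleFactor = 1 / 1e7 // one tick is assumed to be 100ns

func main() {
	var ticks uint64 = 30_000_000 // three seconds worth of 100ns ticks
	fmt.Println(float64(ticks) * ticksToSecondsScaleFactor) // prints 3
}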
|
||||
|
||||
type perflibProcessorInformation struct {
|
||||
Name string
|
||||
C1TimeSeconds float64 `perflib:"% C1 Time"`
|
||||
C2TimeSeconds float64 `perflib:"% C2 Time"`
|
||||
C3TimeSeconds float64 `perflib:"% C3 Time"`
|
||||
C1TransitionsTotal float64 `perflib:"C1 Transitions/sec"`
|
||||
C2TransitionsTotal float64 `perflib:"C2 Transitions/sec"`
|
||||
C3TransitionsTotal float64 `perflib:"C3 Transitions/sec"`
|
||||
ClockInterruptsTotal float64 `perflib:"Clock Interrupts/sec"`
|
||||
DPCsQueuedTotal float64 `perflib:"DPCs Queued/sec"`
|
||||
DPCTimeSeconds float64 `perflib:"% DPC Time"`
|
||||
IdleBreakEventsTotal float64 `perflib:"Idle Break Events/sec"`
|
||||
IdleTimeSeconds float64 `perflib:"% Idle Time"`
|
||||
InterruptsTotal float64 `perflib:"Interrupts/sec"`
|
||||
InterruptTimeSeconds float64 `perflib:"% Interrupt Time"`
|
||||
ParkingStatus float64 `perflib:"Parking Status"`
|
||||
PerformanceLimitPercent float64 `perflib:"% Performance Limit"`
|
||||
PriorityTimeSeconds float64 `perflib:"% Priority Time"`
|
||||
PrivilegedTimeSeconds float64 `perflib:"% Privileged Time"`
|
||||
PrivilegedUtilitySeconds float64 `perflib:"% Privileged Utility"`
|
||||
ProcessorFrequencyMHz float64 `perflib:"Processor Frequency"`
|
||||
ProcessorPerformance float64 `perflib:"% Processor Performance"`
|
||||
ProcessorTimeSeconds float64 `perflib:"% Processor Time"`
|
||||
ProcessorUtilityRate float64 `perflib:"% Processor Utility"`
|
||||
UserTimeSeconds float64 `perflib:"% User Time"`
|
||||
}
|
||||
|
||||
func (c *cpuCollectorFull) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
data := make([]perflibProcessorInformation, 0)
|
||||
err := unmarshalObject(ctx.perfObjects["Processor Information"], &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cpu := range data {
|
||||
if strings.Contains(strings.ToLower(cpu.Name), "_total") {
|
||||
continue
|
||||
}
|
||||
core := cpu.Name
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C1TimeSeconds,
|
||||
core, "c1",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C2TimeSeconds,
|
||||
core, "c2",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CStateSecondsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.C3TimeSeconds,
|
||||
core, "c3",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleTimeSeconds,
|
||||
core, "idle",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptTimeSeconds,
|
||||
core, "interrupt",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCTimeSeconds,
|
||||
core, "dpc",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.PrivilegedTimeSeconds,
|
||||
core, "privileged",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TimeTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.UserTimeSeconds,
|
||||
core, "user",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.InterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.InterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DPCsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.DPCsQueuedTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ClockInterruptsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.ClockInterruptsTotal,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IdleBreakEventsTotal,
|
||||
prometheus.CounterValue,
|
||||
cpu.IdleBreakEventsTotal,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ParkingStatus,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ParkingStatus,
|
||||
core,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorFrequencyMHz,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorFrequencyMHz,
|
||||
core,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorPerformance,
|
||||
prometheus.GaugeValue,
|
||||
cpu.ProcessorPerformance,
|
||||
core,
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
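The perflibProcessorInformation struct above binds Windows counter names to fields through `perflib:"..."` struct tags; the unmarshalObject helper used in Collect (not shown in this hunk) presumably resolves them via reflection. A minimal, self-contained sketch of reading such a tag, purely illustrative and not the exporter's implementation:

package main

import (
	"fmt"
	"reflect"
)

// example mirrors the tagging style used by perflibProcessorInformation.
type example struct {
	IdleTimeSeconds float64 `perflib:"% Idle Time"`
}

func main() {
	field, _ := reflect.TypeOf(example{}).FieldByName("IdleTimeSeconds")
	fmt.Println(field.Tag.Get("perflib")) // prints: % Idle Time
}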
|
||||
|
||||
@@ -1,22 +1,24 @@
|
||||
// returns data points from Win32_ComputerSystem
|
||||
// https://msdn.microsoft.com/en-us/library/aa394102 - Win32_ComputerSystem class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["cs"] = NewCSCollector
|
||||
registerCollector("cs", NewCSCollector)
|
||||
}
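registerCollector replaces the old Factories map throughout these files, and for perflib-based collectors it also takes the names of the perflib objects the collector reads (see the logical_disk and memory files further down). Its definition is not part of this diff; the following is only a hedged sketch of what such a helper might look like, with all internal names assumed:

// Sketch only - not the repository's actual implementation.
// Collector is the exporter's collector interface, defined elsewhere.
var (
	builders        = make(map[string]func() (Collector, error))
	perflibRequests = make(map[string][]string) // perflib object names per collector
)

func registerCollector(name string, builder func() (Collector, error), perflibNames ...string) {
	builders[name] = builder
	perflibRequests[name] = perflibNames
}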
|
||||
|
||||
// A CSCollector is a Prometheus collector for WMI metrics
|
||||
type CSCollector struct {
|
||||
PhysicalMemoryBytes *prometheus.Desc
|
||||
LogicalProcessors *prometheus.Desc
|
||||
Hostname *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewCSCollector ...
|
||||
@@ -36,12 +38,21 @@ func NewCSCollector() (Collector, error) {
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
Hostname: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "hostname"),
|
||||
"Labeled system hostname information as provided by ComputerSystem.DNSHostName and ComputerSystem.Domain",
|
||||
[]string{
|
||||
"hostname",
|
||||
"domain",
|
||||
"fqdn"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *CSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting cs metrics:", desc, err)
|
||||
return err
|
||||
@@ -49,9 +60,14 @@ func (c *CSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_ComputerSystem docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394102
|
||||
type Win32_ComputerSystem struct {
|
||||
NumberOfLogicalProcessors uint32
|
||||
TotalPhysicalMemory uint64
|
||||
DNSHostname string
|
||||
Domain string
|
||||
Workgroup *string
|
||||
}
|
||||
|
||||
func (c *CSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
@@ -60,6 +76,9 @@ func (c *CSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, er
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogicalProcessors,
|
||||
@@ -73,5 +92,21 @@ func (c *CSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, er
|
||||
float64(dst[0].TotalPhysicalMemory),
|
||||
)
|
||||
|
||||
var fqdn string
|
||||
if dst[0].Workgroup == nil || dst[0].Domain != *dst[0].Workgroup {
|
||||
fqdn = dst[0].DNSHostname + "." + dst[0].Domain
|
||||
} else {
|
||||
fqdn = dst[0].DNSHostname
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Hostname,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
dst[0].DNSHostname,
|
||||
dst[0].Domain,
|
||||
fqdn,
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
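The branch above encodes a simple rule for the fqdn label. Restated as a standalone helper (hypothetical name, same logic as the code above):

// buildFQDN mirrors the logic above: a domain-joined host (no workgroup set,
// or Domain differing from the workgroup name) reports host.domain, while a
// workgroup-only host reports just its host name.
func buildFQDN(host, domain string, workgroup *string) string {
	if workgroup == nil || domain != *workgroup {
		return host + "." + domain
	}
	return host
}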
|
||||
|
||||
@@ -1,16 +1,17 @@
|
||||
// returns data points from Win32_PerfRawData_DNS_DNS
|
||||
// https://msdn.microsoft.com/en-us/library/ms803992.aspx?f=255&MSPPError=-2147217396
|
||||
// https://technet.microsoft.com/en-us/library/cc977686.aspx
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["dns"] = NewDNSCollector
|
||||
registerCollector("dns", NewDNSCollector)
|
||||
}
|
||||
|
||||
// A DNSCollector is a Prometheus collector for WMI Win32_PerfRawData_DNS_DNS metrics
|
||||
@@ -180,7 +181,7 @@ func NewDNSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *DNSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting dns metrics:", desc, err)
|
||||
return err
|
||||
@@ -188,6 +189,9 @@ func (c *DNSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_DNS_DNS docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/ms803992.aspx?f=255&MSPPError=-2147217396
|
||||
// - https://technet.microsoft.com/en-us/library/cc977686.aspx
|
||||
type Win32_PerfRawData_DNS_DNS struct {
|
||||
AXFRRequestReceived uint32
|
||||
AXFRRequestSent uint32
|
||||
@@ -237,6 +241,9 @@ func (c *DNSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, e
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ZoneTransferRequestsReceived,
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -9,7 +11,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["hyperv"] = NewHyperVCollector
|
||||
registerCollector("hyperv", NewHyperVCollector)
|
||||
}
|
||||
|
||||
// HyperVCollector is a Prometheus collector for hyper-v
|
||||
@@ -595,7 +597,7 @@ func NewHyperVCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *HyperVCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectVmHealth(ch); err != nil {
|
||||
log.Error("failed collecting hyperV health status metrics:", desc, err)
|
||||
return err
|
||||
|
||||
446 collector/iis.go
@@ -1,12 +1,9 @@
|
||||
// returns data points from the following classes:
|
||||
// - Win32_PerfRawData_W3SVC_WebService
|
||||
// - Win32_PerfRawData_APPPOOLCountersProvider_APPPOOLWAS
|
||||
// - Win32_PerfRawData_W3SVCW3WPCounterProvider_W3SVCW3WP
|
||||
// - Win32_PerfRawData_W3SVC_WebServiceCache
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
@@ -19,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["iis"] = NewIISCollector
|
||||
registerCollector("iis", NewIISCollector)
|
||||
}
|
||||
|
||||
type simple_version struct {
|
||||
@@ -33,7 +30,12 @@ func getIISVersion() simple_version {
|
||||
log.Warn("Couldn't open registry to determine IIS version:", err)
|
||||
return simple_version{}
|
||||
}
|
||||
defer func() {
	err = k.Close()
	if err != nil {
		log.Warnf("Failed to close registry key: %v", err)
	}
}()
|
||||
|
||||
major, _, err := k.GetIntegerValue("MajorVersion")
|
||||
if err != nil {
|
||||
@@ -805,8 +807,8 @@ func NewIISCollector() (Collector, error) {
|
||||
nil,
|
||||
),
|
||||
|
||||
appWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *appWhitelist)),
appBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *appBlacklist)),
|
||||
}
|
||||
|
||||
buildIIS.iis_version = getIISVersion()
|
||||
@@ -816,7 +818,7 @@ func NewIISCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *IISCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting iis metrics:", desc, err)
|
||||
return err
|
||||
@@ -983,7 +985,7 @@ type Win32_PerfRawData_W3SVC_WebServiceCache struct {
|
||||
URICacheMisses uint32
|
||||
}
|
||||
|
||||
var applicationStates = map[uint32]string{
|
||||
1: "Uninitialized",
|
||||
2: "Initialized",
|
||||
3: "Running",
|
||||
@@ -1264,7 +1266,7 @@ func (c *IISCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, e
|
||||
}
|
||||
|
||||
// Gauges
for key, label := range applicationStates {
|
||||
isCurrentState := 0.0
|
||||
if key == app.CurrentApplicationPoolState {
|
||||
isCurrentState = 1.0
|
||||
@@ -1738,216 +1740,218 @@ func (c *IISCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, e
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
if len(dst_cache) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_ActiveFlushedEntries,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst_cache[0].ActiveFlushedEntries),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_FileCacheMemoryUsage,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst_cache[0].CurrentFileCacheMemoryUsage),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_MaximumFileCacheMemoryUsage,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].MaximumFileCacheMemoryUsage),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_FileCacheFlushesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].TotalFlushedFiles),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_FileCacheQueriesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].FileCacheHits+dst_cache[0].FileCacheMisses),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_FileCacheHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].FileCacheHits),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_FilesCached,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst_cache[0].CurrentFilesCached),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_FilesCachedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].TotalFilesCached),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_FilesFlushedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].TotalFlushedFiles),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URICacheFlushesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].TotalFlushedURIs),
|
||||
"user",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URICacheFlushesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].KernelTotalFlushedURIs),
|
||||
"kernel",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URICacheQueriesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].URICacheHits+dst_cache[0].URICacheMisses),
|
||||
"user",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URICacheQueriesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].KernelURICacheHits+dst_cache[0].KernelURICacheMisses),
|
||||
"kernel",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URICacheHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].URICacheHits),
|
||||
"user",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URICacheHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].KernelURICacheHits),
|
||||
"kernel",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URIsCached,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst_cache[0].CurrentURIsCached),
|
||||
"user",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URIsCached,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst_cache[0].KernelCurrentURIsCached),
|
||||
"kernel",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URIsCachedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].TotalURIsCached),
|
||||
"user",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URIsCachedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].KernelTotalURIsCached),
|
||||
"kernel",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URIsFlushedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].TotalFlushedURIs),
|
||||
"user",
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_URIsFlushedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].KernelTotalFlushedURIs),
|
||||
"kernel",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_MetadataCached,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst_cache[0].CurrentMetadataCached),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_MetadataCacheFlushes,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].TotalFlushedMetadata),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_MetadataCacheQueriesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].MetadataCacheHits+dst_cache[0].MetadataCacheMisses),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_MetadataCacheHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].MetadataCacheHits),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_MetadataCachedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].TotalMetadataCached),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_MetadataFlushedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].TotalFlushedMetadata),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_OutputCacheActiveFlushedItems,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].OutputCacheCurrentFlushedItems),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_OutputCacheItems,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].OutputCacheCurrentItems),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_OutputCacheMemoryUsage,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].OutputCacheCurrentMemoryUsage),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_OutputCacheQueriesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].OutputCacheTotalHits+dst_cache[0].OutputCacheTotalMisses),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_OutputCacheHitsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].OutputCacheTotalHits),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_OutputCacheFlushedItemsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].OutputCacheTotalFlushedItems),
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ServiceCache_OutputCacheFlushesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst_cache[0].OutputCacheTotalFlushes),
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
// returns data points from Win32_PerfRawData_PerfDisk_LogicalDisk
|
||||
// https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
|
||||
// https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
@@ -8,14 +6,13 @@ import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["logical_disk"] = NewLogicalDiskCollector
|
||||
registerCollector("logical_disk", NewLogicalDiskCollector, "LogicalDisk")
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -29,19 +26,22 @@ var (
|
||||
).Default("").String()
|
||||
)
|
||||
|
||||
// A LogicalDiskCollector is a Prometheus collector for perflib logicalDisk metrics
|
||||
type LogicalDiskCollector struct {
|
||||
RequestsQueued *prometheus.Desc
|
||||
ReadBytesTotal *prometheus.Desc
|
||||
ReadsTotal *prometheus.Desc
|
||||
WriteBytesTotal *prometheus.Desc
|
||||
WritesTotal *prometheus.Desc
|
||||
ReadTime *prometheus.Desc
|
||||
WriteTime *prometheus.Desc
|
||||
TotalSpace *prometheus.Desc
|
||||
FreeSpace *prometheus.Desc
|
||||
IdleTime *prometheus.Desc
|
||||
SplitIOs *prometheus.Desc
|
||||
ReadLatency *prometheus.Desc
|
||||
WriteLatency *prometheus.Desc
|
||||
ReadWriteLatency *prometheus.Desc
|
||||
|
||||
volumeWhitelistPattern *regexp.Regexp
|
||||
volumeBlacklistPattern *regexp.Regexp
|
||||
@@ -129,6 +129,27 @@ func NewLogicalDiskCollector() (Collector, error) {
|
||||
nil,
|
||||
),
|
||||
|
||||
ReadLatency: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "read_latency_seconds_total"),
|
||||
"Shows the average time, in seconds, of a read operation from the disk (LogicalDisk.AvgDiskSecPerRead)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
WriteLatency: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "write_latency_seconds_total"),
|
||||
"Shows the average time, in seconds, of a write operation to the disk (LogicalDisk.AvgDiskSecPerWrite)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
ReadWriteLatency: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "read_write_latency_seconds_total"),
|
||||
"Shows the time, in seconds, of the average disk transfer (LogicalDisk.AvgDiskSecPerTransfer)",
|
||||
[]string{"volume"},
|
||||
nil,
|
||||
),
|
||||
|
||||
volumeWhitelistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeWhitelist)),
|
||||
volumeBlacklistPattern: regexp.MustCompile(fmt.Sprintf("^(?:%s)$", *volumeBlacklist)),
|
||||
}, nil
|
||||
@@ -136,33 +157,38 @@ func NewLogicalDiskCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *LogicalDiskCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting logical_disk metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_PerfDisk_LogicalDisk docs:
// - https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71) - Win32_PerfRawData_PerfDisk_LogicalDisk class
// - https://msdn.microsoft.com/en-us/library/ms803973.aspx - LogicalDisk object reference
type logicalDisk struct {
	Name                   string
	CurrentDiskQueueLength float64 `perflib:"Current Disk Queue Length"`
	DiskReadBytesPerSec    float64 `perflib:"Disk Read Bytes/sec"`
	DiskReadsPerSec        float64 `perflib:"Disk Reads/sec"`
	DiskWriteBytesPerSec   float64 `perflib:"Disk Write Bytes/sec"`
	DiskWritesPerSec       float64 `perflib:"Disk Writes/sec"`
	PercentDiskReadTime    float64 `perflib:"% Disk Read Time"`
	PercentDiskWriteTime   float64 `perflib:"% Disk Write Time"`
	PercentFreeSpace       float64 `perflib:"% Free Space_Base"`
	PercentFreeSpace_Base  float64 `perflib:"Free Megabytes"`
	PercentIdleTime        float64 `perflib:"% Idle Time"`
	SplitIOPerSec          float64 `perflib:"Split IO/Sec"`
	AvgDiskSecPerRead      float64 `perflib:"Avg. Disk sec/Read"`
	AvgDiskSecPerWrite     float64 `perflib:"Avg. Disk sec/Write"`
	AvgDiskSecPerTransfer  float64 `perflib:"Avg. Disk sec/Transfer"`
}
|
||||
|
||||
func (c *LogicalDiskCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
var dst []logicalDisk
if err := unmarshalObject(ctx.perfObjects["LogicalDisk"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -176,77 +202,98 @@ func (c *LogicalDiskCollector) collect(ch chan<- prometheus.Metric) (*prometheus
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RequestsQueued,
|
||||
prometheus.GaugeValue,
|
||||
volume.CurrentDiskQueueLength,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.DiskReadBytesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadsTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.DiskReadsPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteBytesTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.DiskWriteBytesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WritesTotal,
|
||||
prometheus.CounterValue,
|
||||
volume.DiskWritesPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadTime,
|
||||
prometheus.CounterValue,
|
||||
volume.PercentDiskReadTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteTime,
|
||||
prometheus.CounterValue,
|
||||
volume.PercentDiskWriteTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FreeSpace,
|
||||
prometheus.GaugeValue,
|
||||
volume.PercentFreeSpace_Base*1024*1024,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TotalSpace,
|
||||
prometheus.GaugeValue,
|
||||
volume.PercentFreeSpace*1024*1024,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.IdleTime,
|
||||
prometheus.CounterValue,
|
||||
volume.PercentIdleTime,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SplitIOs,
|
||||
prometheus.CounterValue,
|
||||
volume.SplitIOPerSec,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerRead*ticksToSecondsScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerWrite*ticksToSecondsScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ReadWriteLatency,
|
||||
prometheus.CounterValue,
|
||||
volume.AvgDiskSecPerTransfer*ticksToSecondsScaleFactor,
|
||||
volume.Name,
|
||||
)
|
||||
}
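The free/total space math above is easy to misread because the struct keeps the old WMI field names: the field tagged "Free Megabytes" holds free space in megabytes, while the field tagged "% Free Space_Base" is the ratio's base counter, which the code above treats as the volume's total size in megabytes. A hedged restatement with clearer names, illustration only:

// diskSpaceBytes converts the two perflib values used above into bytes.
// Assumes the base counter of "% Free Space" reports total megabytes, as the
// exporter's code above appears to rely on.
func diskSpaceBytes(freeMegabytes, totalMegabytes float64) (freeBytes, totalBytes float64) {
	const bytesPerMegabyte = 1024 * 1024
	return freeMegabytes * bytesPerMegabyte, totalMegabytes * bytesPerMegabyte
}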
|
||||
|
||||
199 collector/logon.go Normal file
@@ -0,0 +1,199 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("logon", NewLogonCollector)
|
||||
}
|
||||
|
||||
// A LogonCollector is a Prometheus collector for WMI metrics
|
||||
type LogonCollector struct {
|
||||
LogonType *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewLogonCollector ...
|
||||
func NewLogonCollector() (Collector, error) {
|
||||
const subsystem = "logon"
|
||||
|
||||
return &LogonCollector{
|
||||
LogonType: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "logon_type"),
|
||||
"Number of active logon sessions (LogonSession.LogonType)",
|
||||
[]string{"status"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *LogonCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting user metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_LogonSession docs:
|
||||
// - https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-logonsession
|
||||
type Win32_LogonSession struct {
|
||||
LogonType uint32
|
||||
}
|
||||
|
||||
func (c *LogonCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_LogonSession
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
// Init counters
|
||||
system := 0
|
||||
interactive := 0
|
||||
network := 0
|
||||
batch := 0
|
||||
service := 0
|
||||
proxy := 0
|
||||
unlock := 0
|
||||
networkcleartext := 0
|
||||
newcredentials := 0
|
||||
remoteinteractive := 0
|
||||
cachedinteractive := 0
|
||||
cachedremoteinteractive := 0
|
||||
cachedunlock := 0
|
||||
|
||||
for _, entry := range dst {
|
||||
switch entry.LogonType {
|
||||
case 0:
|
||||
system++
|
||||
case 2:
|
||||
interactive++
|
||||
case 3:
|
||||
network++
|
||||
case 4:
|
||||
batch++
|
||||
case 5:
|
||||
service++
|
||||
case 6:
|
||||
proxy++
|
||||
case 7:
|
||||
unlock++
|
||||
case 8:
|
||||
networkcleartext++
|
||||
case 9:
|
||||
newcredentials++
|
||||
case 10:
|
||||
remoteinteractive++
|
||||
case 11:
|
||||
cachedinteractive++
|
||||
case 12:
|
||||
cachedremoteinteractive++
|
||||
case 13:
|
||||
cachedunlock++
|
||||
}
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(system),
|
||||
"system",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(interactive),
|
||||
"interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(network),
|
||||
"network",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(batch),
|
||||
"batch",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(service),
|
||||
"service",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(proxy),
|
||||
"proxy",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(unlock),
|
||||
"unlock",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(networkcleartext),
|
||||
"network_clear_text",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(newcredentials),
|
||||
"new_credentials",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(remoteinteractive),
|
||||
"remote_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedinteractive),
|
||||
"cached_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedremoteinteractive),
|
||||
"cached_remote_interactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.LogonType,
|
||||
prometheus.GaugeValue,
|
||||
float64(cachedunlock),
|
||||
"cached_unlock",
|
||||
)
|
||||
return nil, nil
|
||||
}
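The switch above spells out one counter variable per LogonType value. A more table-driven variant of the same counting, sketch only, with the label strings copied from the metrics emitted above:

// logonTypeNames maps Win32_LogonSession.LogonType values to the label
// strings used by the collector above.
var logonTypeNames = map[uint32]string{
	0: "system", 2: "interactive", 3: "network", 4: "batch",
	5: "service", 6: "proxy", 7: "unlock", 8: "network_clear_text",
	9: "new_credentials", 10: "remote_interactive", 11: "cached_interactive",
	12: "cached_remote_interactive", 13: "cached_unlock",
}

// countLogonTypes tallies sessions per label, reporting zero for types that
// have no active sessions so every label is always present.
func countLogonTypes(sessions []Win32_LogonSession) map[string]float64 {
	counts := make(map[string]float64, len(logonTypeNames))
	for _, name := range logonTypeNames {
		counts[name] = 0
	}
	for _, s := range sessions {
		if name, ok := logonTypeNames[s.LogonType]; ok {
			counts[name]++
		}
	}
	return counts
}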
|
||||
502 collector/memory.go Normal file
@@ -0,0 +1,502 @@
|
||||
// returns data points from Win32_PerfRawData_PerfOS_Memory
|
||||
// <add link to documentation here> - Win32_PerfRawData_PerfOS_Memory class
|
||||
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("memory", NewMemoryCollector, "Memory")
|
||||
}
|
||||
|
||||
// A MemoryCollector is a Prometheus collector for perflib Memory metrics
|
||||
type MemoryCollector struct {
|
||||
AvailableBytes *prometheus.Desc
|
||||
CacheBytes *prometheus.Desc
|
||||
CacheBytesPeak *prometheus.Desc
|
||||
CacheFaultsTotal *prometheus.Desc
|
||||
CommitLimit *prometheus.Desc
|
||||
CommittedBytes *prometheus.Desc
|
||||
DemandZeroFaultsTotal *prometheus.Desc
|
||||
FreeAndZeroPageListBytes *prometheus.Desc
|
||||
FreeSystemPageTableEntries *prometheus.Desc
|
||||
ModifiedPageListBytes *prometheus.Desc
|
||||
PageFaultsTotal *prometheus.Desc
|
||||
SwapPageReadsTotal *prometheus.Desc
|
||||
SwapPagesReadTotal *prometheus.Desc
|
||||
SwapPagesWrittenTotal *prometheus.Desc
|
||||
SwapPageOperationsTotal *prometheus.Desc
|
||||
SwapPageWritesTotal *prometheus.Desc
|
||||
PoolNonpagedAllocsTotal *prometheus.Desc
|
||||
PoolNonpagedBytes *prometheus.Desc
|
||||
PoolPagedAllocsTotal *prometheus.Desc
|
||||
PoolPagedBytes *prometheus.Desc
|
||||
PoolPagedResidentBytes *prometheus.Desc
|
||||
StandbyCacheCoreBytes *prometheus.Desc
|
||||
StandbyCacheNormalPriorityBytes *prometheus.Desc
|
||||
StandbyCacheReserveBytes *prometheus.Desc
|
||||
SystemCacheResidentBytes *prometheus.Desc
|
||||
SystemCodeResidentBytes *prometheus.Desc
|
||||
SystemCodeTotalBytes *prometheus.Desc
|
||||
SystemDriverResidentBytes *prometheus.Desc
|
||||
SystemDriverTotalBytes *prometheus.Desc
|
||||
TransitionFaultsTotal *prometheus.Desc
|
||||
TransitionPagesRepurposedTotal *prometheus.Desc
|
||||
WriteCopiesTotal *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewMemoryCollector ...
|
||||
func NewMemoryCollector() (Collector, error) {
|
||||
const subsystem = "memory"
|
||||
|
||||
return &MemoryCollector{
|
||||
AvailableBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "available_bytes"),
|
||||
"The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to"+
|
||||
" the standby (cached), free and zero page lists (AvailableBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CacheBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cache_bytes"),
|
||||
"(CacheBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CacheBytesPeak: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cache_bytes_peak"),
|
||||
"(CacheBytesPeak)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CacheFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "cache_faults_total"),
|
||||
"(CacheFaultsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CommitLimit: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "commit_limit"),
|
||||
"(CommitLimit)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
CommittedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "committed_bytes"),
|
||||
"(CommittedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
DemandZeroFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "demand_zero_faults_total"),
|
||||
"The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security"+
|
||||
" feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space (DemandZeroFaults)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FreeAndZeroPageListBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "free_and_zero_page_list_bytes"),
|
||||
"(FreeAndZeroPageListBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
FreeSystemPageTableEntries: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "free_system_page_table_entries"),
|
||||
"(FreeSystemPageTableEntries)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
ModifiedPageListBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "modified_page_list_bytes"),
|
||||
"(ModifiedPageListBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PageFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "page_faults_total"),
|
||||
"(PageFaultsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPageReadsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_page_reads_total"),
|
||||
"Number of disk page reads (a single read operation reading several pages is still only counted once) (PageReadsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPagesReadTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_pages_read_total"),
|
||||
"Number of pages read across all page reads (ie counting all pages read even if they are read in a single operation) (PagesInputPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPagesWrittenTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_pages_written_total"),
|
||||
"Number of pages written across all page writes (ie counting all pages written even if they are written in a single operation) (PagesOutputPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPageOperationsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_page_operations_total"),
|
||||
"Total number of swap page read and writes (PagesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SwapPageWritesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "swap_page_writes_total"),
|
||||
"Number of disk page writes (a single write operation writing several pages is still only counted once) (PageWritesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolNonpagedAllocsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_nonpaged_allocs_total"),
|
||||
"The number of calls to allocate space in the nonpaged pool. The nonpaged pool is an area of system memory area for objects that cannot be written"+
|
||||
" to disk, and must remain in physical memory as long as they are allocated (PoolNonpagedAllocs)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolNonpagedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_nonpaged_bytes_total"),
|
||||
"(PoolNonpagedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolPagedAllocsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_allocs_total"),
|
||||
"(PoolPagedAllocs)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolPagedBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_bytes"),
|
||||
"(PoolPagedBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
PoolPagedResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "pool_paged_resident_bytes"),
|
||||
"(PoolPagedResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
StandbyCacheCoreBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_core_bytes"),
|
||||
"(StandbyCacheCoreBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
StandbyCacheNormalPriorityBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_normal_priority_bytes"),
|
||||
"(StandbyCacheNormalPriorityBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
StandbyCacheReserveBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "standby_cache_reserve_bytes"),
|
||||
"(StandbyCacheReserveBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemCacheResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_cache_resident_bytes"),
|
||||
"(SystemCacheResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemCodeResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_code_resident_bytes"),
|
||||
"(SystemCodeResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemCodeTotalBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_code_total_bytes"),
|
||||
"(SystemCodeTotalBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemDriverResidentBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_driver_resident_bytes"),
|
||||
"(SystemDriverResidentBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
SystemDriverTotalBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "system_driver_total_bytes"),
|
||||
"(SystemDriverTotalBytes)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
TransitionFaultsTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transition_faults_total"),
|
||||
"(TransitionFaultsPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
TransitionPagesRepurposedTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "transition_pages_repurposed_total"),
|
||||
"(TransitionPagesRePurposedPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
WriteCopiesTotal: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "write_copies_total"),
|
||||
"The number of page faults caused by attempting to write that were satisfied by copying the page from elsewhere in physical memory (WriteCopiesPersec)",
|
||||
nil,
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *MemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting memory metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type memory struct {
|
||||
AvailableBytes float64 `perflib:"Available Bytes"`
|
||||
AvailableKBytes float64 `perflib:"Available KBytes"`
|
||||
AvailableMBytes float64 `perflib:"Available MBytes"`
|
||||
CacheBytes float64 `perflib:"Cache Bytes"`
|
||||
CacheBytesPeak float64 `perflib:"Cache Bytes Peak"`
|
||||
CacheFaultsPersec float64 `perflib:"Cache Faults/sec"`
|
||||
CommitLimit float64 `perflib:"Commit Limit"`
|
||||
CommittedBytes float64 `perflib:"Committed Bytes"`
|
||||
DemandZeroFaultsPersec float64 `perflib:"Demand Zero Faults/sec"`
|
||||
FreeAndZeroPageListBytes float64 `perflib:"Free & Zero Page List Bytes"`
|
||||
FreeSystemPageTableEntries float64 `perflib:"Free System Page Table Entries"`
|
||||
ModifiedPageListBytes float64 `perflib:"Modified Page List Bytes"`
|
||||
PageFaultsPersec float64 `perflib:"Page Faults/sec"`
|
||||
PageReadsPersec float64 `perflib:"Page Reads/sec"`
|
||||
PagesInputPersec float64 `perflib:"Pages Input/sec"`
|
||||
PagesOutputPersec float64 `perflib:"Pages Output/sec"`
|
||||
PagesPersec float64 `perflib:"Pages/sec"`
|
||||
PageWritesPersec float64 `perflib:"Page Writes/sec"`
|
||||
PoolNonpagedAllocs float64 `perflib:"Pool Nonpaged Allocs"`
|
||||
PoolNonpagedBytes float64 `perflib:"Pool Nonpaged Bytes"`
|
||||
PoolPagedAllocs float64 `perflib:"Pool Paged Allocs"`
|
||||
PoolPagedBytes float64 `perflib:"Pool Paged Bytes"`
|
||||
PoolPagedResidentBytes float64 `perflib:"Pool Paged Resident Bytes"`
|
||||
StandbyCacheCoreBytes float64 `perflib:"Standby Cache Core Bytes"`
|
||||
StandbyCacheNormalPriorityBytes float64 `perflib:"Standby Cache Normal Priority Bytes"`
|
||||
StandbyCacheReserveBytes float64 `perflib:"Standby Cache Reserve Bytes"`
|
||||
SystemCacheResidentBytes float64 `perflib:"System Cache Resident Bytes"`
|
||||
SystemCodeResidentBytes float64 `perflib:"System Code Resident Bytes"`
|
||||
SystemCodeTotalBytes float64 `perflib:"System Code Total Bytes"`
|
||||
SystemDriverResidentBytes float64 `perflib:"System Driver Resident Bytes"`
|
||||
SystemDriverTotalBytes float64 `perflib:"System Driver Total Bytes"`
|
||||
TransitionFaultsPersec float64 `perflib:"Transition Faults/sec"`
|
||||
TransitionPagesRePurposedPersec float64 `perflib:"Transition Pages RePurposed/sec"`
|
||||
WriteCopiesPersec float64 `perflib:"Write Copies/sec"`
|
||||
}
|
||||
|
||||
func (c *MemoryCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []memory
|
||||
if err := unmarshalObject(ctx.perfObjects["Memory"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.AvailableBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].AvailableBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CacheBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheBytesPeak,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CacheBytesPeak,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CacheFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CacheFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CommitLimit,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CommitLimit,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CommittedBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].CommittedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.DemandZeroFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].DemandZeroFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FreeAndZeroPageListBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].FreeAndZeroPageListBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.FreeSystemPageTableEntries,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].FreeSystemPageTableEntries,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ModifiedPageListBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].ModifiedPageListBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PageFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PageFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageReadsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PageReadsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPagesReadTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PagesInputPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPagesWrittenTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PagesOutputPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageOperationsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PagesPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SwapPageWritesTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PageWritesPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolNonpagedAllocsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolNonpagedAllocs,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolNonpagedBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolNonpagedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedAllocsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolPagedAllocs,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolPagedBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PoolPagedResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].PoolPagedResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheCoreBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].StandbyCacheCoreBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheNormalPriorityBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].StandbyCacheNormalPriorityBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StandbyCacheReserveBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].StandbyCacheReserveBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCacheResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemCacheResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCodeResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemCodeResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCodeTotalBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemCodeTotalBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemDriverResidentBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemDriverResidentBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemDriverTotalBytes,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].SystemDriverTotalBytes,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransitionFaultsTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].TransitionFaultsPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.TransitionPagesRepurposedTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].TransitionPagesRePurposedPersec,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.WriteCopiesTotal,
|
||||
prometheus.GaugeValue,
|
||||
dst[0].WriteCopiesPersec,
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_MSMQ_MSMQQueue
|
||||
// <add link to documentation here> - Win32_PerfRawData_MSMQ_MSMQQueue class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["msmq"] = NewMSMQCollector
|
||||
registerCollector("msmq", NewMSMQCollector)
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -33,7 +33,7 @@ type Win32_PerfRawData_MSMQ_MSMQQueueCollector struct {
|
||||
func NewMSMQCollector() (Collector, error) {
|
||||
const subsystem = "msmq"
|
||||
|
||||
if *msmqWhereClause != "" {
|
||||
if *msmqWhereClause == "" {
|
||||
log.Warn("No where-clause specified for msmq collector. This will generate a very large number of metrics!")
|
||||
}
|
||||
|
||||
@@ -68,7 +68,7 @@ func NewMSMQCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *Win32_PerfRawData_MSMQ_MSMQQueueCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting msmq metrics:", desc, err)
|
||||
return err
|
||||
|
||||
collector/mssql.go (new file, 3887 lines): diff suppressed because it is too large
@@ -1,8 +1,4 @@
|
||||
// returns data points from Win32_PerfRawData_Tcpip_NetworkInterface
|
||||
|
||||
// https://technet.microsoft.com/en-us/security/aa394340(v=vs.80) (Win32_PerfRawData_Tcpip_NetworkInterface class)
|
||||
// https://msdn.microsoft.com/en-us/library/aa394216 (Win32_NetworkAdapter class)
|
||||
// https://msdn.microsoft.com/en-us/library/aa394353 (Win32_PnPEntity class)
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
@@ -10,14 +6,13 @@ import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
"gopkg.in/alecthomas/kingpin.v2"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["net"] = NewNetworkCollector
|
||||
registerCollector("net", NewNetworkCollector, "Network Interface")
|
||||
}
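The init hunks throughout this change replace direct writes to the Factories map with registerCollector calls, whose optional trailing arguments name the perflib objects a collector needs (here "Network Interface") so the exporter can limit its snapshot to them. The helper itself is not part of this hunk; a minimal sketch inferred from the call sites, with names and signature as assumptions only:

// Hypothetical sketch of the registration helper implied by the call sites in
// this change; the real definition lives elsewhere in the tree and may differ.
var (
	builders        = make(map[string]func() (Collector, error))
	perfCounterDeps = make(map[string][]string)
)

// registerCollector records a collector constructor and, optionally, the
// perflib object names it depends on, so a single perflib snapshot per scrape
// can cover every enabled collector.
func registerCollector(name string, builder func() (Collector, error), perfCounterNames ...string) {
	builders[name] = builder
	perfCounterDeps[name] = perfCounterNames
}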
|
||||
|
||||
var (
|
||||
@@ -32,7 +27,7 @@ var (
|
||||
nicNameToUnderscore = regexp.MustCompile("[^a-zA-Z0-9]")
|
||||
)
|
||||
|
||||
// A NetworkCollector is a Prometheus collector for WMI Win32_PerfRawData_Tcpip_NetworkInterface metrics
|
||||
// A NetworkCollector is a Prometheus collector for Perflib Network Interface metrics
|
||||
type NetworkCollector struct {
|
||||
BytesReceivedTotal *prometheus.Desc
|
||||
BytesSentTotal *prometheus.Desc
|
||||
@@ -136,8 +131,8 @@ func NewNetworkCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NetworkCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
func (c *NetworkCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting net metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
@@ -145,32 +140,33 @@ func (c *NetworkCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
}
|
||||
|
||||
// mangleNetworkName mangles Network Adapter name (non-alphanumeric to _)
|
||||
// that is used in Win32_PerfRawData_Tcpip_NetworkInterface.
|
||||
// that is used in networkInterface.
|
||||
func mangleNetworkName(name string) string {
|
||||
return nicNameToUnderscore.ReplaceAllString(name, "_")
|
||||
}
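For reference, the mangling is purely mechanical: every character outside [a-zA-Z0-9] becomes an underscore, so an adapter called, say, "Intel[R] 82574L Gigabit Network Connection" is exported under the nic label Intel_R__82574L_Gigabit_Network_Connection. A standalone illustration with a hypothetical adapter name:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as nicNameToUnderscore above.
var nicNameToUnderscore = regexp.MustCompile("[^a-zA-Z0-9]")

func main() {
	// Prints: Intel_R__82574L_Gigabit_Network_Connection
	fmt.Println(nicNameToUnderscore.ReplaceAllString("Intel[R] 82574L Gigabit Network Connection", "_"))
}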
|
||||
|
||||
type Win32_PerfRawData_Tcpip_NetworkInterface struct {
|
||||
BytesReceivedPerSec uint64
|
||||
BytesSentPerSec uint64
|
||||
BytesTotalPerSec uint64
|
||||
// Win32_PerfRawData_Tcpip_NetworkInterface docs:
|
||||
// - https://technet.microsoft.com/en-us/security/aa394340(v=vs.80)
|
||||
type networkInterface struct {
|
||||
BytesReceivedPerSec float64 `perflib:"Bytes Received/sec"`
|
||||
BytesSentPerSec float64 `perflib:"Bytes Sent/sec"`
|
||||
BytesTotalPerSec float64 `perflib:"Bytes Total/sec"`
|
||||
Name string
|
||||
PacketsOutboundDiscarded uint64
|
||||
PacketsOutboundErrors uint64
|
||||
PacketsPerSec uint64
|
||||
PacketsReceivedDiscarded uint64
|
||||
PacketsReceivedErrors uint64
|
||||
PacketsReceivedPerSec uint64
|
||||
PacketsReceivedUnknown uint64
|
||||
PacketsSentPerSec uint64
|
||||
CurrentBandwidth uint64
|
||||
PacketsOutboundDiscarded float64 `perflib:"Packets Outbound Discarded"`
|
||||
PacketsOutboundErrors float64 `perflib:"Packets Outbound Errors"`
|
||||
PacketsPerSec float64 `perflib:"Packets/sec"`
|
||||
PacketsReceivedDiscarded float64 `perflib:"Packets Received Discarded"`
|
||||
PacketsReceivedErrors float64 `perflib:"Packets Received Errors"`
|
||||
PacketsReceivedPerSec float64 `perflib:"Packets Received/sec"`
|
||||
PacketsReceivedUnknown float64 `perflib:"Packets Received Unknown"`
|
||||
PacketsSentPerSec float64 `perflib:"Packets Sent/sec"`
|
||||
CurrentBandwidth float64 `perflib:"Current Bandwidth"`
|
||||
}
|
||||
|
||||
func (c *NetworkCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_Tcpip_NetworkInterface
|
||||
func (c *NetworkCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []networkInterface
|
||||
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
if err := unmarshalObject(ctx.perfObjects["Network Interface"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -189,76 +185,75 @@ func (c *NetworkCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Des
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesReceivedPerSec),
|
||||
nic.BytesReceivedPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesSentTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesSentPerSec),
|
||||
nic.BytesSentPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.BytesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.BytesTotalPerSec),
|
||||
nic.BytesTotalPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsOutboundDiscarded,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsOutboundDiscarded),
|
||||
nic.PacketsOutboundDiscarded,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsOutboundErrors,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsOutboundErrors),
|
||||
nic.PacketsOutboundErrors,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsPerSec),
|
||||
nic.PacketsPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedDiscarded,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedDiscarded),
|
||||
nic.PacketsReceivedDiscarded,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedErrors,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedErrors),
|
||||
nic.PacketsReceivedErrors,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedPerSec),
|
||||
nic.PacketsReceivedPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsReceivedUnknown,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsReceivedUnknown),
|
||||
nic.PacketsReceivedUnknown,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PacketsSentTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.PacketsSentPerSec),
|
||||
nic.PacketsSentPerSec,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CurrentBandwidth,
|
||||
prometheus.CounterValue,
|
||||
float64(nic.CurrentBandwidth),
|
||||
prometheus.GaugeValue,
|
||||
nic.CurrentBandwidth,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import "testing"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRExceptions
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRExceptions class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrexceptions"] = NewNETFramework_NETCLRExceptionsCollector
|
||||
registerCollector("netframework_clrexceptions", NewNETFramework_NETCLRExceptionsCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRExceptionsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRExceptions metrics
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRExceptionsCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRExceptionsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrexceptions metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRInterop
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRInterop class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrinterop"] = NewNETFramework_NETCLRInteropCollector
|
||||
registerCollector("netframework_clrinterop", NewNETFramework_NETCLRInteropCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRInteropCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRInterop metrics
|
||||
@@ -46,7 +46,7 @@ func NewNETFramework_NETCLRInteropCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRInteropCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRInteropCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrinterop metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRJit
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRJit class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrjit"] = NewNETFramework_NETCLRJitCollector
|
||||
registerCollector("netframework_clrjit", NewNETFramework_NETCLRJitCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRJitCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRJit metrics
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRJitCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRJitCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRJitCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrjit metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRLoading
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRLoading class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrloading"] = NewNETFramework_NETCLRLoadingCollector
|
||||
registerCollector("netframework_clrloading", NewNETFramework_NETCLRLoadingCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRLoadingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLoading metrics
|
||||
@@ -88,7 +88,7 @@ func NewNETFramework_NETCLRLoadingCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRLoadingCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRLoadingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrloading metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrlocksandthreads"] = NewNETFramework_NETCLRLocksAndThreadsCollector
|
||||
registerCollector("netframework_clrlocksandthreads", NewNETFramework_NETCLRLocksAndThreadsCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRLocksAndThreadsCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads metrics
|
||||
@@ -74,7 +74,7 @@ func NewNETFramework_NETCLRLocksAndThreadsCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRLocksAndThreadsCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrlocksandthreads metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRMemory
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRMemory class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrmemory"] = NewNETFramework_NETCLRMemoryCollector
|
||||
registerCollector("netframework_clrmemory", NewNETFramework_NETCLRMemoryCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRMemoryCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRMemory metrics
|
||||
@@ -112,7 +112,7 @@ func NewNETFramework_NETCLRMemoryCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRMemoryCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRMemoryCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrmemory metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRRemoting
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRRemoting class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrremoting"] = NewNETFramework_NETCLRRemotingCollector
|
||||
registerCollector("netframework_clrremoting", NewNETFramework_NETCLRRemotingCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRRemotingCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRRemoting metrics
|
||||
@@ -67,7 +67,7 @@ func NewNETFramework_NETCLRRemotingCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRRemotingCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRRemotingCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrremoting metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_NETFramework_NETCLRSecurity
|
||||
// <add link to documentation here> - Win32_PerfRawData_NETFramework_NETCLRSecurity class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["netframework_clrsecurity"] = NewNETFramework_NETCLRSecurityCollector
|
||||
registerCollector("netframework_clrsecurity", NewNETFramework_NETCLRSecurityCollector)
|
||||
}
|
||||
|
||||
// A NETFramework_NETCLRSecurityCollector is a Prometheus collector for WMI Win32_PerfRawData_NETFramework_NETCLRSecurity metrics
|
||||
@@ -53,7 +53,7 @@ func NewNETFramework_NETCLRSecurityCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *NETFramework_NETCLRSecurityCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *NETFramework_NETCLRSecurityCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting win32_perfrawdata_netframework_netclrsecurity metrics:", desc, err)
|
||||
return err
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
// returns data points from Win32_OperatingSystem
|
||||
// https://msdn.microsoft.com/en-us/library/aa394239 - Win32_OperatingSystem class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
@@ -12,11 +12,12 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["os"] = NewOSCollector
|
||||
registerCollector("os", NewOSCollector)
|
||||
}
|
||||
|
||||
// A OSCollector is a Prometheus collector for WMI metrics
|
||||
type OSCollector struct {
|
||||
OSInformation *prometheus.Desc
|
||||
PhysicalMemoryFreeBytes *prometheus.Desc
|
||||
PagingFreeBytes *prometheus.Desc
|
||||
VirtualMemoryFreeBytes *prometheus.Desc
|
||||
@@ -36,6 +37,12 @@ func NewOSCollector() (Collector, error) {
|
||||
const subsystem = "os"
|
||||
|
||||
return &OSCollector{
|
||||
OSInformation: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "info"),
|
||||
"OperatingSystem.Caption, OperatingSystem.Version",
|
||||
[]string{"product", "version"},
|
||||
nil,
|
||||
),
|
||||
PagingLimitBytes: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "paging_limit_bytes"),
|
||||
"OperatingSystem.SizeStoredInPagingFiles",
|
||||
@@ -113,7 +120,7 @@ func NewOSCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *OSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *OSCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting os metrics:", desc, err)
|
||||
return err
|
||||
@@ -121,10 +128,14 @@ func (c *OSCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_OperatingSystem docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394239 - Win32_OperatingSystem class
|
||||
type Win32_OperatingSystem struct {
|
||||
Caption string
|
||||
FreePhysicalMemory uint64
|
||||
FreeSpaceInPagingFiles uint64
|
||||
FreeVirtualMemory uint64
|
||||
LocalDateTime time.Time
|
||||
MaxNumberOfProcesses uint32
|
||||
MaxProcessMemorySize uint64
|
||||
NumberOfProcesses uint32
|
||||
@@ -132,7 +143,7 @@ type Win32_OperatingSystem struct {
|
||||
SizeStoredInPagingFiles uint64
|
||||
TotalVirtualMemorySize uint64
|
||||
TotalVisibleMemorySize uint64
|
||||
LocalDateTime time.Time
|
||||
Version string
|
||||
}
|
||||
|
||||
func (c *OSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
@@ -142,6 +153,18 @@ func (c *OSCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, er
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.OSInformation,
|
||||
prometheus.GaugeValue,
|
||||
1.0,
|
||||
dst[0].Caption,
|
||||
dst[0].Version,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PhysicalMemoryFreeBytes,
|
||||
prometheus.GaugeValue,
|
||||
|
||||
collector/perflib.go (new file, 107 lines)
@@ -0,0 +1,107 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
perflibCollector "github.com/leoluk/perflib_exporter/collector"
|
||||
"github.com/leoluk/perflib_exporter/perflib"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
var nametable = perflib.QueryNameTable("Counter 009") // Reads the names in English. TODO: validate that the English names are always present.
|
||||
|
||||
func MapCounterToIndex(name string) string {
|
||||
return strconv.Itoa(int(nametable.LookupIndex(name)))
|
||||
}
|
||||
|
||||
func getPerflibSnapshot(objNames string) (map[string]*perflib.PerfObject, error) {
|
||||
objects, err := perflib.QueryPerformanceData(objNames)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
indexed := make(map[string]*perflib.PerfObject)
|
||||
for _, obj := range objects {
|
||||
indexed[obj.Name] = obj
|
||||
}
|
||||
return indexed, nil
|
||||
}
|
||||
|
||||
func unmarshalObject(obj *perflib.PerfObject, vs interface{}) error {
|
||||
if obj == nil {
|
||||
return fmt.Errorf("counter not found")
|
||||
}
|
||||
rv := reflect.ValueOf(vs)
|
||||
if rv.Kind() != reflect.Ptr || rv.IsNil() {
|
||||
return fmt.Errorf("%v is nil or not a pointer to slice", reflect.TypeOf(vs))
|
||||
}
|
||||
ev := rv.Elem()
|
||||
if ev.Kind() != reflect.Slice {
|
||||
return fmt.Errorf("%v is not slice", reflect.TypeOf(vs))
|
||||
}
|
||||
|
||||
// Ensure sufficient length
|
||||
if ev.Cap() < len(obj.Instances) {
|
||||
nvs := reflect.MakeSlice(ev.Type(), len(obj.Instances), len(obj.Instances))
|
||||
ev.Set(nvs)
|
||||
}
|
||||
|
||||
for idx, instance := range obj.Instances {
|
||||
target := ev.Index(idx)
|
||||
rt := target.Type()
|
||||
|
||||
counters := make(map[string]*perflib.PerfCounter, len(instance.Counters))
|
||||
for _, ctr := range instance.Counters {
|
||||
if ctr.Def.IsBaseValue && !ctr.Def.IsNanosecondCounter {
|
||||
counters[ctr.Def.Name+"_Base"] = ctr
|
||||
} else {
|
||||
counters[ctr.Def.Name] = ctr
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < target.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
tag := f.Tag.Get("perflib")
|
||||
if tag == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
ctr, found := counters[tag]
|
||||
if !found {
|
||||
log.Debugf("missing counter %q, have %v", tag, counterMapKeys(counters))
|
||||
continue
|
||||
}
|
||||
if !target.Field(i).CanSet() {
|
||||
return fmt.Errorf("tagged field %v cannot be written to", f.Name)
|
||||
}
|
||||
if fieldType := target.Field(i).Type(); fieldType != reflect.TypeOf((*float64)(nil)).Elem() {
|
||||
return fmt.Errorf("tagged field %v has wrong type %v, must be float64", f.Name, fieldType)
|
||||
}
|
||||
|
||||
switch ctr.Def.CounterType {
|
||||
case perflibCollector.PERF_ELAPSED_TIME:
|
||||
target.Field(i).SetFloat(float64(ctr.Value-windowsEpoch) / float64(obj.Frequency))
|
||||
case perflibCollector.PERF_100NSEC_TIMER, perflibCollector.PERF_PRECISION_100NS_TIMER:
|
||||
target.Field(i).SetFloat(float64(ctr.Value) * ticksToSecondsScaleFactor)
|
||||
default:
|
||||
target.Field(i).SetFloat(float64(ctr.Value))
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Name != "" && target.FieldByName("Name").CanSet() {
|
||||
target.FieldByName("Name").SetString(instance.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func counterMapKeys(m map[string]*perflib.PerfCounter) []string {
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
return keys
|
||||
}
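Taken together, these helpers give collectors a two-step pattern: take one perflib snapshot for the registered object names, then unmarshal the object of interest into a slice of perflib-tagged structs. A minimal usage sketch follows; the struct, the chosen object and the direct snapshot call are illustrative assumptions, since the real collectors in this change receive the snapshot through ScrapeContext rather than querying it themselves:

// Usage sketch only (package collector, import "errors" assumed).
type processorQueue struct {
	Name                 string
	ProcessorQueueLength float64 `perflib:"Processor Queue Length"`
}

func exampleSystemScrape() error {
	// MapCounterToIndex translates the English object name into the numeric
	// index string that QueryPerformanceData expects.
	objects, err := getPerflibSnapshot(MapCounterToIndex("System"))
	if err != nil {
		return err
	}

	var dst []processorQueue
	if err := unmarshalObject(objects["System"], &dst); err != nil {
		return err
	}
	if len(dst) == 0 {
		return errors.New("System object missing from perflib snapshot")
	}
	log.Debugf("processor queue length: %f", dst[0].ProcessorQueueLength)
	return nil
}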
|
||||
collector/perflib_test.go (new file, 125 lines)
@@ -0,0 +1,125 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
perflibCollector "github.com/leoluk/perflib_exporter/collector"
|
||||
"github.com/leoluk/perflib_exporter/perflib"
|
||||
)
|
||||
|
||||
type simple struct {
|
||||
ValA float64 `perflib:"Something"`
|
||||
ValB float64 `perflib:"Something Else"`
|
||||
}
|
||||
|
||||
func TestUnmarshalPerflib(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
obj *perflib.PerfObject
|
||||
|
||||
expectedOutput []simple
|
||||
expectError bool
|
||||
}{
|
||||
{
|
||||
name: "nil check",
|
||||
obj: nil,
|
||||
expectedOutput: []simple{},
|
||||
expectError: true,
|
||||
},
|
||||
{
|
||||
name: "Simple",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 123,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 123}},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Multiple properties",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 123,
|
||||
},
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something Else",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 256,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 123, ValB: 256}},
|
||||
expectError: false,
|
||||
},
|
||||
{
|
||||
name: "Multiple instances",
|
||||
obj: &perflib.PerfObject{
|
||||
Instances: []*perflib.PerfInstance{
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 321,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Counters: []*perflib.PerfCounter{
|
||||
{
|
||||
Def: &perflib.PerfCounterDef{
|
||||
Name: "Something",
|
||||
CounterType: perflibCollector.PERF_COUNTER_COUNTER,
|
||||
},
|
||||
Value: 231,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedOutput: []simple{{ValA: 321}, {ValA: 231}},
|
||||
expectError: false,
|
||||
},
|
||||
}
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
output := make([]simple, 0)
|
||||
err := unmarshalObject(c.obj, &output)
|
||||
if err != nil && !c.expectError {
|
||||
t.Errorf("Did not expect error, got %q", err)
|
||||
}
|
||||
if err == nil && c.expectError {
|
||||
t.Errorf("Expected an error, but got ok")
|
||||
}
|
||||
|
||||
if err == nil && !reflect.DeepEqual(output, c.expectedOutput) {
|
||||
t.Errorf("Output mismatch, expected %+v, got %+v", c.expectedOutput, output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_PerfRawData_PerfProc_Process
|
||||
// https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx - Win32_PerfRawData_PerfProc_Process class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["process"] = NewProcessCollector
|
||||
registerCollector("process", NewProcessCollector)
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -135,7 +135,7 @@ func NewProcessCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *ProcessCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *ProcessCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting process metrics:", desc, err)
|
||||
return err
|
||||
@@ -143,6 +143,8 @@ func (c *ProcessCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_PerfProc_Process docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx
|
||||
type Win32_PerfRawData_PerfProc_Process struct {
|
||||
Name string
|
||||
CreatingProcessID uint32
|
||||
@@ -177,6 +179,11 @@ type Win32_PerfRawData_PerfProc_Process struct {
|
||||
WorkingSetPrivate uint64
|
||||
}
|
||||
|
||||
type WorkerProcess struct {
|
||||
AppPoolName string
|
||||
ProcessId uint32
|
||||
}
|
||||
|
||||
func (c *ProcessCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfProc_Process
|
||||
q := queryAllWhere(&dst, c.queryWhereClause)
|
||||
@@ -184,6 +191,12 @@ func (c *ProcessCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Des
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var dst_wp []WorkerProcess
|
||||
q_wp := queryAll(&dst_wp)
|
||||
if err := wmi.QueryNamespace(q_wp, &dst_wp, "root\\WebAdministration"); err != nil {
|
||||
log.Debugf("Could not query WebAdministration namespace for IIS worker processes: %v. Skipping", err)
|
||||
}
|
||||
|
||||
for _, process := range dst {
|
||||
|
||||
if process.Name == "_Total" {
|
||||
@@ -194,6 +207,13 @@ func (c *ProcessCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Des
|
||||
pid := strconv.FormatUint(uint64(process.IDProcess), 10)
|
||||
cpid := strconv.FormatUint(uint64(process.CreatingProcessID), 10)
|
||||
|
||||
for _, wp := range dst_wp {
|
||||
if wp.ProcessId == process.IDProcess {
|
||||
processName = strings.Join([]string{processName, wp.AppPoolName}, "_")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.StartTime,
|
||||
prometheus.GaugeValue,
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// returns data points from Win32_Service
|
||||
// https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx - Win32_Service class
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["service"] = NewserviceCollector
|
||||
registerCollector("service", NewserviceCollector)
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -64,7 +64,7 @@ func NewserviceCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *serviceCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *serviceCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting service metrics:", desc, err)
|
||||
return err
|
||||
@@ -72,6 +72,8 @@ func (c *serviceCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_Service docs:
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx
|
||||
type Win32_Service struct {
|
||||
Name string
|
||||
State string
|
||||
|
||||
@@ -1,16 +1,14 @@
|
||||
// returns data points from Win32_PerfRawData_PerfOS_System class
|
||||
// https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["system"] = NewSystemCollector
|
||||
registerCollector("system", NewSystemCollector, "System")
|
||||
}
|
||||
|
||||
// A SystemCollector is a Prometheus collector for WMI metrics
|
||||
@@ -69,62 +67,60 @@ func NewSystemCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *SystemCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
func (c *SystemCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ctx, ch); err != nil {
|
||||
log.Error("failed collecting system metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_PerfOS_System struct {
|
||||
ContextSwitchesPersec uint32
|
||||
ExceptionDispatchesPersec uint32
|
||||
Frequency_Object uint64
|
||||
ProcessorQueueLength uint32
|
||||
SystemCallsPersec uint32
|
||||
SystemUpTime uint64
|
||||
Threads uint32
|
||||
Timestamp_Object uint64
|
||||
// Win32_PerfRawData_PerfOS_System docs:
|
||||
// - https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp
|
||||
type system struct {
|
||||
ContextSwitchesPersec float64 `perflib:"Context Switches/sec"`
|
||||
ExceptionDispatchesPersec float64 `perflib:"Exception Dispatches/sec"`
|
||||
ProcessorQueueLength float64 `perflib:"Processor Queue Length"`
|
||||
SystemCallsPersec float64 `perflib:"System Calls/sec"`
|
||||
SystemUpTime float64 `perflib:"System Up Time"`
|
||||
Threads float64 `perflib:"Threads"`
|
||||
}
|
||||
|
||||
func (c *SystemCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_PerfOS_System
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
func (c *SystemCollector) collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []system
|
||||
if err := unmarshalObject(ctx.perfObjects["System"], &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ContextSwitchesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ContextSwitchesPersec),
|
||||
dst[0].ContextSwitchesPersec,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ExceptionDispatchesTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].ExceptionDispatchesPersec),
|
||||
dst[0].ExceptionDispatchesPersec,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ProcessorQueueLength,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].ProcessorQueueLength),
|
||||
dst[0].ProcessorQueueLength,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemCallsTotal,
|
||||
prometheus.CounterValue,
|
||||
float64(dst[0].SystemCallsPersec),
|
||||
dst[0].SystemCallsPersec,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.SystemUpTime,
|
||||
prometheus.GaugeValue,
|
||||
// convert from Windows timestamp (1 jan 1601) to unix timestamp (1 jan 1970)
|
||||
float64(dst[0].SystemUpTime-116444736000000000)/float64(dst[0].Frequency_Object),
|
||||
dst[0].SystemUpTime,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Threads,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].Threads),
|
||||
dst[0].Threads,
|
||||
)
|
||||
return nil, nil
|
||||
}
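Note that the uptime arithmetic has not disappeared: the explicit Windows-epoch conversion on the removed line is now done generically by unmarshalObject's PERF_ELAPSED_TIME branch in collector/perflib.go. Written out, that branch computes the following (constant value copied from the removed line above; the helper name is illustrative):

// windowsEpoch is the Unix epoch (1970-01-01) expressed in the 100ns ticks
// since 1601-01-01 that PERF_ELAPSED_TIME counters use.
const windowsEpoch = 116444736000000000

// elapsedTimeToUnixSeconds mirrors the PERF_ELAPSED_TIME case: raw is the
// counter value in ticks, frequency is the object's tick rate.
func elapsedTimeToUnixSeconds(raw, frequency int64) float64 {
	return float64(raw-windowsEpoch) / float64(frequency)
}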
|
||||
|
||||
@@ -1,17 +1,16 @@
|
||||
// returns data points from Win32_PerfRawData_Tcpip_TCPv4
|
||||
|
||||
// https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx (Win32_PerfRawData_Tcpip_TCPv4 class)
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["tcp"] = NewTCPCollector
|
||||
registerCollector("tcp", NewTCPCollector)
|
||||
}
|
||||
|
||||
// A TCPCollector is a Prometheus collector for WMI Win32_PerfRawData_Tcpip_TCPv4 metrics
|
||||
@@ -91,7 +90,7 @@ func NewTCPCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *TCPCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *TCPCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting tcp metrics:", desc, err)
|
||||
return err
|
||||
@@ -99,6 +98,8 @@ func (c *TCPCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_Tcpip_TCPv4 docs
|
||||
// - https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx
|
||||
type Win32_PerfRawData_Tcpip_TCPv4 struct {
|
||||
ConnectionFailures uint64
|
||||
ConnectionsActive uint64
|
||||
@@ -118,6 +119,9 @@ func (c *TCPCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, e
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
// Counters
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
@@ -132,7 +136,7 @@ func (c *TCPCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, e
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ConnectionsEstablished,
|
||||
prometheus.CounterValue,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].ConnectionsEstablished),
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
|
||||
collector/terminal_services.go (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("terminal_services", NewTerminalServicesCollector)
|
||||
}
|
||||
|
||||
// A TerminalServicesCollector is a Prometheus collector for WMI
|
||||
// Win32_PerfRawData_LocalSessionManager_TerminalServices & Win32_PerfRawData_TermService_TerminalServicesSession metrics
|
||||
type TerminalServicesCollector struct {
|
||||
Local_session_count *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewTerminalServicesCollector ...
|
||||
func NewTerminalServicesCollector() (Collector, error) {
|
||||
const subsystem = "terminal_services"
|
||||
return &TerminalServicesCollector{
|
||||
Local_session_count: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "local_session_count"),
|
||||
"Number of Terminal Services sessions",
|
||||
[]string{"session"},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *TerminalServicesCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectTSSessionCount(ch); err != nil {
|
||||
log.Error("failed collecting terminal services session count metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Win32_PerfRawData_LocalSessionManager_TerminalServices struct {
|
||||
ActiveSessions uint32
|
||||
InactiveSessions uint32
|
||||
TotalSessions uint32
|
||||
}
|
||||
|
||||
func (c *TerminalServicesCollector) collectTSSessionCount(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_LocalSessionManager_TerminalServices
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Local_session_count,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].ActiveSessions),
|
||||
"active",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Local_session_count,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].InactiveSessions),
|
||||
"inactive",
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Local_session_count,
|
||||
prometheus.GaugeValue,
|
||||
float64(dst[0].TotalSessions),
|
||||
"total",
|
||||
)
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/dimchansky/utfbom"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
@@ -53,7 +54,7 @@ type textFileCollector struct {
|
||||
}
|
||||
|
||||
func init() {
|
||||
Factories["textfile"] = NewTextFileCollector
|
||||
registerCollector("textfile", NewTextFileCollector)
|
||||
}
|
||||
|
||||
// NewTextFileCollector returns a new Collector exposing metrics read from files
|
||||
@@ -211,7 +212,7 @@ func (cr carriageReturnFilteringReader) Read(p []byte) (int, error) {
|
||||
}
|
||||
|
||||
// Update implements the Collector interface.
|
||||
func (c *textFileCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *textFileCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
error := 0.0
|
||||
mtimes := map[string]time.Time{}
|
||||
|
||||
@@ -236,8 +237,17 @@ fileLoop:
|
||||
continue
|
||||
}
|
||||
var parser expfmt.TextParser
|
||||
parsedFamilies, err := parser.TextToMetricFamilies(carriageReturnFilteringReader{r: file})
|
||||
file.Close()
|
||||
r, encoding := utfbom.Skip(carriageReturnFilteringReader{r: file})
|
||||
if err = checkBOM(encoding); err != nil {
|
||||
log.Errorf("Invalid file encoding detected in %s: %s - file must be UTF8", path, err.Error())
|
||||
error = 1.0
|
||||
continue
|
||||
}
|
||||
parsedFamilies, err := parser.TextToMetricFamilies(r)
|
||||
closeErr := file.Close()
|
||||
if closeErr != nil {
|
||||
log.Warnf("Error closing file: %v", err)
|
||||
}
|
||||
if err != nil {
|
||||
log.Errorf("Error parsing %q: %v", path, err)
|
||||
error = 1.0
|
||||
@@ -279,3 +289,11 @@ fileLoop:
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkBOM(encoding utfbom.Encoding) error {
|
||||
if encoding == utfbom.Unknown || encoding == utfbom.UTF8 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("%s", encoding.String())
|
||||
}
|
||||
|
||||
@@ -1,14 +1,15 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"strings"
|
||||
"github.com/dimchansky/utfbom"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCRFilter(t *testing.T) {
|
||||
sr := strings.NewReader("line 1\r\nline 2")
|
||||
cr := carriageReturnFilteringReader{ r: sr }
|
||||
cr := carriageReturnFilteringReader{r: sr}
|
||||
b, err := ioutil.ReadAll(cr)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
@@ -17,4 +18,30 @@ func TestCRFilter(t *testing.T) {
|
||||
if string(b) != "line 1\nline 2" {
|
||||
t.Errorf("Unexpected output %q", b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckBOM(t *testing.T) {
|
||||
testdata := []struct {
|
||||
encoding utfbom.Encoding
|
||||
err string
|
||||
}{
|
||||
{utfbom.Unknown, ""},
|
||||
{utfbom.UTF8, ""},
|
||||
{utfbom.UTF16BigEndian, "UTF16BigEndian"},
|
||||
{utfbom.UTF16LittleEndian, "UTF16LittleEndian"},
|
||||
{utfbom.UTF32BigEndian, "UTF32BigEndian"},
|
||||
{utfbom.UTF32LittleEndian, "UTF32LittleEndian"},
|
||||
}
|
||||
for _, d := range testdata {
|
||||
err := checkBOM(d.encoding)
|
||||
if d.err == "" && err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if d.err != "" && err == nil {
|
||||
t.Errorf("Missing expected error %s", d.err)
|
||||
}
|
||||
if err != nil && !strings.Contains(err.Error(), d.err) {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
collector/thermalzone.go (new file, 103 lines)
@@ -0,0 +1,103 @@
|
||||
package collector
|
||||
|
||||
import (
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
registerCollector("thermalzone", NewThermalZoneCollector)
|
||||
}
|
||||
|
||||
// A thermalZoneCollector is a Prometheus collector for WMI Win32_PerfRawData_Counters_ThermalZoneInformation metrics
|
||||
type thermalZoneCollector struct {
|
||||
PercentPassiveLimit *prometheus.Desc
|
||||
Temperature *prometheus.Desc
|
||||
ThrottleReasons *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewThermalZoneCollector ...
|
||||
func NewThermalZoneCollector() (Collector, error) {
|
||||
const subsystem = "thermalzone"
|
||||
return &thermalZoneCollector{
|
||||
Temperature: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "temperature_celsius"),
|
||||
"(Temperature)",
|
||||
[]string{
|
||||
"name",
|
||||
},
|
||||
nil,
|
||||
),
|
||||
PercentPassiveLimit: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "percent_passive_limit"),
|
||||
"(PercentPassiveLimit)",
|
||||
[]string{
|
||||
"name",
|
||||
},
|
||||
nil,
|
||||
),
|
||||
ThrottleReasons: prometheus.NewDesc(
|
||||
prometheus.BuildFQName(Namespace, subsystem, "throttle_reasons"),
|
||||
"(ThrottleReasons)",
|
||||
[]string{
|
||||
"name",
|
||||
},
|
||||
nil,
|
||||
),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *thermalZoneCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collect(ch); err != nil {
|
||||
log.Error("failed collecting thermalzone metrics:", desc, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Win32_PerfRawData_Counters_ThermalZoneInformation docs:
|
||||
// https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_thermalzoneinformation/
|
||||
type Win32_PerfRawData_Counters_ThermalZoneInformation struct {
|
||||
Name string
|
||||
|
||||
HighPrecisionTemperature uint32
|
||||
PercentPassiveLimit uint32
|
||||
ThrottleReasons uint32
|
||||
}
|
||||
|
||||
func (c *thermalZoneCollector) collect(ch chan<- prometheus.Metric) (*prometheus.Desc, error) {
|
||||
var dst []Win32_PerfRawData_Counters_ThermalZoneInformation
|
||||
q := queryAll(&dst)
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, info := range dst {
|
||||
//Divide by 10 and subtract 273.15 to convert decikelvin to celsius
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.Temperature,
|
||||
prometheus.GaugeValue,
|
||||
(float64(info.HighPrecisionTemperature)/10.0)-273.15,
|
||||
info.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.PercentPassiveLimit,
|
||||
prometheus.GaugeValue,
|
||||
float64(info.PercentPassiveLimit),
|
||||
info.Name,
|
||||
)
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.ThrottleReasons,
|
||||
prometheus.GaugeValue,
|
||||
float64(info.ThrottleReasons),
|
||||
info.Name,
|
||||
)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
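HighPrecisionTemperature is reported in tenths of a kelvin, hence the divide-by-ten-then-subtract-273.15 step above. A quick standalone check of the arithmetic with a hypothetical raw reading:

package main

import "fmt"

func main() {
	// Hypothetical raw reading: 3031 tenths of a kelvin.
	var highPrecisionTemperature uint32 = 3031
	celsius := (float64(highPrecisionTemperature) / 10.0) - 273.15
	fmt.Printf("%.2f C\n", celsius) // prints 29.95 C
}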
|
||||
@@ -1,14 +1,17 @@
|
||||
// returns data points from Win32_PerfRawData_vmGuestLib_VMem and Win32_PerfRawData_vmGuestLib_VCPU
|
||||
// +build windows
|
||||
|
||||
package collector
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/StackExchange/wmi"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
Factories["vmware"] = NewVmwareCollector
|
||||
registerCollector("vmware", NewVmwareCollector)
|
||||
}
|
||||
|
||||
// A VmwareCollector is a Prometheus collector for WMI Win32_PerfRawData_vmGuestLib_VMem/Win32_PerfRawData_vmGuestLib_VCPU metrics
|
||||
@@ -35,6 +38,7 @@ type VmwareCollector struct {
|
||||
HostProcessorSpeedMHz *prometheus.Desc
|
||||
}
|
||||
|
||||
// NewVmwareCollector constructs a new VmwareCollector
|
||||
func NewVmwareCollector() (Collector, error) {
|
||||
const subsystem = "vmware"
|
||||
return &VmwareCollector{
|
||||
@@ -158,7 +162,7 @@ func NewVmwareCollector() (Collector, error) {
|
||||
|
||||
// Collect sends the metric values for each metric
|
||||
// to the provided prometheus Metric channel.
|
||||
func (c *VmwareCollector) Collect(ch chan<- prometheus.Metric) error {
|
||||
func (c *VmwareCollector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
|
||||
if desc, err := c.collectMem(ch); err != nil {
|
||||
log.Error("failed collecting vmware memory metrics:", desc, err)
|
||||
return err
|
||||
@@ -201,6 +205,9 @@ func (c *VmwareCollector) collectMem(ch chan<- prometheus.Metric) (*prometheus.D
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.MemActive,
|
||||
@@ -287,6 +294,9 @@ func (c *VmwareCollector) collectCpu(ch chan<- prometheus.Metric) (*prometheus.D
|
||||
if err := wmi.Query(q, &dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) == 0 {
|
||||
return nil, errors.New("WMI query returned empty result set")
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.CpuLimitMHz,
|
||||
|
||||
@@ -4,27 +4,9 @@ import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/common/log"
|
||||
)
|
||||
|
||||
// ...
|
||||
const (
|
||||
Namespace = "wmi"
|
||||
|
||||
// Conversion factors
|
||||
ticksToSecondsScaleFactor = 1 / 1e7
|
||||
)
|
||||
|
||||
// Factories ...
|
||||
var Factories = make(map[string]func() (Collector, error))
|
||||
|
||||
// Collector is the interface a collector has to implement.
|
||||
type Collector interface {
|
||||
// Get new metrics and expose them via prometheus registry.
|
||||
Collect(ch chan<- prometheus.Metric) (err error)
|
||||
}
|
||||
|
||||
func className(src interface{}) string {
|
||||
s := reflect.Indirect(reflect.ValueOf(src))
|
||||
t := s.Type()
|
||||
|
||||
contrib/console_templates/wmi-overview.html (new file, 140 lines)
@@ -0,0 +1,140 @@
|
||||
{{ template "head" . }}
|
||||
|
||||
{{ template "prom_content_head" . }}
|
||||
<h1>Node Overview - {{ reReplaceAll "(.*?://)([^:/]+?)(:\\d+)?/.*" "$2" .Params.instance }}</h1>
|
||||
|
||||
<h3>CPU Usage</h3>
|
||||
<div id="cpuGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#cpuGraph"),
|
||||
expr: "sum by (mode)(irate(wmi_cpu_time_total{job='node',instance='{{ .Params.instance }}',mode!='idle'}[5m]))",
|
||||
renderer: 'area',
|
||||
max: {{ with printf "count(count by (cpu)(wmi_cpu_time_total{job='node',instance='%s'}))" .Params.instance | query }}{{ . | first | value }}{{ else}}undefined{{end}},
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yTitle: 'Cores'
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Network Utilization</h3>
|
||||
<div id="networkioGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#networkioGraph"),
|
||||
expr: [
|
||||
"irate(wmi_net_bytes_sent_total{job='node',instance='{{ .Params.instance }}',nic!~'^isatap_ec2_internal'}[5m])",
|
||||
"irate(wmi_net_bytes_received_total{job='node',instance='{{ .Params.instance }}',nic!~'^isatap_ec2_internal'}[5m])",
|
||||
],
|
||||
min: 0,
|
||||
name: [ 'sent', 'received' ],
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "B",
|
||||
yTitle: 'Network IO'
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Disk I/O Utilization</h3>
|
||||
<div id="diskioGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#diskioGraph"),
|
||||
expr: [
|
||||
"100 - irate(wmi_logical_disk_idle_seconds_total{job='node',instance='{{ .Params.instance }}',volume!~'^HarddiskVolume.*$'}[5m]) * 100",
|
||||
],
|
||||
min: 0,
|
||||
name: '[[ volume ]]',
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanizeNoSmallPrefix,
|
||||
yUnits: "%",
|
||||
yTitle: 'Disk I/O Utilization'
|
||||
})
|
||||
</script>
|
||||
|
||||
<h3>Memory</h3>
|
||||
<div id="memoryGraph"></div>
|
||||
<script>
|
||||
new PromConsole.Graph({
|
||||
node: document.querySelector("#memoryGraph"),
|
||||
renderer: 'area',
|
||||
expr: [
|
||||
"wmi_cs_physical_memory_bytes{job='node',instance='{{ .Params.instance }}'}",
|
||||
"wmi_os_physical_memory_free_bytes{job='node',instance='{{ .Params.instance }}'}",
|
||||
"wmi_cs_physical_memory__bytes{job='node',instance='{{ .Params.instance }}'} - wmi_os_physical_memory_free_bytes{job='node',instance='{{.Params.instance}}'}",
|
||||
"wmi_os_virtual_memory_bytes{job='node',instance='{{ .Params.instance }}'}",
|
||||
],
|
||||
name: ["Physical", "Free", "Used", "Virtual"],
|
||||
min: 0,
|
||||
yUnits: "B",
|
||||
yAxisFormatter: PromConsole.NumberFormatter.humanize1024,
|
||||
yHoverFormatter: PromConsole.NumberFormatter.humanize1024,
|
||||
yTitle: 'Memory'
|
||||
})
|
||||
</script>
|
||||
|
||||
{{ template "prom_right_table_head" }}
|
||||
<tr><th colspan="2">Overview</th></tr>
|
||||
<tr>
|
||||
<td>User CPU</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(wmi_cpu_time_total{job='node',instance='%s',mode='user'}[5m])) * 100 / count(count by (cpu)(wmi_cpu_time_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Privileged CPU</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "sum(irate(wmi_cpu_time_total{job='node',instance='%s',mode='privileged'}[5m])) * 100 / count(count by (cpu)(wmi_cpu_time_total{job='node',instance='%s'}))" .Params.instance .Params.instance) "%" "printf.1f") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Memory Total</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "wmi_cs_physical_memory_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Memory Free</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "wmi_os_physical_memory_free_bytes{job='node',instance='%s'}" .Params.instance) "B" "humanize1024") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<th colspan="2">Network</th>
|
||||
</tr>
|
||||
{{ range printf "wmi_net_bytes_received_total{job='node',instance='%s',nic!='isatap_ec2_internal'}" .Params.instance | query | sortByLabel "nic" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.nic }} Received</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(wmi_net_bytes_received_total{job='node',instance='%s',nic='%s'}[5m])" .Labels.instance .Labels.nic) "B/s" "humanize") }}</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>{{ .Labels.nic }} Transmitted</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(wmi_net_bytes_sent_total{job='node',instance='%s',nic='%s'}[5m])" .Labels.instance .Labels.nic) "B/s" "humanize") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tr>
|
||||
<tr>
|
||||
<th colspan="2">Disks</th>
|
||||
</tr>
|
||||
{{ range printf "wmi_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.volume }} Utilization</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "100 - irate(wmi_logical_disk_idle_seconds_total{job='node',instance='%s',volume='%s'}[5m]) * 100" .Labels.instance .Labels.volume) "%" "printf.1f") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
{{ range printf "wmi_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.volume }} Throughput</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "irate(wmi_logical_disk_read_bytes_total{job='node',instance='%s',volume='%s'}[5m]) + irate(wmi_logical_disk_write_bytes_total{job='node',instance='%s',volume='%s'}[5m])" .Labels.instance .Labels.volume .Labels.instance .Labels.volume) "B/s" "humanize") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
<tr>
|
||||
<th colspan="2">Filesystem Fullness</th>
|
||||
</tr>
|
||||
{{ define "roughlyNearZero" }}
|
||||
{{ if gt .1 . }}~0{{ else }}{{ printf "%.1f" . }}{{ end }}
|
||||
{{ end }}
|
||||
{{ range printf "wmi_logical_disk_size_bytes{job='node',instance='%s',volume!~'^HarddiskVolume.*$'}" .Params.instance | query | sortByLabel "volume" }}
|
||||
<tr>
|
||||
<td>{{ .Labels.volume }}</td>
|
||||
<td>{{ template "prom_query_drilldown" (args (printf "100 - wmi_logical_disk_free_bytes{job='node',instance='%s',volume='%s'} / wmi_logical_disk_size_bytes{job='node'} * 100" .Labels.instance .Labels.volume) "%" "roughlyNearZero") }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tr>
|
||||
{{ template "prom_right_table_tail" }}
|
||||
|
||||
{{ template "prom_content_tail" . }}
|
||||
|
||||
{{ template "tail" }}
|
||||
32
docs/README.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# Documentation
|
||||
This directory contains documentation of the collectors in the WMI exporter, with information such as what metrics are exported, any flags for additional configuration, and some example usage in alerts and queries.
|
||||
|
||||
# Collectors
|
||||
- [`ad`](collector.ad.md)
|
||||
- [`adfs`](collector.adfs.md)
|
||||
- [`cpu`](collector.cpu.md)
|
||||
- [`cs`](collector.cs.md)
|
||||
- [`dns`](collector.dns.md)
|
||||
- [`hyperv`](collector.hyperv.md)
|
||||
- [`iis`](collector.iis.md)
|
||||
- [`logical_disk`](collector.logical_disk.md)
|
||||
- [`logon`](collector.logon.md)
|
||||
- [`memory`](collector.memory.md)
|
||||
- [`msmq`](collector.msmq.md)
|
||||
- [`mssql`](collector.mssql.md)
|
||||
- [`netframework_clrexceptions`](collector.netframework_clrexceptions.md)
|
||||
- [`netframework_clrinterop`](collector.netframework_clrinterop.md)
|
||||
- [`netframework_clrjit`](collector.netframework_clrjit.md)
|
||||
- [`netframework_clrloading`](collector.netframework_clrloading.md)
|
||||
- [`netframework_clrlocksandthreads`](collector.netframework_clrlocksandthreads.md)
|
||||
- [`netframework_clrmemory`](collector.netframework_clrmemory.md)
|
||||
- [`netframework_clrremoting`](collector.netframework_clrremoting.md)
|
||||
- [`netframework_clrsecurity`](collector.netframework_clrsecurity.md)
|
||||
- [`net`](collector.net.md)
|
||||
- [`os`](collector.os.md)
|
||||
- [`process`](collector.process.md)
|
||||
- [`service`](collector.service.md)
|
||||
- [`system`](collector.system.md)
|
||||
- [`tcp`](collector.tcp.md)
|
||||
- [`textfile`](collector.textfile.md)
|
||||
- [`vmware`](collector.vmware.md)
|
||||
47
docs/collector-template.md
Normal file
@@ -0,0 +1,47 @@
|
||||
# %name% collector
|
||||
|
||||
The %name% collector exposes metrics about ...
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `%name%`
|
||||
Classes | [`...`](https://msdn.microsoft.com/en-us/library/...)
|
||||
Enabled by default? | Yes/No
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector....`
|
||||
|
||||
Add description...
|
||||
|
||||
Example: `--collector....`
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_...` | ... | counter/gauge/histogram/summary | ...
|
||||
|
||||
### Example metric
|
||||
|
||||
...:
|
||||
|
||||
`wmi_...{...} 1`
|
||||
|
||||
## Useful queries
|
||||
### Add queries...
|
||||
|
||||
`...`
|
||||
|
||||
## Alerting examples
|
||||
### Add alerts...
|
||||
|
||||
```yaml
|
||||
- alert: ""
|
||||
expr: ""
|
||||
for: ""
|
||||
labels:
|
||||
urgency: ""
|
||||
annotations:
|
||||
summary: ""
|
||||
```
|
||||
88
docs/collector.ad.md
Normal file
@@ -0,0 +1,88 @@
|
||||
# ad collector
|
||||
|
||||
The ad collector exposes metrics about an Active Directory Domain Services domain controller.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `ad`
|
||||
Classes | [`Win32_PerfRawData_DirectoryServices_DirectoryServices`](https://msdn.microsoft.com/en-us/library/ms803980.aspx)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_ad_address_book_operations_total` | _Not yet documented_ | counter | `operation`
|
||||
`wmi_ad_address_book_client_sessions` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_approximate_highest_distinguished_name_tag` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_atq_estimated_delay_seconds` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_atq_outstanding_requests` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_atq_average_request_latency` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_atq_current_threads` | _Not yet documented_ | gauge | `service`
|
||||
`wmi_ad_searches_total` | _Not yet documented_ | counter | `scope`
|
||||
`wmi_ad_database_operations_total` | _Not yet documented_ | counter | `operation`
|
||||
`wmi_ad_binds_total` | _Not yet documented_ | counter | `bind_method`
|
||||
`wmi_ad_replication_highest_usn` | _Not yet documented_ | counter | `state`
|
||||
`wmi_ad_replication_data_intrasite_bytes_total` | _Not yet documented_ | counter | `direction`
|
||||
`wmi_ad_replication_data_intersite_bytes_total` | _Not yet documented_ | counter | `direction`
|
||||
`wmi_ad_replication_inbound_sync_objects_remaining` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_replication_inbound_link_value_updates_remaining` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_replication_inbound_objects_updated_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_inbound_objects_filtered_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_inbound_properties_updated_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_inbound_properties_filtered_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_pending_operations` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_replication_pending_synchronizations` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_replication_sync_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_sync_requests_success_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_replication_sync_requests_schema_mismatch_failure_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_name_translations_total` | _Not yet documented_ | counter | `target_name`
|
||||
`wmi_ad_change_monitors_registered` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_change_monitor_updates_pending` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_name_cache_hits_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_name_cache_lookups_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_directory_operations_total` | _Not yet documented_ | counter | `operation`, `origin`
|
||||
`wmi_ad_directory_search_suboperations_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_security_descriptor_propagation_events_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_security_descriptor_propagation_events_queued` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_security_descriptor_propagation_access_wait_total_seconds` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_security_descriptor_propagation_items_queued_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_directory_service_threads` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_ldap_closed_connections_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_ldap_opened_connections_total` | _Not yet documented_ | counter | `type`
|
||||
`wmi_ad_ldap_active_threads` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_ldap_last_bind_time_seconds` | _Not yet documented_ | gauge | None
|
||||
`wmi_ad_ldap_searches_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_ldap_udp_operations_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_ldap_writes_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_link_values_cleaned_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_phantom_objects_cleaned_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_phantom_objects_visited_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_group_membership_evaluations_total` | _Not yet documented_ | counter | `group_type`
|
||||
`wmi_ad_sam_group_membership_global_catalog_evaluations_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_group_membership_evaluations_nontransitive_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_group_membership_evaluations_transitive_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_group_evaluation_latency` | _Not yet documented_ | gauge | `evaluation_type`
|
||||
`wmi_ad_sam_computer_creation_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_computer_creation_successful_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_user_creation_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_user_creation_successful_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_query_display_requests_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_enumerations_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_membership_changes_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_sam_password_changes_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_tombstoned_objects_collected_total` | _Not yet documented_ | counter | None
|
||||
`wmi_ad_tombstoned_objects_visited_total` | _Not yet documented_ | counter | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
51
docs/collector.adfs.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# adfs collector
|
||||
|
||||
The adfs collector exposes metrics about Active Directory Federation Services. Note that this collector has only been tested against ADFS 4.0 (2016).
|
||||
Other ADFS versions may work but are not tested.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `adfs`
|
||||
Data source | Perflib
|
||||
Counters | `AD FS`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_adfs_ad_login_connection_failures` | Total number of connection failures between the ADFS server and the Active Directory domain controller(s) | counter | None
|
||||
`wmi_adfs_certificate_authentications` | Total number of [User Certificate](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-user-certificate-authentication) authentications. I.E. smart cards or mobile devices with provisioned client certificates | counter | None
|
||||
`wmi_adfs_device_authentications` | Total number of [device authentications](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/device-authentication-controls-in-ad-fs) (SignedToken, clientTLS, PkeyAuth). Device authentication is only available on ADFS 2016 or later | counter | None
|
||||
`wmi_adfs_extranet_account_lockouts` | Total number of [extranet lockouts](https://docs.microsoft.com/en-us/windows-server/identity/ad-fs/operations/configure-ad-fs-extranet-smart-lockout-protection). Requires the Extranet Lockout feature to be enabled | counter | None
|
||||
`wmi_adfs_federated_authentications` | Total number of authentications from federated sources. E.G. Office365 | counter | None
|
||||
`wmi_adfs_passport_authentications` | Total number of authentications from [Microsoft Passport](https://en.wikipedia.org/wiki/Microsoft_account) (now named Microsoft Account) | counter | None
|
||||
`wmi_adfs_password_change_failed` | Total number of failed password changes. The Password Change Portal must be enabled in the AD FS Management tool in order to allow user password changes | counter | None
|
||||
`wmi_adfs_password_change_succeeded` | Total number of succeeded password changes. The Password Change Portal must be enabled in the AD FS Management tool in order to allow user password changes | counter | None
|
||||
`wmi_adfs_token_requests` | Total number of requested access tokens | counter | None
|
||||
`wmi_adfs_windows_integrated_authentications` | Total number of Windows integrated authentications using Kerberos or NTLM | counter | None
|
||||
|
||||
### Example metric
|
||||
Show rate of device authentications in AD FS:
|
||||
```
|
||||
rate(wmi_adfs_device_authentications[2m])
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
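For example, the rate at which AD FS issues access tokens (using the `wmi_adfs_token_requests` counter from the table above) can be graphed with a query such as:
```
rate(wmi_adfs_token_requests[2m])
```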
|
||||
## Alerting examples
|
||||
**prometheus.rules**
|
||||
```yaml
|
||||
- alert: "HighExtranetLockouts"
|
||||
expr: "rate(wmi_adfs_extranet_account_lockouts)[2m] > 100"
|
||||
for: "10m"
|
||||
labels:
|
||||
severity: "high"
|
||||
annotations:
|
||||
summary: "High number of AD FS extranet lockouts"
|
||||
description: "High number of AD FS extranet lockouts may indicate a password spray attack.\n Server: {{ $labels.instance }}\n Number of lockouts: {{ $value }}"
|
||||
```
|
||||
42
docs/collector.container.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# container collector
|
||||
|
||||
The container collector exposes metrics about containers running on the system.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `container`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_container_available` | Available | counter | `container_id`
|
||||
`wmi_container_count` | Number of containers | gauge | `container_id`
|
||||
`wmi_container_cpu_usage_seconds_kernelmode` | Run time in Kernel mode in Seconds | counter | `container_id`
|
||||
`wmi_container_cpu_usage_seconds_usermode` | Run Time in User mode in Seconds | counter | `container_id`
|
||||
`wmi_container_cpu_usage_seconds_total` | Total Run time in Seconds | counter | `container_id`
|
||||
`wmi_container_memory_usage_commit_bytes` | Memory Usage Commit Bytes | gauge | `container_id`
|
||||
`wmi_container_memory_usage_commit_peak_bytes` | Memory Usage Commit Peak Bytes | gauge | `container_id`
|
||||
`wmi_container_memory_usage_private_working_set_bytes` | Memory Usage Private Working Set Bytes | gauge | `container_id`
|
||||
`wmi_container_network_receive_bytes_total` | Bytes Received on Interface | counter | `container_id`, `interface`
|
||||
`wmi_container_network_receive_packets_total` | Packets Received on Interface | counter | `container_id`, `interface`
|
||||
`wmi_container_network_receive_packets_dropped_total` | Dropped Incoming Packets on Interface | counter | `container_id`, `interface`
|
||||
`wmi_container_network_transmit_bytes_total` | Bytes Sent on Interface | counter | `container_id`, `interface`
|
||||
`wmi_container_network_transmit_packets_total` | Packets Sent on Interface | counter | `container_id`, `interface`
|
||||
`wmi_container_network_transmit_packets_dropped_total` | Dropped Outgoing Packets on Interface | counter | `container_id`, `interface`
|
||||
|
||||
### Example metric
|
||||
_wmi_container_network_receive_bytes_total{container_id="docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e",interface="822179E7-002C-4280-ABBA-28BCFE401826"} 9.3305343e+07_
|
||||
|
||||
This metric means that a total of _9.3305343e+07_ bytes were received on interface _822179E7-002C-4280-ABBA-28BCFE401826_ for container _docker://1bd30e8b8ac28cbd76a9b697b4d7bb9d760267b0733d1bc55c60024e98d1e43e_.
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
60
docs/collector.cpu.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# cpu collector
|
||||
|
||||
The cpu collector exposes metrics about CPU usage
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `cpu`
|
||||
Data source | Perflib
|
||||
Counters | `ProcessorInformation` (Windows Server 2008R2 and later) or `Processor` (older versions)
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
These metrics are available on all versions of Windows:
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_cpu_cstate_seconds_total` | Time spent in low-power idle states | counter | `core`, `state`
|
||||
`wmi_cpu_time_total` | Time that processor spent in different modes (idle, user, system, ...) | counter | `core`, `mode`
|
||||
`wmi_cpu_interrupts_total` | Total number of received and serviced hardware interrupts | counter | `core`
|
||||
`wmi_cpu_dpcs_total` | Total number of received and serviced deferred procedure calls (DPCs) | counter | `core`
|
||||
|
||||
These metrics are only exposed on Windows Server 2008R2 and later:
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_cpu_clock_interrupts_total` | Total number of received and serviced clock tick interrupts | counter | `core`
|
||||
`wmi_cpu_idle_break_events_total` | Total number of times the processor was woken from idle | counter | `core`
|
||||
`wmi_cpu_parking_status` | Parking Status represents whether a processor is parked or not | gauge | `core`
|
||||
`wmi_cpu_core_frequency_mhz` | Core frequency in megahertz | gauge | `core`
|
||||
`wmi_cpu_processor_performance` | Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100% | gauge | `core`
|
||||
|
||||
### Example metric
|
||||
Show frequency of host CPU cores
|
||||
```
|
||||
wmi_cpu_core_frequency_mhz{instance="localhost"}
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
Show cpu usage by mode.
|
||||
```
|
||||
sum by (mode) (irate(wmi_cpu_time_total{instance="localhost"}[5m]))
|
||||
```
|
||||
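Show overall CPU usage as a percentage per host (this is the same expression the alerting rule below is built on).
```
100 - (avg by (instance) (irate(wmi_cpu_time_total{mode="idle"}[2m])) * 100)
```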
|
||||
## Alerting examples
|
||||
**prometheus.rules**
|
||||
```yaml
|
||||
# Alert on hosts with more than 80% CPU usage over a 10 minute period
|
||||
- alert: CpuUsage
|
||||
expr: 100 - (avg by (instance) (irate(wmi_cpu_time_total{mode="idle"}[2m])) * 100) > 80
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "CPU Usage (instance {{ $labels.instance }})"
|
||||
description: "CPU Usage is more than 80%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
|
||||
```
|
||||
30
docs/collector.cs.md
Normal file
@@ -0,0 +1,30 @@
|
||||
# cs collector
|
||||
|
||||
The cs collector exposes metrics detailing the hardware of the computer system
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `cs`
|
||||
Classes | [`Win32_ComputerSystem`](https://msdn.microsoft.com/en-us/library/aa394102)
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_cs_logical_processors` | Number of installed logical processors | gauge | None
|
||||
`wmi_cs_physical_memory_bytes` | Total installed physical memory | gauge | None
|
||||
`wmi_cs_hostname` | Labeled system hostname information | gauge | `hostname`, `domain`, `fqdn`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
49
docs/collector.dns.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# dns collector
|
||||
|
||||
The dns collector exposes metrics about the DNS server
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `dns`
|
||||
Classes | [`Win32_PerfRawData_DNS_DNS`](https://technet.microsoft.com/en-us/library/cc977686.aspx)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_dns_zone_transfer_requests_received_total` | _Not yet documented_ | counter | `qtype`
|
||||
`wmi_dns_zone_transfer_requests_sent_total` | _Not yet documented_ | counter | `qtype`
|
||||
`wmi_dns_zone_transfer_response_received_total` | _Not yet documented_ | counter | `qtype`
|
||||
`wmi_dns_zone_transfer_success_received_total` | _Not yet documented_ | counter | `qtype`, `protocol`
|
||||
`wmi_dns_zone_transfer_success_sent_total` | _Not yet documented_ | counter | `qtype`
|
||||
`wmi_dns_zone_transfer_failures_total` | _Not yet documented_ | counter | None
|
||||
`wmi_dns_memory_used_bytes_total` | _Not yet documented_ | gauge | `area`
|
||||
`wmi_dns_dynamic_updates_queued` | _Not yet documented_ | gauge | None
|
||||
`wmi_dns_dynamic_updates_received_total` | _Not yet documented_ | counter | `operation`
|
||||
`wmi_dns_dynamic_updates_failures_total` | _Not yet documented_ | counter | `reason`
|
||||
`wmi_dns_notify_received_total` | _Not yet documented_ | counter | None
|
||||
`wmi_dns_notify_sent_total` | _Not yet documented_ | counter | None
|
||||
`wmi_dns_secure_update_failures_total` | _Not yet documented_ | counter | None
|
||||
`wmi_dns_secure_update_received_total` | _Not yet documented_ | counter | None
|
||||
`wmi_dns_queries_total` | _Not yet documented_ | counter | `protocol`
|
||||
`wmi_dns_responses_total` | _Not yet documented_ | counter | `protocol`
|
||||
`wmi_dns_recursive_queries_total` | _Not yet documented_ | counter | None
|
||||
`wmi_dns_recursive_query_failures_total` | _Not yet documented_ | counter | None
|
||||
`wmi_dns_recursive_query_send_timeouts_total` | _Not yet documented_ | counter | None
|
||||
`wmi_dns_wins_queries_total` | _Not yet documented_ | counter | `direction`
|
||||
`wmi_dns_wins_responses_total` | _Not yet documented_ | counter | `direction`
|
||||
`wmi_dns_unmatched_responses_total` | _Not yet documented_ | counter | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
102
docs/collector.hyperv.md
Normal file
@@ -0,0 +1,102 @@
|
||||
# hyperv collector
|
||||
|
||||
The hyperv collector exposes metrics about the Hyper-V hypervisor
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `hyperv`
|
||||
Classes | `Win32_PerfRawData_VmmsVirtualMachineStats_HyperVVirtualMachineHealthSummary`<br/>`Win32_PerfRawData_VidPerfProvider_HyperVVMVidPartition`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisorRootPartition`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisor`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisorRootVirtualProcessor`<br/>`Win32_PerfRawData_HvStats_HyperVHypervisorVirtualProcessor`<br/>`Win32_PerfRawData_NvspSwitchStats_HyperVVirtualSwitch`<br/>`Win32_PerfRawData_EthernetPerfProvider_HyperVLegacyNetworkAdapter`<br/>`Win32_PerfRawData_Counters_HyperVVirtualStorageDevice`<br/>`Win32_PerfRawData_NvspNicStats_HyperVVirtualNetworkAdapter`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_hyperv_health_critical` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_health_ok` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_vid_physical_pages_allocated` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyperv_vid_preferred_numa_node_index` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyperv_vid_remote_physical_pages` | _Not yet documented_ | counter | `vm`
|
||||
`wmi_hyperv_root_partition_address_spaces` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_attached_devices` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_deposited_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_device_dma_errors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_device_interrupt_errors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_device_interrupt_mappings` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_device_interrupt_throttle_events` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_preferred_numa_node_index` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_gpa_space_modifications` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_io_tlb_flush_cost` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_io_tlb_flush` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_recommended_virtual_tlb_size` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_physical_pages_allocated` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_1G_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_1G_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_2M_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_2M_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_4K_device_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_4K_gpa_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_virtual_tlb_flush_entires` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_root_partition_virtual_tlb_pages` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_hypervisor_virtual_processors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_hypervisor_logical_processors` | _Not yet documented_ | counter | None
|
||||
`wmi_hyperv_host_cpu_guest_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyperv_host_cpu_hypervisor_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyperv_host_cpu_remote_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyperv_host_cpu_total_run_time` | _Not yet documented_ | counter | `core`
|
||||
`wmi_hyperv_vm_cpu_guest_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyperv_vm_cpu_hypervisor_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyperv_vm_cpu_remote_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyperv_vm_cpu_total_run_time` | _Not yet documented_ | counter | `vm`, `core`
|
||||
`wmi_hyperv_vswitch_broadcast_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_broadcast_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_bytes_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_bytes_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_bytes_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_directed_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_directed_packets_send_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_dropped_packets_incoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_dropped_packets_outcoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_extensions_dropped_packets_incoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_extensions_dropped_packets_outcoming_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_learned_mac_addresses_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_multicast_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_multicast_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_number_of_send_channel_moves_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_number_of_vmq_moves_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_packets_flooded_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_packets_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_packets_received_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_packets_sent_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_vswitch_purged_mac_addresses_total` | _Not yet documented_ | counter | `vswitch`
|
||||
`wmi_hyperv_ethernet_bytes_dropped` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_ethernet_bytes_received` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_ethernet_bytes_sent` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_ethernet_frames_dropped` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_ethernet_frames_received` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_ethernet_frames_sent` | _Not yet documented_ | counter | `adapter`
|
||||
`wmi_hyperv_vm_device_error_count` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_device_queue_length` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_device_bytes_read` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_device_operations_read` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_device_bytes_written` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_device_operations_written` | _Not yet documented_ | counter | `vm_device`
|
||||
`wmi_hyperv_vm_interface_bytes_received` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyperv_vm_interface_bytes_sent` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyperv_vm_interface_packets_incoming_dropped` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyperv_vm_interface_packets_outgoing_dropped` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyperv_vm_interface_packets_received` | _Not yet documented_ | counter | `vm_interface`
|
||||
`wmi_hyperv_vm_interface_packets_sent` | _Not yet documented_ | counter | `vm_interface`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
140
docs/collector.iis.md
Normal file
@@ -0,0 +1,140 @@
|
||||
# iis collector
|
||||
|
||||
The iis collector exposes metrics about the IIS server
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `iis`
|
||||
Classes | `Win32_PerfRawData_W3SVC_WebService`<br/>`Win32_PerfRawData_APPPOOLCountersProvider_APPPOOLWAS`<br/>`Win32_PerfRawData_W3SVCW3WPCounterProvider_W3SVCW3WP`<br/>`Win32_PerfRawData_W3SVC_WebServiceCache`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector.iis.site-whitelist`
|
||||
|
||||
If given, a site needs to match the whitelist regexp in order for the corresponding metrics to be reported.
|
||||
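Example (hypothetical site names, given as a regexp): `--collector.iis.site-whitelist="Default Web Site|MyApp"`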
|
||||
### `--collector.iis.site-blacklist`
|
||||
|
||||
If given, a site needs to *not* match the blacklist regexp in order for the corresponding metrics to be reported.
|
||||
|
||||
### `--collector.iis.app-whitelist`
|
||||
|
||||
If given, an application needs to match the whitelist regexp in order for the corresponding metrics to be reported.
|
||||
|
||||
### `--collector.iis.app-blacklist`
|
||||
|
||||
If given, an application needs to *not* match the blacklist regexp in order for the corresponding metrics to be reported.
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_iis_current_anonymous_users` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_blocked_async_io_requests` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_cgi_requests` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_connections` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_isapi_extension_requests` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_non_anonymous_users` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_received_bytes_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_sent_bytes_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_anonymous_users_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_blocked_async_io_requests_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_cgi_requests_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_connection_attempts_all_instances_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_requests_total` | _Not yet documented_ | counter | `site`, `method`
|
||||
`wmi_iis_files_received_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_files_sent_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_ipapi_extension_requests_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_locked_errors_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_logon_attempts_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_non_anonymous_users_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_not_found_errors_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_rejected_async_io_requests_total` | _Not yet documented_ | counter | `site`
|
||||
`wmi_iis_current_application_pool_state` | _Not yet documented_ | counter | `app`, `state`
|
||||
`wmi_iis_current_application_pool_start_time` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_current_worker_processes` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_maximum_worker_processes` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_recent_worker_process_failures` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_time_since_last_worker_process_failure` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_application_pool_recycles` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_application_pool_start_time` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_worker_processes_created` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_worker_process_failures` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_worker_process_ping_failures` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_worker_process_shutdown_failures` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_total_worker_process_startup_failures` | _Not yet documented_ | counter | `app`
|
||||
`wmi_iis_worker_cache_active_flushed_entries` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_memory_bytes` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_max_memory_bytes` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_flushes_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_queries_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_hits_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_items` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_items_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_file_cache_items_flushed_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_flushes_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_queries_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_hits_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_items` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_items_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_uri_cache_items_flushed_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_items` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_flushes_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_queries_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_hits_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_items_cached_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_metadata_cache_items_flushed_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_active_flushed_items` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_items` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_memory_bytes` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_queries_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_hits_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_items_flushed_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_output_cache_flushes_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_threads` | _Not yet documented_ | counter | `app`, `pid`, `state`
|
||||
`wmi_iis_worker_max_threads` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_requests_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_current_requests` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_request_errors_total` | _Not yet documented_ | counter | `app`, `pid`, `status_code`
|
||||
`wmi_iis_worker_current_websocket_requests` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_websocket_connection_attempts_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_websocket_connection_accepted_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_worker_websocket_connection_rejected_total` | _Not yet documented_ | counter | `app`, `pid`
|
||||
`wmi_iis_server_cache_active_flushed_entries` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_memory_bytes` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_max_memory_bytes` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_flushes_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_queries_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_hits_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_items` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_items_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_file_cache_items_flushed_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_uri_cache_flushes_total` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_uri_cache_queries_total` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_uri_cache_hits_total` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_uri_cache_items` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_uri_cache_items_total` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_uri_cache_items_flushed_total` | _Not yet documented_ | counter | `mode`
|
||||
`wmi_iis_server_metadata_cache_items` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_metadata_cache_flushes_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_metadata_cache_queries_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_metadata_cache_hits_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_metadata_cache_items_cached_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_metadata_cache_items_flushed_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_active_flushed_items` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_items` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_memory_bytes` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_queries_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_hits_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_items_flushed_total` | _Not yet documented_ | counter | None
|
||||
`wmi_iis_server_output_cache_flushes_total` | _Not yet documented_ | counter | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
76
docs/collector.logical_disk.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# logical_disk collector
|
||||
|
||||
The logical_disk collector exposes metrics about logical disks (in contrast to physical disks)
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `logical_disk`
|
||||
Data source | Perflib
|
||||
Counters | `LogicalDisk` ([`Win32_PerfRawData_PerfDisk_LogicalDisk`](https://msdn.microsoft.com/en-us/windows/hardware/aa394307(v=vs.71)))
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector.logical_disk.volume-whitelist`
|
||||
|
||||
If given, a disk needs to match the whitelist regexp in order for the corresponding disk metrics to be reported
|
||||
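Example (reporting only the `C:` volume): `--collector.logical_disk.volume-whitelist="C:"`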
|
||||
### `--collector.logical_disk.volume-blacklist`
|
||||
|
||||
If given, a disk needs to *not* match the blacklist regexp in order for the corresponding disk metrics to be reported
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`requests_queued` | Number of requests outstanding on the disk at the time the performance data is collected | gauge | `volume`
|
||||
`read_bytes_total` | Rate at which bytes are transferred from the disk during read operations | counter | `volume`
|
||||
`reads_total` | Rate of read operations on the disk | counter | `volume`
|
||||
`write_bytes_total` | Rate at which bytes are transferred to the disk during write operations | counter | `volume`
|
||||
`writes_total` | Rate of write operations on the disk | counter | `volume`
|
||||
`read_seconds_total` | Seconds the disk was busy servicing read requests | counter | `volume`
|
||||
`write_seconds_total` | Seconds the disk was busy servicing write requests | counter | `volume`
|
||||
`free_bytes` | Unused space of the disk in bytes | gauge | `volume`
|
||||
`size_bytes` | Total size of the disk in bytes | gauge | `volume`
|
||||
`idle_seconds_total` | Seconds the disk was idle (not servicing read/write requests) | counter | `volume`
|
||||
`split_ios_total` | Number of I/Os to the disk split into multiple I/Os | counter | `volume`
|
||||
|
||||
### Example metric
|
||||
Query the rate at which bytes are read from a disk
|
||||
```
|
||||
rate(wmi_logical_disk_read_bytes_total{instance="localhost", volume=~"C:"}[2m])
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
Calculate rate of total IOPS for disk
|
||||
```
|
||||
rate(wmi_logical_disk_reads_total{instance="localhost", volume="C:"}[2m]) + rate(wmi_logical_disk_writes_total{instance="localhost", volume="C:"}[2m])
|
||||
```
|
||||
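Calculate the percentage of used space per volume (the same expression the DiskSpaceUsage alert below is based on)
```
100.0 - 100 * (wmi_logical_disk_free_bytes / wmi_logical_disk_size_bytes)
```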
|
||||
## Alerting examples
|
||||
**prometheus.rules**
|
||||
```yaml
|
||||
groups:
|
||||
- name: Windows Disk Alerts
|
||||
rules:
|
||||
|
||||
# Sends an alert when disk space usage is above 95%
|
||||
- alert: DiskSpaceUsage
|
||||
expr: 100.0 - 100 * (wmi_logical_disk_free_bytes / wmi_logical_disk_size_bytes) > 95
|
||||
for: 10m
|
||||
labels:
|
||||
severity: high
|
||||
annotations:
|
||||
summary: "Disk Space Usage (instance {{ $labels.instance }})"
|
||||
description: "Disk Space on Drive is used more than 95%\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
|
||||
|
||||
# Alerts on disks with over 85% space usage predicted to fill within the next four days
|
||||
- alert: DiskFilling
|
||||
expr: 100 * (wmi_logical_disk_free_bytes / wmi_logical_disk_size_bytes) < 15 and predict_linear(wmi_logical_disk_free_bytes[6h], 4 * 24 * 3600) < 0
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Disk full in four days (instance {{ $labels.instance }})"
|
||||
description: "{{ $labels.volume }} is expected to fill up within four days. Currently {{ $value | humanize }}% is available.\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
|
||||
```
|
||||
34
docs/collector.logon.md
Normal file
@@ -0,0 +1,34 @@
|
||||
# logon collector
|
||||
|
||||
The logon collector exposes metrics detailing the active user logon sessions.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `logon`
|
||||
Classes | [`Win32_LogonSession`](https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-logonsession)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_logon_logon_type` | Number of active user logon sessions | gauge | `status`
|
||||
|
||||
### Example metric
|
||||
Query the total number of interactive logon sessions
|
||||
```
|
||||
wmi_logon_logon_type{status="interactive"}
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
Query the total number of local and remote (i.e. Terminal Services) interactive sessions.
|
||||
```
|
||||
wmi_logon_logon_type{status=~"interactive|remoteinteractive"}
|
||||
```
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
62
docs/collector.memory.md
Normal file
@@ -0,0 +1,62 @@
|
||||
# memory collector
|
||||
|
||||
The memory collector exposes metrics about system memory usage
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `memory`
|
||||
Data source | Perflib
|
||||
Classes | `Win32_PerfRawData_PerfOS_Memory`
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_cs_logical_processors` | Number of installed logical processors | gauge | None
|
||||
`wmi_cs_physical_memory_bytes` | Total installed physical memory | gauge | None
|
||||
`wmi_memory_available_bytes` | The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to the standby (cached), free and zero page lists | gauge | None
|
||||
`wmi_memory_cache_bytes` | Number of bytes currently being used by the file system cache | gauge | None
|
||||
`wmi_memory_cache_bytes_peak` | Maximum number of CacheBytes after the system was last restarted | gauge | None
|
||||
`wmi_memory_cache_faults_total` | Number of faults which occur when a page sought in the file system cache is not found there and must be retrieved from elsewhere in memory (soft fault) or from disk (hard fault) | gauge | None
|
||||
`wmi_memory_commit_limit` | Amount of virtual memory, in bytes, that can be committed without having to extend the paging file(s) | gauge | None
|
||||
`wmi_memory_committed_bytes` | Amount of committed virtual memory, in bytes | gauge | None
|
||||
`wmi_memory_demand_zero_faults_total` | The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space | gauge | None
|
||||
`wmi_memory_free_and_zero_page_list_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_free_system_page_table_entries` | Number of page table entries not being used by the system | gauge | None
|
||||
`wmi_memory_modified_page_list_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_page_faults_total` | Overall rate at which faulted pages are handled by the processor | gauge | None
|
||||
`wmi_memory_swap_page_reads_total` | Number of disk page reads (a single read operation reading several pages is still only counted once) | gauge | None
|
||||
`wmi_memory_swap_pages_read_total` | Number of pages read across all page reads (i.e. counting all pages read even if they are read in a single operation) | gauge | None
|
||||
`wmi_memory_swap_pages_written_total` | Number of pages written across all page writes (i.e. counting all pages written even if they are written in a single operation) | gauge | None
|
||||
`wmi_memory_swap_page_operations_total` | Total number of swap page read and writes (PagesPersec) | gauge | None
|
||||
`wmi_memory_swap_page_writes_total` | Number of disk page writes (a single write operation writing several pages is still only counted once) | gauge | None
|
||||
`wmi_memory_pool_nonpaged_allocs_total` | The number of calls to allocate space in the nonpaged pool. The nonpaged pool is an area of system memory area for objects that cannot be written to disk, and must remain in physical memory as long as they are allocated | gauge | None
|
||||
`wmi_memory_pool_nonpaged_bytes_total` | Number of bytes in the non-paged pool | gauge | None
|
||||
`wmi_memory_pool_paged_allocs_total` | Number of calls to allocate space in the paged pool, regardless of the amount of space allocated in each call | gauge | None
|
||||
`wmi_memory_pool_paged_bytes` | Number of bytes in the paged pool | gauge | None
|
||||
`wmi_memory_pool_paged_resident_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_standby_cache_core_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_standby_cache_normal_priority_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_standby_cache_reserve_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_system_cache_resident_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_system_code_resident_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_system_code_total_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_system_driver_resident_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_system_driver_total_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_transition_faults_total` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_transition_pages_repurposed_total` | _Not yet documented_ | gauge | None
|
||||
`wmi_memory_write_copies_total` | The number of page faults caused by attempting to write that were satisfied by copying the page from elsewhere in physical memory | gauge | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
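As a starting point, the rate of hard page faults (disk page reads) over five minutes could be tracked like this; a sketch that assumes the underlying perflib value increases monotonically even though it is listed as a gauge above:

```
rate(wmi_memory_swap_page_reads_total{instance="localhost"}[5m])
```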
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
33
docs/collector.msmq.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# msmq collector
|
||||
|
||||
The msmq collector exposes metrics about the queues on a MSMQ server
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `msmq`
|
||||
Classes | `Win32_PerfRawData_MSMQ_MSMQQueue`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector.msmq.msmq-where`
|
||||
|
||||
A WMI filter on which queues to include. `%` is a wildcard, and can be used to match on substrings.
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_msmq_bytes_in_journal_queue` | Size of queue journal in bytes | gauge | `name`
|
||||
`wmi_msmq_bytes_in_queue` | Size of queue in bytes | gauge | `name`
|
||||
`wmi_msmq_messages_in_journal_queue` | Count messages in queue journal | gauge | `name`
|
||||
`wmi_msmq_messages_in_queue` | Count messages in queue | gauge | `name`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
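In the meantime, a simple starting point is to list the deepest queues on a host (a sketch; the `instance` label value is an assumption):

```
topk(10, wmi_msmq_messages_in_queue{instance="localhost"})
```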
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
275
docs/collector.mssql.md
Normal file
@@ -0,0 +1,275 @@
|
||||
# mssql collector
|
||||
|
||||
The mssql collector exposes metrics about the MSSQL server
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `mssql`
|
||||
Classes | [`Win32_PerfRawData_MSSQLSERVER_SQLServerAccessMethods`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerAvailabilityReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-availability-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerBufferManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabaseReplica`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-database-replica)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerDatabases`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerGeneralStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerLocks`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerMemoryManager`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerSQLStatistics`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerSQLErrors`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object)<br/>[`Win32_PerfRawData_MSSQLSERVER_SQLServerTransactions`](https://docs.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-transactions-object)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collectors.mssql.classes-enabled`
|
||||
|
||||
Comma-separated list of MSSQL WMI classes to use. Supported values are `accessmethods`, `availreplica`, `bufman`, `databases`, `dbreplica`, `genstats`, `locks`, `memmgr`, `sqlstats`, `sqlerrors` and `transactions`.
|
||||
|
||||
### `--collectors.mssql.class-print`
|
||||
|
||||
If true, print available mssql WMI classes and exit. Only displays if the mssql collector is enabled.
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_mssql_collector_duration_seconds` | The time taken for each sub-collector to return | counter | `collector`, `instance`
|
||||
`wmi_mssql_collector_success` | 1 if sub-collector succeeded, 0 otherwise | counter | `collector`, `instance`
|
||||
`wmi_mssql_accessmethods_au_batch_cleanups` | The total number of batches that were completed successfully by the background task that cleans up deferred dropped allocation units | counter | `instance`
|
||||
`wmi_mssql_accessmethods_au_cleanups` | The total number of allocation units that were successfully dropped by the background task that cleans up deferred dropped allocation units. Each allocation unit drop requires multiple batches | counter | `instance`
|
||||
`wmi_mssql_accessmethods_by_reference_lob_creates` | The total count of large object (lob) values that were passed by reference. By-reference lobs are used in certain bulk operations to avoid the cost of passing them by value | counter | `instance`
|
||||
`wmi_mssql_accessmethods_by_reference_lob_uses` | The total count of by-reference lob values that were used. By-reference lobs are used in certain bulk operations to avoid the cost of passing them by-value | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_read_aheads` | The total count of lob pages on which readahead was issued | counter | `instance`
|
||||
`wmi_mssql_accessmethods_column_value_pulls` | The total count of column values that were pulled in-row from off-row | counter | `instance`
|
||||
`wmi_mssql_accessmethods_column_value_pushes` | The total count of column values that were pushed from in-row to off-row | counter | `instance`
|
||||
`wmi_mssql_accessmethods_deferred_dropped_aus` | The total number of allocation units waiting to be dropped by the background task that cleans up deferred dropped allocation units | counter | `instance`
|
||||
`wmi_mssql_accessmethods_deferred_dropped_rowsets` | The number of rowsets created as a result of aborted online index build operations that are waiting to be dropped by the background task that cleans up deferred dropped rowsets | counter | `instance`
|
||||
`wmi_mssql_accessmethods_dropped_rowset_cleanups` | The number of rowsets per second created as a result of aborted online index build operations that were successfully dropped by the background task that cleans up deferred dropped rowsets | counter | `instance`
|
||||
`wmi_mssql_accessmethods_dropped_rowset_skips` | The number of rowsets per second created as a result of aborted online index build operations that were skipped by the background task that cleans up deferred dropped rowsets created | counter | `instance`
|
||||
`wmi_mssql_accessmethods_extent_deallocations` | Number of extents deallocated per second in all databases in this instance of SQL Server | counter | `instance`
|
||||
`wmi_mssql_accessmethods_extent_allocations` | Number of extents allocated per second in all databases in this instance of SQL Server | counter | `instance`
|
||||
`wmi_mssql_accessmethods_au_batch_cleanup_failures` | The number of batches per second that failed and required retry, by the background task that cleans up deferred dropped allocation units. Failure could be due to lack of memory or disk space, hardware failure and other reasons | counter | `instance`
|
||||
`wmi_mssql_accessmethods_leaf_page_cookie_failures` | The number of times that a leaf page cookie could not be used during an index search since changes happened on the leaf page. The cookie is used to speed up index search | counter | `instance`
|
||||
`wmi_mssql_accessmethods_tree_page_cookie_failures` | The number of times that a tree page cookie could not be used during an index search since changes happened on the parent pages of those tree pages. The cookie is used to speed up index search | counter | `instance`
|
||||
`wmi_mssql_accessmethods_forwarded_records` | Number of records per second fetched through forwarded record pointers | counter | `instance`
|
||||
`wmi_mssql_accessmethods_free_space_page_fetches` | Number of pages fetched per second by free space scans. These scans search for free space within pages already allocated to an allocation unit, to satisfy requests to insert or modify record fragments | counter | `instance`
|
||||
`wmi_mssql_accessmethods_free_space_scans` | Number of scans per second that were initiated to search for free space within pages already allocated to an allocation unit to insert or modify record fragment. Each scan may find multiple pages | counter | `instance`
|
||||
`wmi_mssql_accessmethods_full_scans` | Number of unrestricted full scans per second. These can be either base-table or full-index scans | counter | `instance`
|
||||
`wmi_mssql_accessmethods_index_searches` | Number of index searches per second. These are used to start a range scan, reposition a range scan, revalidate a scan point, fetch a single index record, and search down the index to locate where to insert a new row | counter | `instance`
|
||||
`wmi_mssql_accessmethods_insysxact_waits` | Number of times a reader needs to wait for a page because the InSysXact bit is set | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_handle_creates` | Count of temporary lobs created | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_handle_destroys` | Count of temporary lobs destroyed | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_creates` | Count of LOB Storage Service Providers (LobSSP) created. One worktable created per LobSSP | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_destroys` | Count of LobSSP destroyed | counter | `instance`
|
||||
`wmi_mssql_accessmethods_lob_ss_provider_truncations` | Count of LobSSP truncated | counter | `instance`
|
||||
`wmi_mssql_accessmethods_mixed_page_allocations` | Number of pages allocated per second from mixed extents. These could be used for storing the IAM pages and the first eight pages that are allocated to an allocation unit | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_compression_attempts` | Number of pages evaluated for page-level compression. Includes pages that were not compressed because significant savings could not be achieved. Includes all objects in the instance of SQL Server | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_deallocations` | Number of pages deallocated per second in all databases in this instance of SQL Server. These include pages from mixed extents and uniform extents | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_allocations` | Number of pages allocated per second in all databases in this instance of SQL Server. These include pages allocations from both mixed extents and uniform extents | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_compressions` | Number of data pages that are compressed by using PAGE compression. Includes all objects in the instance of SQL Server | counter | `instance`
|
||||
`wmi_mssql_accessmethods_page_splits` | Number of page splits per second that occur as the result of overflowing index pages | counter | `instance`
|
||||
`wmi_mssql_accessmethods_probe_scans` | Number of probe scans per second that are used to find at most one single qualified row in an index or base table directly | counter | `instance`
|
||||
`wmi_mssql_accessmethods_range_scans` | Number of qualified range scans through indexes per second | counter | `instance`
|
||||
`wmi_mssql_accessmethods_scan_point_revalidations` | Number of times per second that the scan point had to be revalidated to continue the scan | counter | `instance`
|
||||
`wmi_mssql_accessmethods_ghost_record_skips` | Number of ghosted records per second skipped during scans | counter | `instance`
|
||||
`wmi_mssql_accessmethods_table_lock_escalations` | Number of times locks on a table were escalated to the TABLE or HoBT granularity | counter | `instance`
|
||||
`wmi_mssql_accessmethods_leaf_page_cookie_uses` | Number of times a leaf page cookie is used successfully during an index search since no change happened on the leaf page. The cookie is used to speed up index search | counter | `instance`
|
||||
`wmi_mssql_accessmethods_tree_page_cookie_uses` | Number of times a tree page cookie is used successfully during an index search since no change happened on the parent page of the tree page. The cookie is used to speed up index search | counter | `instance`
|
||||
`wmi_mssql_accessmethods_workfile_creates` | Number of work files created per second. For example, work files could be used to store temporary results for hash joins and hash aggregates | counter | `instance`
|
||||
`wmi_mssql_accessmethods_worktables_creates` | Number of work tables created per second. For example, work tables could be used to store temporary results for query spool, lob variables, XML variables, and cursors | counter | `instance`
|
||||
`wmi_mssql_accessmethods_worktables_from_cache_ratio` | Percentage of work tables created where the initial two pages of the work table were not allocated but were immediately available from the work table cache | counter | `instance`
|
||||
`wmi_mssql_availreplica_received_from_replica_bytes` | Number of bytes received from the availability replica per second. Pings and status updates will generate network traffic even on databases with no user updates | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sent_to_replica_bytes` | Number of bytes sent to the remote availability replica per second. On the primary replica this is the number of bytes sent to the secondary replica. On the secondary replica this is the number of bytes sent to the primary replica | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sent_to_transport_bytes` | Actual number of bytes sent per second over the network to the remote availability replica. On the primary replica this is the number of bytes sent to the secondary replica. On the secondary replica this is the number of bytes sent to the primary replica | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_initiated_flow_controls` | Number of times flow-control was initiated in the last second. Flow Control Time (ms/sec) divided by Flow Control/sec is the average time per wait | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_flow_control_wait_seconds` | Time in milliseconds that log stream messages waited for send flow control, in the last second | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_receives_from_replica` | Number of Always On messages received from the replica per second | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_resent_messages` | Number of Always On messages resent in the last second | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sends_to_replica` | Number of Always On messages sent to this availability replica per second | counter | `instance`, `replica`
|
||||
`wmi_mssql_availreplica_sends_to_transport` | Actual number of Always On messages sent per second over the network to the remote availability replica | counter | `instance`, `replica`
|
||||
`wmi_mssql_bufman_background_writer_pages` | Number of pages flushed to enforce the recovery interval settings | counter | `instance`
|
||||
`wmi_mssql_bufman_buffer_cache_hit_ratio` | Indicates the percentage of pages found in the buffer cache without having to read from disk. The ratio is the total number of cache hits divided by the total number of cache lookups over the last few thousand page accesses | counter | `instance`
|
||||
`wmi_mssql_bufman_checkpoint_pages` | Indicates the number of pages flushed to disk per second by a checkpoint or other operation that require all dirty pages to be flushed | counter | `instance`
|
||||
`wmi_mssql_bufman_database_pages` | Indicates the number of pages in the buffer pool with database content | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_allocated_pages` | Total number of non-free cache pages in the buffer pool extension file | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_free_pages` | Total number of free cache pages in the buffer pool extension file | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_in_use_as_percentage` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_outstanding_io` | Percentage of the buffer pool extension paging file occupied by buffer manager pages | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_evictions` | Number of pages evicted from the buffer pool extension file per second | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_reads` | Number of pages read from the buffer pool extension file per second | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_unreferenced_seconds` | Average seconds a page will stay in the buffer pool extension without references to it | counter | `instance`
|
||||
`wmi_mssql_bufman_extension_page_writes` | Number of pages written to the buffer pool extension file per second | counter | `instance`
|
||||
`wmi_mssql_bufman_free_list_stalls` | Indicates the number of requests per second that had to wait for a free page | counter | `instance`
|
||||
`wmi_mssql_bufman_integral_controller_slope` | The slope that the integral controller for the buffer pool last used, times -10 billion | counter | `instance`
|
||||
`wmi_mssql_bufman_lazywrites` | Indicates the number of buffers written per second by the buffer manager's lazy writer | counter | `instance`
|
||||
`wmi_mssql_bufman_page_life_expectancy_seconds` | Indicates the number of seconds a page will stay in the buffer pool without references | counter | `instance`
|
||||
`wmi_mssql_bufman_page_lookups` | Indicates the number of requests per second to find a page in the buffer pool | counter | `instance`
|
||||
`wmi_mssql_bufman_page_reads` | Indicates the number of physical database page reads that are issued per second | counter | `instance`
|
||||
`wmi_mssql_bufman_page_writes` | Indicates the number of physical database page writes that are issued per second | counter | `instance`
|
||||
`wmi_mssql_bufman_read_ahead_pages` | Indicates the number of pages read per second in anticipation of use | counter | `instance`
|
||||
`wmi_mssql_bufman_read_ahead_issuing_seconds` | Time (microseconds) spent issuing readahead | counter | `instance`
|
||||
`wmi_mssql_bufman_target_pages` | Ideal number of pages in the buffer pool | counter | `instance`
|
||||
`wmi_mssql_dbreplica_database_flow_control_wait_seconds` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_database_initiated_flow_controls` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_received_file_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_group_commits` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_group_commit_stall_seconds` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_apply_pending_queue` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_apply_ready_queue` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_compressed_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_decompressed_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_received_bytes` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_compression_cachehits` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_compression_cachemisses` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_compressions` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_decompressions` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_remaining_for_undo` | The amount of log, in bytes, remaining to complete the undo phase | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_log_send_queue` | Amount of log records in the log files of the primary database, in kilobytes, that haven't been sent to the secondary replica | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_mirrored_write_transactions` | Number of transactions that were written to the primary database and then waited to commit until the log was sent to the secondary database, in the last second | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_recovery_queue_records` | Amount of log records in the log files of the secondary replica that have not been redone | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redo_blocks` | Number of times the redo thread was blocked on locks held by readers of the database | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redo_remaining_bytes` | The amount of log, in kilobytes, remaining to be redone to finish the reverting phase | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redone_bytes` | Amount of log records redone on the secondary database in the last second | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_redones` | _Not yet documented_ | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_total_log_requiring_undo` | Total kilobytes of log that must be undone | counter | `instance`, `replica`
|
||||
`wmi_mssql_dbreplica_transaction_delay_seconds` | Delay in waiting for unterminated commit acknowledgment for all the current transactions | counter | `instance`, `replica`
|
||||
`wmi_mssql_databases_active_transactions` | Number of active transactions for the database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_backup_restore_operations` | Read/write throughput for backup and restore operations of a database per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_bulk_copy_rows` | Number of rows bulk copied per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_bulk_copy_bytes` | Amount of data bulk copied (in kilobytes) per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_commit_table_entries` | The size (row count) of the in-memory portion of the commit table for the database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_data_files_size_bytes` | Cumulative size (in kilobytes) of all the data files in the database including any automatic growth. Monitoring this counter is useful, for example, for determining the correct size of tempdb | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_dbcc_logical_scan_bytes` | Number of logical read scan bytes per second for database console commands (DBCC) | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_group_commit_stall_seconds` | Group stall time (microseconds) per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flushed_bytes` | Total number of log bytes flushed | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_cache_hit_ratio` | Percentage of log cache reads satisfied from the log cache | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_cache_reads` | Reads performed per second through the log manager cache | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_files_size_bytes` | Cumulative size (in kilobytes) of all the transaction log files in the database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_files_used_size_bytes` | The cumulative used size of all the log files in the database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flushes` | Number of log flushes per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_waits` | Number of commits per second waiting for the log flush | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_wait_seconds` | Total wait time (in milliseconds) to flush the log. On an Always On secondary database, this value indicates the wait time for log records to be hardened to disk | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_flush_write_seconds` | Time in milliseconds for performing writes of log flushes that were completed in the last second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_growths` | Total number of times the transaction log for the database has been expanded | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_cache_misses` | Number of requests for which the log block was not available in the log pool | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_disk_reads` | Number of disk reads that the log pool issued to fetch log blocks | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_hash_deletes` | Rate of raw hash entry deletes from the Log Pool | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_hash_inserts` | Rate of raw hash entry inserts into the Log Pool | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_invalid_hash_entries` | Rate of hash lookups failing due to being invalid | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_log_scan_pushes` | Rate of Log block pushes by log scans, which may come from disk or memory | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_log_writer_pushes` | Rate of Log block pushes by log writer thread | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_empty_free_pool_pushes` | Rate of Log block push fails due to empty free pool | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_low_memory_pushes` | Rate of Log block push fails due to being low on memory | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_no_free_buffer_pushes` | Rate of Log block push fails due to free buffer unavailable | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_req_behind_trunc` | Log pool cache misses due to block requested being behind truncation LSN | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_requests_old_vlf` | Log Pool requests that were not in the last VLF of the log | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_requests` | The number of log-block requests processed by the log pool | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_total_active_log_bytes` | Current total active log stored in the shared cache buffer manager in bytes | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_pool_total_shared_pool_bytes` | Current total memory usage of the shared cache buffer manager in bytes | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_shrinks` | Total number of log shrinks for this database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_truncations` | The number of times the transaction log has been truncated (in Simple Recovery Model) | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_log_used_percent` | Percentage of space in the log that is in use | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_pending_repl_transactions` | Number of transactions in the transaction log of the publication database marked for replication, but not yet delivered to the distribution database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_repl_transactions` | Number of transactions per second read out of the transaction log of the publication database and delivered to the distribution database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_shrink_data_movement_bytes` | Amount of data being moved per second by autoshrink operations, or DBCC SHRINKDATABASE or DBCC SHRINKFILE statements | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_tracked_transactions` | Number of committed transactions recorded in the commit table for the database | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_transactions` | Number of transactions started for the database per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_write_transactions` | Number of transactions that wrote to the database and committed, in the last second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_dlc_fetch_latency_seconds` | Average latency in microseconds between log blocks entering the Direct Log Consumer and being retrieved by the XTP controller, per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_dlc_peak_latency_seconds` | The largest recorded latency, in microseconds, of a fetch from the Direct Log Consumer by the XTP controller | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_controller_log_processed_bytes` | The amount of log bytes processed by the XTP controller thread, per second | counter | `instance`, `database`
|
||||
`wmi_mssql_databases_xtp_memory_used_bytes` | The amount of memory used by XTP in the database | counter | `instance`, `database`
|
||||
`wmi_mssql_genstats_active_temp_tables` | Number of temporary tables/table variables in use | counter | `instance`
|
||||
`wmi_mssql_genstats_connection_resets` | Total number of logins started from the connection pool | counter | `instance`
|
||||
`wmi_mssql_genstats_event_notifications_delayed_drop` | Number of event notifications waiting to be dropped by a system thread | counter | `instance`
|
||||
`wmi_mssql_genstats_http_authenticated_requests` | Number of authenticated HTTP requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_logical_connections` | Number of logical connections to the system | counter | `instance`
|
||||
`wmi_mssql_genstats_logins` | Total number of logins started per second. This does not include pooled connections | counter | `instance`
|
||||
`wmi_mssql_genstats_logouts` | Total number of logout operations started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_mars_deadlocks` | Number of MARS deadlocks detected | counter | `instance`
|
||||
`wmi_mssql_genstats_non_atomic_yields` | Number of non-atomic yields per second | counter | `instance`
|
||||
`wmi_mssql_genstats_blocked_processes` | Number of currently blocked processes | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_empty_requests` | Number of empty SOAP requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_method_invocations` | Number of SOAP method invocations started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_session_initiate_requests` | Number of SOAP Session initiate requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_soap_session_terminate_requests` | Number of SOAP Session terminate requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_soapsql_requests` | Number of SOAP SQL requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_soapwsdl_requests` | Number of SOAP Web Service Description Language requests started per second | counter | `instance`
|
||||
`wmi_mssql_genstats_sql_trace_io_provider_lock_waits` | Number of waits for the File IO Provider lock per second | counter | `instance`
|
||||
`wmi_mssql_genstats_tempdb_recovery_unit_ids_generated` | Number of duplicate tempdb recovery unit id generated | counter | `instance`
|
||||
`wmi_mssql_genstats_tempdb_rowset_ids_generated` | Number of duplicate tempdb rowset id generated | counter | `instance`
|
||||
`wmi_mssql_genstats_temp_tables_creations` | Number of temporary tables/table variables created per second | counter | `instance`
|
||||
`wmi_mssql_genstats_temp_tables_awaiting_destruction` | Number of temporary tables/table variables waiting to be destroyed by the cleanup system thread | counter | `instance`
|
||||
`wmi_mssql_genstats_trace_event_notification_queue_size` | Number of trace event notification instances waiting in the internal queue to be sent through Service Broker | counter | `instance`
|
||||
`wmi_mssql_genstats_transactions` | Number of transaction enlistments (local, DTC, bound all combined) | counter | `instance`
|
||||
`wmi_mssql_genstats_user_connections` | Counts the number of users currently connected to SQL Server | counter | `instance`
|
||||
`wmi_mssql_locks_average_wait_seconds` | Average amount of wait time (in milliseconds) for each lock request that resulted in a wait | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_requests` | Number of new locks and lock conversions per second requested from the lock manager | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_timeouts` | Number of lock requests per second that timed out, including requests for NOWAIT locks | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_timeouts_excluding_NOWAIT` | Number of lock requests per second that timed out, excluding requests for NOWAIT locks | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_waits` | Number of lock requests per second that required the caller to wait | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_lock_wait_seconds` | Total wait time (in milliseconds) for locks in the last second | counter | `instance`, `resource`
|
||||
`wmi_mssql_locks_deadlocks` | Number of lock requests per second that resulted in a deadlock | counter | `instance`, `resource`
|
||||
`wmi_mssql_memmgr_connection_memory_bytes` | Specifies the total amount of dynamic memory the server is using for maintaining connections | counter | `instance`
|
||||
`wmi_mssql_memmgr_database_cache_memory_bytes` | Specifies the amount of memory the server is currently using for the database pages cache | counter | `instance`
|
||||
`wmi_mssql_memmgr_external_benefit_of_memory` | An internal estimation of the performance benefit from adding memory to a specific cache | counter | `instance`
|
||||
`wmi_mssql_memmgr_free_memory_bytes` | Specifies the amount of committed memory currently not used by the server | counter | `instance`
|
||||
`wmi_mssql_memmgr_granted_workspace_memory_bytes` | Specifies the total amount of memory currently granted to executing processes, such as hash, sort, bulk copy, and index creation operations | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_blocks` | Specifies the current number of lock blocks in use on the server (refreshed periodically). A lock block represents an individual locked resource, such as a table, page, or row | counter | `instance`
|
||||
`wmi_mssql_memmgr_allocated_lock_blocks` | Specifies the current number of allocated lock blocks. At server startup, the number of allocated lock blocks plus the number of allocated lock owner blocks depends on the SQL Server Locks configuration option. If more lock blocks are needed, the value increases | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_memory_bytes` | Specifies the total amount of dynamic memory the server is using for locks | counter | `instance`
|
||||
`wmi_mssql_memmgr_lock_owner_blocks` | Specifies the current number of allocated lock owner blocks. At server startup, the number of allocated lock owner blocks and the number of allocated lock blocks depend on the SQL Server Locks configuration option. If more lock owner blocks are needed, the value increases dynamically | counter | `instance`
|
||||
`wmi_mssql_memmgr_allocated_lock_owner_blocks` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_memmgr_log_pool_memory_bytes` | Total amount of dynamic memory the server is using for Log Pool | counter | `instance`
|
||||
`wmi_mssql_memmgr_maximum_workspace_memory_bytes` | Indicates the maximum amount of memory available for executing processes, such as hash, sort, bulk copy, and index creation operations | counter | `instance`
|
||||
`wmi_mssql_memmgr_outstanding_memory_grants` | Specifies the total number of processes that have successfully acquired a workspace memory grant | counter | `instance`
|
||||
`wmi_mssql_memmgr_pending_memory_grants` | Specifies the total number of processes waiting for a workspace memory grant | counter | `instance`
|
||||
`wmi_mssql_memmgr_optimizer_memory_bytes` | Specifies the total amount of dynamic memory the server is using for query optimization | counter | `instance`
|
||||
`wmi_mssql_memmgr_reserved_server_memory_bytes` | Indicates the amount of memory the server has reserved for future usage. This counter shows the current unused amount of memory initially granted that is shown in Granted Workspace Memory | counter | `instance`
|
||||
`wmi_mssql_memmgr_sql_cache_memory_bytes` | Specifies the total amount of dynamic memory the server is using for the dynamic SQL cache | counter | `instance`
|
||||
`wmi_mssql_memmgr_stolen_server_memory_bytes` | Specifies the amount of memory the server is using for purposes other than database pages | counter | `instance`
|
||||
`wmi_mssql_memmgr_target_server_memory_bytes` | Indicates the ideal amount of memory the server can consume | counter | `instance`
|
||||
`wmi_mssql_memmgr_total_server_memory_bytes` | Specifies the amount of memory the server has committed using the memory manager | counter | `instance`
|
||||
`wmi_mssql_sqlstats_auto_parameterization_attempts` | Number of auto-parameterization attempts per second. Note that auto-parameterizations are also known as simple parameterizations in later versions of SQL Server | counter | `instance`
|
||||
`wmi_mssql_sqlstats_batch_requests` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_failed_auto_parameterization_attempts` | _Not yet documented_ | counter | `instance`
|
||||
`wmi_mssql_sqlstats_forced_parameterizations` | Number of successful forced parameterizations per second | counter | `instance`
|
||||
`wmi_mssql_sqlstats_guided_plan_executions` | Number of plan executions per second in which the query plan has been generated by using a plan guide | counter | `instance`
|
||||
`wmi_mssql_sqlstats_misguided_plan_executions` | Number of plan executions per second in which a plan guide could not be honored during plan generation | counter | `instance`
|
||||
`wmi_mssql_sqlstats_safe_auto_parameterization_attempts` | Number of safe auto-parameterization attempts per second | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_attentions` | Number of attentions per second | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_compilations` | Number of SQL compilations per second | counter | `instance`
|
||||
`wmi_mssql_sqlstats_sql_recompilations` | Number of statement recompiles per second | counter | `instance`
|
||||
`wmi_mssql_sqlstats_unsafe_auto_parameterization_attempts` | Number of unsafe auto-parameterization attempts per second. | counter | `instance`
|
||||
`wmi_mssql_sql_errors_total` | Information for all errors | counter | `instance`, `resource`
|
||||
`wmi_mssql_transactions_tempdb_free_space_bytes` | The amount of space (in kilobytes) available in tempdb | gauge | `instance`
|
||||
`wmi_mssql_transactions_longest_transaction_running_seconds` | The length of time (in seconds) since the start of the transaction that has been active longer than any other current transaction | gauge | `instance`
|
||||
`wmi_mssql_transactions_nonsnapshot_version_active_total` | The number of currently active transactions that are not using snapshot isolation level and have made data modifications that have generated row versions in the tempdb version store | counter | `instance`
|
||||
`wmi_mssql_transactions_snapshot_active_total` | The number of currently active transactions using the snapshot isolation level | counter | `instance`
|
||||
`wmi_mssql_transactions_active_total` | The number of currently active transactions of all types | counter | `instance`
|
||||
`wmi_mssql_transactions_update_conflicts_total` | The percentage of those transactions using the snapshot isolation level that have encountered update conflicts within the last second | counter | `instance`
|
||||
`wmi_mssql_transactions_update_snapshot_active_total` | The number of currently active transactions using the snapshot isolation level and have modified data | counter | `instance`
|
||||
`wmi_mssql_transactions_version_cleanup_rate_bytes` | The rate (in kilobytes per second) at which row versions are removed from the snapshot isolation version store in tempdb | gauge | `instance`
|
||||
`wmi_mssql_transactions_version_generation_rate_bytes` | The rate (in kilobytes per second) at which new row versions are added to the snapshot isolation version store in tempdb | gauge | `instance`
|
||||
`wmi_mssql_transactions_version_store_size_bytes` | The amount of space (in kilobytes) in tempdb being used to store snapshot isolation level row versions | gauge | `instance`
|
||||
`wmi_mssql_transactions_version_store_units` | The number of active allocation units in the snapshot isolation version store in tempdb | counter | `instance`
|
||||
`wmi_mssql_transactions_version_store_creation_units` | The number of allocation units that have been created in the snapshot isolation store since the instance of the Database Engine was started | counter | `instance`
|
||||
`wmi_mssql_transactions_version_store_truncation_units` | The number of allocation units that have been removed from the snapshot isolation store since the instance of the Database Engine was started | counter | `instance`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
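In the meantime, batch requests per second for the default instance can be derived as in this sketch (the `exported_instance` value follows the Useful queries example below and is an assumption about your setup):

```
rate(wmi_mssql_sqlstats_batch_requests{exported_instance="MSSQLSERVER"}[5m])
```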
|
||||
|
||||
## Useful queries
|
||||
|
||||
### Buffer Cache Hit Ratio
|
||||
|
||||
When you read the counter in perfmon you will get the percentage of pages found in the buffer cache. This percentage is calculated internally based on the total number of cache hits divided by the total number of cache lookups over the last few thousand page accesses.
|
||||
This collector retrieves the two internal values separately, so the Buffer Cache Hit Ratio can be calculated in PromQL:
|
||||
|
||||
```
|
||||
wmi_mssql_bufman_buffer_cache_hits{instance="host:9182", exported_instance="MSSQLSERVER"} /
|
||||
wmi_mssql_bufman_buffer_cache_lookups{instance="host:9182", exported_instance="MSSQLSERVER"}
|
||||
```
|
||||
|
||||
This principle can be used for the following metrics too (see the sketch after this list):
|
||||
- AccessMethodsWorktablesFromCacheHitRatio
|
||||
- accessmethods_worktables_from_cache_hits
|
||||
- accessmethods_worktables_from_cache_lookups
|
||||
- LogCacheHitRatio
|
||||
- databases_log_cache_hits
|
||||
- databases_log_cache_lookups
|
||||
- AverageLockWaitTime
|
||||
- locks_wait_time_seconds
|
||||
- locks_count
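For example, applying the same pattern to LogCacheHitRatio might look like the following sketch, which assumes the hits/lookups metrics listed above are exported with the `wmi_mssql_databases_` prefix:

```
wmi_mssql_databases_log_cache_hits{exported_instance="MSSQLSERVER"} /
wmi_mssql_databases_log_cache_lookups{exported_instance="MSSQLSERVER"}
```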
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
62
docs/collector.net.md
Normal file
@@ -0,0 +1,62 @@
|
||||
# net collector
|
||||
|
||||
The net collector exposes metrics about network interfaces
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `net`
|
||||
Data source | Perflib
|
||||
Classes | [`Win32_PerfRawData_Tcpip_NetworkInterface`](https://technet.microsoft.com/en-us/security/aa394340(v=vs.80))
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector.net.nic-whitelist`
|
||||
|
||||
If given, an interface name needs to match the whitelist regexp in order for the corresponding metrics to be reported
|
||||
|
||||
### `--collector.net.nic-blacklist`
|
||||
|
||||
If given, an interface name needs to *not* match the blacklist regexp in order for the corresponding metrics to be reported
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_net_bytes_received_total` | Total bytes received by interface | counter | `nic`
|
||||
`wmi_net_bytes_sent_total` | Total bytes transmitted by interface | counter | `nic`
|
||||
`wmi_net_bytes_total` | Total bytes received and transmitted by interface | counter | `nic`
|
||||
`wmi_net_packets_outbound_discarded` | Total outbound packets that were chosen to be discarded even though no errors had been detected to prevent transmission | counter | `nic`
|
||||
`wmi_net_packets_outbound_errors` | Total packets that could not be transmitted due to errors | counter | `nic`
|
||||
`wmi_net_packets_received_discarded` | Total inbound packets that were chosen to be discarded even though no errors had been detected to prevent delivery | counter | `nic`
|
||||
`wmi_net_packets_received_errors` | Total packets that could not be received due to errors | counter | `nic`
|
||||
`wmi_net_packets_received_total` | Total packets received by interface | counter | `nic`
|
||||
`wmi_net_packets_received_unknown` | Total packets received by interface that were discarded because of an unknown or unsupported protocol | counter | `nic`
|
||||
`wmi_net_packets_total` | Total packets received and transmitted by interface | counter | `nic`
|
||||
`wmi_net_packets_sent_total` | Total packets transmitted by interface | counter | `nic`
|
||||
`wmi_net_current_bandwidth` | Estimate of the interface's current bandwidth in bits per second (bps) | gauge | `nic`
|
||||
|
||||
### Example metric
|
||||
Query the rate of transmitted network traffic
|
||||
```
|
||||
rate(wmi_net_bytes_sent_total{instance="localhost"}[2m])
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
Get total utilisation of network interface as a percentage
|
||||
```
|
||||
rate(wmi_net_bytes_total{instance="localhost", nic="Microsoft_Hyper_V_Network_Adapter__1"}[2m]) * 8 / wmi_net_current_bandwidth{instance="localhost", nic="Microsoft_Hyper_V_Network_Adapter__1"} * 100
|
||||
```
|
||||
|
||||
## Alerting examples
|
||||
**prometheus.rules**
|
||||
```yaml
|
||||
- alert: NetInterfaceUsage
|
||||
expr: rate(wmi_net_bytes_total[2m]) * 8 / wmi_net_current_bandwidth * 100 > 95
|
||||
for: 10m
|
||||
labels:
|
||||
severity: high
|
||||
annotations:
|
||||
summary: "Network Interface Usage (instance {{ $labels.instance }})"
|
||||
description: "Network traffic usage is greater than 95% for interface {{ $labels.nic }}\n VALUE = {{ $value }}\n LABELS: {{ $labels }}"
|
||||
```
|
||||
32
docs/collector.netframework_clrexceptions.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# netframework_clrexceptions collector
|
||||
|
||||
The netframework_clrexceptions collector exposes metrics about CLR exceptions in the dotnet framework.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrexceptions`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRExceptions`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_...` | ... | counter/gauge/histogram/summary | ...
|
||||
`wmi_netframework_clrexceptions_exceptions_thrown_total` | Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions. | counter | `process`
|
||||
`wmi_netframework_clrexceptions_exceptions_filters_total` | Displays the total number of .NET exception filters executed. An exception filter evaluates regardless of whether an exception is handled. | counter | `process`
|
||||
`wmi_netframework_clrexceptions_exceptions_finallys_total` | Displays the total number of finally blocks executed. Only the finally blocks executed for an exception are counted; finally blocks on normal code paths are not counted by this counter. | counter | `process`
|
||||
`wmi_netframework_clrexceptions_throw_to_catch_depth_total` | Displays the total number of stack frames traversed, from the frame that threw the exception to the frame that handled the exception. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
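A possible starting point is the rate of exceptions thrown per process over five minutes (a sketch; the `instance` label value is an assumption):

```
rate(wmi_netframework_clrexceptions_exceptions_thrown_total{instance="localhost"}[5m])
```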
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
31
docs/collector.netframework_clrinterop.md
Normal file
@@ -0,0 +1,31 @@
|
||||
# netframework_clrinterop collector
|
||||
|
||||
The netframework_clrinterop collector exposes metrics about interop between the dotnet framework and outside components.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrinterop`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRInterop`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_...` | ... | counter/gauge/histogram/summary | ...
|
||||
`wmi_netframework_clrinterop_com_callable_wrappers_total` | Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client. | counter | `process`
|
||||
`wmi_netframework_clrinterop_interop_marshalling_total` | Displays the total number of times arguments and return values have been marshaled from managed to unmanaged code, and vice versa, since the application started. | counter | `process`
|
||||
`wmi_netframework_clrinterop_interop_stubs_created_total` | Displays the current number of stubs created by the common language runtime. Stubs are responsible for marshaling arguments and return values from managed to unmanaged code, and vice versa, during a COM interop call or a platform invoke call. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
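A possible starting point is the rate of interop marshalling between managed and unmanaged code (a sketch; the `instance` label value is an assumption):

```
rate(wmi_netframework_clrinterop_interop_marshalling_total{instance="localhost"}[5m])
```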
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
32
docs/collector.netframework_clrjit.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# netframework_clrjit collector
|
||||
|
||||
The netframework_clrjit collector exposes metrics about the dotnet Just-in-Time compiler.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrjit`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRJit`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_...` | ... | counter/gauge/histogram/summary | ...
|
||||
`wmi_netframework_clrjit_jit_methods_total` | Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods. | counter | `process`
|
||||
`wmi_netframework_clrjit_jit_time_percent` | Displays the percentage of time spent in JIT compilation. This counter is updated at the end of every JIT compilation phase. A JIT compilation phase occurs when a method and its dependencies are compiled. | gauge | `process`
|
||||
`wmi_netframework_clrjit_jit_standard_failures_total` | Displays the peak number of methods the JIT compiler has failed to compile since the application started. This failure can occur if the MSIL cannot be verified or if there is an internal error in the JIT compiler. | counter | `process`
|
||||
`wmi_netframework_clrjit_jit_il_bytes_total` | Displays the total number of Microsoft intermediate language (MSIL) bytes compiled by the just-in-time (JIT) compiler since the application started | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
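A possible starting point is to surface processes whose JIT compiler has failed to compile methods in the last hour (a sketch; the `instance` label value is an assumption):

```
increase(wmi_netframework_clrjit_jit_standard_failures_total{instance="localhost"}[1h]) > 0
```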
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
37
docs/collector.netframework_clrloading.md
Normal file
@@ -0,0 +1,37 @@
|
||||
# netframework_clrloading collector
|
||||
|
||||
The netframework_clrloading collector exposes metrics about the dotnet loader.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrloading`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRLoading`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_...` | ... | counter/gauge/histogram/summary | ...
|
||||
`wmi_netframework_clrloading_loader_heap_size_bytes` | Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file. | gauge | `process`
|
||||
`wmi_netframework_clrloading_appdomains_loaded_current` | Displays the current number of application domains loaded in this application. | gauge | `process`
|
||||
`wmi_netframework_clrloading_assemblies_loaded_current` | Displays the current number of assemblies loaded across all application domains in the currently running application. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once. | gauge | `process`
|
||||
`wmi_netframework_clrloading_classes_loaded_current` | Displays the current number of classes loaded in all assemblies. | gauge | `process`
|
||||
`wmi_netframework_clrloading_appdomains_loaded_total` | Displays the peak number of application domains loaded since the application started. | counter | `process`
|
||||
`wmi_netframework_clrloading_appdomains_unloaded_total` | Displays the total number of application domains unloaded since the application started. If an application domain is loaded and unloaded multiple times, this counter increments each time the application domain is unloaded. | counter | `process`
|
||||
`wmi_netframework_clrloading_assemblies_loaded_total` | Displays the total number of assemblies loaded since the application started. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once. | counter | `process`
|
||||
`wmi_netframework_clrloading_classes_loaded_total` | Displays the cumulative number of classes loaded in all assemblies since the application started. | counter | `process`
|
||||
`wmi_netframework_clrloading_class_load_failures_total` | Displays the peak number of classes that have failed to load since the application started. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
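A possible starting point is to surface processes that have failed to load classes in the last hour (a sketch; the `instance` label value is an assumption):

```
increase(wmi_netframework_clrloading_class_load_failures_total{instance="localhost"}[1h]) > 0
```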
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
35
docs/collector.netframework_clrlocksandthreads.md
Normal file
@@ -0,0 +1,35 @@
|
||||
# netframework_clrlocksandthreads collector
|
||||
|
||||
The netframework_clrlocksandthreads collector exposes metrics about locks and threads in dotnet applications.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrlocksandthreads`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRLocksAndThreads`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_...` | ... | counter/gauge/histogram/summary | ...
|
||||
`wmi_netframework_clrlocksandthreads_current_queue_length` | Displays the total number of threads that are currently waiting to acquire a managed lock in the application. | gauge | `process`
|
||||
`wmi_netframework_clrlocksandthreads_current_logical_threads` | Displays the number of current managed thread objects in the application. This counter maintains the count of both running and stopped threads. | gauge | `process`
|
||||
`wmi_netframework_clrlocksandthreads_physical_threads_current` | Displays the number of native operating system threads created and owned by the common language runtime to act as underlying threads for managed thread objects. This counter's value does not include the threads used by the runtime in its internal operations; it is a subset of the threads in the operating system process. | gauge | `process`
|
||||
`wmi_netframework_clrlocksandthreads_recognized_threads_current` | Displays the number of threads that are currently recognized by the runtime. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once. | gauge | `process`
|
||||
`wmi_netframework_clrlocksandthreads_recognized_threads_total` | Displays the total number of threads that have been recognized by the runtime since the application started. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once. | counter | `process`
|
||||
`wmi_netframework_clrlocksandthreads_queue_length_total` | Displays the total number of threads that waited to acquire a managed lock since the application started. | counter | `process`
|
||||
`wmi_netframework_clrlocksandthreads_contentions_total` | Displays the total number of times that threads in the runtime have attempted to acquire a managed lock unsuccessfully. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
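In the meantime, a sketch of a query that could surface managed lock contention (the time window is an arbitrary choice):
```
rate(wmi_netframework_clrlocksandthreads_contentions_total[5m])
```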
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
40
docs/collector.netframework_clrmemory.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# netframework_clrmemory collector
|
||||
|
||||
The netframework_clrmemory collector exposes metrics about memory in dotnet applications.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrmemory`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRMemory`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
|
||||
`wmi_netframework_clrmemory_allocated_bytes_total` | Displays the total number of bytes allocated on the garbage collection heap. | counter | `process`
|
||||
`wmi_netframework_clrmemory_finalization_survivors` | Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_heap_size_bytes` | Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_promoted_bytes` | Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_number_gc_handles` | Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_collections_total` | Displays the number of times the generation objects are garbage collected since the application started. | counter | `process`
|
||||
`wmi_netframework_clrmemory_induced_gc_total` | Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect. | counter | `process`
|
||||
`wmi_netframework_clrmemory_number_pinned_objects` | Displays the number of pinned objects encountered in the last garbage collection. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_number_sink_blocksinuse` | Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_committed_bytes` | Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_reserved_bytes` | Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used. | gauge | `process`
|
||||
`wmi_netframework_clrmemory_gc_time_percent` | Displays the percentage of time that was spent performing a garbage collection in the last sample. | gauge | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
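In the meantime, a sketch for tracking allocation pressure on the garbage-collected heap (the time window is an arbitrary choice):
```
rate(wmi_netframework_clrmemory_allocated_bytes_total[5m])
```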
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
33
docs/collector.netframework_clrremoting.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# netframework_clrremoting collector
|
||||
|
||||
The netframework_clrremoting collector exposes metrics about dotnet remoting.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrremoting`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRRemoting`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_netframework_clrremoting_channels_total` | Displays the total number of remoting channels registered across all application domains since application started. | counter | `process`
|
||||
`wmi_netframework_clrremoting_context_bound_classes_loaded` | Displays the current number of context-bound classes that are loaded. | gauge | `process`
|
||||
`wmi_netframework_clrremoting_context_bound_objects_total` | Displays the total number of context-bound objects allocated. | counter | `process`
|
||||
`wmi_netframework_clrremoting_context_proxies_total` | Displays the total number of remoting proxy objects in this process since it started. | counter | `process`
|
||||
`wmi_netframework_clrremoting_contexts` | Displays the current number of remoting contexts in the application. | gauge | `process`
|
||||
`wmi_netframework_clrremoting_remote_calls_total` | Displays the total number of remote procedure calls invoked since the application started. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
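In the meantime, a sketch showing the remote call rate per process (the time window is an arbitrary choice):
```
rate(wmi_netframework_clrremoting_remote_calls_total[5m])
```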
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
31
docs/collector.netframework_clrsecurity.md
Normal file
@@ -0,0 +1,31 @@
|
||||
# netframework_clrsecurity collector
|
||||
|
||||
The netframework_clrsecurity collector exposes metrics about security checks in dotnet applications
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `netframework_clrsecurity`
|
||||
Classes | `Win32_PerfRawData_NETFramework_NETCLRSecurity`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_netframework_clrsecurity_link_time_checks_total` | Displays the total number of link-time code access security checks since the application started. | counter | `process`
|
||||
`wmi_netframework_clrsecurity_rt_checks_time_percent` | Displays the percentage of time spent performing runtime code access security checks in the last sample. | gauge | `process`
|
||||
`wmi_netframework_clrsecurity_stack_walk_depth` | Displays the depth of the stack during that last runtime code access security check. | gauge | `process`
|
||||
`wmi_netframework_clrsecurity_runtime_checks_total` | Displays the total number of runtime code access security checks performed since the application started. | counter | `process`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
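In the meantime, a sketch for the rate of runtime code access security checks (the time window is an arbitrary choice):
```
rate(wmi_netframework_clrsecurity_runtime_checks_total[5m])
```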
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
66
docs/collector.os.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# os collector
|
||||
|
||||
The os collector exposes metrics about the operating system
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `os`
|
||||
Classes | [`Win32_OperatingSystem`](https://msdn.microsoft.com/en-us/library/aa394239)
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_os_info` | Contains full product name & version in labels | gauge | `product`, `version`
|
||||
`wmi_os_paging_limit_bytes` | Total number of bytes that can be stored in the operating system paging files. 0 (zero) indicates that there are no paging files | gauge | None
|
||||
`wmi_os_paging_free_bytes` | Number of bytes that can be mapped into the operating system paging files without causing any other pages to be swapped out | gauge | None
|
||||
`wmi_os_physical_memory_free_bytes` | Bytes of physical memory currently unused and available | gauge | None
|
||||
`wmi_os_time` | Current time as reported by the operating system, in [Unix time](https://en.wikipedia.org/wiki/Unix_time). See [time.Unix()](https://golang.org/pkg/time/#Unix) for details | gauge | None
|
||||
`wmi_os_timezone` | Current timezone as reported by the operating system. See [time.Zone()](https://golang.org/pkg/time/#Time.Zone) for details | gauge | `timezone`
|
||||
`wmi_os_processes` | Number of process contexts currently loaded or running on the operating system | gauge | None
|
||||
`wmi_os_processes_limit` | Maximum number of process contexts the operating system can support. The default value set by the provider is 4294967295 (0xFFFFFFFF) | gauge | None
|
||||
`wmi_os_process_memory_limit_bytes` | Maximum number of bytes of memory that can be allocated to a process | gauge | None
|
||||
`wmi_os_users` | Number of user sessions for which the operating system is storing state information currently. For a list of current active logon sessions, see [`logon`](collector.logon.md) | gauge | None
|
||||
`wmi_os_virtual_memory_bytes` | Bytes of virtual memory | gauge | None
|
||||
`wmi_os_visible_memory_bytes` | Total bytes of physical memory available to the operating system. This value does not necessarily indicate the true amount of physical memory, but what is reported to the operating system as available to it | gauge | None
|
||||
`wmi_os_virtual_memory_free_bytes` | Bytes of virtual memory currently unused and available | gauge | None
|
||||
|
||||
### Example metric
|
||||
Show current number of processes
|
||||
```
|
||||
wmi_os_processes{instance="localhost"}
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
Find all devices not set to UTC timezone
|
||||
```
|
||||
wmi_os_timezone{timezone != "UTC"}
|
||||
```
|
||||
|
||||
## Alerting examples
|
||||
**prometheus.rules**
|
||||
```yaml
|
||||
# Alert on hosts that have exhausted all available physical memory
|
||||
- alert: MemoryExhausted
|
||||
expr: wmi_os_physical_memory_free_bytes == 0
|
||||
for: 10m
|
||||
labels:
|
||||
severity: high
|
||||
annotations:
|
||||
summary: "Host {{ $labels.instance }} is out of memory"
|
||||
description: "{{ $labels.instance }} has exhausted all available physical memory"
|
||||
|
||||
# Alert on hosts with greater than 90% memory usage
|
||||
- alert: MemoryLow
|
||||
expr: 100 - 100 * wmi_os_physical_memory_free_bytes / wmi_cs_physical_memory_bytes > 90
|
||||
for: 10m
|
||||
labels:
|
||||
severity: warning
|
||||
annotations:
|
||||
summary: "Memory usage for host {{ $labels.instance }} is greater than 90%"
|
||||
```
|
||||
46
docs/collector.process.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# process collector
|
||||
|
||||
The process collector exposes metrics about processes
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `process`
|
||||
Classes | [`Win32_PerfRawData_PerfProc_Process`](https://msdn.microsoft.com/en-us/library/aa394323(v=vs.85).aspx)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector.process.processes-where`
|
||||
|
||||
A WMI filter on which processes to include. Using a filter is recommended to keep the number of returned metrics manageable.
|
||||
|
||||
`%` is a wildcard, and can be used to match on substrings.
|
||||
|
||||
Example: `--collector.process.processes-where="Name LIKE 'firefox%'"`
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_process_start_time` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_cpu_time_total` | _Not yet documented_ | counter | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_handle_count` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_io_bytes_total` | _Not yet documented_ | counter | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_io_operations_total` | _Not yet documented_ | counter | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_page_faults_total` | _Not yet documented_ | counter | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_page_file_bytes` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_pool_bytes` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_priority_base` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_private_bytes` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_thread_count` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_virtual_bytes` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
`wmi_process_working_set` | _Not yet documented_ | gauge | `process`, `process_id`, `creating_process_id`
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
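In the meantime, a sketch that lists the five processes consuming the most CPU time (the time window is an arbitrary choice):
```
topk(5, sum by (process) (rate(wmi_process_cpu_time_total[5m])))
```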
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
107
docs/collector.service.md
Normal file
@@ -0,0 +1,107 @@
|
||||
# service collector
|
||||
|
||||
The service collector exposes metrics about Windows Services
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `service`
|
||||
Classes | [`Win32_Service`](https://msdn.microsoft.com/en-us/library/aa394418(v=vs.85).aspx)
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector.service.services-where`
|
||||
|
||||
A WMI filter on which services to include. Using a filter is recommended to keep the number of returned metrics manageable.
|
||||
|
||||
Example: `--collector.service.services-where="Name='wmi_exporter'"`
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_service_state` | The state of the service, 1 if the current state, 0 otherwise | gauge | `name`, `state`
`wmi_service_start_mode` | The start mode of the service, 1 if the current start mode, 0 otherwise | gauge | `name`, `start_mode`
`wmi_service_status` | The status of the service, 1 if the current status, 0 otherwise | gauge | `name`, `status`
|
||||
|
||||
For the values of the `state`, `start_mode` and `status` labels, see below.
|
||||
|
||||
### States
|
||||
|
||||
A service can be in the following states:
|
||||
- `stopped`
|
||||
- `start pending`
|
||||
- `stop pending`
|
||||
- `running`
|
||||
- `continue pending`
|
||||
- `pause pending`
|
||||
- `paused`
|
||||
- `unknown`
|
||||
|
||||
### Start modes
|
||||
|
||||
A service can have the following start modes:
|
||||
- `boot`
|
||||
- `system`
|
||||
- `auto`
|
||||
- `manual`
|
||||
- `disabled`
|
||||
|
||||
### Status
|
||||
|
||||
A service can have any of the following statuses:
|
||||
- `ok`
|
||||
- `error`
|
||||
- `degraded`
|
||||
- `unknown`
|
||||
- `pred fail`
|
||||
- `starting`
|
||||
- `stopping`
|
||||
- `service`
|
||||
- `stressed`
|
||||
- `nonrecover`
|
||||
- `no contact`
|
||||
- `lost comm`
|
||||
|
||||
Note that there is some overlap with service state.
|
||||
|
||||
### Example metric
|
||||
Lists the services that have a 'disabled' start mode.
|
||||
```
|
||||
wmi_service_start_mode{exported_name=~"(mssqlserver|sqlserveragent)",start_mode="disabled"}
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
Counts the number of Microsoft SQL Server/Agent Processes
|
||||
```
|
||||
count(wmi_service_state{exported_name=~"(sqlserveragent|mssqlserver)",state="running"})
|
||||
```
|
||||
|
||||
## Alerting examples
|
||||
**prometheus.rules**
|
||||
```yaml
|
||||
groups:
|
||||
- name: Microsoft SQL Server Alerts
|
||||
rules:
|
||||
|
||||
# Sends an alert when the 'sqlserveragent' service is not in the running state for 3 minutes.
|
||||
- alert: SQL Server Agent DOWN
|
||||
expr: wmi_service_state{instance="SQL",exported_name="sqlserveragent",state="running"} == 0
|
||||
for: 3m
|
||||
labels:
|
||||
severity: high
|
||||
annotations:
|
||||
summary: "Service {{ $labels.exported_name }} down"
|
||||
description: "Service {{ $labels.exported_name }} on instance {{ $labels.instance }} has been down for more than 3 minutes."
|
||||
|
||||
# Sends an alert when the 'mssqlserver' service is not in the running state for 3 minutes.
|
||||
- alert: SQL Server DOWN
|
||||
expr: wmi_service_state{instance="SQL",exported_name="mssqlserver",state="running"} == 0
|
||||
for: 3m
|
||||
labels:
|
||||
severity: high
|
||||
annotations:
|
||||
summary: "Service {{ $labels.exported_name }} down"
|
||||
description: "Service {{ $labels.exported_name }} on instance {{ $labels.instance }} has been down for more than 3 minutes."
|
||||
```
|
||||
In this example, `instance` is the target label of the host. So each alert will be processed per host, which is then used in the alert description.
|
||||
40
docs/collector.system.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# system collector
|
||||
|
||||
The system collector exposes system-level metrics such as context switches, system calls, exceptions and threads.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `system`
|
||||
Data source | Perflib
|
||||
Classes | [`Win32_PerfRawData_PerfOS_System`](https://web.archive.org/web/20050830140516/http://msdn.microsoft.com/library/en-us/wmisdk/wmi/win32_perfrawdata_perfos_system.asp)
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_system_context_switches_total` | Total number of [context switches](https://en.wikipedia.org/wiki/Context_switch) | counter | None
|
||||
`wmi_system_exception_dispatches_total` | Total exceptions dispatched by the system | counter | None
|
||||
`wmi_system_processor_queue_length` | Number of threads in the processor queue. There is a single queue for processor time even on computers with multiple processors. | gauge | None
|
||||
`wmi_system_system_calls_total` | Total combined calls to Windows NT system service routines by all processes running on the computer | counter | None
|
||||
`wmi_system_system_up_time` | Time of last boot of system | gauge | None
|
||||
`wmi_system_threads` | Number of Windows system [threads](https://en.wikipedia.org/wiki/Thread_(computing)) | gauge | None
|
||||
|
||||
### Example metric
|
||||
Show current number of system threads
|
||||
```
|
||||
wmi_system_threads{instance="localhost"}
|
||||
```
|
||||
|
||||
## Useful queries
|
||||
Find hosts that have rebooted in the last 24 hours
|
||||
```
|
||||
time() - wmi_system_system_up_time < 86400
|
||||
```
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
36
docs/collector.tcp.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# tcp collector
|
||||
|
||||
The tcp collector exposes metrics about the TCP/IPv4 network stack.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `tcp`
|
||||
Classes | [`Win32_PerfRawData_Tcpip_TCPv4`](https://msdn.microsoft.com/en-us/library/aa394341(v=vs.85).aspx)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_tcp_connection_failures` | Number of times TCP connections have made a direct transition to the CLOSED state from the SYN-SENT state or the SYN-RCVD state, plus the number of times TCP connections have made a direct transition from the SYN-RCVD state to the LISTEN state | counter | None
|
||||
`wmi_tcp_connections_active` | Number of times TCP connections have made a direct transition from the CLOSED state to the SYN-SENT state.| counter | None
|
||||
`wmi_tcp_connections_established` | Number of TCP connections for which the current state is either ESTABLISHED or CLOSE-WAIT. | counter | None
|
||||
`wmi_tcp_connections_passive` | Number of times TCP connections have made a direct transition from the LISTEN state to the SYN-RCVD state. | counter | None
|
||||
`wmi_tcp_connections_reset` | Number of times TCP connections have made a direct transition to the CLOSED state from either the ESTABLISHED state or the CLOSE-WAIT state. | counter | None
|
||||
`wmi_tcp_segments_total` | Total segments sent or received using the TCP protocol | counter | None
|
||||
`wmi_tcp_segments_received_total` | Total segments received, including those received in error. This count includes segments received on currently established connections | counter | None
|
||||
`wmi_tcp_segments_retransmitted_total` | Total segments retransmitted. That is, segments transmitted that contain one or more previously transmitted bytes | counter | None
|
||||
`wmi_tcp_segments_sent_total` | Total segments sent, including those on current connections, but excluding those containing *only* retransmitted bytes | counter | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
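In the meantime, a sketch that estimates the TCP retransmission ratio (the time window is an arbitrary choice):
```
rate(wmi_tcp_segments_retransmitted_total[5m]) / rate(wmi_tcp_segments_sent_total[5m])
```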
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
56
docs/collector.textfile.md
Normal file
@@ -0,0 +1,56 @@
|
||||
# textfile collector
|
||||
|
||||
The textfile collector exposes metrics from files written by other processes.
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `textfile`
|
||||
Classes | None
|
||||
Enabled by default? | Yes
|
||||
|
||||
## Flags
|
||||
|
||||
### `--collector.textfile.directory`
|
||||
|
||||
The directory containing the files to be ingested. Only files with the extension `.prom` are read. Each `.prom` file must end with a trailing newline (line feed) to be processed correctly.
|
||||
|
||||
Default value: `C:\Program Files\wmi_exporter\textfile_inputs`
|
||||
|
||||
Required: No
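A hedged example of overriding the directory at start-up; the path shown here is hypothetical:
```
wmi_exporter.exe --collector.textfile.directory="C:\custom_metrics"
```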
|
||||
|
||||
## Metrics
|
||||
|
||||
Metrics will primarily come from the files on disk. The metrics listed below are collected to report on the reading of those files.
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_textfile_scrape_error` | 1 if there was an error opening or reading a file, 0 otherwise | gauge | None
|
||||
`wmi_textfile_mtime_seconds` | Unix epoch-formatted mtime (modified time) of textfiles successfully read | gauge | file
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
|
||||
# Example use
|
||||
This PowerShell script, when run in the `collector.textfile.directory` (default `C:\Program Files\wmi_exporter\textfile_inputs`), generates a valid `.prom` file that should be successfully ingested by wmi_exporter.
|
||||
|
||||
```Powershell
|
||||
$alpha = 42
|
||||
$beta = @{ left=3.1415; right=2.718281828; }
|
||||
|
||||
Set-Content -Path test1.prom -Encoding Ascii -NoNewline -Value ""
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# HELP test_alpha_total Some random metric.`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# TYPE test_alpha_total counter`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "test_alpha_total ${alpha}`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# HELP test_beta_bytes Some other metric.`n"
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "# TYPE test_beta_bytes gauge`n"
|
||||
foreach ($k in $beta.Keys) {
|
||||
Add-Content -Path test1.prom -Encoding Ascii -NoNewline -Value "test_beta_bytes{spin=""${k}""} $( $beta[$k] )`n"
|
||||
}
|
||||
```
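With the variable values above, the resulting `test1.prom` would look roughly like the following (the order of the hashtable keys is not guaranteed):
```
# HELP test_alpha_total Some random metric.
# TYPE test_alpha_total counter
test_alpha_total 42
# HELP test_beta_bytes Some other metric.
# TYPE test_beta_bytes gauge
test_beta_bytes{spin="left"} 3.1415
test_beta_bytes{spin="right"} 2.718281828
```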
|
||||
32
docs/collector.thermalzone.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# thermalzone collector
|
||||
|
||||
The thermalzone collector exposes metrics about system temperatures. Note that temperature is exposed in degrees Celsius
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `thermalzone`
|
||||
Classes | [`Win32_PerfRawData_Counters_ThermalZoneInformation`](https://wutils.com/wmi/root/cimv2/win32_perfrawdata_counters_thermalzoneinformation/#temperature_properties)
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_thermalzone_percent_passive_limit` | % Passive Limit is the current limit this thermal zone is placing on the devices it controls. A limit of 100% indicates the devices are unconstrained. A limit of 0% indicates the devices are fully constrained. | gauge | None
|
||||
`wmi_thermalzone_temperature_celsius` | Temperature of the thermal zone, in degrees Celsius. | gauge | None
`wmi_thermalzone_throttle_reasons` | Throttle Reasons indicate reasons why the thermal zone is limiting performance of the devices it controls. 0x0 – The zone is not throttled. 0x1 – The zone is throttled for thermal reasons. 0x2 – The zone is throttled to limit electrical current. | gauge | None
|
||||
|
||||
[`Throttle reasons` source](https://docs.microsoft.com/en-us/windows-hardware/design/device-experiences/examples--requirements-and-diagnostics)
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
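In the meantime, a sketch that flags zones running hotter than a chosen threshold (the 80 °C value is arbitrary):
```
wmi_thermalzone_temperature_celsius > 80
```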
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
46
docs/collector.vmware.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# vmware collector
|
||||
|
||||
The vmware collector exposes metrics about a VMware guest VM
|
||||
|
||||
|||
|
||||
-|-
|
||||
Metric name prefix | `vmware`
|
||||
Classes | `Win32_PerfRawData_vmGuestLib_VMem`, `Win32_PerfRawData_vmGuestLib_VCPU`
|
||||
Enabled by default? | No
|
||||
|
||||
## Flags
|
||||
|
||||
None
|
||||
|
||||
## Metrics
|
||||
|
||||
Name | Description | Type | Labels
|
||||
-----|-------------|------|-------
|
||||
`wmi_vmware_mem_active_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_ballooned_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_limit_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_mapped_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_overhead_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_reservation_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_shared_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_shared_saved_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_shares` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_swapped_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_target_size_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_mem_used_bytes` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_cpu_limit_mhz` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_cpu_reservation_mhz` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_cpu_shares` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_cpu_stolen_seconds_total` | _Not yet documented_ | counter | None
|
||||
`wmi_vmware_cpu_time_seconds_total` | _Not yet documented_ | counter | None
|
||||
`wmi_vmware_effective_vm_speed_mhz` | _Not yet documented_ | gauge | None
|
||||
`wmi_vmware_host_processor_speed_mhz` | _Not yet documented_ | gauge | None
|
||||
|
||||
### Example metric
|
||||
_This collector does not yet have explained examples, we would appreciate your help adding them!_
|
||||
|
||||
## Useful queries
|
||||
_This collector does not yet have any useful queries added, we would appreciate your help adding them!_
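In the meantime, a sketch that tracks how much CPU time the hypervisor is withholding from the guest (the time window is an arbitrary choice):
```
rate(wmi_vmware_cpu_stolen_seconds_total[5m])
```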
|
||||
|
||||
## Alerting examples
|
||||
_This collector does not yet have alerting examples, we would appreciate your help adding them!_
|
||||
259
exporter.go
@@ -1,10 +1,13 @@
|
||||
// +build windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -22,7 +25,8 @@ import (
|
||||
|
||||
// WmiCollector implements the prometheus.Collector interface.
|
||||
type WmiCollector struct {
|
||||
collectors map[string]collector.Collector
|
||||
maxScrapeDuration time.Duration
|
||||
collectors map[string]collector.Collector
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -44,6 +48,18 @@ var (
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
)
|
||||
scrapeTimeoutDesc = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(collector.Namespace, "exporter", "collector_timeout"),
|
||||
"wmi_exporter: Whether the collector timed out.",
|
||||
[]string{"collector"},
|
||||
nil,
|
||||
)
|
||||
snapshotDuration = prometheus.NewDesc(
|
||||
prometheus.BuildFQName(collector.Namespace, "exporter", "perflib_snapshot_duration_seconds"),
|
||||
"Duration of perflib snapshot capture",
|
||||
nil,
|
||||
nil,
|
||||
)
|
||||
|
||||
// This can be removed when client_golang exposes this on Windows
|
||||
// (See https://github.com/prometheus/client_golang/issues/376)
|
||||
@@ -63,63 +79,136 @@ func (coll WmiCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
ch <- scrapeSuccessDesc
|
||||
}
|
||||
|
||||
// Collect sends the collected metrics from each of the collectors to
|
||||
// prometheus. Collect could be called several times concurrently
|
||||
// and thus its run is protected by a single mutex.
|
||||
func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(coll.collectors))
|
||||
for name, c := range coll.collectors {
|
||||
go func(name string, c collector.Collector) {
|
||||
execute(name, c, ch)
|
||||
wg.Done()
|
||||
}(name, c)
|
||||
}
|
||||
type collectorOutcome int
|
||||
|
||||
const (
|
||||
pending collectorOutcome = iota
|
||||
success
|
||||
failed
|
||||
)
|
||||
|
||||
// Collect sends the collected metrics from each of the collectors to
|
||||
// prometheus.
|
||||
func (coll WmiCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
startTimeDesc,
|
||||
prometheus.CounterValue,
|
||||
startTime,
|
||||
)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func filterAvailableCollectors(collectors string) string {
|
||||
var availableCollectors []string
|
||||
for _, c := range strings.Split(collectors, ",") {
|
||||
_, ok := collector.Factories[c]
|
||||
if ok {
|
||||
availableCollectors = append(availableCollectors, c)
|
||||
}
|
||||
t := time.Now()
|
||||
cs := make([]string, 0, len(coll.collectors))
|
||||
for name := range coll.collectors {
|
||||
cs = append(cs, name)
|
||||
}
|
||||
return strings.Join(availableCollectors, ",")
|
||||
}
|
||||
|
||||
func execute(name string, c collector.Collector, ch chan<- prometheus.Metric) {
|
||||
begin := time.Now()
|
||||
err := c.Collect(ch)
|
||||
duration := time.Since(begin)
|
||||
var success float64
|
||||
|
||||
scrapeContext, err := collector.PrepareScrapeContext(cs)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
snapshotDuration,
|
||||
prometheus.GaugeValue,
|
||||
time.Since(t).Seconds(),
|
||||
)
|
||||
if err != nil {
|
||||
log.Errorf("collector %s failed after %fs: %s", name, duration.Seconds(), err)
|
||||
success = 0
|
||||
} else {
|
||||
log.Debugf("collector %s succeeded after %fs.", name, duration.Seconds())
|
||||
success = 1
|
||||
ch <- prometheus.NewInvalidMetric(scrapeSuccessDesc, fmt.Errorf("failed to prepare scrape: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(coll.collectors))
|
||||
collectorOutcomes := make(map[string]collectorOutcome)
|
||||
for name := range coll.collectors {
|
||||
collectorOutcomes[name] = pending
|
||||
}
|
||||
|
||||
metricsBuffer := make(chan prometheus.Metric)
|
||||
l := sync.Mutex{}
|
||||
finished := false
|
||||
go func() {
|
||||
for m := range metricsBuffer {
|
||||
l.Lock()
|
||||
if !finished {
|
||||
ch <- m
|
||||
}
|
||||
l.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
for name, c := range coll.collectors {
|
||||
go func(name string, c collector.Collector) {
|
||||
defer wg.Done()
|
||||
outcome := execute(name, c, scrapeContext, metricsBuffer)
|
||||
l.Lock()
|
||||
if !finished {
|
||||
collectorOutcomes[name] = outcome
|
||||
}
|
||||
l.Unlock()
|
||||
}(name, c)
|
||||
}
|
||||
|
||||
allDone := make(chan struct{})
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(allDone)
|
||||
close(metricsBuffer)
|
||||
}()
|
||||
|
||||
// Wait until either all collectors finish, or timeout expires
|
||||
select {
|
||||
case <-allDone:
|
||||
case <-time.After(coll.maxScrapeDuration):
|
||||
}
|
||||
|
||||
l.Lock()
|
||||
finished = true
|
||||
|
||||
remainingCollectorNames := make([]string, 0)
|
||||
for name, outcome := range collectorOutcomes {
|
||||
var successValue, timeoutValue float64
|
||||
if outcome == pending {
|
||||
timeoutValue = 1.0
|
||||
remainingCollectorNames = append(remainingCollectorNames, name)
|
||||
}
|
||||
if outcome == success {
|
||||
successValue = 1.0
|
||||
}
|
||||
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeSuccessDesc,
|
||||
prometheus.GaugeValue,
|
||||
successValue,
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeTimeoutDesc,
|
||||
prometheus.GaugeValue,
|
||||
timeoutValue,
|
||||
name,
|
||||
)
|
||||
}
|
||||
|
||||
if len(remainingCollectorNames) > 0 {
|
||||
log.Warn("Collection timed out, still waiting for ", remainingCollectorNames)
|
||||
}
|
||||
|
||||
l.Unlock()
|
||||
}
|
||||
|
||||
func execute(name string, c collector.Collector, ctx *collector.ScrapeContext, ch chan<- prometheus.Metric) collectorOutcome {
|
||||
t := time.Now()
|
||||
err := c.Collect(ctx, ch)
|
||||
duration := time.Since(t).Seconds()
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeDurationDesc,
|
||||
prometheus.GaugeValue,
|
||||
duration.Seconds(),
|
||||
name,
|
||||
)
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
scrapeSuccessDesc,
|
||||
prometheus.GaugeValue,
|
||||
success,
|
||||
duration,
|
||||
name,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("collector %s failed after %fs: %s", name, duration, err)
|
||||
return failed
|
||||
}
|
||||
log.Debugf("collector %s succeeded after %fs.", name, duration)
|
||||
return success
|
||||
}
|
||||
|
||||
func expandEnabledCollectors(enabled string) []string {
|
||||
@@ -143,21 +232,14 @@ func loadCollectors(list string) (map[string]collector.Collector, error) {
|
||||
enabled := expandEnabledCollectors(list)
|
||||
|
||||
for _, name := range enabled {
|
||||
fn, ok := collector.Factories[name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("collector '%s' not available", name)
|
||||
}
|
||||
c, err := fn()
|
||||
c, err := collector.Build(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
collectors[name] = c
|
||||
}
|
||||
return collectors, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(version.NewCollector("wmi_exporter"))
|
||||
return collectors, nil
|
||||
}
|
||||
|
||||
func initWbem() {
|
||||
@@ -185,12 +267,16 @@ func main() {
|
||||
).Default("/metrics").String()
|
||||
enabledCollectors = kingpin.Flag(
|
||||
"collectors.enabled",
|
||||
"Comma-separated list of collectors to use. Use '[default]' as a placeholder for all the collectors enabled by default.").
|
||||
Default(filterAvailableCollectors(defaultCollectors)).String()
|
||||
"Comma-separated list of collectors to use. Use '[defaults]' as a placeholder for all the collectors enabled by default.").
|
||||
Default(defaultCollectors).String()
|
||||
printCollectors = kingpin.Flag(
|
||||
"collectors.print",
|
||||
"If true, print available collectors and exit.",
|
||||
).Bool()
|
||||
timeoutMargin = kingpin.Flag(
|
||||
"scrape.timeout-margin",
|
||||
"Seconds to subtract from the timeout allowed by the client. Tune to allow for overhead or high loads.",
|
||||
).Default("0.5").Float64()
|
||||
)
|
||||
|
||||
log.AddFlags(kingpin.CommandLine)
|
||||
@@ -199,8 +285,9 @@ func main() {
|
||||
kingpin.Parse()
|
||||
|
||||
if *printCollectors {
|
||||
collectorNames := make(sort.StringSlice, 0, len(collector.Factories))
|
||||
for n := range collector.Factories {
|
||||
collectors := collector.Available()
|
||||
collectorNames := make(sort.StringSlice, 0, len(collectors))
|
||||
for _, n := range collectors {
|
||||
collectorNames = append(collectorNames, n)
|
||||
}
|
||||
collectorNames.Sort()
|
||||
@@ -220,7 +307,12 @@ func main() {
|
||||
|
||||
stopCh := make(chan bool)
|
||||
if !isInteractive {
|
||||
go svc.Run(serviceName, &wmiExporterService{stopCh: stopCh})
|
||||
go func() {
|
||||
err = svc.Run(serviceName, &wmiExporterService{stopCh: stopCh})
|
||||
if err != nil {
|
||||
log.Errorf("Failed to start service: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
collectors, err := loadCollectors(*enabledCollectors)
|
||||
@@ -230,10 +322,17 @@ func main() {
|
||||
|
||||
log.Infof("Enabled collectors: %v", strings.Join(keys(collectors), ", "))
|
||||
|
||||
nodeCollector := WmiCollector{collectors: collectors}
|
||||
prometheus.MustRegister(nodeCollector)
|
||||
h := &metricsHandler{
|
||||
timeoutMargin: *timeoutMargin,
|
||||
collectorFactory: func(timeout time.Duration) *WmiCollector {
|
||||
return &WmiCollector{
|
||||
collectors: collectors,
|
||||
maxScrapeDuration: timeout,
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
http.Handle(*metricsPath, promhttp.Handler())
|
||||
http.Handle(*metricsPath, h)
|
||||
http.HandleFunc("/health", healthCheck)
|
||||
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Redirect(w, r, *metricsPath, http.StatusMovedPermanently)
|
||||
@@ -257,7 +356,10 @@ func main() {
|
||||
|
||||
func healthCheck(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
io.WriteString(w, `{"status":"ok"}`)
|
||||
_, err := fmt.Fprintln(w, `{"status":"ok"}`)
|
||||
if err != nil {
|
||||
log.Debugf("Failed to write to stream: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func keys(m map[string]collector.Collector) []string {
|
||||
@@ -294,3 +396,36 @@ loop:
|
||||
changes <- svc.Status{State: svc.StopPending}
|
||||
return
|
||||
}
|
||||
|
||||
type metricsHandler struct {
|
||||
timeoutMargin float64
|
||||
collectorFactory func(timeout time.Duration) *WmiCollector
|
||||
}
|
||||
|
||||
func (mh *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
const defaultTimeout = 10.0
|
||||
|
||||
var timeoutSeconds float64
|
||||
if v := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" {
|
||||
var err error
|
||||
timeoutSeconds, err = strconv.ParseFloat(v, 64)
|
||||
if err != nil {
|
||||
log.Warnf("Couldn't parse X-Prometheus-Scrape-Timeout-Seconds: %q. Defaulting timeout to %f", v, defaultTimeout)
|
||||
}
|
||||
}
|
||||
if timeoutSeconds == 0 {
|
||||
timeoutSeconds = defaultTimeout
|
||||
}
|
||||
timeoutSeconds = timeoutSeconds - mh.timeoutMargin
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
reg.MustRegister(mh.collectorFactory(time.Duration(timeoutSeconds * float64(time.Second))))
|
||||
reg.MustRegister(
|
||||
prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
|
||||
prometheus.NewGoCollector(),
|
||||
version.NewCollector("wmi_exporter"),
|
||||
)
|
||||
|
||||
h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
|
||||
h.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
// +build windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
|
||||
17
go.mod
Normal file
@@ -0,0 +1,17 @@
|
||||
module github.com/martinlindhe/wmi_exporter
|
||||
|
||||
go 1.13
|
||||
|
||||
require (
|
||||
github.com/Microsoft/go-winio v0.4.14 // indirect
|
||||
github.com/Microsoft/hcsshim v0.8.6
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6
|
||||
github.com/dimchansky/utfbom v1.1.0
|
||||
github.com/go-ole/go-ole v1.2.1 // indirect
|
||||
github.com/leoluk/perflib_exporter v0.1.0
|
||||
github.com/prometheus/client_golang v0.9.2
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910
|
||||
github.com/prometheus/common v0.2.0
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6
|
||||
)
|
||||
89
go.sum
Normal file
@@ -0,0 +1,89 @@
|
||||
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=
|
||||
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
|
||||
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a h1:BtpsbiV638WQZwhA98cEZw2BsbnQJrbd0BI7tsy0W1c=
|
||||
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
|
||||
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
|
||||
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/protobuf v1.0.0 h1:lsek0oXi8iFE9L+EXARyHIjU5rlWIhhTkjDz3vHhWWQ=
|
||||
github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/leoluk/perflib_exporter v0.0.1 h1:MRwP1Ohh/mVevUy4ZUzvSlxnJtm9/NWHeM3aROxnRiQ=
|
||||
github.com/leoluk/perflib_exporter v0.0.1/go.mod h1:4APOQriqHobMzovXV7guPQv0ynKH6vZD3XNmT2MBc6w=
|
||||
github.com/leoluk/perflib_exporter v0.1.0 h1:fXe/mDaf9jR+Zk8FjFlcCSksACuIj2VNN4GyKHmQqtA=
|
||||
github.com/leoluk/perflib_exporter v0.1.0/go.mod h1:rpV0lYj7lemdTm31t7zpCqYqPnw7xs86f+BaaNBVYFM=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.0 h1:YNOwxxSJzSUARoD9KRZLzM9Y858MNGCOACTvCW9TSAc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
|
||||
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5 h1:cLL6NowurKLMfCeQy4tIeph12XNQWgANCNvdyrOYKV4=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.0.0-20180312112859-e4aa40a9169a h1:JLXgXKi9RCmLk8DMn8+PCvN++iwpD3KptUbVvHBsKtU=
|
||||
github.com/prometheus/common v0.0.0-20180312112859-e4aa40a9169a/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20180310141954-54d17b57dd7d h1:iF+U2tTdys559fmqt0MNaC8QLIJh1twxIIOylDGhswM=
|
||||
github.com/prometheus/procfs v0.0.0-20180310141954-54d17b57dd7d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
|
||||
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/sirupsen/logrus v1.0.5 h1:8c8b5uO0zS4X6RPl/sd1ENwSkIc0/H2PaHxE3udaE8I=
|
||||
github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
golang.org/x/crypto v0.0.0-20180312195533-182114d58262 h1:1NLVUmR8SQ7cNNA5Vo7ronpXbR+5A+9IwIC/bLE7D8Y=
|
||||
golang.org/x/crypto v0.0.0-20180312195533-182114d58262/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180313075820-8c0ece68c283 h1:DE/w7won1Ns86VoWjUZ4cJS6//TObJntGkxuZ63asRc=
|
||||
golang.org/x/sys v0.0.0-20180313075820-8c0ece68c283/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998=
|
||||
golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
@@ -19,6 +19,9 @@
|
||||
<Property Id="ENABLED_COLLECTORS" Secure="yes"/>
|
||||
<SetProperty Id="CollectorsFlag" After="InstallFiles" Sequence="execute" Value="--collectors.enabled [ENABLED_COLLECTORS]">ENABLED_COLLECTORS</SetProperty>
|
||||
|
||||
<Property Id="EXTRA_FLAGS" Secure="yes"/>
|
||||
<SetProperty Id="ExtraFlags" After="InstallFiles" Sequence="execute" Value="[EXTRA_FLAGS]">EXTRA_FLAGS</SetProperty>
|
||||
|
||||
<Property Id="LISTEN_ADDR" Secure="yes" />
|
||||
<Property Id="LISTEN_PORT" Secure="yes" Value="9182" />
|
||||
<SetProperty Id="ListenFlag" After="InstallFiles" Sequence="execute" Value="--telemetry.addr [LISTEN_ADDR]:[LISTEN_PORT]">LISTEN_ADDR OR LISTEN_PORT</SetProperty>
|
||||
@@ -42,10 +45,11 @@
|
||||
<File Id="wmi_exporter.exe" Name="wmi_exporter.exe" Source="Work\wmi_exporter.exe" KeyPath="yes">
|
||||
<fw:FirewallException Id="MetricsEndpoint" Name="WMI Exporter (HTTP [LISTEN_PORT])" Description="WMI Exporter HTTP endpoint" Port="[LISTEN_PORT]" Protocol="tcp" Scope="any" IgnoreFailure="yes" />
|
||||
</File>
|
||||
<ServiceInstall Id="InstallExporterService" Name="wmi_exporter" DisplayName="WMI exporter" Description="Exports Prometheus metrics from WMI queries" ErrorControl="normal" Start="auto" Type="ownProcess" Arguments="--log.format logger:eventlog?name=wmi_exporter [CollectorsFlag] [ListenFlag] [MetricsPathFlag] [TextfileDirFlag]">
|
||||
<ServiceInstall Id="InstallExporterService" Name="wmi_exporter" DisplayName="WMI exporter" Description="Exports Prometheus metrics from WMI queries" ErrorControl="normal" Start="auto" Type="ownProcess" Arguments="--log.format logger:eventlog?name=wmi_exporter [CollectorsFlag] [ListenFlag] [MetricsPathFlag] [TextfileDirFlag] [ExtraFlags]">
|
||||
<util:ServiceConfig FirstFailureActionType="restart" SecondFailureActionType="restart" ThirdFailureActionType="restart" RestartServiceDelayInSeconds="5" />
|
||||
</ServiceInstall>
|
||||
<ServiceControl Id="ServiceStateControl" Name="wmi_exporter" Remove="uninstall" Start="install" Stop="both" />
|
||||
<util:EventSource Log="Application" Name="wmi_exporter" EventMessageFile="%SystemRoot%\System32\EventCreate.exe" />
|
||||
</Component>
|
||||
<Component Id="CreateTextfileDirectory" Directory="textfile_inputs" Guid="d03ef58a-9cbf-4165-ad39-d143e9b27e14">
|
||||
<CreateFolder />
|
||||
|
||||
@@ -1,5 +1,3 @@
// returns data points from {{ .Class }}
// <add link to documentation here> - {{ .Class }} class
package collector
import (
"github.com/StackExchange/wmi"
@@ -31,13 +29,15 @@ func New{{ .CollectorName }}Collector() (Collector, error) {
}
// Collect sends the metric values for each metric
// to the provided prometheus Metric channel.
func (c *{{ .CollectorName }}Collector) Collect(ch chan<- prometheus.Metric) error {
func (c *{{ .CollectorName }}Collector) Collect(ctx *ScrapeContext, ch chan<- prometheus.Metric) error {
if desc, err := c.collect(ch); err != nil {
log.Error("failed collecting {{ .CollectorName | toLower }} metrics:", desc, err)
return err
}
return nil
}
// {{ .Class }} docs:
// - <add link to documentation here>
type {{ .Class }} struct {
Name string
{{ range $m := .Members }}
@@ -25,7 +25,7 @@ func main() {
panic(err)
}
var data TemplateData
if err := json.Unmarshal(bytes, &data); err != nil {
if err = json.Unmarshal(bytes, &data); err != nil {
panic(err)
}

@@ -33,11 +33,11 @@ func main() {
"toLower": strings.ToLower,
"toSnakeCase": toSnakeCase,
}
template, err := template.New("template").Funcs(funcs).ParseFiles("collector.template")
tmpl, err := template.New("template").Funcs(funcs).ParseFiles("collector.template")
if err != nil {
panic(err)
}
err = template.ExecuteTemplate(os.Stdout, "collector.template", data)
err = tmpl.ExecuteTemplate(os.Stdout, "collector.template", data)
if err != nil {
panic(err)
}
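The generator hunks above rename the parsed template variable from template to tmpl, which stops it from shadowing the imported text/template package identifier inside main. A minimal, self-contained sketch of the same parse-and-execute pattern, using an inline template string and a made-up class name in place of collector.template and the JSON-decoded TemplateData:

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	// Same FuncMap shape as the generator; only toLower is exercised here.
	funcs := template.FuncMap{"toLower": strings.ToLower}
	// Naming the variable tmpl keeps the text/template package identifier usable below.
	tmpl, err := template.New("collector.template").Funcs(funcs).Parse(
		"// returns data points from {{ .Class }} ({{ .Class | toLower }})\npackage collector\n")
	if err != nil {
		panic(err)
	}
	// Hypothetical data standing in for the decoded TemplateData.
	data := struct{ Class string }{Class: "Win32_PerfRawData_Example"}
	if err = tmpl.ExecuteTemplate(os.Stdout, "collector.template", data); err != nil {
		panic(err)
	}
}

Running the sketch prints the rendered header comment and package clause to stdout, mirroring what generate.go does with the real collector.template.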
20
vendor/github.com/StackExchange/wmi/LICENSE
generated
vendored
@@ -1,20 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Stack Exchange
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
260
vendor/github.com/StackExchange/wmi/swbemservices.go
generated
vendored
@@ -1,260 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
package wmi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/go-ole/go-ole"
|
||||
"github.com/go-ole/go-ole/oleutil"
|
||||
)
|
||||
|
||||
// SWbemServices is used to access wmi. See https://msdn.microsoft.com/en-us/library/aa393719(v=vs.85).aspx
|
||||
type SWbemServices struct {
|
||||
//TODO: track namespace. Not sure if we can re connect to a different namespace using the same instance
|
||||
cWMIClient *Client //This could also be an embedded struct, but then we would need to branch on Client vs SWbemServices in the Query method
|
||||
sWbemLocatorIUnknown *ole.IUnknown
|
||||
sWbemLocatorIDispatch *ole.IDispatch
|
||||
queries chan *queryRequest
|
||||
closeError chan error
|
||||
lQueryorClose sync.Mutex
|
||||
}
|
||||
|
||||
type queryRequest struct {
|
||||
query string
|
||||
dst interface{}
|
||||
args []interface{}
|
||||
finished chan error
|
||||
}
|
||||
|
||||
// InitializeSWbemServices will return a new SWbemServices object that can be used to query WMI
|
||||
func InitializeSWbemServices(c *Client, connectServerArgs ...interface{}) (*SWbemServices, error) {
|
||||
//fmt.Println("InitializeSWbemServices: Starting")
|
||||
//TODO: implement connectServerArgs as optional argument for init with connectServer call
|
||||
s := new(SWbemServices)
|
||||
s.cWMIClient = c
|
||||
s.queries = make(chan *queryRequest)
|
||||
initError := make(chan error)
|
||||
go s.process(initError)
|
||||
|
||||
err, ok := <-initError
|
||||
if ok {
|
||||
return nil, err //Send error to caller
|
||||
}
|
||||
//fmt.Println("InitializeSWbemServices: Finished")
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Close will clear and release all of the SWbemServices resources
|
||||
func (s *SWbemServices) Close() error {
|
||||
s.lQueryorClose.Lock()
|
||||
if s == nil || s.sWbemLocatorIDispatch == nil {
|
||||
s.lQueryorClose.Unlock()
|
||||
return fmt.Errorf("SWbemServices is not Initialized")
|
||||
}
|
||||
if s.queries == nil {
|
||||
s.lQueryorClose.Unlock()
|
||||
return fmt.Errorf("SWbemServices has been closed")
|
||||
}
|
||||
//fmt.Println("Close: sending close request")
|
||||
var result error
|
||||
ce := make(chan error)
|
||||
s.closeError = ce //Race condition if multiple callers to close. May need to lock here
|
||||
close(s.queries) //Tell background to shut things down
|
||||
s.lQueryorClose.Unlock()
|
||||
err, ok := <-ce
|
||||
if ok {
|
||||
result = err
|
||||
}
|
||||
//fmt.Println("Close: finished")
|
||||
return result
|
||||
}
|
||||
|
||||
func (s *SWbemServices) process(initError chan error) {
|
||||
//fmt.Println("process: starting background thread initialization")
|
||||
//All OLE/WMI calls must happen on the same initialized thead, so lock this goroutine
|
||||
runtime.LockOSThread()
|
||||
defer runtime.LockOSThread()
|
||||
|
||||
err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
|
||||
if err != nil {
|
||||
oleCode := err.(*ole.OleError).Code()
|
||||
if oleCode != ole.S_OK && oleCode != S_FALSE {
|
||||
initError <- fmt.Errorf("ole.CoInitializeEx error: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
defer ole.CoUninitialize()
|
||||
|
||||
unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
|
||||
if err != nil {
|
||||
initError <- fmt.Errorf("CreateObject SWbemLocator error: %v", err)
|
||||
return
|
||||
} else if unknown == nil {
|
||||
initError <- ErrNilCreateObject
|
||||
return
|
||||
}
|
||||
defer unknown.Release()
|
||||
s.sWbemLocatorIUnknown = unknown
|
||||
|
||||
dispatch, err := s.sWbemLocatorIUnknown.QueryInterface(ole.IID_IDispatch)
|
||||
if err != nil {
|
||||
initError <- fmt.Errorf("SWbemLocator QueryInterface error: %v", err)
|
||||
return
|
||||
}
|
||||
defer dispatch.Release()
|
||||
s.sWbemLocatorIDispatch = dispatch
|
||||
|
||||
// we can't do the ConnectServer call outside the loop unless we find a way to track and re-init the connectServerArgs
|
||||
//fmt.Println("process: initialized. closing initError")
|
||||
close(initError)
|
||||
//fmt.Println("process: waiting for queries")
|
||||
for q := range s.queries {
|
||||
//fmt.Printf("process: new query: len(query)=%d\n", len(q.query))
|
||||
errQuery := s.queryBackground(q)
|
||||
//fmt.Println("process: s.queryBackground finished")
|
||||
if errQuery != nil {
|
||||
q.finished <- errQuery
|
||||
}
|
||||
close(q.finished)
|
||||
}
|
||||
//fmt.Println("process: queries channel closed")
|
||||
s.queries = nil //set channel to nil so we know it is closed
|
||||
//TODO: I think the Release/Clear calls can panic if things are in a bad state.
|
||||
//TODO: May need to recover from panics and send error to method caller instead.
|
||||
close(s.closeError)
|
||||
}
|
||||
|
||||
// Query runs the WQL query using a SWbemServices instance and appends the values to dst.
|
||||
//
|
||||
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
|
||||
// the query must have the same name in dst. Supported types are all signed and
|
||||
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
|
||||
// Array types are not supported.
|
||||
//
|
||||
// By default, the local machine and default namespace are used. These can be
|
||||
// changed using connectServerArgs. See
|
||||
// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
|
||||
func (s *SWbemServices) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
|
||||
s.lQueryorClose.Lock()
|
||||
if s == nil || s.sWbemLocatorIDispatch == nil {
|
||||
s.lQueryorClose.Unlock()
|
||||
return fmt.Errorf("SWbemServices is not Initialized")
|
||||
}
|
||||
if s.queries == nil {
|
||||
s.lQueryorClose.Unlock()
|
||||
return fmt.Errorf("SWbemServices has been closed")
|
||||
}
|
||||
|
||||
//fmt.Println("Query: Sending query request")
|
||||
qr := queryRequest{
|
||||
query: query,
|
||||
dst: dst,
|
||||
args: connectServerArgs,
|
||||
finished: make(chan error),
|
||||
}
|
||||
s.queries <- &qr
|
||||
s.lQueryorClose.Unlock()
|
||||
err, ok := <-qr.finished
|
||||
if ok {
|
||||
//fmt.Println("Query: Finished with error")
|
||||
return err //Send error to caller
|
||||
}
|
||||
//fmt.Println("Query: Finished")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SWbemServices) queryBackground(q *queryRequest) error {
|
||||
if s == nil || s.sWbemLocatorIDispatch == nil {
|
||||
return fmt.Errorf("SWbemServices is not Initialized")
|
||||
}
|
||||
wmi := s.sWbemLocatorIDispatch //Should just rename in the code, but this will help as we break things apart
|
||||
//fmt.Println("queryBackground: Starting")
|
||||
|
||||
dv := reflect.ValueOf(q.dst)
|
||||
if dv.Kind() != reflect.Ptr || dv.IsNil() {
|
||||
return ErrInvalidEntityType
|
||||
}
|
||||
dv = dv.Elem()
|
||||
mat, elemType := checkMultiArg(dv)
|
||||
if mat == multiArgTypeInvalid {
|
||||
return ErrInvalidEntityType
|
||||
}
|
||||
|
||||
// service is a SWbemServices
|
||||
serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", q.args...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
service := serviceRaw.ToIDispatch()
|
||||
defer serviceRaw.Clear()
|
||||
|
||||
// result is a SWBemObjectSet
|
||||
resultRaw, err := oleutil.CallMethod(service, "ExecQuery", q.query)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result := resultRaw.ToIDispatch()
|
||||
defer resultRaw.Clear()
|
||||
|
||||
count, err := oleInt64(result, "Count")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enumProperty, err := result.GetProperty("_NewEnum")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer enumProperty.Clear()
|
||||
|
||||
enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if enum == nil {
|
||||
return fmt.Errorf("can't get IEnumVARIANT, enum is nil")
|
||||
}
|
||||
defer enum.Release()
|
||||
|
||||
// Initialize a slice with Count capacity
|
||||
dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))
|
||||
|
||||
var errFieldMismatch error
|
||||
for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := func() error {
|
||||
// item is a SWbemObject, but really a Win32_Process
|
||||
item := itemRaw.ToIDispatch()
|
||||
defer item.Release()
|
||||
|
||||
ev := reflect.New(elemType)
|
||||
if err = s.cWMIClient.loadEntity(ev.Interface(), item); err != nil {
|
||||
if _, ok := err.(*ErrFieldMismatch); ok {
|
||||
// We continue loading entities even in the face of field mismatch errors.
|
||||
// If we encounter any other error, that other error is returned. Otherwise,
|
||||
// an ErrFieldMismatch is returned.
|
||||
errFieldMismatch = err
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if mat != multiArgTypeStructPtr {
|
||||
ev = ev.Elem()
|
||||
}
|
||||
dv.Set(reflect.Append(dv, ev))
|
||||
return nil
|
||||
}()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
//fmt.Println("queryBackground: Finished")
|
||||
return errFieldMismatch
|
||||
}
|
||||
486
vendor/github.com/StackExchange/wmi/wmi.go
generated
vendored
@@ -1,486 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
/*
|
||||
Package wmi provides a WQL interface for WMI on Windows.
|
||||
|
||||
Example code to print names of running processes:
|
||||
|
||||
type Win32_Process struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
func main() {
|
||||
var dst []Win32_Process
|
||||
q := wmi.CreateQuery(&dst, "")
|
||||
err := wmi.Query(q, &dst)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
for i, v := range dst {
|
||||
println(i, v.Name)
|
||||
}
|
||||
}
|
||||
|
||||
*/
|
||||
package wmi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-ole/go-ole"
|
||||
"github.com/go-ole/go-ole/oleutil"
|
||||
)
|
||||
|
||||
var l = log.New(os.Stdout, "", log.LstdFlags)
|
||||
|
||||
var (
|
||||
ErrInvalidEntityType = errors.New("wmi: invalid entity type")
|
||||
// ErrNilCreateObject is the error returned if CreateObject returns nil even
|
||||
// if the error was nil.
|
||||
ErrNilCreateObject = errors.New("wmi: create object returned nil")
|
||||
lock sync.Mutex
|
||||
)
|
||||
|
||||
// S_FALSE is returned by CoInitializeEx if it was already called on this thread.
|
||||
const S_FALSE = 0x00000001
|
||||
|
||||
// QueryNamespace invokes Query with the given namespace on the local machine.
|
||||
func QueryNamespace(query string, dst interface{}, namespace string) error {
|
||||
return Query(query, dst, nil, namespace)
|
||||
}
|
||||
|
||||
// Query runs the WQL query and appends the values to dst.
|
||||
//
|
||||
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
|
||||
// the query must have the same name in dst. Supported types are all signed and
|
||||
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
|
||||
// Array types are not supported.
|
||||
//
|
||||
// By default, the local machine and default namespace are used. These can be
|
||||
// changed using connectServerArgs. See
|
||||
// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
|
||||
//
|
||||
// Query is a wrapper around DefaultClient.Query.
|
||||
func Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
|
||||
if DefaultClient.SWbemServicesClient == nil {
|
||||
return DefaultClient.Query(query, dst, connectServerArgs...)
|
||||
}
|
||||
return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...)
|
||||
}
|
||||
|
||||
// A Client is an WMI query client.
|
||||
//
|
||||
// Its zero value (DefaultClient) is a usable client.
|
||||
type Client struct {
|
||||
// NonePtrZero specifies if nil values for fields which aren't pointers
|
||||
// should be returned as the field types zero value.
|
||||
//
|
||||
// Setting this to true allows stucts without pointer fields to be used
|
||||
// without the risk failure should a nil value returned from WMI.
|
||||
NonePtrZero bool
|
||||
|
||||
// PtrNil specifies if nil values for pointer fields should be returned
|
||||
// as nil.
|
||||
//
|
||||
// Setting this to true will set pointer fields to nil where WMI
|
||||
// returned nil, otherwise the types zero value will be returned.
|
||||
PtrNil bool
|
||||
|
||||
// AllowMissingFields specifies that struct fields not present in the
|
||||
// query result should not result in an error.
|
||||
//
|
||||
// Setting this to true allows custom queries to be used with full
|
||||
// struct definitions instead of having to define multiple structs.
|
||||
AllowMissingFields bool
|
||||
|
||||
// SWbemServiceClient is an optional SWbemServices object that can be
|
||||
// initialized and then reused across multiple queries. If it is null
|
||||
// then the method will initialize a new temporary client each time.
|
||||
SWbemServicesClient *SWbemServices
|
||||
}
|
||||
|
||||
// DefaultClient is the default Client and is used by Query, QueryNamespace
|
||||
var DefaultClient = &Client{}
|
||||
|
||||
// Query runs the WQL query and appends the values to dst.
|
||||
//
|
||||
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
|
||||
// the query must have the same name in dst. Supported types are all signed and
|
||||
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
|
||||
// Array types are not supported.
|
||||
//
|
||||
// By default, the local machine and default namespace are used. These can be
|
||||
// changed using connectServerArgs. See
|
||||
// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
|
||||
func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
|
||||
dv := reflect.ValueOf(dst)
|
||||
if dv.Kind() != reflect.Ptr || dv.IsNil() {
|
||||
return ErrInvalidEntityType
|
||||
}
|
||||
dv = dv.Elem()
|
||||
mat, elemType := checkMultiArg(dv)
|
||||
if mat == multiArgTypeInvalid {
|
||||
return ErrInvalidEntityType
|
||||
}
|
||||
|
||||
lock.Lock()
|
||||
defer lock.Unlock()
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
|
||||
if err != nil {
|
||||
oleCode := err.(*ole.OleError).Code()
|
||||
if oleCode != ole.S_OK && oleCode != S_FALSE {
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer ole.CoUninitialize()
|
||||
|
||||
unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
|
||||
if err != nil {
|
||||
return err
|
||||
} else if unknown == nil {
|
||||
return ErrNilCreateObject
|
||||
}
|
||||
defer unknown.Release()
|
||||
|
||||
wmi, err := unknown.QueryInterface(ole.IID_IDispatch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer wmi.Release()
|
||||
|
||||
// service is a SWbemServices
|
||||
serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
service := serviceRaw.ToIDispatch()
|
||||
defer serviceRaw.Clear()
|
||||
|
||||
// result is a SWBemObjectSet
|
||||
resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
result := resultRaw.ToIDispatch()
|
||||
defer resultRaw.Clear()
|
||||
|
||||
count, err := oleInt64(result, "Count")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enumProperty, err := result.GetProperty("_NewEnum")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer enumProperty.Clear()
|
||||
|
||||
enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if enum == nil {
|
||||
return fmt.Errorf("can't get IEnumVARIANT, enum is nil")
|
||||
}
|
||||
defer enum.Release()
|
||||
|
||||
// Initialize a slice with Count capacity
|
||||
dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))
|
||||
|
||||
var errFieldMismatch error
|
||||
for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := func() error {
|
||||
// item is a SWbemObject, but really a Win32_Process
|
||||
item := itemRaw.ToIDispatch()
|
||||
defer item.Release()
|
||||
|
||||
ev := reflect.New(elemType)
|
||||
if err = c.loadEntity(ev.Interface(), item); err != nil {
|
||||
if _, ok := err.(*ErrFieldMismatch); ok {
|
||||
// We continue loading entities even in the face of field mismatch errors.
|
||||
// If we encounter any other error, that other error is returned. Otherwise,
|
||||
// an ErrFieldMismatch is returned.
|
||||
errFieldMismatch = err
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if mat != multiArgTypeStructPtr {
|
||||
ev = ev.Elem()
|
||||
}
|
||||
dv.Set(reflect.Append(dv, ev))
|
||||
return nil
|
||||
}()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return errFieldMismatch
|
||||
}
|
||||
|
||||
// ErrFieldMismatch is returned when a field is to be loaded into a different
|
||||
// type than the one it was stored from, or when a field is missing or
|
||||
// unexported in the destination struct.
|
||||
// StructType is the type of the struct pointed to by the destination argument.
|
||||
type ErrFieldMismatch struct {
|
||||
StructType reflect.Type
|
||||
FieldName string
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (e *ErrFieldMismatch) Error() string {
|
||||
return fmt.Sprintf("wmi: cannot load field %q into a %q: %s",
|
||||
e.FieldName, e.StructType, e.Reason)
|
||||
}
|
||||
|
||||
var timeType = reflect.TypeOf(time.Time{})
|
||||
|
||||
// loadEntity loads a SWbemObject into a struct pointer.
|
||||
func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) {
|
||||
v := reflect.ValueOf(dst).Elem()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := v.Field(i)
|
||||
of := f
|
||||
isPtr := f.Kind() == reflect.Ptr
|
||||
if isPtr {
|
||||
ptr := reflect.New(f.Type().Elem())
|
||||
f.Set(ptr)
|
||||
f = f.Elem()
|
||||
}
|
||||
n := v.Type().Field(i).Name
|
||||
if !f.CanSet() {
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "CanSet() is false",
|
||||
}
|
||||
}
|
||||
prop, err := oleutil.GetProperty(src, n)
|
||||
if err != nil {
|
||||
if !c.AllowMissingFields {
|
||||
errFieldMismatch = &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "no such struct field",
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
defer prop.Clear()
|
||||
|
||||
switch val := prop.Value().(type) {
|
||||
case int8, int16, int32, int64, int:
|
||||
v := reflect.ValueOf(val).Int()
|
||||
switch f.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
f.SetInt(v)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
f.SetUint(uint64(v))
|
||||
default:
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "not an integer class",
|
||||
}
|
||||
}
|
||||
case uint8, uint16, uint32, uint64:
|
||||
v := reflect.ValueOf(val).Uint()
|
||||
switch f.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
f.SetInt(int64(v))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
f.SetUint(v)
|
||||
default:
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "not an integer class",
|
||||
}
|
||||
}
|
||||
case string:
|
||||
switch f.Kind() {
|
||||
case reflect.String:
|
||||
f.SetString(val)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
iv, err := strconv.ParseInt(val, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.SetInt(iv)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
uv, err := strconv.ParseUint(val, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.SetUint(uv)
|
||||
case reflect.Struct:
|
||||
switch f.Type() {
|
||||
case timeType:
|
||||
if len(val) == 25 {
|
||||
mins, err := strconv.Atoi(val[22:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60)
|
||||
}
|
||||
t, err := time.Parse("20060102150405.000000-0700", val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Set(reflect.ValueOf(t))
|
||||
}
|
||||
}
|
||||
case bool:
|
||||
switch f.Kind() {
|
||||
case reflect.Bool:
|
||||
f.SetBool(val)
|
||||
default:
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "not a bool",
|
||||
}
|
||||
}
|
||||
case float32:
|
||||
switch f.Kind() {
|
||||
case reflect.Float32:
|
||||
f.SetFloat(float64(val))
|
||||
default:
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: "not a Float32",
|
||||
}
|
||||
}
|
||||
default:
|
||||
if f.Kind() == reflect.Slice {
|
||||
switch f.Type().Elem().Kind() {
|
||||
case reflect.String:
|
||||
safeArray := prop.ToArray()
|
||||
if safeArray != nil {
|
||||
arr := safeArray.ToValueArray()
|
||||
fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
|
||||
for i, v := range arr {
|
||||
s := fArr.Index(i)
|
||||
s.SetString(v.(string))
|
||||
}
|
||||
f.Set(fArr)
|
||||
}
|
||||
case reflect.Uint8:
|
||||
safeArray := prop.ToArray()
|
||||
if safeArray != nil {
|
||||
arr := safeArray.ToValueArray()
|
||||
fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
|
||||
for i, v := range arr {
|
||||
s := fArr.Index(i)
|
||||
s.SetUint(reflect.ValueOf(v).Uint())
|
||||
}
|
||||
f.Set(fArr)
|
||||
}
|
||||
default:
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: fmt.Sprintf("unsupported slice type (%T)", val),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
typeof := reflect.TypeOf(val)
|
||||
if typeof == nil && (isPtr || c.NonePtrZero) {
|
||||
if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) {
|
||||
of.Set(reflect.Zero(of.Type()))
|
||||
}
|
||||
break
|
||||
}
|
||||
return &ErrFieldMismatch{
|
||||
StructType: of.Type(),
|
||||
FieldName: n,
|
||||
Reason: fmt.Sprintf("unsupported type (%T)", val),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return errFieldMismatch
|
||||
}
|
||||
|
||||
type multiArgType int
|
||||
|
||||
const (
|
||||
multiArgTypeInvalid multiArgType = iota
|
||||
multiArgTypeStruct
|
||||
multiArgTypeStructPtr
|
||||
)
|
||||
|
||||
// checkMultiArg checks that v has type []S, []*S for some struct type S.
|
||||
//
|
||||
// It returns what category the slice's elements are, and the reflect.Type
|
||||
// that represents S.
|
||||
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
|
||||
if v.Kind() != reflect.Slice {
|
||||
return multiArgTypeInvalid, nil
|
||||
}
|
||||
elemType = v.Type().Elem()
|
||||
switch elemType.Kind() {
|
||||
case reflect.Struct:
|
||||
return multiArgTypeStruct, elemType
|
||||
case reflect.Ptr:
|
||||
elemType = elemType.Elem()
|
||||
if elemType.Kind() == reflect.Struct {
|
||||
return multiArgTypeStructPtr, elemType
|
||||
}
|
||||
}
|
||||
return multiArgTypeInvalid, nil
|
||||
}
|
||||
|
||||
func oleInt64(item *ole.IDispatch, prop string) (int64, error) {
|
||||
v, err := oleutil.GetProperty(item, prop)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer v.Clear()
|
||||
|
||||
i := int64(v.Val)
|
||||
return i, nil
|
||||
}
|
||||
|
||||
// CreateQuery returns a WQL query string that queries all columns of src. where
|
||||
// is an optional string that is appended to the query, to be used with WHERE
|
||||
// clauses. In such a case, the "WHERE" string should appear at the beginning.
|
||||
func CreateQuery(src interface{}, where string) string {
|
||||
var b bytes.Buffer
|
||||
b.WriteString("SELECT ")
|
||||
s := reflect.Indirect(reflect.ValueOf(src))
|
||||
t := s.Type()
|
||||
if s.Kind() == reflect.Slice {
|
||||
t = t.Elem()
|
||||
}
|
||||
if t.Kind() != reflect.Struct {
|
||||
return ""
|
||||
}
|
||||
var fields []string
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
fields = append(fields, t.Field(i).Name)
|
||||
}
|
||||
b.WriteString(strings.Join(fields, ", "))
|
||||
b.WriteString(" FROM ")
|
||||
b.WriteString(t.Name())
|
||||
b.WriteString(" " + where)
|
||||
return b.String()
|
||||
}
|
||||
27
vendor/github.com/alecthomas/template/LICENSE
generated
vendored
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
25
vendor/github.com/alecthomas/template/README.md
generated
vendored
@@ -1,25 +0,0 @@
|
||||
# Go's `text/template` package with newline elision
|
||||
|
||||
This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.
|
||||
|
||||
eg.
|
||||
|
||||
```
|
||||
{{if true}}\
|
||||
hello
|
||||
{{end}}\
|
||||
```
|
||||
|
||||
Will result in:
|
||||
|
||||
```
|
||||
hello\n
|
||||
```
|
||||
|
||||
Rather than:
|
||||
|
||||
```
|
||||
\n
|
||||
hello\n
|
||||
\n
|
||||
```
|
||||
406
vendor/github.com/alecthomas/template/doc.go
generated
vendored
@@ -1,406 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package template implements data-driven templates for generating textual output.
|
||||
|
||||
To generate HTML output, see package html/template, which has the same interface
|
||||
as this package but automatically secures HTML output against certain attacks.
|
||||
|
||||
Templates are executed by applying them to a data structure. Annotations in the
|
||||
template refer to elements of the data structure (typically a field of a struct
|
||||
or a key in a map) to control execution and derive values to be displayed.
|
||||
Execution of the template walks the structure and sets the cursor, represented
|
||||
by a period '.' and called "dot", to the value at the current location in the
|
||||
structure as execution proceeds.
|
||||
|
||||
The input text for a template is UTF-8-encoded text in any format.
|
||||
"Actions"--data evaluations or control structures--are delimited by
|
||||
"{{" and "}}"; all text outside actions is copied to the output unchanged.
|
||||
Actions may not span newlines, although comments can.
|
||||
|
||||
Once parsed, a template may be executed safely in parallel.
|
||||
|
||||
Here is a trivial example that prints "17 items are made of wool".
|
||||
|
||||
type Inventory struct {
|
||||
Material string
|
||||
Count uint
|
||||
}
|
||||
sweaters := Inventory{"wool", 17}
|
||||
tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
|
||||
if err != nil { panic(err) }
|
||||
err = tmpl.Execute(os.Stdout, sweaters)
|
||||
if err != nil { panic(err) }
|
||||
|
||||
More intricate examples appear below.
|
||||
|
||||
Actions
|
||||
|
||||
Here is the list of actions. "Arguments" and "pipelines" are evaluations of
|
||||
data, defined in detail below.
|
||||
|
||||
*/
|
||||
// {{/* a comment */}}
|
||||
// A comment; discarded. May contain newlines.
|
||||
// Comments do not nest and must start and end at the
|
||||
// delimiters, as shown here.
|
||||
/*
|
||||
|
||||
{{pipeline}}
|
||||
The default textual representation of the value of the pipeline
|
||||
is copied to the output.
|
||||
|
||||
{{if pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, T1 is executed. The empty values are false, 0, any
|
||||
nil pointer or interface value, and any array, slice, map, or
|
||||
string of length zero.
|
||||
Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, T0 is executed;
|
||||
otherwise, T1 is executed. Dot is unaffected.
|
||||
|
||||
{{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
|
||||
To simplify the appearance of if-else chains, the else action
|
||||
of an if may include another if directly; the effect is exactly
|
||||
the same as writing
|
||||
{{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
|
||||
|
||||
{{range pipeline}} T1 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, nothing is output;
|
||||
otherwise, dot is set to the successive elements of the array,
|
||||
slice, or map and T1 is executed. If the value is a map and the
|
||||
keys are of basic type with a defined order ("comparable"), the
|
||||
elements will be visited in sorted key order.
|
||||
|
||||
{{range pipeline}} T1 {{else}} T0 {{end}}
|
||||
The value of the pipeline must be an array, slice, map, or channel.
|
||||
If the value of the pipeline has length zero, dot is unaffected and
|
||||
T0 is executed; otherwise, dot is set to the successive elements
|
||||
of the array, slice, or map and T1 is executed.
|
||||
|
||||
{{template "name"}}
|
||||
The template with the specified name is executed with nil data.
|
||||
|
||||
{{template "name" pipeline}}
|
||||
The template with the specified name is executed with dot set
|
||||
to the value of the pipeline.
|
||||
|
||||
{{with pipeline}} T1 {{end}}
|
||||
If the value of the pipeline is empty, no output is generated;
|
||||
otherwise, dot is set to the value of the pipeline and T1 is
|
||||
executed.
|
||||
|
||||
{{with pipeline}} T1 {{else}} T0 {{end}}
|
||||
If the value of the pipeline is empty, dot is unaffected and T0
|
||||
is executed; otherwise, dot is set to the value of the pipeline
|
||||
and T1 is executed.
|
||||
|
||||
Arguments
|
||||
|
||||
An argument is a simple value, denoted by one of the following.
|
||||
|
||||
- A boolean, string, character, integer, floating-point, imaginary
|
||||
or complex constant in Go syntax. These behave like Go's untyped
|
||||
constants, although raw strings may not span newlines.
|
||||
- The keyword nil, representing an untyped Go nil.
|
||||
- The character '.' (period):
|
||||
.
|
||||
The result is the value of dot.
|
||||
- A variable name, which is a (possibly empty) alphanumeric string
|
||||
preceded by a dollar sign, such as
|
||||
$piOver2
|
||||
or
|
||||
$
|
||||
The result is the value of the variable.
|
||||
Variables are described below.
|
||||
- The name of a field of the data, which must be a struct, preceded
|
||||
by a period, such as
|
||||
.Field
|
||||
The result is the value of the field. Field invocations may be
|
||||
chained:
|
||||
.Field1.Field2
|
||||
Fields can also be evaluated on variables, including chaining:
|
||||
$x.Field1.Field2
|
||||
- The name of a key of the data, which must be a map, preceded
|
||||
by a period, such as
|
||||
.Key
|
||||
The result is the map element value indexed by the key.
|
||||
Key invocations may be chained and combined with fields to any
|
||||
depth:
|
||||
.Field1.Key1.Field2.Key2
|
||||
Although the key must be an alphanumeric identifier, unlike with
|
||||
field names they do not need to start with an upper case letter.
|
||||
Keys can also be evaluated on variables, including chaining:
|
||||
$x.key1.key2
|
||||
- The name of a niladic method of the data, preceded by a period,
|
||||
such as
|
||||
.Method
|
||||
The result is the value of invoking the method with dot as the
|
||||
receiver, dot.Method(). Such a method must have one return value (of
|
||||
any type) or two return values, the second of which is an error.
|
||||
If it has two and the returned error is non-nil, execution terminates
|
||||
and an error is returned to the caller as the value of Execute.
|
||||
Method invocations may be chained and combined with fields and keys
|
||||
to any depth:
|
||||
.Field1.Key1.Method1.Field2.Key2.Method2
|
||||
Methods can also be evaluated on variables, including chaining:
|
||||
$x.Method1.Field
|
||||
- The name of a niladic function, such as
|
||||
fun
|
||||
The result is the value of invoking the function, fun(). The return
|
||||
types and values behave as in methods. Functions and function
|
||||
names are described below.
|
||||
- A parenthesized instance of one the above, for grouping. The result
|
||||
may be accessed by a field or map key invocation.
|
||||
print (.F1 arg1) (.F2 arg2)
|
||||
(.StructValuedMethod "arg").Field
|
||||
|
||||
Arguments may evaluate to any type; if they are pointers the implementation
|
||||
automatically indirects to the base type when required.
|
||||
If an evaluation yields a function value, such as a function-valued
|
||||
field of a struct, the function is not invoked automatically, but it
|
||||
can be used as a truth value for an if action and the like. To invoke
|
||||
it, use the call function, defined below.
|
||||
|
||||
A pipeline is a possibly chained sequence of "commands". A command is a simple
|
||||
value (argument) or a function or method call, possibly with multiple arguments:
|
||||
|
||||
Argument
|
||||
The result is the value of evaluating the argument.
|
||||
.Method [Argument...]
|
||||
The method can be alone or the last element of a chain but,
|
||||
unlike methods in the middle of a chain, it can take arguments.
|
||||
The result is the value of calling the method with the
|
||||
arguments:
|
||||
dot.Method(Argument1, etc.)
|
||||
functionName [Argument...]
|
||||
The result is the value of calling the function associated
|
||||
with the name:
|
||||
function(Argument1, etc.)
|
||||
Functions and function names are described below.
|
||||
|
||||
Pipelines
|
||||
|
||||
A pipeline may be "chained" by separating a sequence of commands with pipeline
|
||||
characters '|'. In a chained pipeline, the result of the each command is
|
||||
passed as the last argument of the following command. The output of the final
|
||||
command in the pipeline is the value of the pipeline.
|
||||
|
||||
The output of a command will be either one value or two values, the second of
|
||||
which has type error. If that second value is present and evaluates to
|
||||
non-nil, execution terminates and the error is returned to the caller of
|
||||
Execute.
|
||||
|
||||
Variables
|
||||
|
||||
A pipeline inside an action may initialize a variable to capture the result.
|
||||
The initialization has syntax
|
||||
|
||||
$variable := pipeline
|
||||
|
||||
where $variable is the name of the variable. An action that declares a
|
||||
variable produces no output.
|
||||
|
||||
If a "range" action initializes a variable, the variable is set to the
|
||||
successive elements of the iteration. Also, a "range" may declare two
|
||||
variables, separated by a comma:
|
||||
|
||||
range $index, $element := pipeline
|
||||
|
||||
in which case $index and $element are set to the successive values of the
|
||||
array/slice index or map key and element, respectively. Note that if there is
|
||||
only one variable, it is assigned the element; this is opposite to the
|
||||
convention in Go range clauses.
|
||||
|
||||
A variable's scope extends to the "end" action of the control structure ("if",
|
||||
"with", or "range") in which it is declared, or to the end of the template if
|
||||
there is no such control structure. A template invocation does not inherit
|
||||
variables from the point of its invocation.
|
||||
|
||||
When execution begins, $ is set to the data argument passed to Execute, that is,
|
||||
to the starting value of dot.
|
||||
|
||||
Examples
|
||||
|
||||
Here are some example one-line templates demonstrating pipelines and variables.
|
||||
All produce the quoted word "output":
|
||||
|
||||
{{"\"output\""}}
|
||||
A string constant.
|
||||
{{`"output"`}}
|
||||
A raw string constant.
|
||||
{{printf "%q" "output"}}
|
||||
A function call.
|
||||
{{"output" | printf "%q"}}
|
||||
A function call whose final argument comes from the previous
|
||||
command.
|
||||
{{printf "%q" (print "out" "put")}}
|
||||
A parenthesized argument.
|
||||
{{"put" | printf "%s%s" "out" | printf "%q"}}
|
||||
A more elaborate call.
|
||||
{{"output" | printf "%s" | printf "%q"}}
|
||||
A longer chain.
|
||||
{{with "output"}}{{printf "%q" .}}{{end}}
|
||||
A with action using dot.
|
||||
{{with $x := "output" | printf "%q"}}{{$x}}{{end}}
|
||||
A with action that creates and uses a variable.
|
||||
{{with $x := "output"}}{{printf "%q" $x}}{{end}}
|
||||
A with action that uses the variable in another action.
|
||||
{{with $x := "output"}}{{$x | printf "%q"}}{{end}}
|
||||
The same, but pipelined.
|
||||
|
||||
Functions
|
||||
|
||||
During execution functions are found in two function maps: first in the
|
||||
template, then in the global function map. By default, no functions are defined
|
||||
in the template but the Funcs method can be used to add them.
|
||||
|
||||
Predefined global functions are named as follows.
|
||||
|
||||
and
|
||||
Returns the boolean AND of its arguments by returning the
|
||||
first empty argument or the last argument, that is,
|
||||
"and x y" behaves as "if x then y else x". All the
|
||||
arguments are evaluated.
|
||||
call
|
||||
Returns the result of calling the first argument, which
|
||||
must be a function, with the remaining arguments as parameters.
|
||||
Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
|
||||
Y is a func-valued field, map entry, or the like.
|
||||
The first argument must be the result of an evaluation
|
||||
that yields a value of function type (as distinct from
|
||||
a predefined function such as print). The function must
|
||||
return either one or two result values, the second of which
|
||||
is of type error. If the arguments don't match the function
|
||||
or the returned error value is non-nil, execution stops.
|
||||
html
|
||||
Returns the escaped HTML equivalent of the textual
|
||||
representation of its arguments.
|
||||
index
|
||||
Returns the result of indexing its first argument by the
|
||||
following arguments. Thus "index x 1 2 3" is, in Go syntax,
|
||||
x[1][2][3]. Each indexed item must be a map, slice, or array.
|
||||
js
|
||||
Returns the escaped JavaScript equivalent of the textual
|
||||
representation of its arguments.
|
||||
len
|
||||
Returns the integer length of its argument.
|
||||
not
|
||||
Returns the boolean negation of its single argument.
|
||||
or
|
||||
Returns the boolean OR of its arguments by returning the
|
||||
first non-empty argument or the last argument, that is,
|
||||
"or x y" behaves as "if x then x else y". All the
|
||||
arguments are evaluated.
|
||||
print
|
||||
An alias for fmt.Sprint
|
||||
printf
|
||||
An alias for fmt.Sprintf
|
||||
println
|
||||
An alias for fmt.Sprintln
|
||||
urlquery
|
||||
Returns the escaped value of the textual representation of
|
||||
its arguments in a form suitable for embedding in a URL query.
|
||||
|
||||
The boolean functions take any zero value to be false and a non-zero
|
||||
value to be true.
|
||||
|
||||
There is also a set of binary comparison operators defined as
|
||||
functions:
|
||||
|
||||
eq
|
||||
Returns the boolean truth of arg1 == arg2
|
||||
ne
|
||||
Returns the boolean truth of arg1 != arg2
|
||||
lt
|
||||
Returns the boolean truth of arg1 < arg2
|
||||
le
|
||||
Returns the boolean truth of arg1 <= arg2
|
||||
gt
|
||||
Returns the boolean truth of arg1 > arg2
|
||||
ge
|
||||
Returns the boolean truth of arg1 >= arg2
|
||||
|
||||
For simpler multi-way equality tests, eq (only) accepts two or more
|
||||
arguments and compares the second and subsequent to the first,
|
||||
returning in effect
|
||||
|
||||
arg1==arg2 || arg1==arg3 || arg1==arg4 ...
|
||||
|
||||
(Unlike with || in Go, however, eq is a function call and all the
|
||||
arguments will be evaluated.)
|
||||
|
||||
The comparison functions work on basic types only (or named basic
|
||||
types, such as "type Celsius float32"). They implement the Go rules
|
||||
for comparison of values, except that size and exact type are
|
||||
ignored, so any integer value, signed or unsigned, may be compared
|
||||
with any other integer value. (The arithmetic value is compared,
|
||||
not the bit pattern, so all negative integers are less than all
|
||||
unsigned integers.) However, as usual, one may not compare an int
|
||||
with a float32 and so on.
|
||||
|
||||
Associated templates
|
||||
|
||||
Each template is named by a string specified when it is created. Also, each
|
||||
template is associated with zero or more other templates that it may invoke by
|
||||
name; such associations are transitive and form a name space of templates.
|
||||
|
||||
A template may use a template invocation to instantiate another associated
|
||||
template; see the explanation of the "template" action above. The name must be
|
||||
that of a template associated with the template that contains the invocation.
|
||||
|
||||
Nested template definitions
|
||||
|
||||
When parsing a template, another template may be defined and associated with the
|
||||
template being parsed. Template definitions must appear at the top level of the
|
||||
template, much like global variables in a Go program.
|
||||
|
||||
The syntax of such definitions is to surround each template declaration with a
|
||||
"define" and "end" action.
|
||||
|
||||
The define action names the template being created by providing a string
|
||||
constant. Here is a simple example:
|
||||
|
||||
`{{define "T1"}}ONE{{end}}
|
||||
{{define "T2"}}TWO{{end}}
|
||||
{{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
|
||||
{{template "T3"}}`
|
||||
|
||||
This defines two templates, T1 and T2, and a third T3 that invokes the other two
|
||||
when it is executed. Finally it invokes T3. If executed this template will
|
||||
produce the text
|
||||
|
||||
ONE TWO
|
||||
|
||||
By construction, a template may reside in only one association. If it's
|
||||
necessary to have a template addressable from multiple associations, the
|
||||
template definition must be parsed multiple times to create distinct *Template
|
||||
values, or must be copied with the Clone or AddParseTree method.
|
||||
|
||||
Parse may be called multiple times to assemble the various associated templates;
|
||||
see the ParseFiles and ParseGlob functions and methods for simple ways to parse
|
||||
related templates stored in files.
|
||||
|
||||
A template may be executed directly or through ExecuteTemplate, which executes
|
||||
an associated template identified by name. To invoke our example above, we
|
||||
might write,
|
||||
|
||||
err := tmpl.Execute(os.Stdout, "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
||||
|
||||
or to invoke a particular template explicitly by name,
|
||||
|
||||
err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
|
||||
if err != nil {
|
||||
log.Fatalf("execution failed: %s", err)
|
||||
}
|
||||
|
||||
*/
|
||||
package template
|
||||
845
vendor/github.com/alecthomas/template/exec.go
generated
vendored
@@ -1,845 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/alecthomas/template/parse"
|
||||
)
|
||||
|
||||
// state represents the state of an execution. It's not part of the
|
||||
// template so that multiple executions of the same template
|
||||
// can execute in parallel.
|
||||
type state struct {
|
||||
tmpl *Template
|
||||
wr io.Writer
|
||||
node parse.Node // current node, for errors
|
||||
vars []variable // push-down stack of variable values.
|
||||
}
|
||||
|
||||
// variable holds the dynamic value of a variable such as $, $x etc.
|
||||
type variable struct {
|
||||
name string
|
||||
value reflect.Value
|
||||
}
|
||||
|
||||
// push pushes a new variable on the stack.
|
||||
func (s *state) push(name string, value reflect.Value) {
|
||||
s.vars = append(s.vars, variable{name, value})
|
||||
}
|
||||
|
||||
// mark returns the length of the variable stack.
|
||||
func (s *state) mark() int {
|
||||
return len(s.vars)
|
||||
}
|
||||
|
||||
// pop pops the variable stack up to the mark.
|
||||
func (s *state) pop(mark int) {
|
||||
s.vars = s.vars[0:mark]
|
||||
}
|
||||
|
||||
// setVar overwrites the top-nth variable on the stack. Used by range iterations.
|
||||
func (s *state) setVar(n int, value reflect.Value) {
|
||||
s.vars[len(s.vars)-n].value = value
|
||||
}
|
||||
|
||||
// varValue returns the value of the named variable.
|
||||
func (s *state) varValue(name string) reflect.Value {
|
||||
for i := s.mark() - 1; i >= 0; i-- {
|
||||
if s.vars[i].name == name {
|
||||
return s.vars[i].value
|
||||
}
|
||||
}
|
||||
s.errorf("undefined variable: %s", name)
|
||||
return zero
|
||||
}
|
||||
|
||||
var zero reflect.Value
|
||||
|
||||
// at marks the state to be on node n, for error reporting.
|
||||
func (s *state) at(node parse.Node) {
|
||||
s.node = node
|
||||
}
|
||||
|
||||
// doublePercent returns the string with %'s replaced by %%, if necessary,
|
||||
// so it can be used safely inside a Printf format string.
|
||||
func doublePercent(str string) string {
|
||||
if strings.Contains(str, "%") {
|
||||
str = strings.Replace(str, "%", "%%", -1)
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (s *state) errorf(format string, args ...interface{}) {
|
||||
name := doublePercent(s.tmpl.Name())
|
||||
if s.node == nil {
|
||||
format = fmt.Sprintf("template: %s: %s", name, format)
|
||||
} else {
|
||||
location, context := s.tmpl.ErrorContext(s.node)
|
||||
format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
|
||||
}
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// errRecover is the handler that turns panics into returns from the top
|
||||
// level of Parse.
|
||||
func errRecover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
switch err := e.(type) {
|
||||
case runtime.Error:
|
||||
panic(e)
|
||||
case error:
|
||||
*errp = err
|
||||
default:
|
||||
panic(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ExecuteTemplate applies the template associated with t that has the given name
|
||||
// to the specified data object and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
|
||||
tmpl := t.tmpl[name]
|
||||
if tmpl == nil {
|
||||
return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
|
||||
}
|
||||
return tmpl.Execute(wr, data)
|
||||
}
|
||||
|
||||
// Execute applies a parsed template to the specified data object,
|
||||
// and writes the output to wr.
|
||||
// If an error occurs executing the template or writing its output,
|
||||
// execution stops, but partial results may already have been written to
|
||||
// the output writer.
|
||||
// A template may be executed safely in parallel.
|
||||
func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
|
||||
defer errRecover(&err)
|
||||
value := reflect.ValueOf(data)
|
||||
state := &state{
|
||||
tmpl: t,
|
||||
wr: wr,
|
||||
vars: []variable{{"$", value}},
|
||||
}
|
||||
t.init()
|
||||
if t.Tree == nil || t.Root == nil {
|
||||
var b bytes.Buffer
|
||||
for name, tmpl := range t.tmpl {
|
||||
if tmpl.Tree == nil || tmpl.Root == nil {
|
||||
continue
|
||||
}
|
||||
if b.Len() > 0 {
|
||||
b.WriteString(", ")
|
||||
}
|
||||
fmt.Fprintf(&b, "%q", name)
|
||||
}
|
||||
var s string
|
||||
if b.Len() > 0 {
|
||||
s = "; defined templates are: " + b.String()
|
||||
}
|
||||
state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
|
||||
}
|
||||
state.walk(value, t.Root)
|
||||
return
|
||||
}
|
||||
|
||||
// Walk functions step through the major pieces of the template structure,
|
||||
// generating output as they go.
|
||||
func (s *state) walk(dot reflect.Value, node parse.Node) {
|
||||
s.at(node)
|
||||
switch node := node.(type) {
|
||||
case *parse.ActionNode:
|
||||
// Do not pop variables so they persist until next end.
|
||||
// Also, if the action declares variables, don't print the result.
|
||||
val := s.evalPipeline(dot, node.Pipe)
|
||||
if len(node.Pipe.Decl) == 0 {
|
||||
s.printValue(node, val)
|
||||
}
|
||||
case *parse.IfNode:
|
||||
s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
|
||||
case *parse.ListNode:
|
||||
for _, node := range node.Nodes {
|
||||
s.walk(dot, node)
|
||||
}
|
||||
case *parse.RangeNode:
|
||||
s.walkRange(dot, node)
|
||||
case *parse.TemplateNode:
|
||||
s.walkTemplate(dot, node)
|
||||
case *parse.TextNode:
|
||||
if _, err := s.wr.Write(node.Text); err != nil {
|
||||
s.errorf("%s", err)
|
||||
}
|
||||
case *parse.WithNode:
|
||||
s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
|
||||
default:
|
||||
s.errorf("unknown node: %s", node)
|
||||
}
|
||||
}
|
||||
|
||||
// walkIfOrWith walks an 'if' or 'with' node. The two control structures
|
||||
// are identical in behavior except that 'with' sets dot.
|
||||
func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
|
||||
defer s.pop(s.mark())
|
||||
val := s.evalPipeline(dot, pipe)
|
||||
truth, ok := isTrue(val)
|
||||
if !ok {
|
||||
s.errorf("if/with can't use %v", val)
|
||||
}
|
||||
if truth {
|
||||
if typ == parse.NodeWith {
|
||||
s.walk(val, list)
|
||||
} else {
|
||||
s.walk(dot, list)
|
||||
}
|
||||
} else if elseList != nil {
|
||||
s.walk(dot, elseList)
|
||||
}
|
||||
}
|
||||
|
||||
// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
|
||||
// and whether the value has a meaningful truth value.
|
||||
func isTrue(val reflect.Value) (truth, ok bool) {
|
||||
if !val.IsValid() {
|
||||
// Something like var x interface{}, never set. It's a form of nil.
|
||||
return false, true
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
truth = val.Len() > 0
|
||||
case reflect.Bool:
|
||||
truth = val.Bool()
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
truth = val.Complex() != 0
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
|
||||
truth = !val.IsNil()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
truth = val.Int() != 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
truth = val.Float() != 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
truth = val.Uint() != 0
|
||||
case reflect.Struct:
|
||||
truth = true // Struct values are always true.
|
||||
default:
|
||||
return
|
||||
}
|
||||
return truth, true
|
||||
}
|
||||
|
||||
func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
|
||||
s.at(r)
|
||||
defer s.pop(s.mark())
|
||||
val, _ := indirect(s.evalPipeline(dot, r.Pipe))
|
||||
// mark top of stack before any variables in the body are pushed.
|
||||
mark := s.mark()
|
||||
oneIteration := func(index, elem reflect.Value) {
|
||||
// Set top var (lexically the second if there are two) to the element.
|
||||
if len(r.Pipe.Decl) > 0 {
|
||||
s.setVar(1, elem)
|
||||
}
|
||||
// Set next var (lexically the first if there are two) to the index.
|
||||
if len(r.Pipe.Decl) > 1 {
|
||||
s.setVar(2, index)
|
||||
}
|
||||
s.walk(elem, r.List)
|
||||
s.pop(mark)
|
||||
}
|
||||
switch val.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for i := 0; i < val.Len(); i++ {
|
||||
oneIteration(reflect.ValueOf(i), val.Index(i))
|
||||
}
|
||||
return
|
||||
case reflect.Map:
|
||||
if val.Len() == 0 {
|
||||
break
|
||||
}
|
||||
for _, key := range sortKeys(val.MapKeys()) {
|
||||
oneIteration(key, val.MapIndex(key))
|
||||
}
|
||||
return
|
||||
case reflect.Chan:
|
||||
if val.IsNil() {
|
||||
break
|
||||
}
|
||||
i := 0
|
||||
for ; ; i++ {
|
||||
elem, ok := val.Recv()
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
oneIteration(reflect.ValueOf(i), elem)
|
||||
}
|
||||
if i == 0 {
|
||||
break
|
||||
}
|
||||
return
|
||||
case reflect.Invalid:
|
||||
break // An invalid value is likely a nil map, etc. and acts like an empty map.
|
||||
default:
|
||||
s.errorf("range can't iterate over %v", val)
|
||||
}
|
||||
if r.ElseList != nil {
|
||||
s.walk(dot, r.ElseList)
|
||||
}
|
||||
}
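walkRange above assigns the element to the top declared variable and the index (or map key) to the one before it. A short sketch of that behaviour via the standard text/template package; the template text and data are illustrative only.

package main

import (
	"os"
	"text/template"
)

func main() {
	// Illustrative sketch: with two declared variables, the first is the index
	// (or map key) and the second is the element, as walkRange sets them.
	t := template.Must(template.New("r").Parse(
		"{{range $i, $v := .}}{{$i}}={{$v}} {{end}}{{else}}no items{{end}}\n"))
	t.Execute(os.Stdout, []string{"a", "b"}) // prints "0=a 1=b "
	t.Execute(os.Stdout, []string{})         // empty range takes the else branch
}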
|
||||
|
||||
func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
|
||||
s.at(t)
|
||||
tmpl := s.tmpl.tmpl[t.Name]
|
||||
if tmpl == nil {
|
||||
s.errorf("template %q not defined", t.Name)
|
||||
}
|
||||
// Variables declared by the pipeline persist.
|
||||
dot = s.evalPipeline(dot, t.Pipe)
|
||||
newState := *s
|
||||
newState.tmpl = tmpl
|
||||
// No dynamic scoping: template invocations inherit no variables.
|
||||
newState.vars = []variable{{"$", dot}}
|
||||
newState.walk(dot, tmpl.Root)
|
||||
}
|
||||
|
||||
// Eval functions evaluate pipelines, commands, and their elements and extract
|
||||
// values from the data structure by examining fields, calling methods, and so on.
|
||||
// The printing of those values happens only through walk functions.
|
||||
|
||||
// evalPipeline returns the value acquired by evaluating a pipeline. If the
|
||||
// pipeline has a variable declaration, the variable will be pushed on the
|
||||
// stack. Callers should therefore pop the stack after they are finished
|
||||
// executing commands depending on the pipeline value.
|
||||
func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
|
||||
if pipe == nil {
|
||||
return
|
||||
}
|
||||
s.at(pipe)
|
||||
for _, cmd := range pipe.Cmds {
|
||||
value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
|
||||
// If the object has type interface{}, dig down one level to the thing inside.
|
||||
if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
|
||||
value = reflect.ValueOf(value.Interface()) // lovely!
|
||||
}
|
||||
}
|
||||
for _, variable := range pipe.Decl {
|
||||
s.push(variable.Ident[0], value)
|
||||
}
|
||||
return value
|
||||
}
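The pipeline rule described above — each command's result becomes the final argument of the next, and declarations push the value onto the variable stack — is visible from template text alone. A hedged sketch using the standard text/template package, with illustrative data:

package main

import (
	"os"
	"text/template"
)

func main() {
	// Illustrative sketch: $n is declared (so nothing is printed for it), and
	// the pipe passes $n as the final argument of printf.
	t := template.Must(template.New("p").Parse(
		`{{$n := len .}}{{$n | printf "count=%d"}}` + "\n"))
	t.Execute(os.Stdout, []int{1, 2, 3}) // prints "count=3"
}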
|
||||
|
||||
func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
|
||||
if len(args) > 1 || final.IsValid() {
|
||||
s.errorf("can't give argument to non-function %s", args[0])
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
|
||||
firstWord := cmd.Args[0]
|
||||
switch n := firstWord.(type) {
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, cmd.Args, final)
|
||||
case *parse.ChainNode:
|
||||
return s.evalChainNode(dot, n, cmd.Args, final)
|
||||
case *parse.IdentifierNode:
|
||||
// Must be a function.
|
||||
return s.evalFunction(dot, n, cmd, cmd.Args, final)
|
||||
case *parse.PipeNode:
|
||||
// Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
|
||||
return s.evalPipeline(dot, n)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, cmd.Args, final)
|
||||
}
|
||||
s.at(firstWord)
|
||||
s.notAFunction(cmd.Args, final)
|
||||
switch word := firstWord.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(word.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.NilNode:
|
||||
s.errorf("nil is not a command")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(word)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(word.Text)
|
||||
}
|
||||
s.errorf("can't evaluate command %q", firstWord)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// idealConstant is called to return the value of a number in a context where
|
||||
// we don't know the type. In that case, the syntax of the number tells us
|
||||
// its type, and we use Go rules to resolve. Note there is no such thing as
|
||||
// a uint ideal constant in this situation - the value must be of int type.
|
||||
func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
|
||||
// These are ideal constants but we don't know the type
|
||||
// and we have no context. (If it was a method argument,
|
||||
// we'd know what we need.) The syntax guides us to some extent.
|
||||
s.at(constant)
|
||||
switch {
|
||||
case constant.IsComplex:
|
||||
return reflect.ValueOf(constant.Complex128) // incontrovertible.
|
||||
case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
|
||||
return reflect.ValueOf(constant.Float64)
|
||||
case constant.IsInt:
|
||||
n := int(constant.Int64)
|
||||
if int64(n) != constant.Int64 {
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return reflect.ValueOf(n)
|
||||
case constant.IsUint:
|
||||
s.errorf("%s overflows int", constant.Text)
|
||||
}
|
||||
return zero
|
||||
}
|
||||
|
||||
func isHexConstant(s string) bool {
|
||||
return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
|
||||
}
|
||||
|
||||
func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(field)
|
||||
return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(chain)
|
||||
// (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
|
||||
pipe := s.evalArg(dot, nil, chain.Node)
|
||||
if len(chain.Field) == 0 {
|
||||
s.errorf("internal error: no fields in evalChainNode")
|
||||
}
|
||||
return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
|
||||
}
|
||||
|
||||
func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
// $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
|
||||
s.at(variable)
|
||||
value := s.varValue(variable.Ident[0])
|
||||
if len(variable.Ident) == 1 {
|
||||
s.notAFunction(args, final)
|
||||
return value
|
||||
}
|
||||
return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
|
||||
}
|
||||
|
||||
// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
|
||||
// dot is the environment in which to evaluate arguments, while
|
||||
// receiver is the value being walked along the chain.
|
||||
func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
n := len(ident)
|
||||
for i := 0; i < n-1; i++ {
|
||||
receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
|
||||
}
|
||||
// Now if it's a method, it gets the arguments.
|
||||
return s.evalField(dot, ident[n-1], node, args, final, receiver)
|
||||
}
|
||||
|
||||
func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
s.at(node)
|
||||
name := node.Ident
|
||||
function, ok := findFunction(name, s.tmpl)
|
||||
if !ok {
|
||||
s.errorf("%q is not a defined function", name)
|
||||
}
|
||||
return s.evalCall(dot, function, cmd, name, args, final)
|
||||
}
|
||||
|
||||
// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
|
||||
// The 'final' argument represents the return value from the preceding
|
||||
// value of the pipeline, if any.
|
||||
func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
|
||||
if !receiver.IsValid() {
|
||||
return zero
|
||||
}
|
||||
typ := receiver.Type()
|
||||
receiver, _ = indirect(receiver)
|
||||
// Unless it's an interface, need to get to a value of type *T to guarantee
|
||||
// we see all methods of T and *T.
|
||||
ptr := receiver
|
||||
if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
|
||||
ptr = ptr.Addr()
|
||||
}
|
||||
if method := ptr.MethodByName(fieldName); method.IsValid() {
|
||||
return s.evalCall(dot, method, node, fieldName, args, final)
|
||||
}
|
||||
hasArgs := len(args) > 1 || final.IsValid()
|
||||
// It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
|
||||
receiver, isNil := indirect(receiver)
|
||||
if isNil {
|
||||
s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
|
||||
}
|
||||
switch receiver.Kind() {
|
||||
case reflect.Struct:
|
||||
tField, ok := receiver.Type().FieldByName(fieldName)
|
||||
if ok {
|
||||
field := receiver.FieldByIndex(tField.Index)
|
||||
if tField.PkgPath != "" { // field is unexported
|
||||
s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
|
||||
}
|
||||
// If it's a function, we must call it.
|
||||
if hasArgs {
|
||||
s.errorf("%s has arguments but cannot be invoked as function", fieldName)
|
||||
}
|
||||
return field
|
||||
}
|
||||
s.errorf("%s is not a field of struct type %s", fieldName, typ)
|
||||
case reflect.Map:
|
||||
// If it's a map, attempt to use the field name as a key.
|
||||
nameVal := reflect.ValueOf(fieldName)
|
||||
if nameVal.Type().AssignableTo(receiver.Type().Key()) {
|
||||
if hasArgs {
|
||||
s.errorf("%s is not a method but has arguments", fieldName)
|
||||
}
|
||||
return receiver.MapIndex(nameVal)
|
||||
}
|
||||
}
|
||||
s.errorf("can't evaluate field %s in type %s", fieldName, typ)
|
||||
panic("not reached")
|
||||
}
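evalField resolves a name first as a method, then as a struct field, then as a map key. A small sketch of the resulting template behaviour (standard text/template; the page type, its fields, and the Upper method are invented for illustration):

package main

import (
	"os"
	"strings"
	"text/template"
)

// page is an invented type used only for illustration.
type page struct {
	Title string
	Tags  map[string]string
}

// Upper is found as a method before any struct-field or map-key lookup is tried.
func (p page) Upper() string { return strings.ToUpper(p.Title) }

func main() {
	t := template.Must(template.New("f").Parse(
		"{{.Title}} / {{.Upper}} / {{.Tags.lang}}\n"))
	data := page{Title: "hello", Tags: map[string]string{"lang": "go"}}
	t.Execute(os.Stdout, data) // prints "hello / HELLO / go"
}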
|
||||
|
||||
var (
|
||||
errorType = reflect.TypeOf((*error)(nil)).Elem()
|
||||
fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
|
||||
)
|
||||
|
||||
// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
|
||||
// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
|
||||
// as the function itself.
|
||||
func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
|
||||
if args != nil {
|
||||
args = args[1:] // Zeroth arg is function name/node; not passed to function.
|
||||
}
|
||||
typ := fun.Type()
|
||||
numIn := len(args)
|
||||
if final.IsValid() {
|
||||
numIn++
|
||||
}
|
||||
numFixed := len(args)
|
||||
if typ.IsVariadic() {
|
||||
numFixed = typ.NumIn() - 1 // last arg is the variadic one.
|
||||
if numIn < numFixed {
|
||||
s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
|
||||
}
|
||||
} else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
|
||||
s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
// TODO: This could still be a confusing error; maybe goodFunc should provide info.
|
||||
s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
|
||||
}
|
||||
// Build the arg list.
|
||||
argv := make([]reflect.Value, numIn)
|
||||
// Args must be evaluated. Fixed args first.
|
||||
i := 0
|
||||
for ; i < numFixed && i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, typ.In(i), args[i])
|
||||
}
|
||||
// Now the ... args.
|
||||
if typ.IsVariadic() {
|
||||
argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
|
||||
for ; i < len(args); i++ {
|
||||
argv[i] = s.evalArg(dot, argType, args[i])
|
||||
}
|
||||
}
|
||||
// Add final value if necessary.
|
||||
if final.IsValid() {
|
||||
t := typ.In(typ.NumIn() - 1)
|
||||
if typ.IsVariadic() {
|
||||
t = t.Elem()
|
||||
}
|
||||
argv[i] = s.validateType(final, t)
|
||||
}
|
||||
result := fun.Call(argv)
|
||||
// If we have an error that is not nil, stop execution and return that error to the caller.
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
s.at(node)
|
||||
s.errorf("error calling %s: %s", name, result[1].Interface().(error))
|
||||
}
|
||||
return result[0]
|
||||
}
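The error handling at the end of evalCall is what makes two-result template functions convenient: a non-nil second result aborts execution and surfaces as the Execute error. A hedged sketch (standard text/template; the shout function is invented for illustration):

package main

import (
	"errors"
	"fmt"
	"os"
	"text/template"
)

// shout is an invented example with the two-result shape goodFunc accepts.
// A non-nil error makes evalCall stop execution and report it to the caller.
func shout(s string) (string, error) {
	if s == "" {
		return "", errors.New("nothing to shout")
	}
	return s + "!", nil
}

func main() {
	t := template.Must(template.New("c").
		Funcs(template.FuncMap{"shout": shout}).
		Parse("{{shout .}}\n"))
	t.Execute(os.Stdout, "hi")            // prints "hi!"
	fmt.Println(t.Execute(os.Stdout, "")) // prints the error returned by shout
}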
|
||||
|
||||
// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
|
||||
func canBeNil(typ reflect.Type) bool {
|
||||
switch typ.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// validateType guarantees that the value is valid and assignable to the type.
|
||||
func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
|
||||
if !value.IsValid() {
|
||||
if typ == nil || canBeNil(typ) {
|
||||
// An untyped nil interface{}. Accept as a proper nil value.
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("invalid value; expected %s", typ)
|
||||
}
|
||||
if typ != nil && !value.Type().AssignableTo(typ) {
|
||||
if value.Kind() == reflect.Interface && !value.IsNil() {
|
||||
value = value.Elem()
|
||||
if value.Type().AssignableTo(typ) {
|
||||
return value
|
||||
}
|
||||
// fallthrough
|
||||
}
|
||||
// Does one dereference or indirection work? We could do more, as we
|
||||
// do with method receivers, but that gets messy and method receivers
|
||||
// are much more constrained, so it makes more sense there than here.
|
||||
// Besides, one is almost always all you need.
|
||||
switch {
|
||||
case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
|
||||
value = value.Elem()
|
||||
if !value.IsValid() {
|
||||
s.errorf("dereference of nil pointer of type %s", typ)
|
||||
}
|
||||
case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
|
||||
value = value.Addr()
|
||||
default:
|
||||
s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
|
||||
}
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch arg := n.(type) {
|
||||
case *parse.DotNode:
|
||||
return s.validateType(dot, typ)
|
||||
case *parse.NilNode:
|
||||
if canBeNil(typ) {
|
||||
return reflect.Zero(typ)
|
||||
}
|
||||
s.errorf("cannot assign nil to %s", typ)
|
||||
case *parse.FieldNode:
|
||||
return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
|
||||
case *parse.VariableNode:
|
||||
return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
|
||||
case *parse.PipeNode:
|
||||
return s.validateType(s.evalPipeline(dot, arg), typ)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, arg, arg, nil, zero)
|
||||
case *parse.ChainNode:
|
||||
return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
|
||||
}
|
||||
switch typ.Kind() {
|
||||
case reflect.Bool:
|
||||
return s.evalBool(typ, n)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return s.evalComplex(typ, n)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return s.evalFloat(typ, n)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return s.evalInteger(typ, n)
|
||||
case reflect.Interface:
|
||||
if typ.NumMethod() == 0 {
|
||||
return s.evalEmptyInterface(dot, n)
|
||||
}
|
||||
case reflect.String:
|
||||
return s.evalString(typ, n)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return s.evalUnsignedInteger(typ, n)
|
||||
}
|
||||
s.errorf("can't handle %s for arg of type %s", n, typ)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.BoolNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetBool(n.True)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected bool; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.StringNode); ok {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetString(n.Text)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected string; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetInt(n.Int64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetUint(n.Uint64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected unsigned integer; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetFloat(n.Float64)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected float; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
|
||||
if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
|
||||
value := reflect.New(typ).Elem()
|
||||
value.SetComplex(n.Complex128)
|
||||
return value
|
||||
}
|
||||
s.errorf("expected complex; found %s", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
|
||||
s.at(n)
|
||||
switch n := n.(type) {
|
||||
case *parse.BoolNode:
|
||||
return reflect.ValueOf(n.True)
|
||||
case *parse.DotNode:
|
||||
return dot
|
||||
case *parse.FieldNode:
|
||||
return s.evalFieldNode(dot, n, nil, zero)
|
||||
case *parse.IdentifierNode:
|
||||
return s.evalFunction(dot, n, n, nil, zero)
|
||||
case *parse.NilNode:
|
||||
// NilNode is handled in evalArg, the only place that calls here.
|
||||
s.errorf("evalEmptyInterface: nil (can't happen)")
|
||||
case *parse.NumberNode:
|
||||
return s.idealConstant(n)
|
||||
case *parse.StringNode:
|
||||
return reflect.ValueOf(n.Text)
|
||||
case *parse.VariableNode:
|
||||
return s.evalVariableNode(dot, n, nil, zero)
|
||||
case *parse.PipeNode:
|
||||
return s.evalPipeline(dot, n)
|
||||
}
|
||||
s.errorf("can't handle assignment of %s to empty interface argument", n)
|
||||
panic("not reached")
|
||||
}
|
||||
|
||||
// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
|
||||
// We indirect through pointers and empty interfaces (only) because
|
||||
// non-empty interfaces have methods we might need.
|
||||
func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
|
||||
for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
|
||||
if v.IsNil() {
|
||||
return v, true
|
||||
}
|
||||
if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return v, false
|
||||
}
|
||||
|
||||
// printValue writes the textual representation of the value to the output of
|
||||
// the template.
|
||||
func (s *state) printValue(n parse.Node, v reflect.Value) {
|
||||
s.at(n)
|
||||
iface, ok := printableValue(v)
|
||||
if !ok {
|
||||
s.errorf("can't print %s of type %s", n, v.Type())
|
||||
}
|
||||
fmt.Fprint(s.wr, iface)
|
||||
}
|
||||
|
||||
// printableValue returns the, possibly indirected, interface value inside v that
|
||||
// is best for a call to formatted printer.
|
||||
func printableValue(v reflect.Value) (interface{}, bool) {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v, _ = indirect(v) // fmt.Fprint handles nil.
|
||||
}
|
||||
if !v.IsValid() {
|
||||
return "<no value>", true
|
||||
}
|
||||
|
||||
if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
|
||||
if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
|
||||
v = v.Addr()
|
||||
} else {
|
||||
switch v.Kind() {
|
||||
case reflect.Chan, reflect.Func:
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
}
|
||||
return v.Interface(), true
|
||||
}
|
||||
|
||||
// Types to help sort the keys in a map for reproducible output.
|
||||
|
||||
type rvs []reflect.Value
|
||||
|
||||
func (x rvs) Len() int { return len(x) }
|
||||
func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
type rvInts struct{ rvs }
|
||||
|
||||
func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }
|
||||
|
||||
type rvUints struct{ rvs }
|
||||
|
||||
func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }
|
||||
|
||||
type rvFloats struct{ rvs }
|
||||
|
||||
func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }
|
||||
|
||||
type rvStrings struct{ rvs }
|
||||
|
||||
func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
|
||||
|
||||
// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
|
||||
func sortKeys(v []reflect.Value) []reflect.Value {
|
||||
if len(v) <= 1 {
|
||||
return v
|
||||
}
|
||||
switch v[0].Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
sort.Sort(rvFloats{v})
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
sort.Sort(rvInts{v})
|
||||
case reflect.String:
|
||||
sort.Sort(rvStrings{v})
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
sort.Sort(rvUints{v})
|
||||
}
|
||||
return v
|
||||
}
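Because walkRange passes map keys through sortKeys, ranging over a map gives deterministic output even though Go randomizes map iteration order. A short sketch (standard text/template, illustrative data):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Illustrative sketch: map keys are sorted before ranging, so this output
	// is stable across runs despite Go's randomized map iteration.
	t := template.Must(template.New("m").Parse(
		"{{range $k, $v := .}}{{$k}}={{$v}} {{end}}\n"))
	t.Execute(os.Stdout, map[string]int{"b": 2, "a": 1, "c": 3}) // "a=1 b=2 c=3 "
}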
|
||||
598 vendor/github.com/alecthomas/template/funcs.go (generated, vendored)
@@ -1,598 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// FuncMap is the type of the map defining the mapping from names to functions.
|
||||
// Each function must have either a single return value, or two return values of
|
||||
// which the second has type error. In that case, if the second (error)
|
||||
// return value evaluates to non-nil during execution, execution terminates and
|
||||
// Execute returns that error.
|
||||
type FuncMap map[string]interface{}
|
||||
|
||||
var builtins = FuncMap{
|
||||
"and": and,
|
||||
"call": call,
|
||||
"html": HTMLEscaper,
|
||||
"index": index,
|
||||
"js": JSEscaper,
|
||||
"len": length,
|
||||
"not": not,
|
||||
"or": or,
|
||||
"print": fmt.Sprint,
|
||||
"printf": fmt.Sprintf,
|
||||
"println": fmt.Sprintln,
|
||||
"urlquery": URLQueryEscaper,
|
||||
|
||||
// Comparisons
|
||||
"eq": eq, // ==
|
||||
"ge": ge, // >=
|
||||
"gt": gt, // >
|
||||
"le": le, // <=
|
||||
"lt": lt, // <
|
||||
"ne": ne, // !=
|
||||
}
|
||||
|
||||
var builtinFuncs = createValueFuncs(builtins)
|
||||
|
||||
// createValueFuncs turns a FuncMap into a map[string]reflect.Value
|
||||
func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
|
||||
m := make(map[string]reflect.Value)
|
||||
addValueFuncs(m, funcMap)
|
||||
return m
|
||||
}
|
||||
|
||||
// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
|
||||
func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
v := reflect.ValueOf(fn)
|
||||
if v.Kind() != reflect.Func {
|
||||
panic("value for " + name + " not a function")
|
||||
}
|
||||
if !goodFunc(v.Type()) {
|
||||
panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
|
||||
}
|
||||
out[name] = v
|
||||
}
|
||||
}
|
||||
|
||||
// addFuncs adds to values the functions in funcs. It does no checking of the input -
|
||||
// call addValueFuncs first.
|
||||
func addFuncs(out, in FuncMap) {
|
||||
for name, fn := range in {
|
||||
out[name] = fn
|
||||
}
|
||||
}
|
||||
|
||||
// goodFunc checks that the function or method has the right result signature.
|
||||
func goodFunc(typ reflect.Type) bool {
|
||||
// We allow functions with 1 result or 2 results where the second is an error.
|
||||
switch {
|
||||
case typ.NumOut() == 1:
|
||||
return true
|
||||
case typ.NumOut() == 2 && typ.Out(1) == errorType:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// findFunction looks for a function in the template, and global map.
|
||||
func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
|
||||
if tmpl != nil && tmpl.common != nil {
|
||||
if fn := tmpl.execFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
}
|
||||
if fn := builtinFuncs[name]; fn.IsValid() {
|
||||
return fn, true
|
||||
}
|
||||
return reflect.Value{}, false
|
||||
}
|
||||
|
||||
// Indexing.
|
||||
|
||||
// index returns the result of indexing its first argument by the following
|
||||
// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
|
||||
// indexed item must be a map, slice, or array.
|
||||
func index(item interface{}, indices ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(item)
|
||||
for _, i := range indices {
|
||||
index := reflect.ValueOf(i)
|
||||
var isNil bool
|
||||
if v, isNil = indirect(v); isNil {
|
||||
return nil, fmt.Errorf("index of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.String:
|
||||
var x int64
|
||||
switch index.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
x = index.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
x = int64(index.Uint())
|
||||
default:
|
||||
return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
|
||||
}
|
||||
if x < 0 || x >= int64(v.Len()) {
|
||||
return nil, fmt.Errorf("index out of range: %d", x)
|
||||
}
|
||||
v = v.Index(int(x))
|
||||
case reflect.Map:
|
||||
if !index.IsValid() {
|
||||
index = reflect.Zero(v.Type().Key())
|
||||
}
|
||||
if !index.Type().AssignableTo(v.Type().Key()) {
|
||||
return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
|
||||
}
|
||||
if x := v.MapIndex(index); x.IsValid() {
|
||||
v = x
|
||||
} else {
|
||||
v = reflect.Zero(v.Type().Elem())
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("can't index item of type %s", v.Type())
|
||||
}
|
||||
}
|
||||
return v.Interface(), nil
|
||||
}
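As the comment above says, index x 1 2 is x[1][2] in Go syntax, where each step may be a map, slice, or array. A hedged sketch of its use from a template (standard text/template, illustrative data):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Illustrative sketch: each index step may be a map key or a slice index.
	t := template.Must(template.New("i").Parse(
		`{{index . "users" 1}}` + "\n"))
	data := map[string][]string{"users": {"alice", "bob"}}
	t.Execute(os.Stdout, data) // prints "bob"
}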
|
||||
|
||||
// Length
|
||||
|
||||
// length returns the length of the item, with an error if it has no defined length.
|
||||
func length(item interface{}) (int, error) {
|
||||
v, isNil := indirect(reflect.ValueOf(item))
|
||||
if isNil {
|
||||
return 0, fmt.Errorf("len of nil pointer")
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len(), nil
|
||||
}
|
||||
return 0, fmt.Errorf("len of type %s", v.Type())
|
||||
}
|
||||
|
||||
// Function invocation
|
||||
|
||||
// call returns the result of evaluating the first argument as a function.
|
||||
// The function must return 1 result, or 2 results, the second of which is an error.
|
||||
func call(fn interface{}, args ...interface{}) (interface{}, error) {
|
||||
v := reflect.ValueOf(fn)
|
||||
typ := v.Type()
|
||||
if typ.Kind() != reflect.Func {
|
||||
return nil, fmt.Errorf("non-function of type %s", typ)
|
||||
}
|
||||
if !goodFunc(typ) {
|
||||
return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
|
||||
}
|
||||
numIn := typ.NumIn()
|
||||
var dddType reflect.Type
|
||||
if typ.IsVariadic() {
|
||||
if len(args) < numIn-1 {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
|
||||
}
|
||||
dddType = typ.In(numIn - 1).Elem()
|
||||
} else {
|
||||
if len(args) != numIn {
|
||||
return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
|
||||
}
|
||||
}
|
||||
argv := make([]reflect.Value, len(args))
|
||||
for i, arg := range args {
|
||||
value := reflect.ValueOf(arg)
|
||||
// Compute the expected type. Clumsy because of variadics.
|
||||
var argType reflect.Type
|
||||
if !typ.IsVariadic() || i < numIn-1 {
|
||||
argType = typ.In(i)
|
||||
} else {
|
||||
argType = dddType
|
||||
}
|
||||
if !value.IsValid() && canBeNil(argType) {
|
||||
value = reflect.Zero(argType)
|
||||
}
|
||||
if !value.Type().AssignableTo(argType) {
|
||||
return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
|
||||
}
|
||||
argv[i] = value
|
||||
}
|
||||
result := v.Call(argv)
|
||||
if len(result) == 2 && !result[1].IsNil() {
|
||||
return result[0].Interface(), result[1].Interface().(error)
|
||||
}
|
||||
return result[0].Interface(), nil
|
||||
}
|
||||
|
||||
// Boolean logic.
|
||||
|
||||
func truth(a interface{}) bool {
|
||||
t, _ := isTrue(reflect.ValueOf(a))
|
||||
return t
|
||||
}
|
||||
|
||||
// and computes the Boolean AND of its arguments, returning
|
||||
// the first false argument it encounters, or the last argument.
|
||||
func and(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if !truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if !truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// or computes the Boolean OR of its arguments, returning
|
||||
// the first true argument it encounters, or the last argument.
|
||||
func or(arg0 interface{}, args ...interface{}) interface{} {
|
||||
if truth(arg0) {
|
||||
return arg0
|
||||
}
|
||||
for i := range args {
|
||||
arg0 = args[i]
|
||||
if truth(arg0) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return arg0
|
||||
}
|
||||
|
||||
// not returns the Boolean negation of its argument.
|
||||
func not(arg interface{}) (truth bool) {
|
||||
truth, _ = isTrue(reflect.ValueOf(arg))
|
||||
return !truth
|
||||
}
|
||||
|
||||
// Comparison.
|
||||
|
||||
// TODO: Perhaps allow comparison between signed and unsigned integers.
|
||||
|
||||
var (
|
||||
errBadComparisonType = errors.New("invalid type for comparison")
|
||||
errBadComparison = errors.New("incompatible types for comparison")
|
||||
errNoComparison = errors.New("missing argument for comparison")
|
||||
)
|
||||
|
||||
type kind int
|
||||
|
||||
const (
|
||||
invalidKind kind = iota
|
||||
boolKind
|
||||
complexKind
|
||||
intKind
|
||||
floatKind
|
||||
integerKind
|
||||
stringKind
|
||||
uintKind
|
||||
)
|
||||
|
||||
func basicKind(v reflect.Value) (kind, error) {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return boolKind, nil
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return intKind, nil
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return uintKind, nil
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return floatKind, nil
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return complexKind, nil
|
||||
case reflect.String:
|
||||
return stringKind, nil
|
||||
}
|
||||
return invalidKind, errBadComparisonType
|
||||
}
|
||||
|
||||
// eq evaluates the comparison a == b || a == c || ...
|
||||
func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(arg2) == 0 {
|
||||
return false, errNoComparison
|
||||
}
|
||||
for _, arg := range arg2 {
|
||||
v2 := reflect.ValueOf(arg)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind:
|
||||
truth = v1.Bool() == v2.Bool()
|
||||
case complexKind:
|
||||
truth = v1.Complex() == v2.Complex()
|
||||
case floatKind:
|
||||
truth = v1.Float() == v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() == v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() == v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() == v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
if truth {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
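eq with more than two arguments behaves as a == b || a == c || ..., which is handy for membership-style checks in templates. A small sketch (standard text/template, illustrative data):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Illustrative sketch: eq . "sat" "sun" is true when . equals either value.
	t := template.Must(template.New("e").Parse(
		`{{if eq . "sat" "sun"}}weekend{{else}}weekday{{end}}` + "\n"))
	t.Execute(os.Stdout, "sun") // prints "weekend"
	t.Execute(os.Stdout, "mon") // prints "weekday"
}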
|
||||
|
||||
// ne evaluates the comparison a != b.
|
||||
func ne(arg1, arg2 interface{}) (bool, error) {
|
||||
// != is the inverse of ==.
|
||||
equal, err := eq(arg1, arg2)
|
||||
return !equal, err
|
||||
}
|
||||
|
||||
// lt evaluates the comparison a < b.
|
||||
func lt(arg1, arg2 interface{}) (bool, error) {
|
||||
v1 := reflect.ValueOf(arg1)
|
||||
k1, err := basicKind(v1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
v2 := reflect.ValueOf(arg2)
|
||||
k2, err := basicKind(v2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
truth := false
|
||||
if k1 != k2 {
|
||||
// Special case: Can compare integer values regardless of type's sign.
|
||||
switch {
|
||||
case k1 == intKind && k2 == uintKind:
|
||||
truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
|
||||
case k1 == uintKind && k2 == intKind:
|
||||
truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
|
||||
default:
|
||||
return false, errBadComparison
|
||||
}
|
||||
} else {
|
||||
switch k1 {
|
||||
case boolKind, complexKind:
|
||||
return false, errBadComparisonType
|
||||
case floatKind:
|
||||
truth = v1.Float() < v2.Float()
|
||||
case intKind:
|
||||
truth = v1.Int() < v2.Int()
|
||||
case stringKind:
|
||||
truth = v1.String() < v2.String()
|
||||
case uintKind:
|
||||
truth = v1.Uint() < v2.Uint()
|
||||
default:
|
||||
panic("invalid kind")
|
||||
}
|
||||
}
|
||||
return truth, nil
|
||||
}
|
||||
|
||||
// le evaluates the comparison a <= b.
|
||||
func le(arg1, arg2 interface{}) (bool, error) {
|
||||
// <= is < or ==.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if lessThan || err != nil {
|
||||
return lessThan, err
|
||||
}
|
||||
return eq(arg1, arg2)
|
||||
}
|
||||
|
||||
// gt evaluates the comparison a > b.
|
||||
func gt(arg1, arg2 interface{}) (bool, error) {
|
||||
// > is the inverse of <=.
|
||||
lessOrEqual, err := le(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessOrEqual, nil
|
||||
}
|
||||
|
||||
// ge evaluates the comparison a >= b.
|
||||
func ge(arg1, arg2 interface{}) (bool, error) {
|
||||
// >= is the inverse of <.
|
||||
lessThan, err := lt(arg1, arg2)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return !lessThan, nil
|
||||
}
|
||||
|
||||
// HTML escaping.
|
||||
|
||||
var (
|
||||
htmlQuot = []byte("&#34;") // shorter than "&quot;"
htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
htmlAmp  = []byte("&amp;")
htmlLt   = []byte("&lt;")
htmlGt   = []byte("&gt;")
|
||||
)
|
||||
|
||||
// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
|
||||
func HTMLEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i, c := range b {
|
||||
var html []byte
|
||||
switch c {
|
||||
case '"':
|
||||
html = htmlQuot
|
||||
case '\'':
|
||||
html = htmlApos
|
||||
case '&':
|
||||
html = htmlAmp
|
||||
case '<':
|
||||
html = htmlLt
|
||||
case '>':
|
||||
html = htmlGt
|
||||
default:
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
w.Write(html)
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
|
||||
func HTMLEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexAny(s, `'"&<>`) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
HTMLEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// HTMLEscaper returns the escaped HTML equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func HTMLEscaper(args ...interface{}) string {
|
||||
return HTMLEscapeString(evalArgs(args))
|
||||
}
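HTMLEscapeString rewrites only the five characters listed above, and the html builtin applies the same escaping inside a template. A short sketch (standard text/template, illustrative strings):

package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	// Illustrative sketch of the escaping performed above.
	fmt.Println(template.HTMLEscapeString(`<a href="x">Tom & Jerry</a>`))
	// prints: &lt;a href=&#34;x&#34;&gt;Tom &amp; Jerry&lt;/a&gt;

	t := template.Must(template.New("h").Parse("{{. | html}}\n"))
	t.Execute(os.Stdout, "<b>bold</b>") // prints "&lt;b&gt;bold&lt;/b&gt;"
}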
|
||||
|
||||
// JavaScript escaping.
|
||||
|
||||
var (
|
||||
jsLowUni = []byte(`\u00`)
|
||||
hex = []byte("0123456789ABCDEF")
|
||||
|
||||
jsBackslash = []byte(`\\`)
|
||||
jsApos = []byte(`\'`)
|
||||
jsQuot = []byte(`\"`)
|
||||
jsLt = []byte(`\x3C`)
|
||||
jsGt = []byte(`\x3E`)
|
||||
)
|
||||
|
||||
// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
|
||||
func JSEscape(w io.Writer, b []byte) {
|
||||
last := 0
|
||||
for i := 0; i < len(b); i++ {
|
||||
c := b[i]
|
||||
|
||||
if !jsIsSpecial(rune(c)) {
|
||||
// fast path: nothing to do
|
||||
continue
|
||||
}
|
||||
w.Write(b[last:i])
|
||||
|
||||
if c < utf8.RuneSelf {
|
||||
// Quotes, slashes and angle brackets get quoted.
|
||||
// Control characters get written as \u00XX.
|
||||
switch c {
|
||||
case '\\':
|
||||
w.Write(jsBackslash)
|
||||
case '\'':
|
||||
w.Write(jsApos)
|
||||
case '"':
|
||||
w.Write(jsQuot)
|
||||
case '<':
|
||||
w.Write(jsLt)
|
||||
case '>':
|
||||
w.Write(jsGt)
|
||||
default:
|
||||
w.Write(jsLowUni)
|
||||
t, b := c>>4, c&0x0f
|
||||
w.Write(hex[t : t+1])
|
||||
w.Write(hex[b : b+1])
|
||||
}
|
||||
} else {
|
||||
// Unicode rune.
|
||||
r, size := utf8.DecodeRune(b[i:])
|
||||
if unicode.IsPrint(r) {
|
||||
w.Write(b[i : i+size])
|
||||
} else {
|
||||
fmt.Fprintf(w, "\\u%04X", r)
|
||||
}
|
||||
i += size - 1
|
||||
}
|
||||
last = i + 1
|
||||
}
|
||||
w.Write(b[last:])
|
||||
}
|
||||
|
||||
// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
|
||||
func JSEscapeString(s string) string {
|
||||
// Avoid allocation if we can.
|
||||
if strings.IndexFunc(s, jsIsSpecial) < 0 {
|
||||
return s
|
||||
}
|
||||
var b bytes.Buffer
|
||||
JSEscape(&b, []byte(s))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func jsIsSpecial(r rune) bool {
|
||||
switch r {
|
||||
case '\\', '\'', '"', '<', '>':
|
||||
return true
|
||||
}
|
||||
return r < ' ' || utf8.RuneSelf <= r
|
||||
}
|
||||
|
||||
// JSEscaper returns the escaped JavaScript equivalent of the textual
|
||||
// representation of its arguments.
|
||||
func JSEscaper(args ...interface{}) string {
|
||||
return JSEscapeString(evalArgs(args))
|
||||
}
|
||||
|
||||
// URLQueryEscaper returns the escaped value of the textual representation of
|
||||
// its arguments in a form suitable for embedding in a URL query.
|
||||
func URLQueryEscaper(args ...interface{}) string {
|
||||
return url.QueryEscape(evalArgs(args))
|
||||
}
|
||||
|
||||
// evalArgs formats the list of arguments into a string. It is therefore equivalent to
|
||||
// fmt.Sprint(args...)
|
||||
// except that each argument is indirected (if a pointer), as required,
|
||||
// using the same rules as the default string evaluation during template
|
||||
// execution.
|
||||
func evalArgs(args []interface{}) string {
|
||||
ok := false
|
||||
var s string
|
||||
// Fast path for simple common case.
|
||||
if len(args) == 1 {
|
||||
s, ok = args[0].(string)
|
||||
}
|
||||
if !ok {
|
||||
for i, arg := range args {
|
||||
a, ok := printableValue(reflect.ValueOf(arg))
|
||||
if ok {
|
||||
args[i] = a
|
||||
} // else let fmt do its thing
|
||||
}
|
||||
s = fmt.Sprint(args...)
|
||||
}
|
||||
return s
|
||||
}
|
||||
108 vendor/github.com/alecthomas/template/helper.go (generated, vendored)
@@ -1,108 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Helper functions to make constructing templates easier.
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Functions and methods to parse templates.
|
||||
|
||||
// Must is a helper that wraps a call to a function returning (*Template, error)
|
||||
// and panics if the error is non-nil. It is intended for use in variable
|
||||
// initializations such as
|
||||
// var t = template.Must(template.New("name").Parse("text"))
|
||||
func Must(t *Template, err error) *Template {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// ParseFiles creates a new Template and parses the template definitions from
|
||||
// the named files. The returned template's name will have the (base) name and
|
||||
// (parsed) contents of the first file. There must be at least one file.
|
||||
// If an error occurs, parsing stops and the returned *Template is nil.
|
||||
func ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(nil, filenames...)
|
||||
}
|
||||
|
||||
// ParseFiles parses the named files and associates the resulting templates with
|
||||
// t. If an error occurs, parsing stops and the returned template is nil;
|
||||
// otherwise it is t. There must be at least one file.
|
||||
func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
|
||||
return parseFiles(t, filenames...)
|
||||
}
|
||||
|
||||
// parseFiles is the helper for the method and function. If the argument
|
||||
// template is nil, it is created from the first file.
|
||||
func parseFiles(t *Template, filenames ...string) (*Template, error) {
|
||||
if len(filenames) == 0 {
|
||||
// Not really a problem, but be consistent.
|
||||
return nil, fmt.Errorf("template: no files named in call to ParseFiles")
|
||||
}
|
||||
for _, filename := range filenames {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := string(b)
|
||||
name := filepath.Base(filename)
|
||||
// First template becomes return value if not already defined,
|
||||
// and we use that one for subsequent New calls to associate
|
||||
// all the templates together. Also, if this file has the same name
|
||||
// as t, this file becomes the contents of t, so
|
||||
// t, err := New(name).Funcs(xxx).ParseFiles(name)
|
||||
// works. Otherwise we create a new template associated with t.
|
||||
var tmpl *Template
|
||||
if t == nil {
|
||||
t = New(name)
|
||||
}
|
||||
if name == t.Name() {
|
||||
tmpl = t
|
||||
} else {
|
||||
tmpl = t.New(name)
|
||||
}
|
||||
_, err = tmpl.Parse(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// ParseGlob creates a new Template and parses the template definitions from the
|
||||
// files identified by the pattern, which must match at least one file. The
|
||||
// returned template will have the (base) name and (parsed) contents of the
|
||||
// first file matched by the pattern. ParseGlob is equivalent to calling
|
||||
// ParseFiles with the list of files matched by the pattern.
|
||||
func ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(nil, pattern)
|
||||
}
|
||||
|
||||
// ParseGlob parses the template definitions in the files identified by the
|
||||
// pattern and associates the resulting templates with t. The pattern is
|
||||
// processed by filepath.Glob and must match at least one file. ParseGlob is
|
||||
// equivalent to calling t.ParseFiles with the list of files matched by the
|
||||
// pattern.
|
||||
func (t *Template) ParseGlob(pattern string) (*Template, error) {
|
||||
return parseGlob(t, pattern)
|
||||
}
|
||||
|
||||
// parseGlob is the implementation of the function and method ParseGlob.
|
||||
func parseGlob(t *Template, pattern string) (*Template, error) {
|
||||
filenames, err := filepath.Glob(pattern)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(filenames) == 0 {
|
||||
return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
|
||||
}
|
||||
return parseFiles(t, filenames...)
|
||||
}
|
||||
556 vendor/github.com/alecthomas/template/parse/lex.go (generated, vendored)
@@ -1,556 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// item represents a token or text string returned from the scanner.
|
||||
type item struct {
|
||||
typ itemType // The type of this item.
|
||||
pos Pos // The starting position, in bytes, of this item in the input string.
|
||||
val string // The value of this item.
|
||||
}
|
||||
|
||||
func (i item) String() string {
|
||||
switch {
|
||||
case i.typ == itemEOF:
|
||||
return "EOF"
|
||||
case i.typ == itemError:
|
||||
return i.val
|
||||
case i.typ > itemKeyword:
|
||||
return fmt.Sprintf("<%s>", i.val)
|
||||
case len(i.val) > 10:
|
||||
return fmt.Sprintf("%.10q...", i.val)
|
||||
}
|
||||
return fmt.Sprintf("%q", i.val)
|
||||
}
|
||||
|
||||
// itemType identifies the type of lex items.
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota // error occurred; value is text of error
|
||||
itemBool // boolean constant
|
||||
itemChar // printable ASCII character; grab bag for comma etc.
|
||||
itemCharConstant // character constant
|
||||
itemComplex // complex constant (1+2i); imaginary is just a number
|
||||
itemColonEquals // colon-equals (':=') introducing a declaration
|
||||
itemEOF
|
||||
itemField // alphanumeric identifier starting with '.'
|
||||
itemIdentifier // alphanumeric identifier not starting with '.'
|
||||
itemLeftDelim // left action delimiter
|
||||
itemLeftParen // '(' inside action
|
||||
itemNumber // simple number, including imaginary
|
||||
itemPipe // pipe symbol
|
||||
itemRawString // raw quoted string (includes quotes)
|
||||
itemRightDelim // right action delimiter
|
||||
itemElideNewline // elide newline after right delim
|
||||
itemRightParen // ')' inside action
|
||||
itemSpace // run of spaces separating arguments
|
||||
itemString // quoted string (includes quotes)
|
||||
itemText // plain text
|
||||
itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
|
||||
// Keywords appear after all the rest.
|
||||
itemKeyword // used only to delimit the keywords
|
||||
itemDot // the cursor, spelled '.'
|
||||
itemDefine // define keyword
|
||||
itemElse // else keyword
|
||||
itemEnd // end keyword
|
||||
itemIf // if keyword
|
||||
itemNil // the untyped nil constant, easiest to treat as a keyword
|
||||
itemRange // range keyword
|
||||
itemTemplate // template keyword
|
||||
itemWith // with keyword
|
||||
)
|
||||
|
||||
var key = map[string]itemType{
|
||||
".": itemDot,
|
||||
"define": itemDefine,
|
||||
"else": itemElse,
|
||||
"end": itemEnd,
|
||||
"if": itemIf,
|
||||
"range": itemRange,
|
||||
"nil": itemNil,
|
||||
"template": itemTemplate,
|
||||
"with": itemWith,
|
||||
}
|
||||
|
||||
const eof = -1
|
||||
|
||||
// stateFn represents the state of the scanner as a function that returns the next state.
|
||||
type stateFn func(*lexer) stateFn
|
||||
|
||||
// lexer holds the state of the scanner.
|
||||
type lexer struct {
|
||||
name string // the name of the input; used only for error reports
|
||||
input string // the string being scanned
|
||||
leftDelim string // start of action
|
||||
rightDelim string // end of action
|
||||
state stateFn // the next lexing function to enter
|
||||
pos Pos // current position in the input
|
||||
start Pos // start position of this item
|
||||
width Pos // width of last rune read from input
|
||||
lastPos Pos // position of most recent item returned by nextItem
|
||||
items chan item // channel of scanned items
|
||||
parenDepth int // nesting depth of ( ) exprs
|
||||
}
|
||||
|
||||
// next returns the next rune in the input.
|
||||
func (l *lexer) next() rune {
|
||||
if int(l.pos) >= len(l.input) {
|
||||
l.width = 0
|
||||
return eof
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
|
||||
l.width = Pos(w)
|
||||
l.pos += l.width
|
||||
return r
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (l *lexer) peek() rune {
|
||||
r := l.next()
|
||||
l.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can only be called once per call of next.
|
||||
func (l *lexer) backup() {
|
||||
l.pos -= l.width
|
||||
}
|
||||
|
||||
// emit passes an item back to the client.
|
||||
func (l *lexer) emit(t itemType) {
|
||||
l.items <- item{t, l.start, l.input[l.start:l.pos]}
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (l *lexer) ignore() {
|
||||
l.start = l.pos
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's from the valid set.
|
||||
func (l *lexer) accept(valid string) bool {
|
||||
if strings.IndexRune(valid, l.next()) >= 0 {
|
||||
return true
|
||||
}
|
||||
l.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// acceptRun consumes a run of runes from the valid set.
|
||||
func (l *lexer) acceptRun(valid string) {
|
||||
for strings.IndexRune(valid, l.next()) >= 0 {
|
||||
}
|
||||
l.backup()
|
||||
}
|
||||
|
||||
// lineNumber reports which line we're on, based on the position of
|
||||
// the previous item returned by nextItem. Doing it this way
|
||||
// means we don't have to worry about peek double counting.
|
||||
func (l *lexer) lineNumber() int {
|
||||
return 1 + strings.Count(l.input[:l.lastPos], "\n")
|
||||
}
|
||||
|
||||
// errorf returns an error token and terminates the scan by passing
|
||||
// back a nil pointer that will be the next state, terminating l.nextItem.
|
||||
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
|
||||
l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nextItem returns the next item from the input.
|
||||
func (l *lexer) nextItem() item {
|
||||
item := <-l.items
|
||||
l.lastPos = item.pos
|
||||
return item
|
||||
}
|
||||
|
||||
// lex creates a new scanner for the input string.
|
||||
func lex(name, input, left, right string) *lexer {
|
||||
if left == "" {
|
||||
left = leftDelim
|
||||
}
|
||||
if right == "" {
|
||||
right = rightDelim
|
||||
}
|
||||
l := &lexer{
|
||||
name: name,
|
||||
input: input,
|
||||
leftDelim: left,
|
||||
rightDelim: right,
|
||||
items: make(chan item),
|
||||
}
|
||||
go l.run()
|
||||
return l
|
||||
}
|
||||
|
||||
// run runs the state machine for the lexer.
|
||||
func (l *lexer) run() {
|
||||
for l.state = lexText; l.state != nil; {
|
||||
l.state = l.state(l)
|
||||
}
|
||||
}
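run drives the classic state-function lexer: each state does some work and returns the next state, while scanned items travel to the parser over the items channel. Below is a self-contained toy sketch of the same pattern; the machine type and emitLetters function are invented for illustration and are not part of this package.

package main

import "fmt"

// machine and emitLetters are invented names; they only demonstrate the
// state-function pattern used by the lexer above.
type machine struct {
	input string
	pos   int
	out   chan rune
}

type stateFn func(*machine) stateFn

// emitLetters sends one character per step and returns itself as the next
// state until the input is exhausted, then returns nil to stop the loop.
func emitLetters(m *machine) stateFn {
	if m.pos >= len(m.input) {
		close(m.out)
		return nil
	}
	m.out <- rune(m.input[m.pos])
	m.pos++
	return emitLetters
}

func main() {
	m := &machine{input: "go", out: make(chan rune)}
	go func() {
		for state := emitLetters; state != nil; {
			state = state(m)
		}
	}()
	for r := range m.out {
		fmt.Printf("%c ", r) // prints "g o "
	}
	fmt.Println()
}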
|
||||
|
||||
// state functions
|
||||
|
||||
const (
|
||||
leftDelim = "{{"
|
||||
rightDelim = "}}"
|
||||
leftComment = "/*"
|
||||
rightComment = "*/"
|
||||
)
|
||||
|
||||
// lexText scans until an opening action delimiter, "{{".
|
||||
func lexText(l *lexer) stateFn {
|
||||
for {
|
||||
if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
return lexLeftDelim
|
||||
}
|
||||
if l.next() == eof {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Correctly reached EOF.
|
||||
if l.pos > l.start {
|
||||
l.emit(itemText)
|
||||
}
|
||||
l.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexLeftDelim scans the left delimiter, which is known to be present.
|
||||
func lexLeftDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.leftDelim))
|
||||
if strings.HasPrefix(l.input[l.pos:], leftComment) {
|
||||
return lexComment
|
||||
}
|
||||
l.emit(itemLeftDelim)
|
||||
l.parenDepth = 0
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexComment scans a comment. The left comment marker is known to be present.
|
||||
func lexComment(l *lexer) stateFn {
|
||||
l.pos += Pos(len(leftComment))
|
||||
i := strings.Index(l.input[l.pos:], rightComment)
|
||||
if i < 0 {
|
||||
return l.errorf("unclosed comment")
|
||||
}
|
||||
l.pos += Pos(i + len(rightComment))
|
||||
if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
return l.errorf("comment ends before closing delimiter")
|
||||
|
||||
}
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.ignore()
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexRightDelim scans the right delimiter, which is known to be present.
|
||||
func lexRightDelim(l *lexer) stateFn {
|
||||
l.pos += Pos(len(l.rightDelim))
|
||||
l.emit(itemRightDelim)
|
||||
if l.peek() == '\\' {
|
||||
l.pos++
|
||||
l.emit(itemElideNewline)
|
||||
}
|
||||
return lexText
|
||||
}
|
||||
|
||||
// lexInsideAction scans the elements inside action delimiters.
|
||||
func lexInsideAction(l *lexer) stateFn {
|
||||
// Either number, quoted string, or identifier.
|
||||
// Spaces separate arguments; runs of spaces turn into itemSpace.
|
||||
// Pipe symbols separate and are emitted.
|
||||
if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
|
||||
if l.parenDepth == 0 {
|
||||
return lexRightDelim
|
||||
}
|
||||
return l.errorf("unclosed left paren")
|
||||
}
|
||||
switch r := l.next(); {
|
||||
case r == eof || isEndOfLine(r):
|
||||
return l.errorf("unclosed action")
|
||||
case isSpace(r):
|
||||
return lexSpace
|
||||
case r == ':':
|
||||
if l.next() != '=' {
|
||||
return l.errorf("expected :=")
|
||||
}
|
||||
l.emit(itemColonEquals)
|
||||
case r == '|':
|
||||
l.emit(itemPipe)
|
||||
case r == '"':
|
||||
return lexQuote
|
||||
case r == '`':
|
||||
return lexRawQuote
|
||||
case r == '$':
|
||||
return lexVariable
|
||||
case r == '\'':
|
||||
return lexChar
|
||||
case r == '.':
|
||||
// special look-ahead for ".field" so we don't break l.backup().
|
||||
if l.pos < Pos(len(l.input)) {
|
||||
r := l.input[l.pos]
|
||||
if r < '0' || '9' < r {
|
||||
return lexField
|
||||
}
|
||||
}
|
||||
fallthrough // '.' can start a number.
|
||||
case r == '+' || r == '-' || ('0' <= r && r <= '9'):
|
||||
l.backup()
|
||||
return lexNumber
|
||||
case isAlphaNumeric(r):
|
||||
l.backup()
|
||||
return lexIdentifier
|
||||
case r == '(':
|
||||
l.emit(itemLeftParen)
|
||||
l.parenDepth++
|
||||
return lexInsideAction
|
||||
case r == ')':
|
||||
l.emit(itemRightParen)
|
||||
l.parenDepth--
|
||||
if l.parenDepth < 0 {
|
||||
return l.errorf("unexpected right paren %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
case r <= unicode.MaxASCII && unicode.IsPrint(r):
|
||||
l.emit(itemChar)
|
||||
return lexInsideAction
|
||||
default:
|
||||
return l.errorf("unrecognized character in action: %#U", r)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexSpace scans a run of space characters.
|
||||
// One space has already been seen.
|
||||
func lexSpace(l *lexer) stateFn {
|
||||
for isSpace(l.peek()) {
|
||||
l.next()
|
||||
}
|
||||
l.emit(itemSpace)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexIdentifier scans an alphanumeric.
|
||||
func lexIdentifier(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch r := l.next(); {
|
||||
case isAlphaNumeric(r):
|
||||
// absorb.
|
||||
default:
|
||||
l.backup()
|
||||
word := l.input[l.start:l.pos]
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
switch {
|
||||
case key[word] > itemKeyword:
|
||||
l.emit(key[word])
|
||||
case word[0] == '.':
|
||||
l.emit(itemField)
|
||||
case word == "true", word == "false":
|
||||
l.emit(itemBool)
|
||||
default:
|
||||
l.emit(itemIdentifier)
|
||||
}
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexField scans a field: .Alphanumeric.
|
||||
// The . has been scanned.
|
||||
func lexField(l *lexer) stateFn {
|
||||
return lexFieldOrVariable(l, itemField)
|
||||
}
|
||||
|
||||
// lexVariable scans a Variable: $Alphanumeric.
|
||||
// The $ has been scanned.
|
||||
func lexVariable(l *lexer) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "$".
|
||||
l.emit(itemVariable)
|
||||
return lexInsideAction
|
||||
}
|
||||
return lexFieldOrVariable(l, itemVariable)
|
||||
}
|
||||
|
||||
// lexFieldOrVariable scans a field or variable: [.$]Alphanumeric.
|
||||
// The . or $ has been scanned.
|
||||
func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
|
||||
if l.atTerminator() { // Nothing interesting follows -> "." or "$".
|
||||
if typ == itemVariable {
|
||||
l.emit(itemVariable)
|
||||
} else {
|
||||
l.emit(itemDot)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
var r rune
|
||||
for {
|
||||
r = l.next()
|
||||
if !isAlphaNumeric(r) {
|
||||
l.backup()
|
||||
break
|
||||
}
|
||||
}
|
||||
if !l.atTerminator() {
|
||||
return l.errorf("bad character %#U", r)
|
||||
}
|
||||
l.emit(typ)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// atTerminator reports whether the input is at a valid termination character to
|
||||
// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
|
||||
// like "$x+2" not being acceptable without a space, in case we decide one
|
||||
// day to implement arithmetic.
|
||||
func (l *lexer) atTerminator() bool {
|
||||
r := l.peek()
|
||||
if isSpace(r) || isEndOfLine(r) {
|
||||
return true
|
||||
}
|
||||
switch r {
|
||||
case eof, '.', ',', '|', ':', ')', '(':
|
||||
return true
|
||||
}
|
||||
// Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
|
||||
// succeed but should fail) but only in extremely rare cases caused by willfully
|
||||
// bad choice of delimiter.
|
||||
if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// lexChar scans a character constant. The initial quote is already
|
||||
// scanned. Syntax checking is done by the parser.
|
||||
func lexChar(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated character constant")
|
||||
case '\'':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemCharConstant)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
|
||||
// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
|
||||
// and "089" - but when it's wrong the input is invalid and the parser (via
|
||||
// strconv) will notice.
|
||||
func lexNumber(l *lexer) stateFn {
|
||||
if !l.scanNumber() {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
if sign := l.peek(); sign == '+' || sign == '-' {
|
||||
// Complex: 1+2i. No spaces, must end in 'i'.
|
||||
if !l.scanNumber() || l.input[l.pos-1] != 'i' {
|
||||
return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
|
||||
}
|
||||
l.emit(itemComplex)
|
||||
} else {
|
||||
l.emit(itemNumber)
|
||||
}
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
func (l *lexer) scanNumber() bool {
|
||||
// Optional leading sign.
|
||||
l.accept("+-")
|
||||
// Is it hex?
|
||||
digits := "0123456789"
|
||||
if l.accept("0") && l.accept("xX") {
|
||||
digits = "0123456789abcdefABCDEF"
|
||||
}
|
||||
l.acceptRun(digits)
|
||||
if l.accept(".") {
|
||||
l.acceptRun(digits)
|
||||
}
|
||||
if l.accept("eE") {
|
||||
l.accept("+-")
|
||||
l.acceptRun("0123456789")
|
||||
}
|
||||
// Is it imaginary?
|
||||
l.accept("i")
|
||||
// Next thing mustn't be alphanumeric.
|
||||
if isAlphaNumeric(l.peek()) {
|
||||
l.next()
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// lexQuote scans a quoted string.
|
||||
func lexQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case '\\':
|
||||
if r := l.next(); r != eof && r != '\n' {
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated quoted string")
|
||||
case '"':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemString)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// lexRawQuote scans a raw quoted string.
|
||||
func lexRawQuote(l *lexer) stateFn {
|
||||
Loop:
|
||||
for {
|
||||
switch l.next() {
|
||||
case eof, '\n':
|
||||
return l.errorf("unterminated raw quoted string")
|
||||
case '`':
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
l.emit(itemRawString)
|
||||
return lexInsideAction
|
||||
}
|
||||
|
||||
// isSpace reports whether r is a space character.
|
||||
func isSpace(r rune) bool {
|
||||
return r == ' ' || r == '\t'
|
||||
}
|
||||
|
||||
// isEndOfLine reports whether r is an end-of-line character.
|
||||
func isEndOfLine(r rune) bool {
|
||||
return r == '\r' || r == '\n'
|
||||
}
|
||||
|
||||
// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
|
||||
func isAlphaNumeric(r rune) bool {
|
||||
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
|
||||
}
|
||||
834  vendor/github.com/alecthomas/template/parse/node.go  generated  vendored
@@ -1,834 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Parse nodes.
|
||||
|
||||
package parse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var textFormat = "%s" // Changed to "%q" in tests for better error messages.
|
||||
|
||||
// A Node is an element in the parse tree. The interface is trivial.
|
||||
// The interface contains an unexported method so that only
|
||||
// types local to this package can satisfy it.
|
||||
type Node interface {
|
||||
Type() NodeType
|
||||
String() string
|
||||
// Copy does a deep copy of the Node and all its components.
|
||||
// To avoid type assertions, some XxxNodes also have specialized
|
||||
// CopyXxx methods that return *XxxNode.
|
||||
Copy() Node
|
||||
Position() Pos // byte position of start of node in full original input string
|
||||
// tree returns the containing *Tree.
|
||||
// It is unexported so all implementations of Node are in this package.
|
||||
tree() *Tree
|
||||
}
|
||||
|
||||
// NodeType identifies the type of a parse tree node.
|
||||
type NodeType int
|
||||
|
||||
// Pos represents a byte position in the original input text from which
|
||||
// this template was parsed.
|
||||
type Pos int
|
||||
|
||||
func (p Pos) Position() Pos {
|
||||
return p
|
||||
}
|
||||
|
||||
// Type returns itself and provides an easy default implementation
|
||||
// for embedding in a Node. Embedded in all non-trivial Nodes.
|
||||
func (t NodeType) Type() NodeType {
|
||||
return t
|
||||
}
|
||||
|
||||
const (
|
||||
NodeText NodeType = iota // Plain text.
|
||||
NodeAction // A non-control action such as a field evaluation.
|
||||
NodeBool // A boolean constant.
|
||||
NodeChain // A sequence of field accesses.
|
||||
NodeCommand // An element of a pipeline.
|
||||
NodeDot // The cursor, dot.
|
||||
nodeElse // An else action. Not added to tree.
|
||||
nodeEnd // An end action. Not added to tree.
|
||||
NodeField // A field or method name.
|
||||
NodeIdentifier // An identifier; always a function name.
|
||||
NodeIf // An if action.
|
||||
NodeList // A list of Nodes.
|
||||
NodeNil // An untyped nil constant.
|
||||
NodeNumber // A numerical constant.
|
||||
NodePipe // A pipeline of commands.
|
||||
NodeRange // A range action.
|
||||
NodeString // A string constant.
|
||||
NodeTemplate // A template invocation action.
|
||||
NodeVariable // A $ variable.
|
||||
NodeWith // A with action.
|
||||
)
|
||||
|
||||
// Nodes.
|
||||
|
||||
// ListNode holds a sequence of nodes.
|
||||
type ListNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Nodes []Node // The element nodes in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newList(pos Pos) *ListNode {
|
||||
return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
|
||||
}
|
||||
|
||||
func (l *ListNode) append(n Node) {
|
||||
l.Nodes = append(l.Nodes, n)
|
||||
}
|
||||
|
||||
func (l *ListNode) tree() *Tree {
|
||||
return l.tr
|
||||
}
|
||||
|
||||
func (l *ListNode) String() string {
|
||||
b := new(bytes.Buffer)
|
||||
for _, n := range l.Nodes {
|
||||
fmt.Fprint(b, n)
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (l *ListNode) CopyList() *ListNode {
|
||||
if l == nil {
|
||||
return l
|
||||
}
|
||||
n := l.tr.newList(l.Pos)
|
||||
for _, elem := range l.Nodes {
|
||||
n.append(elem.Copy())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (l *ListNode) Copy() Node {
|
||||
return l.CopyList()
|
||||
}
|
||||
|
||||
// TextNode holds plain text.
|
||||
type TextNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Text []byte // The text; may span newlines.
|
||||
}
|
||||
|
||||
func (t *Tree) newText(pos Pos, text string) *TextNode {
|
||||
return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
|
||||
}
|
||||
|
||||
func (t *TextNode) String() string {
|
||||
return fmt.Sprintf(textFormat, t.Text)
|
||||
}
|
||||
|
||||
func (t *TextNode) tree() *Tree {
|
||||
return t.tr
|
||||
}
|
||||
|
||||
func (t *TextNode) Copy() Node {
|
||||
return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
|
||||
}
|
||||
|
||||
// PipeNode holds a pipeline with optional declaration
|
||||
type PipeNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Decl []*VariableNode // Variable declarations in lexical order.
|
||||
Cmds []*CommandNode // The commands in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
|
||||
return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
|
||||
}
|
||||
|
||||
func (p *PipeNode) append(command *CommandNode) {
|
||||
p.Cmds = append(p.Cmds, command)
|
||||
}
|
||||
|
||||
func (p *PipeNode) String() string {
|
||||
s := ""
|
||||
if len(p.Decl) > 0 {
|
||||
for i, v := range p.Decl {
|
||||
if i > 0 {
|
||||
s += ", "
|
||||
}
|
||||
s += v.String()
|
||||
}
|
||||
s += " := "
|
||||
}
|
||||
for i, c := range p.Cmds {
|
||||
if i > 0 {
|
||||
s += " | "
|
||||
}
|
||||
s += c.String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (p *PipeNode) tree() *Tree {
|
||||
return p.tr
|
||||
}
|
||||
|
||||
func (p *PipeNode) CopyPipe() *PipeNode {
|
||||
if p == nil {
|
||||
return p
|
||||
}
|
||||
var decl []*VariableNode
|
||||
for _, d := range p.Decl {
|
||||
decl = append(decl, d.Copy().(*VariableNode))
|
||||
}
|
||||
n := p.tr.newPipeline(p.Pos, p.Line, decl)
|
||||
for _, c := range p.Cmds {
|
||||
n.append(c.Copy().(*CommandNode))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (p *PipeNode) Copy() Node {
|
||||
return p.CopyPipe()
|
||||
}
|
||||
|
||||
// ActionNode holds an action (something bounded by delimiters).
|
||||
// Control actions have their own nodes; ActionNode represents simple
|
||||
// ones such as field evaluations and parenthesized pipelines.
|
||||
type ActionNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Pipe *PipeNode // The pipeline in the action.
|
||||
}
|
||||
|
||||
func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
|
||||
return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
|
||||
}
|
||||
|
||||
func (a *ActionNode) String() string {
|
||||
return fmt.Sprintf("{{%s}}", a.Pipe)
|
||||
|
||||
}
|
||||
|
||||
func (a *ActionNode) tree() *Tree {
|
||||
return a.tr
|
||||
}
|
||||
|
||||
func (a *ActionNode) Copy() Node {
|
||||
return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
|
||||
|
||||
}
|
||||
|
||||
// CommandNode holds a command (a pipeline inside an evaluating action).
|
||||
type CommandNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Args []Node // Arguments in lexical order: Identifier, field, or constant.
|
||||
}
|
||||
|
||||
func (t *Tree) newCommand(pos Pos) *CommandNode {
|
||||
return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
|
||||
}
|
||||
|
||||
func (c *CommandNode) append(arg Node) {
|
||||
c.Args = append(c.Args, arg)
|
||||
}
|
||||
|
||||
func (c *CommandNode) String() string {
|
||||
s := ""
|
||||
for i, arg := range c.Args {
|
||||
if i > 0 {
|
||||
s += " "
|
||||
}
|
||||
if arg, ok := arg.(*PipeNode); ok {
|
||||
s += "(" + arg.String() + ")"
|
||||
continue
|
||||
}
|
||||
s += arg.String()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *CommandNode) tree() *Tree {
|
||||
return c.tr
|
||||
}
|
||||
|
||||
func (c *CommandNode) Copy() Node {
|
||||
if c == nil {
|
||||
return c
|
||||
}
|
||||
n := c.tr.newCommand(c.Pos)
|
||||
for _, c := range c.Args {
|
||||
n.append(c.Copy())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// IdentifierNode holds an identifier.
|
||||
type IdentifierNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident string // The identifier's name.
|
||||
}
|
||||
|
||||
// NewIdentifier returns a new IdentifierNode with the given identifier name.
|
||||
func NewIdentifier(ident string) *IdentifierNode {
|
||||
return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
|
||||
}
|
||||
|
||||
// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
|
||||
i.Pos = pos
|
||||
return i
|
||||
}
|
||||
|
||||
// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
|
||||
// Chained for convenience.
|
||||
// TODO: fix one day?
|
||||
func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
|
||||
i.tr = t
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) String() string {
|
||||
return i.Ident
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) tree() *Tree {
|
||||
return i.tr
|
||||
}
|
||||
|
||||
func (i *IdentifierNode) Copy() Node {
|
||||
return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
|
||||
}
|
||||
|
||||
// VariableNode holds a list of variable names, possibly with chained field
|
||||
// accesses. The dollar sign is part of the (first) name.
|
||||
type VariableNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // Variable name and fields in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
|
||||
return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
|
||||
}
|
||||
|
||||
func (v *VariableNode) String() string {
|
||||
s := ""
|
||||
for i, id := range v.Ident {
|
||||
if i > 0 {
|
||||
s += "."
|
||||
}
|
||||
s += id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (v *VariableNode) tree() *Tree {
|
||||
return v.tr
|
||||
}
|
||||
|
||||
func (v *VariableNode) Copy() Node {
|
||||
return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
|
||||
}
|
||||
|
||||
// DotNode holds the special identifier '.'.
|
||||
type DotNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newDot(pos Pos) *DotNode {
|
||||
return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
|
||||
}
|
||||
|
||||
func (d *DotNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeDot
|
||||
}
|
||||
|
||||
func (d *DotNode) String() string {
|
||||
return "."
|
||||
}
|
||||
|
||||
func (d *DotNode) tree() *Tree {
|
||||
return d.tr
|
||||
}
|
||||
|
||||
func (d *DotNode) Copy() Node {
|
||||
return d.tr.newDot(d.Pos)
|
||||
}
|
||||
|
||||
// NilNode holds the special identifier 'nil' representing an untyped nil constant.
|
||||
type NilNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newNil(pos Pos) *NilNode {
|
||||
return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
|
||||
}
|
||||
|
||||
func (n *NilNode) Type() NodeType {
|
||||
// Override method on embedded NodeType for API compatibility.
|
||||
// TODO: Not really a problem; could change API without effect but
|
||||
// api tool complains.
|
||||
return NodeNil
|
||||
}
|
||||
|
||||
func (n *NilNode) String() string {
|
||||
return "nil"
|
||||
}
|
||||
|
||||
func (n *NilNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NilNode) Copy() Node {
|
||||
return n.tr.newNil(n.Pos)
|
||||
}
|
||||
|
||||
// FieldNode holds a field (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The period is dropped from each ident.
|
||||
type FieldNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Ident []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newField(pos Pos, ident string) *FieldNode {
|
||||
return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
|
||||
}
|
||||
|
||||
func (f *FieldNode) String() string {
|
||||
s := ""
|
||||
for _, id := range f.Ident {
|
||||
s += "." + id
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (f *FieldNode) tree() *Tree {
|
||||
return f.tr
|
||||
}
|
||||
|
||||
func (f *FieldNode) Copy() Node {
|
||||
return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
|
||||
}
|
||||
|
||||
// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
|
||||
// The names may be chained ('.x.y').
|
||||
// The periods are dropped from each ident.
|
||||
type ChainNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Node Node
|
||||
Field []string // The identifiers in lexical order.
|
||||
}
|
||||
|
||||
func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
|
||||
return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
|
||||
}
|
||||
|
||||
// Add adds the named field (which should start with a period) to the end of the chain.
|
||||
func (c *ChainNode) Add(field string) {
|
||||
if len(field) == 0 || field[0] != '.' {
|
||||
panic("no dot in field")
|
||||
}
|
||||
field = field[1:] // Remove leading dot.
|
||||
if field == "" {
|
||||
panic("empty field")
|
||||
}
|
||||
c.Field = append(c.Field, field)
|
||||
}
|
||||
|
||||
func (c *ChainNode) String() string {
|
||||
s := c.Node.String()
|
||||
if _, ok := c.Node.(*PipeNode); ok {
|
||||
s = "(" + s + ")"
|
||||
}
|
||||
for _, field := range c.Field {
|
||||
s += "." + field
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *ChainNode) tree() *Tree {
|
||||
return c.tr
|
||||
}
|
||||
|
||||
func (c *ChainNode) Copy() Node {
|
||||
return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
|
||||
}
|
||||
|
||||
// BoolNode holds a boolean constant.
|
||||
type BoolNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
True bool // The value of the boolean constant.
|
||||
}
|
||||
|
||||
func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
|
||||
return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
|
||||
}
|
||||
|
||||
func (b *BoolNode) String() string {
|
||||
if b.True {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
}
|
||||
|
||||
func (b *BoolNode) tree() *Tree {
|
||||
return b.tr
|
||||
}
|
||||
|
||||
func (b *BoolNode) Copy() Node {
|
||||
return b.tr.newBool(b.Pos, b.True)
|
||||
}
|
||||
|
||||
// NumberNode holds a number: signed or unsigned integer, float, or complex.
|
||||
// The value is parsed and stored under all the types that can represent the value.
|
||||
// This simulates in a small amount of code the behavior of Go's ideal constants.
|
||||
type NumberNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
IsInt bool // Number has an integral value.
|
||||
IsUint bool // Number has an unsigned integral value.
|
||||
IsFloat bool // Number has a floating-point value.
|
||||
IsComplex bool // Number is complex.
|
||||
Int64 int64 // The signed integer value.
|
||||
Uint64 uint64 // The unsigned integer value.
|
||||
Float64 float64 // The floating-point value.
|
||||
Complex128 complex128 // The complex value.
|
||||
Text string // The original textual representation from the input.
|
||||
}
|
||||
|
||||
func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
|
||||
n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
|
||||
switch typ {
|
||||
case itemCharConstant:
|
||||
rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tail != "'" {
|
||||
return nil, fmt.Errorf("malformed character constant: %s", text)
|
||||
}
|
||||
n.Int64 = int64(rune)
|
||||
n.IsInt = true
|
||||
n.Uint64 = uint64(rune)
|
||||
n.IsUint = true
|
||||
n.Float64 = float64(rune) // odd but those are the rules.
|
||||
n.IsFloat = true
|
||||
return n, nil
|
||||
case itemComplex:
|
||||
// fmt.Sscan can parse the pair, so let it do the work.
|
||||
if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
n.IsComplex = true
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
// Imaginary constants can only be complex unless they are zero.
|
||||
if len(text) > 0 && text[len(text)-1] == 'i' {
|
||||
f, err := strconv.ParseFloat(text[:len(text)-1], 64)
|
||||
if err == nil {
|
||||
n.IsComplex = true
|
||||
n.Complex128 = complex(0, f)
|
||||
n.simplifyComplex()
|
||||
return n, nil
|
||||
}
|
||||
}
|
||||
// Do integer test first so we get 0x123 etc.
|
||||
u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
|
||||
if err == nil {
|
||||
n.IsUint = true
|
||||
n.Uint64 = u
|
||||
}
|
||||
i, err := strconv.ParseInt(text, 0, 64)
|
||||
if err == nil {
|
||||
n.IsInt = true
|
||||
n.Int64 = i
|
||||
if i == 0 {
|
||||
n.IsUint = true // in case of -0.
|
||||
n.Uint64 = u
|
||||
}
|
||||
}
|
||||
// If an integer extraction succeeded, promote the float.
|
||||
if n.IsInt {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Int64)
|
||||
} else if n.IsUint {
|
||||
n.IsFloat = true
|
||||
n.Float64 = float64(n.Uint64)
|
||||
} else {
|
||||
f, err := strconv.ParseFloat(text, 64)
|
||||
if err == nil {
|
||||
n.IsFloat = true
|
||||
n.Float64 = f
|
||||
// If a floating-point extraction succeeded, extract the int if needed.
|
||||
if !n.IsInt && float64(int64(f)) == f {
|
||||
n.IsInt = true
|
||||
n.Int64 = int64(f)
|
||||
}
|
||||
if !n.IsUint && float64(uint64(f)) == f {
|
||||
n.IsUint = true
|
||||
n.Uint64 = uint64(f)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !n.IsInt && !n.IsUint && !n.IsFloat {
|
||||
return nil, fmt.Errorf("illegal number syntax: %q", text)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// simplifyComplex pulls out any other types that are represented by the complex number.
|
||||
// These all require that the imaginary part be zero.
|
||||
func (n *NumberNode) simplifyComplex() {
|
||||
n.IsFloat = imag(n.Complex128) == 0
|
||||
if n.IsFloat {
|
||||
n.Float64 = real(n.Complex128)
|
||||
n.IsInt = float64(int64(n.Float64)) == n.Float64
|
||||
if n.IsInt {
|
||||
n.Int64 = int64(n.Float64)
|
||||
}
|
||||
n.IsUint = float64(uint64(n.Float64)) == n.Float64
|
||||
if n.IsUint {
|
||||
n.Uint64 = uint64(n.Float64)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (n *NumberNode) String() string {
|
||||
return n.Text
|
||||
}
|
||||
|
||||
func (n *NumberNode) tree() *Tree {
|
||||
return n.tr
|
||||
}
|
||||
|
||||
func (n *NumberNode) Copy() Node {
|
||||
nn := new(NumberNode)
|
||||
*nn = *n // Easy, fast, correct.
|
||||
return nn
|
||||
}
|
||||
|
||||
// StringNode holds a string constant. The value has been "unquoted".
|
||||
type StringNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Quoted string // The original text of the string, with quotes.
|
||||
Text string // The string, after quote processing.
|
||||
}
|
||||
|
||||
func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
|
||||
return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
|
||||
}
|
||||
|
||||
func (s *StringNode) String() string {
|
||||
return s.Quoted
|
||||
}
|
||||
|
||||
func (s *StringNode) tree() *Tree {
|
||||
return s.tr
|
||||
}
|
||||
|
||||
func (s *StringNode) Copy() Node {
|
||||
return s.tr.newString(s.Pos, s.Quoted, s.Text)
|
||||
}
|
||||
|
||||
// endNode represents an {{end}} action.
|
||||
// It does not appear in the final parse tree.
|
||||
type endNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
}
|
||||
|
||||
func (t *Tree) newEnd(pos Pos) *endNode {
|
||||
return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
|
||||
}
|
||||
|
||||
func (e *endNode) String() string {
|
||||
return "{{end}}"
|
||||
}
|
||||
|
||||
func (e *endNode) tree() *Tree {
|
||||
return e.tr
|
||||
}
|
||||
|
||||
func (e *endNode) Copy() Node {
|
||||
return e.tr.newEnd(e.Pos)
|
||||
}
|
||||
|
||||
// elseNode represents an {{else}} action. Does not appear in the final tree.
|
||||
type elseNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
}
|
||||
|
||||
func (t *Tree) newElse(pos Pos, line int) *elseNode {
|
||||
return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
|
||||
}
|
||||
|
||||
func (e *elseNode) Type() NodeType {
|
||||
return nodeElse
|
||||
}
|
||||
|
||||
func (e *elseNode) String() string {
|
||||
return "{{else}}"
|
||||
}
|
||||
|
||||
func (e *elseNode) tree() *Tree {
|
||||
return e.tr
|
||||
}
|
||||
|
||||
func (e *elseNode) Copy() Node {
|
||||
return e.tr.newElse(e.Pos, e.Line)
|
||||
}
|
||||
|
||||
// BranchNode is the common representation of if, range, and with.
|
||||
type BranchNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Pipe *PipeNode // The pipeline to be evaluated.
|
||||
List *ListNode // What to execute if the value is non-empty.
|
||||
ElseList *ListNode // What to execute if the value is empty (nil if absent).
|
||||
}
|
||||
|
||||
func (b *BranchNode) String() string {
|
||||
name := ""
|
||||
switch b.NodeType {
|
||||
case NodeIf:
|
||||
name = "if"
|
||||
case NodeRange:
|
||||
name = "range"
|
||||
case NodeWith:
|
||||
name = "with"
|
||||
default:
|
||||
panic("unknown branch type")
|
||||
}
|
||||
if b.ElseList != nil {
|
||||
return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
|
||||
}
|
||||
return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
|
||||
}
|
||||
|
||||
func (b *BranchNode) tree() *Tree {
|
||||
return b.tr
|
||||
}
|
||||
|
||||
func (b *BranchNode) Copy() Node {
|
||||
switch b.NodeType {
|
||||
case NodeIf:
|
||||
return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
case NodeRange:
|
||||
return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
case NodeWith:
|
||||
return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
|
||||
default:
|
||||
panic("unknown branch type")
|
||||
}
|
||||
}
|
||||
|
||||
// IfNode represents an {{if}} action and its commands.
|
||||
type IfNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
|
||||
return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (i *IfNode) Copy() Node {
|
||||
return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// RangeNode represents a {{range}} action and its commands.
|
||||
type RangeNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
|
||||
return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (r *RangeNode) Copy() Node {
|
||||
return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// WithNode represents a {{with}} action and its commands.
|
||||
type WithNode struct {
|
||||
BranchNode
|
||||
}
|
||||
|
||||
func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
|
||||
return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
|
||||
}
|
||||
|
||||
func (w *WithNode) Copy() Node {
|
||||
return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
|
||||
}
|
||||
|
||||
// TemplateNode represents a {{template}} action.
|
||||
type TemplateNode struct {
|
||||
NodeType
|
||||
Pos
|
||||
tr *Tree
|
||||
Line int // The line number in the input (deprecated; kept for compatibility)
|
||||
Name string // The name of the template (unquoted).
|
||||
Pipe *PipeNode // The command to evaluate as dot for the template.
|
||||
}
|
||||
|
||||
func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
|
||||
return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
|
||||
}
|
||||
|
||||
func (t *TemplateNode) String() string {
|
||||
if t.Pipe == nil {
|
||||
return fmt.Sprintf("{{template %q}}", t.Name)
|
||||
}
|
||||
return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
|
||||
}
|
||||
|
||||
func (t *TemplateNode) tree() *Tree {
|
||||
return t.tr
|
||||
}
|
||||
|
||||
func (t *TemplateNode) Copy() Node {
|
||||
return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
|
||||
}
|
||||
700  vendor/github.com/alecthomas/template/parse/parse.go  generated  vendored
@@ -1,700 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package parse builds parse trees for templates as defined by text/template
|
||||
// and html/template. Clients should use those packages to construct templates
|
||||
// rather than this one, which provides shared internal data structures not
|
||||
// intended for general use.
|
||||
package parse
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Tree is the representation of a single parsed template.
|
||||
type Tree struct {
|
||||
Name string // name of the template represented by the tree.
|
||||
ParseName string // name of the top-level template during parsing, for error messages.
|
||||
Root *ListNode // top-level root of the tree.
|
||||
text string // text parsed to create the template (or its parent)
|
||||
// Parsing only; cleared after parse.
|
||||
funcs []map[string]interface{}
|
||||
lex *lexer
|
||||
token [3]item // three-token lookahead for parser.
|
||||
peekCount int
|
||||
vars []string // variables defined at the moment.
|
||||
}
|
||||
|
||||
// Copy returns a copy of the Tree. Any parsing state is discarded.
|
||||
func (t *Tree) Copy() *Tree {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
return &Tree{
|
||||
Name: t.Name,
|
||||
ParseName: t.ParseName,
|
||||
Root: t.Root.CopyList(),
|
||||
text: t.text,
|
||||
}
|
||||
}
|
||||
|
||||
// Parse returns a map from template name to parse.Tree, created by parsing the
|
||||
// templates described in the argument string. The top-level template will be
|
||||
// given the specified name. If an error is encountered, parsing stops and an
|
||||
// empty map is returned with the error.
|
||||
func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
|
||||
treeSet = make(map[string]*Tree)
|
||||
t := New(name)
|
||||
t.text = text
|
||||
_, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
|
||||
return
|
||||
}
|
||||
|
||||
// next returns the next token.
|
||||
func (t *Tree) next() item {
|
||||
if t.peekCount > 0 {
|
||||
t.peekCount--
|
||||
} else {
|
||||
t.token[0] = t.lex.nextItem()
|
||||
}
|
||||
return t.token[t.peekCount]
|
||||
}
|
||||
|
||||
// backup backs the input stream up one token.
|
||||
func (t *Tree) backup() {
|
||||
t.peekCount++
|
||||
}
|
||||
|
||||
// backup2 backs the input stream up two tokens.
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup2(t1 item) {
|
||||
t.token[1] = t1
|
||||
t.peekCount = 2
|
||||
}
|
||||
|
||||
// backup3 backs the input stream up three tokens.
|
||||
// The zeroth token is already there.
|
||||
func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
|
||||
t.token[1] = t1
|
||||
t.token[2] = t2
|
||||
t.peekCount = 3
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next token.
|
||||
func (t *Tree) peek() item {
|
||||
if t.peekCount > 0 {
|
||||
return t.token[t.peekCount-1]
|
||||
}
|
||||
t.peekCount = 1
|
||||
t.token[0] = t.lex.nextItem()
|
||||
return t.token[0]
|
||||
}
|
||||
|
||||
// nextNonSpace returns the next non-space token.
|
||||
func (t *Tree) nextNonSpace() (token item) {
|
||||
for {
|
||||
token = t.next()
|
||||
if token.typ != itemSpace {
|
||||
break
|
||||
}
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// peekNonSpace returns but does not consume the next non-space token.
|
||||
func (t *Tree) peekNonSpace() (token item) {
|
||||
for {
|
||||
token = t.next()
|
||||
if token.typ != itemSpace {
|
||||
break
|
||||
}
|
||||
}
|
||||
t.backup()
|
||||
return token
|
||||
}
|
||||
|
||||
// Parsing.
|
||||
|
||||
// New allocates a new parse tree with the given name.
|
||||
func New(name string, funcs ...map[string]interface{}) *Tree {
|
||||
return &Tree{
|
||||
Name: name,
|
||||
funcs: funcs,
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorContext returns a textual representation of the location of the node in the input text.
|
||||
// The receiver is only used when the node does not have a pointer to the tree inside,
|
||||
// which can occur in old code.
|
||||
func (t *Tree) ErrorContext(n Node) (location, context string) {
|
||||
pos := int(n.Position())
|
||||
tree := n.tree()
|
||||
if tree == nil {
|
||||
tree = t
|
||||
}
|
||||
text := tree.text[:pos]
|
||||
byteNum := strings.LastIndex(text, "\n")
|
||||
if byteNum == -1 {
|
||||
byteNum = pos // On first line.
|
||||
} else {
|
||||
byteNum++ // After the newline.
|
||||
byteNum = pos - byteNum
|
||||
}
|
||||
lineNum := 1 + strings.Count(text, "\n")
|
||||
context = n.String()
|
||||
if len(context) > 20 {
|
||||
context = fmt.Sprintf("%.20s...", context)
|
||||
}
|
||||
return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
|
||||
}
|
||||
|
||||
// errorf formats the error and terminates processing.
|
||||
func (t *Tree) errorf(format string, args ...interface{}) {
|
||||
t.Root = nil
|
||||
format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
|
||||
panic(fmt.Errorf(format, args...))
|
||||
}
|
||||
|
||||
// error terminates processing.
|
||||
func (t *Tree) error(err error) {
|
||||
t.errorf("%s", err)
|
||||
}
|
||||
|
||||
// expect consumes the next token and guarantees it has the required type.
|
||||
func (t *Tree) expect(expected itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// expectOneOf consumes the next token and guarantees it has one of the required types.
|
||||
func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
|
||||
token := t.nextNonSpace()
|
||||
if token.typ != expected1 && token.typ != expected2 {
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
return token
|
||||
}
|
||||
|
||||
// unexpected complains about the token and terminates processing.
|
||||
func (t *Tree) unexpected(token item, context string) {
|
||||
t.errorf("unexpected %s in %s", token, context)
|
||||
}
|
||||
|
||||
// recover is the handler that turns panics into returns from the top level of Parse.
|
||||
func (t *Tree) recover(errp *error) {
|
||||
e := recover()
|
||||
if e != nil {
|
||||
if _, ok := e.(runtime.Error); ok {
|
||||
panic(e)
|
||||
}
|
||||
if t != nil {
|
||||
t.stopParse()
|
||||
}
|
||||
*errp = e.(error)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// startParse initializes the parser, using the lexer.
|
||||
func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
|
||||
t.Root = nil
|
||||
t.lex = lex
|
||||
t.vars = []string{"$"}
|
||||
t.funcs = funcs
|
||||
}
|
||||
|
||||
// stopParse terminates parsing.
|
||||
func (t *Tree) stopParse() {
|
||||
t.lex = nil
|
||||
t.vars = nil
|
||||
t.funcs = nil
|
||||
}
|
||||
|
||||
// Parse parses the template definition string to construct a representation of
|
||||
// the template for execution. If either action delimiter string is empty, the
|
||||
// default ("{{" or "}}") is used. Embedded template definitions are added to
|
||||
// the treeSet map.
|
||||
func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
|
||||
defer t.recover(&err)
|
||||
t.ParseName = t.Name
|
||||
t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
|
||||
t.text = text
|
||||
t.parse(treeSet)
|
||||
t.add(treeSet)
|
||||
t.stopParse()
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// add adds tree to the treeSet.
|
||||
func (t *Tree) add(treeSet map[string]*Tree) {
|
||||
tree := treeSet[t.Name]
|
||||
if tree == nil || IsEmptyTree(tree.Root) {
|
||||
treeSet[t.Name] = t
|
||||
return
|
||||
}
|
||||
if !IsEmptyTree(t.Root) {
|
||||
t.errorf("template: multiple definition of template %q", t.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// IsEmptyTree reports whether this tree (node) is empty of everything but space.
|
||||
func IsEmptyTree(n Node) bool {
|
||||
switch n := n.(type) {
|
||||
case nil:
|
||||
return true
|
||||
case *ActionNode:
|
||||
case *IfNode:
|
||||
case *ListNode:
|
||||
for _, node := range n.Nodes {
|
||||
if !IsEmptyTree(node) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case *RangeNode:
|
||||
case *TemplateNode:
|
||||
case *TextNode:
|
||||
return len(bytes.TrimSpace(n.Text)) == 0
|
||||
case *WithNode:
|
||||
default:
|
||||
panic("unknown node: " + n.String())
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// parse is the top-level parser for a template, essentially the same
|
||||
// as itemList except it also parses {{define}} actions.
|
||||
// It runs to EOF.
|
||||
func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
|
||||
t.Root = t.newList(t.peek().pos)
|
||||
for t.peek().typ != itemEOF {
|
||||
if t.peek().typ == itemLeftDelim {
|
||||
delim := t.next()
|
||||
if t.nextNonSpace().typ == itemDefine {
|
||||
newT := New("definition") // name will be updated once we know it.
|
||||
newT.text = t.text
|
||||
newT.ParseName = t.ParseName
|
||||
newT.startParse(t.funcs, t.lex)
|
||||
newT.parseDefinition(treeSet)
|
||||
continue
|
||||
}
|
||||
t.backup2(delim)
|
||||
}
|
||||
n := t.textOrAction()
|
||||
if n.Type() == nodeEnd {
|
||||
t.errorf("unexpected %s", n)
|
||||
}
|
||||
t.Root.append(n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseDefinition parses a {{define}} ... {{end}} template definition and
|
||||
// installs the definition in the treeSet map. The "define" keyword has already
|
||||
// been scanned.
|
||||
func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
|
||||
const context = "define clause"
|
||||
name := t.expectOneOf(itemString, itemRawString, context)
|
||||
var err error
|
||||
t.Name, err = strconv.Unquote(name.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
t.expect(itemRightDelim, context)
|
||||
var end Node
|
||||
t.Root, end = t.itemList()
|
||||
if end.Type() != nodeEnd {
|
||||
t.errorf("unexpected %s in %s", end, context)
|
||||
}
|
||||
t.add(treeSet)
|
||||
t.stopParse()
|
||||
}
|
||||
|
||||
// itemList:
|
||||
// textOrAction*
|
||||
// Terminates at {{end}} or {{else}}, returned separately.
|
||||
func (t *Tree) itemList() (list *ListNode, next Node) {
|
||||
list = t.newList(t.peekNonSpace().pos)
|
||||
for t.peekNonSpace().typ != itemEOF {
|
||||
n := t.textOrAction()
|
||||
switch n.Type() {
|
||||
case nodeEnd, nodeElse:
|
||||
return list, n
|
||||
}
|
||||
list.append(n)
|
||||
}
|
||||
t.errorf("unexpected EOF")
|
||||
return
|
||||
}
|
||||
|
||||
// textOrAction:
|
||||
// text | action
|
||||
func (t *Tree) textOrAction() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemElideNewline:
|
||||
return t.elideNewline()
|
||||
case itemText:
|
||||
return t.newText(token.pos, token.val)
|
||||
case itemLeftDelim:
|
||||
return t.action()
|
||||
default:
|
||||
t.unexpected(token, "input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// elideNewline:
|
||||
// Remove newlines trailing rightDelim if \\ is present.
|
||||
func (t *Tree) elideNewline() Node {
|
||||
token := t.peek()
|
||||
if token.typ != itemText {
|
||||
t.unexpected(token, "input")
|
||||
return nil
|
||||
}
|
||||
|
||||
t.next()
|
||||
stripped := strings.TrimLeft(token.val, "\n\r")
|
||||
diff := len(token.val) - len(stripped)
|
||||
if diff > 0 {
|
||||
// This is a bit nasty. We mutate the token in-place to remove
|
||||
// preceding newlines.
|
||||
token.pos += Pos(diff)
|
||||
token.val = stripped
|
||||
}
|
||||
return t.newText(token.pos, token.val)
|
||||
}
|
||||
|
||||
// Action:
|
||||
// control
|
||||
// command ("|" command)*
|
||||
// Left delim is past. Now get actions.
|
||||
// First word could be a keyword such as range.
|
||||
func (t *Tree) action() (n Node) {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemElse:
|
||||
return t.elseControl()
|
||||
case itemEnd:
|
||||
return t.endControl()
|
||||
case itemIf:
|
||||
return t.ifControl()
|
||||
case itemRange:
|
||||
return t.rangeControl()
|
||||
case itemTemplate:
|
||||
return t.templateControl()
|
||||
case itemWith:
|
||||
return t.withControl()
|
||||
}
|
||||
t.backup()
|
||||
// Do not pop variables; they persist until "end".
|
||||
return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
|
||||
}
|
||||
|
||||
// Pipeline:
|
||||
// declarations? command ('|' command)*
|
||||
func (t *Tree) pipeline(context string) (pipe *PipeNode) {
|
||||
var decl []*VariableNode
|
||||
pos := t.peekNonSpace().pos
|
||||
// Are there declarations?
|
||||
for {
|
||||
if v := t.peekNonSpace(); v.typ == itemVariable {
|
||||
t.next()
|
||||
// Since space is a token, we need 3-token look-ahead here in the worst case:
|
||||
// in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
|
||||
// argument variable rather than a declaration. So remember the token
|
||||
// adjacent to the variable so we can push it back if necessary.
|
||||
tokenAfterVariable := t.peek()
|
||||
if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
|
||||
t.nextNonSpace()
|
||||
variable := t.newVariable(v.pos, v.val)
|
||||
decl = append(decl, variable)
|
||||
t.vars = append(t.vars, v.val)
|
||||
if next.typ == itemChar && next.val == "," {
|
||||
if context == "range" && len(decl) < 2 {
|
||||
continue
|
||||
}
|
||||
t.errorf("too many declarations in %s", context)
|
||||
}
|
||||
} else if tokenAfterVariable.typ == itemSpace {
|
||||
t.backup3(v, tokenAfterVariable)
|
||||
} else {
|
||||
t.backup2(v)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
|
||||
for {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemRightDelim, itemRightParen:
|
||||
if len(pipe.Cmds) == 0 {
|
||||
t.errorf("missing value for %s", context)
|
||||
}
|
||||
if token.typ == itemRightParen {
|
||||
t.backup()
|
||||
}
|
||||
return
|
||||
case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
|
||||
itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
|
||||
t.backup()
|
||||
pipe.append(t.command())
|
||||
default:
|
||||
t.unexpected(token, context)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
|
||||
defer t.popVars(len(t.vars))
|
||||
line = t.lex.lineNumber()
|
||||
pipe = t.pipeline(context)
|
||||
var next Node
|
||||
list, next = t.itemList()
|
||||
switch next.Type() {
|
||||
case nodeEnd: //done
|
||||
case nodeElse:
|
||||
if allowElseIf {
|
||||
// Special case for "else if". If the "else" is followed immediately by an "if",
|
||||
// the elseControl will have left the "if" token pending. Treat
|
||||
// {{if a}}_{{else if b}}_{{end}}
|
||||
// as
|
||||
// {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
|
||||
// To do this, parse the if as usual and stop at its {{end}}; the subsequent {{end}}
|
||||
// is assumed. This technique works even for long if-else-if chains.
|
||||
// TODO: Should we allow else-if in with and range?
|
||||
if t.peek().typ == itemIf {
|
||||
t.next() // Consume the "if" token.
|
||||
elseList = t.newList(next.Position())
|
||||
elseList.append(t.ifControl())
|
||||
// Do not consume the next item - only one {{end}} required.
|
||||
break
|
||||
}
|
||||
}
|
||||
elseList, next = t.itemList()
|
||||
if next.Type() != nodeEnd {
|
||||
t.errorf("expected end; found %s", next)
|
||||
}
|
||||
}
|
||||
return pipe.Position(), line, pipe, list, elseList
|
||||
}
|
||||
|
||||
// If:
|
||||
// {{if pipeline}} itemList {{end}}
|
||||
// {{if pipeline}} itemList {{else}} itemList {{end}}
|
||||
// If keyword is past.
|
||||
func (t *Tree) ifControl() Node {
|
||||
return t.newIf(t.parseControl(true, "if"))
|
||||
}
|
||||
|
||||
// Range:
|
||||
// {{range pipeline}} itemList {{end}}
|
||||
// {{range pipeline}} itemList {{else}} itemList {{end}}
|
||||
// Range keyword is past.
|
||||
func (t *Tree) rangeControl() Node {
|
||||
return t.newRange(t.parseControl(false, "range"))
|
||||
}
|
||||
|
||||
// With:
|
||||
// {{with pipeline}} itemList {{end}}
|
||||
// {{with pipeline}} itemList {{else}} itemList {{end}}
|
||||
// If keyword is past.
|
||||
func (t *Tree) withControl() Node {
|
||||
return t.newWith(t.parseControl(false, "with"))
|
||||
}
|
||||
|
||||
// End:
|
||||
// {{end}}
|
||||
// End keyword is past.
|
||||
func (t *Tree) endControl() Node {
|
||||
return t.newEnd(t.expect(itemRightDelim, "end").pos)
|
||||
}
|
||||
|
||||
// Else:
|
||||
// {{else}}
|
||||
// Else keyword is past.
|
||||
func (t *Tree) elseControl() Node {
|
||||
// Special case for "else if".
|
||||
peek := t.peekNonSpace()
|
||||
if peek.typ == itemIf {
|
||||
// We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
|
||||
return t.newElse(peek.pos, t.lex.lineNumber())
|
||||
}
|
||||
return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
|
||||
}
|
||||
|
||||
// Template:
|
||||
// {{template stringValue pipeline}}
|
||||
// Template keyword is past. The name must be something that can evaluate
|
||||
// to a string.
|
||||
func (t *Tree) templateControl() Node {
|
||||
var name string
|
||||
token := t.nextNonSpace()
|
||||
switch token.typ {
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
name = s
|
||||
default:
|
||||
t.unexpected(token, "template invocation")
|
||||
}
|
||||
var pipe *PipeNode
|
||||
if t.nextNonSpace().typ != itemRightDelim {
|
||||
t.backup()
|
||||
// Do not pop variables; they persist until "end".
|
||||
pipe = t.pipeline("template")
|
||||
}
|
||||
return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
|
||||
}
|
||||
|
||||
// command:
|
||||
// operand (space operand)*
|
||||
// space-separated arguments up to a pipeline character or right delimiter.
|
||||
// we consume the pipe character but leave the right delim to terminate the action.
|
||||
func (t *Tree) command() *CommandNode {
|
||||
cmd := t.newCommand(t.peekNonSpace().pos)
|
||||
for {
|
||||
t.peekNonSpace() // skip leading spaces.
|
||||
operand := t.operand()
|
||||
if operand != nil {
|
||||
cmd.append(operand)
|
||||
}
|
||||
switch token := t.next(); token.typ {
|
||||
case itemSpace:
|
||||
continue
|
||||
case itemError:
|
||||
t.errorf("%s", token.val)
|
||||
case itemRightDelim, itemRightParen:
|
||||
t.backup()
|
||||
case itemPipe:
|
||||
default:
|
||||
t.errorf("unexpected %s in operand; missing space?", token)
|
||||
}
|
||||
break
|
||||
}
|
||||
if len(cmd.Args) == 0 {
|
||||
t.errorf("empty command")
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
// operand:
|
||||
// term .Field*
|
||||
// An operand is a space-separated component of a command,
|
||||
// a term possibly followed by field accesses.
|
||||
// A nil return means the next item is not an operand.
|
||||
func (t *Tree) operand() Node {
|
||||
node := t.term()
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
if t.peek().typ == itemField {
|
||||
chain := t.newChain(t.peek().pos, node)
|
||||
for t.peek().typ == itemField {
|
||||
chain.Add(t.next().val)
|
||||
}
|
||||
// Compatibility with original API: If the term is of type NodeField
|
||||
// or NodeVariable, just put more fields on the original.
|
||||
// Otherwise, keep the Chain node.
|
||||
// TODO: Switch to Chains always when we can.
|
||||
switch node.Type() {
|
||||
case NodeField:
|
||||
node = t.newField(chain.Position(), chain.String())
|
||||
case NodeVariable:
|
||||
node = t.newVariable(chain.Position(), chain.String())
|
||||
default:
|
||||
node = chain
|
||||
}
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// term:
|
||||
// literal (number, string, nil, boolean)
|
||||
// function (identifier)
|
||||
// .
|
||||
// .Field
|
||||
// $
|
||||
// '(' pipeline ')'
|
||||
// A term is a simple "expression".
|
||||
// A nil return means the next item is not a term.
|
||||
func (t *Tree) term() Node {
|
||||
switch token := t.nextNonSpace(); token.typ {
|
||||
case itemError:
|
||||
t.errorf("%s", token.val)
|
||||
case itemIdentifier:
|
||||
if !t.hasFunction(token.val) {
|
||||
t.errorf("function %q not defined", token.val)
|
||||
}
|
||||
return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
|
||||
case itemDot:
|
||||
return t.newDot(token.pos)
|
||||
case itemNil:
|
||||
return t.newNil(token.pos)
|
||||
case itemVariable:
|
||||
return t.useVar(token.pos, token.val)
|
||||
case itemField:
|
||||
return t.newField(token.pos, token.val)
|
||||
case itemBool:
|
||||
return t.newBool(token.pos, token.val == "true")
|
||||
case itemCharConstant, itemComplex, itemNumber:
|
||||
number, err := t.newNumber(token.pos, token.val, token.typ)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return number
|
||||
case itemLeftParen:
|
||||
pipe := t.pipeline("parenthesized pipeline")
|
||||
if token := t.next(); token.typ != itemRightParen {
|
||||
t.errorf("unclosed right paren: unexpected %s", token)
|
||||
}
|
||||
return pipe
|
||||
case itemString, itemRawString:
|
||||
s, err := strconv.Unquote(token.val)
|
||||
if err != nil {
|
||||
t.error(err)
|
||||
}
|
||||
return t.newString(token.pos, token.val, s)
|
||||
}
|
||||
t.backup()
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasFunction reports if a function name exists in the Tree's maps.
|
||||
func (t *Tree) hasFunction(name string) bool {
|
||||
for _, funcMap := range t.funcs {
|
||||
if funcMap == nil {
|
||||
continue
|
||||
}
|
||||
if funcMap[name] != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// popVars trims the variable list to the specified length
|
||||
func (t *Tree) popVars(n int) {
|
||||
t.vars = t.vars[:n]
|
||||
}
|
||||
|
||||
// useVar returns a node for a variable reference. It errors if the
|
||||
// variable is not defined.
|
||||
func (t *Tree) useVar(pos Pos, name string) Node {
|
||||
v := t.newVariable(pos, name)
|
||||
for _, varName := range t.vars {
|
||||
if varName == v.Ident[0] {
|
||||
return v
|
||||
}
|
||||
}
|
||||
t.errorf("undefined variable %q", v.Ident[0])
|
||||
return nil
|
||||
}
|
||||
218  vendor/github.com/alecthomas/template/template.go  generated  vendored
@@ -1,218 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package template

import (
    "fmt"
    "reflect"

    "github.com/alecthomas/template/parse"
)

// common holds the information shared by related templates.
type common struct {
    tmpl map[string]*Template
    // We use two maps, one for parsing and one for execution.
    // This separation makes the API cleaner since it doesn't
    // expose reflection to the client.
    parseFuncs FuncMap
    execFuncs  map[string]reflect.Value
}

// Template is the representation of a parsed template. The *parse.Tree
// field is exported only for use by html/template and should be treated
// as unexported by all other clients.
type Template struct {
    name string
    *parse.Tree
    *common
    leftDelim  string
    rightDelim string
}

// New allocates a new template with the given name.
func New(name string) *Template {
    return &Template{
        name: name,
    }
}

// Name returns the name of the template.
func (t *Template) Name() string {
    return t.name
}

// New allocates a new template associated with the given one and with the same
// delimiters. The association, which is transitive, allows one template to
// invoke another with a {{template}} action.
func (t *Template) New(name string) *Template {
    t.init()
    return &Template{
        name:       name,
        common:     t.common,
        leftDelim:  t.leftDelim,
        rightDelim: t.rightDelim,
    }
}

func (t *Template) init() {
    if t.common == nil {
        t.common = new(common)
        t.tmpl = make(map[string]*Template)
        t.parseFuncs = make(FuncMap)
        t.execFuncs = make(map[string]reflect.Value)
    }
}

// Clone returns a duplicate of the template, including all associated
// templates. The actual representation is not copied, but the name space of
// associated templates is, so further calls to Parse in the copy will add
// templates to the copy but not to the original. Clone can be used to prepare
// common templates and use them with variant definitions for other templates
// by adding the variants after the clone is made.
func (t *Template) Clone() (*Template, error) {
    nt := t.copy(nil)
    nt.init()
    nt.tmpl[t.name] = nt
    for k, v := range t.tmpl {
        if k == t.name { // Already installed.
            continue
        }
        // The associated templates share nt's common structure.
        tmpl := v.copy(nt.common)
        nt.tmpl[k] = tmpl
    }
    for k, v := range t.parseFuncs {
        nt.parseFuncs[k] = v
    }
    for k, v := range t.execFuncs {
        nt.execFuncs[k] = v
    }
    return nt, nil
}

// copy returns a shallow copy of t, with common set to the argument.
func (t *Template) copy(c *common) *Template {
    nt := New(t.name)
    nt.Tree = t.Tree
    nt.common = c
    nt.leftDelim = t.leftDelim
    nt.rightDelim = t.rightDelim
    return nt
}

// AddParseTree creates a new template with the name and parse tree
// and associates it with t.
func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
    if t.common != nil && t.tmpl[name] != nil {
        return nil, fmt.Errorf("template: redefinition of template %q", name)
    }
    nt := t.New(name)
    nt.Tree = tree
    t.tmpl[name] = nt
    return nt, nil
}

// Templates returns a slice of the templates associated with t, including t
// itself.
func (t *Template) Templates() []*Template {
    if t.common == nil {
        return nil
    }
    // Return a slice so we don't expose the map.
    m := make([]*Template, 0, len(t.tmpl))
    for _, v := range t.tmpl {
        m = append(m, v)
    }
    return m
}

// Delims sets the action delimiters to the specified strings, to be used in
// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
// definitions will inherit the settings. An empty delimiter stands for the
// corresponding default: {{ or }}.
// The return value is the template, so calls can be chained.
func (t *Template) Delims(left, right string) *Template {
    t.leftDelim = left
    t.rightDelim = right
    return t
}

// Funcs adds the elements of the argument map to the template's function map.
// It panics if a value in the map is not a function with appropriate return
// type. However, it is legal to overwrite elements of the map. The return
// value is the template, so calls can be chained.
func (t *Template) Funcs(funcMap FuncMap) *Template {
    t.init()
    addValueFuncs(t.execFuncs, funcMap)
    addFuncs(t.parseFuncs, funcMap)
    return t
}

// Lookup returns the template with the given name that is associated with t,
// or nil if there is no such template.
func (t *Template) Lookup(name string) *Template {
    if t.common == nil {
        return nil
    }
    return t.tmpl[name]
}

// Parse parses a string into a template. Nested template definitions will be
// associated with the top-level template t. Parse may be called multiple times
// to parse definitions of templates to associate with t. It is an error if a
// resulting template is non-empty (contains content other than template
// definitions) and would replace a non-empty template with the same name.
// (In multiple calls to Parse with the same receiver template, only one call
// can contain text other than space, comments, and template definitions.)
func (t *Template) Parse(text string) (*Template, error) {
    t.init()
    trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
    if err != nil {
        return nil, err
    }
    // Add the newly parsed trees, including the one for t, into our common structure.
    for name, tree := range trees {
        // If the name we parsed is the name of this template, overwrite this template.
        // The associate method checks it's not a redefinition.
        tmpl := t
        if name != t.name {
            tmpl = t.New(name)
        }
        // Even if t == tmpl, we need to install it in the common.tmpl map.
        if replace, err := t.associate(tmpl, tree); err != nil {
            return nil, err
        } else if replace {
            tmpl.Tree = tree
        }
        tmpl.leftDelim = t.leftDelim
        tmpl.rightDelim = t.rightDelim
    }
    return t, nil
}

// associate installs the new template into the group of templates associated
// with t. It is an error to reuse a name except to overwrite an empty
// template. The two are already known to share the common structure.
// The boolean return value reports whether to store this tree as t.Tree.
func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
    if new.common != t.common {
        panic("internal error: associate not common")
    }
    name := new.name
    if old := t.tmpl[name]; old != nil {
        oldIsEmpty := parse.IsEmptyTree(old.Root)
        newIsEmpty := parse.IsEmptyTree(tree.Root)
        if newIsEmpty {
            // Whether old is empty or not, new is empty; no reason to replace old.
            return false, nil
        }
        if !oldIsEmpty {
            return false, fmt.Errorf("template: redefinition of template %q", name)
        }
    }
    t.tmpl[name] = new
    return true, nil
}
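The doc comments in the deleted template.go above describe a text/template-compatible API: New allocates a template, Funcs registers helper functions before parsing, Parse associates nested {{define}} blocks with the top-level template, and Lookup finds them by name. As orientation only, a minimal usage sketch of that API follows; the template text, the "upper" function name, and the data value are invented for illustration and are not part of this diff.

```go
package main

import (
	"os"
	"strings"

	"github.com/alecthomas/template"
)

func main() {
	// Funcs is called before Parse so the parser accepts the custom
	// "upper" function; Parse then associates the nested {{define}}
	// with the top-level "page" template, as its doc comment describes.
	tmpl, err := template.New("page").
		Funcs(template.FuncMap{"upper": strings.ToUpper}).
		Parse(`{{define "greet"}}Hello, {{upper .}}!{{end}}{{template "greet" .}}`)
	if err != nil {
		panic(err)
	}

	// The nested definition is reachable through the shared namespace.
	if tmpl.Lookup("greet") == nil {
		panic("nested template was not associated")
	}

	// Prints: Hello, WINDOWS!
	if err := tmpl.Execute(os.Stdout, "windows"); err != nil {
		panic(err)
	}
}
```

Per the Clone doc comment, the same API supports preparing a base template whose {{define}} blocks are left empty, cloning it, and adding the variant definitions afterwards; associate only rejects a redefinition when both the old and the new trees are non-empty.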
19
vendor/github.com/alecthomas/units/COPYING
generated
vendored
@@ -1,19 +0,0 @@
Copyright (C) 2014 Alec Thomas

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
11
vendor/github.com/alecthomas/units/README.md
generated
vendored
@@ -1,11 +0,0 @@
# Units - Helpful unit multipliers and functions for Go

The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package.

It allows for code like this:

```go
n, err := ParseBase2Bytes("1KB")
// n == 1024
n = units.Mebibyte * 512
```
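The README snippet above is not self-contained (the first call omits the units package qualifier that the last line uses). A complete, runnable sketch of the same idea, assuming only the vendored github.com/alecthomas/units package:

```go
package main

import (
	"fmt"

	"github.com/alecthomas/units"
)

func main() {
	// Base-2 parsing: "1KB" is interpreted as 1024 bytes.
	n, err := units.ParseBase2Bytes("1KB")
	if err != nil {
		panic(err)
	}
	fmt.Println(int64(n)) // 1024

	// Multipliers compose arithmetically, much like time.Duration.
	half := units.Mebibyte * 512
	fmt.Println(half) // 512MiB
}
```

As the README states, the package mirrors the time package: sizes are typed integer multipliers, so plain arithmetic such as units.Mebibyte * 512 works directly.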
Some files were not shown because too many files have changed in this diff.